├── README.md ├── helm ├── init.sh └── min-cluster-allinone │ ├── apollo-min │ └── readme.MD │ ├── consul-min │ ├── consul-min-pv-local.yaml │ └── readme.MD │ ├── es-min │ ├── backup │ │ ├── loadbalance-es-min-c0.yaml │ │ ├── pv-nfs-es-min-c0-data.yaml │ │ ├── pv-nfs-es-min-c0-ingest.yaml │ │ ├── pv-nfs-es-min-c0-master.yaml │ │ ├── service-es-min-c0.yaml │ │ └── service-nodeport-es-min-c0-ingest.yaml │ ├── es-min-data-storageclass-local.yaml │ ├── es-min-data0-pv-local.yaml │ ├── es-min-ingest-storageclass-local.yaml │ ├── es-min-ingest0-pv-local.yaml │ ├── es-min-master-storageclass-local.yaml │ ├── es-min-master0-pv-local.yaml │ └── readme.MD │ ├── kibana-min │ └── readme.MD │ ├── min-cluster-redis │ ├── pv-local-redis-min-c0-master.yaml │ ├── pv-local-redis-min-c0-slave.yaml │ ├── storageclass-local-redis-min-c0-master.yaml │ └── storageclass-local-redis-min-c0-slave.yaml │ ├── mysql-min │ ├── mysql-min-pv-local.yaml │ ├── mysql-min-storageclass-local.yaml │ └── readme.MD │ ├── nginx-ingress-min │ ├── ingress-es-min-c0-kibana.yaml │ ├── ingress-es-min-c0.yaml │ ├── readme.MD │ ├── service-ingress.yaml.backup │ └── values.yaml │ ├── prometheus-min │ └── prometheus-min-pv-local.yaml │ ├── redis-ha-min │ ├── readme.MD │ ├── redis-ha-min-pv-local.yaml │ └── test.yaml │ └── skywalking-min │ └── readme.MD ├── images ├── K8S.png └── 千里行走.jpg ├── other ├── elk │ └── pro-cluster-demo │ │ ├── Readme..txt │ │ ├── ingress-es-c1-skywalking-kibana.yaml │ │ ├── ingress-es-c1-skywalking.yaml │ │ ├── pv-es-c1-skywalking-data.yaml │ │ ├── pv-es-c1-skywalking-ingest.yaml │ │ └── pv-es-c1-skywalking-master.yaml └── jenkins │ └── jenkins-deployment.yaml ├── product ├── init.sh └── standard │ ├── apollo-pro │ ├── README.md │ ├── apollo-adminservice │ │ ├── apollo-adminservice-configmap.yaml │ │ ├── apollo-adminservice-deployment.yaml │ │ └── apollo-adminservice-service.yaml │ ├── apollo-configservice-transition │ │ ├── apollo-configservice-transition-ingress.yaml │ │ └── apollo-configservice-transition-service.yaml │ ├── apollo-configservice │ │ ├── apollo-configservice-configmap.yaml │ │ ├── apollo-configservice-service.yaml │ │ └── apollo-configservice-statefulset.yaml │ └── apollo-portal │ │ ├── apollo-portal-configmap.yaml │ │ ├── apollo-portal-deployment.yaml │ │ ├── apollo-portal-ingress.yaml │ │ └── apollo-portal-service.yaml │ ├── apollo-skywalking-pro │ ├── README.md │ ├── apollo-adminservice │ │ ├── apollo-adminservice-configmap.yaml │ │ ├── apollo-adminservice-deployment.yaml │ │ └── apollo-adminservice-service.yaml │ ├── apollo-configservice-transition │ │ ├── apollo-configservice-transition-ingress.yaml │ │ └── apollo-configservice-transition-service.yaml │ ├── apollo-configservice │ │ ├── apollo-configservice-configmap.yaml │ │ ├── apollo-configservice-service.yaml │ │ └── apollo-configservice-statefulset.yaml │ ├── apollo-namespace.yaml │ ├── apollo-portal │ │ ├── apollo-portal-configmap.yaml │ │ ├── apollo-portal-deployment.yaml │ │ ├── apollo-portal-ingress.yaml │ │ └── apollo-portal-service.yaml │ └── images │ │ └── apollo-skywalking.png │ ├── cicd-pro │ ├── gitlab-pro │ │ ├── .gitlab-rc.yml.swp │ │ ├── deploy.sh │ │ ├── gitlab-rc.yml │ │ ├── gitlab-svc.yml │ │ ├── ingress-nginx-gitlab.yaml │ │ ├── postgresql-rc.yml │ │ ├── postgresql-svc.yml │ │ ├── readme.Md │ │ ├── redis-rc.yml │ │ ├── redis-svc.yml │ │ └── teardown.sh │ ├── jenkins-pro │ │ ├── ingress-nginx-jenkins.yaml │ │ ├── jenkins_deployment.yaml │ │ ├── jenkins_pv.yaml │ │ ├── jenkins_rbac.yaml │ │ └── jenkins_svc.yaml │ └── 
nexus-pro │ │ ├── deploy.sh │ │ ├── ingress-nginx-nexus.yaml │ │ └── repo-nexus-ns.yaml │ ├── elasticsearch-pro │ ├── README.md │ ├── es-c0-data-pv.yaml │ ├── es-c0-data-service.yaml │ ├── es-c0-data-statefulset.yaml │ ├── es-c0-ingest-deployment.yaml │ ├── es-c0-ingest-service.yaml │ ├── es-c0-kibana-deployment.yaml │ ├── es-c0-kibana-ingerss.yaml │ ├── es-c0-kibana-service.yaml │ ├── es-c0-master-deployment.yaml │ ├── es-c0-master-service.yaml │ └── es-c0-namespace.yaml │ ├── grafana-prometheus-pro │ ├── README.md │ ├── alertmanager │ │ ├── .alertmanager-for-middleware-dingtalk.yaml.swo │ │ ├── alertmanager-configmap.yaml │ │ ├── alertmanager-deployment.yaml │ │ ├── alertmanager-pv.yaml │ │ ├── alertmanager-pvc.yaml │ │ ├── alertmanager-service.yaml │ │ └── backup │ │ │ └── alertmanager-dingtalk.yaml.backup │ ├── exporter-mq-rocketmq │ │ ├── README.md │ │ ├── backup │ │ │ └── python-exporter-deployment-mq-rocketmq-c4.yaml │ │ ├── exporter-deployment-mq-rocketmq-c4.yaml │ │ ├── go-exporter-deployment-mq-rocketmq-c4.yaml │ │ └── images │ │ │ ├── mesage-unconsumed-count.jpg │ │ │ ├── rocketmq-first-resend-message-result.jpg │ │ │ └── rocketmq-first-send-message-result.jpg │ ├── exporter-redis │ │ └── exporter-deployment-redis-cluster.yaml │ ├── exporter-storage-mysql │ │ └── exporter-deployment-storage-mysql-apollo.yaml │ ├── grafana-prometheus-image-repo-secret.yaml │ ├── grafana-prometheus-namespace.yaml │ ├── grafana │ │ ├── grafana-ingress.yaml │ │ ├── grafana-pv.yaml │ │ ├── grafana-pvc.yaml │ │ ├── grafana-statefulset.yaml │ │ ├── grafana-svc.yaml │ │ └── provisioning │ │ │ ├── dashboards │ │ │ ├── kubernetes │ │ │ │ ├── Kubernetes _ API server.json │ │ │ │ ├── Kubernetes _ Compute Resources _ Cluster.json │ │ │ │ ├── Kubernetes _ Compute Resources _ Namespace (Pods).json │ │ │ │ ├── Kubernetes _ Compute Resources _ Namespace (Workloads).json │ │ │ │ ├── Kubernetes _ Compute Resources _ Node (Pods).json │ │ │ │ ├── Kubernetes _ Compute Resources _ Pod.json │ │ │ │ ├── Kubernetes _ Compute Resources _ Workload.json │ │ │ │ ├── Kubernetes _ Controller Manager.json │ │ │ │ ├── Kubernetes _ Kubelet.json │ │ │ │ ├── Kubernetes _ Networking _ Cluster.json │ │ │ │ ├── Kubernetes _ Networking _ Namespace (Pods).json │ │ │ │ ├── Kubernetes _ Networking _ Namespace (Workload).json │ │ │ │ ├── Kubernetes _ Networking _ Pod.json │ │ │ │ ├── Kubernetes _ Networking _ Workload.json │ │ │ │ ├── Kubernetes _ Persistent Volumes.json │ │ │ │ ├── Kubernetes _ Pods.json │ │ │ │ ├── Kubernetes _ Proxy.json │ │ │ │ ├── Kubernetes _ Scheduler.json │ │ │ │ ├── Kubernetes _ StatefulSets.json │ │ │ │ ├── Nodes.json │ │ │ │ ├── Prometheus Remote Write.json │ │ │ │ ├── Prometheus.json │ │ │ │ ├── USE Method _ Cluster.json │ │ │ │ └── USE Method _ Node.json │ │ │ ├── mq-rocketmq │ │ │ │ └── Message_Unconsumed_Number.json │ │ │ ├── provision.yml │ │ │ ├── redis │ │ │ │ ├── redis-cluster-dashboard.json │ │ │ │ └── redis-standard-dashboard.json │ │ │ ├── saf-app │ │ │ │ ├── Dashboard Internal Metrics.json │ │ │ │ ├── Dashboard.json │ │ │ │ ├── Incoming _ HTTP _ Drill.json │ │ │ │ ├── Incoming _ HTTP.json │ │ │ │ ├── Incoming _ Motan _ Drill.json │ │ │ │ ├── Incoming _ Motan.json │ │ │ │ ├── Incoming_Dubbo.json │ │ │ │ ├── Internal _ JVM.json │ │ │ │ ├── Internal _ Sentinel Stats.json │ │ │ │ ├── Outcoming_Dubbo.json │ │ │ │ ├── Outgoing _ HTTP _ CHttpBioClient.json │ │ │ │ ├── Outgoing _ HTTP _ HTTP Bio.json │ │ │ │ ├── Outgoing _ Motan _ Drill.json │ │ │ │ ├── Outgoing _ Motan.json │ │ │ │ ├── 
SAF_Incoming_HTTP-1581042488062.json │ │ │ │ ├── SAF_Incoming_HTTP_Detail-1581042545031.json │ │ │ │ ├── SAF_Outgoing_HTTP_HTTPBioClient-1581042564452.json │ │ │ │ ├── SAF_Outgoing_HTTP_HTTPBioClient_Detail-1581042575330.json │ │ │ │ └── SAF_Sentinel_Stat.json │ │ │ └── storage-mysql │ │ │ │ ├── Cross_Server_Graphs.json │ │ │ │ ├── Disk_Performance.json │ │ │ │ ├── Disk_Space.json │ │ │ │ ├── Galera_Graphs.json │ │ │ │ ├── MySQL_InnoDB_Metrics.json │ │ │ │ ├── MySQL_MyISAM_Metrics.json │ │ │ │ ├── MySQL_Overview.json │ │ │ │ ├── MySQL_Performance_Schema.json │ │ │ │ ├── MySQL_Query_Response_Time.json │ │ │ │ ├── MySQL_Replication.json │ │ │ │ ├── MySQL_Table_Statistics.json │ │ │ │ ├── MySQL_User_Statistics.json │ │ │ │ ├── Prometheus.json │ │ │ │ ├── Summary_Dashboard.json │ │ │ │ ├── System_Overview.json │ │ │ │ ├── TokuDB_Metrics.json │ │ │ │ └── Trends_Dashboard.json │ │ │ └── datasources │ │ │ └── datasource.yml │ ├── images │ │ ├── caf-cdn方案.jpg │ │ └── grafana-prometheus生产级实践.jpg │ ├── prometheus-mq-rocketmq │ │ ├── prometheus-mq-rocketmq-configmap.yaml │ │ ├── prometheus-mq-rocketmq-pv.yaml │ │ ├── prometheus-mq-rocketmq-pvc.yaml │ │ ├── prometheus-mq-rocketmq-role.yaml │ │ ├── prometheus-mq-rocketmq-service.yaml │ │ └── prometheus-mq-rocketmq-statefulset.yaml │ ├── prometheus-redis │ │ ├── prometheus-redis-configmap.yaml │ │ ├── prometheus-redis-ingress.yaml │ │ ├── prometheus-redis-pv.yaml │ │ ├── prometheus-redis-pvc.yaml │ │ ├── prometheus-redis-role.yaml │ │ ├── prometheus-redis-service.yaml │ │ └── prometheus-redis-statefulset.yaml │ ├── prometheus-saf │ │ ├── prometheus-saf-configmap.yaml │ │ ├── prometheus-saf-ingress.yaml │ │ ├── prometheus-saf-pv.yaml │ │ ├── prometheus-saf-pvc.yaml │ │ ├── prometheus-saf-role.yaml │ │ ├── prometheus-saf-service.yaml │ │ └── prometheus-saf-statefulset.yaml │ ├── prometheus-storage-mysql │ │ ├── prometheus-storage-mysql-configmap.yaml │ │ ├── prometheus-storage-mysql-ingress.yaml │ │ ├── prometheus-storage-mysql-pv.yaml │ │ ├── prometheus-storage-mysql-pvc.yaml │ │ ├── prometheus-storage-mysql-role.yaml │ │ ├── prometheus-storage-mysql-service.yaml │ │ └── prometheus-storage-mysql-statefulset.yaml │ └── prometheus-traefik │ │ ├── prometheus-traefik-configmap.yaml │ │ ├── prometheus-traefik-ingress.yaml │ │ ├── prometheus-traefik-pv.yaml │ │ ├── prometheus-traefik-pvc.yaml │ │ ├── prometheus-traefik-role.yaml │ │ ├── prometheus-traefik-service.yaml │ │ └── prometheus-traefik-statefulset.yaml │ ├── kube-prometheus-pro │ └── kube-prometheus-pro-0.3.0 │ │ ├── README.md │ │ ├── init.sh │ │ └── manifests │ │ ├── alertmanager-alertmanager.yaml │ │ ├── alertmanager-secret.yaml │ │ ├── alertmanager-service.yaml │ │ ├── alertmanager-serviceAccount.yaml │ │ ├── alertmanager-serviceMonitor.yaml │ │ ├── custom_by_hepy │ │ ├── alertmanager.yaml │ │ ├── dingtalk-webhook.yaml │ │ ├── grafana-ingress.yaml │ │ ├── prometheus-k8s-ingress.yaml │ │ └── prometheus-k8s-pv.yaml │ │ ├── grafana-dashboardDatasources.yaml │ │ ├── grafana-dashboardDefinitions.yaml │ │ ├── grafana-dashboardSources.yaml │ │ ├── grafana-deployment.yaml │ │ ├── grafana-service.yaml │ │ ├── grafana-serviceAccount.yaml │ │ ├── grafana-serviceMonitor.yaml │ │ ├── kube-state-metrics-clusterRole.yaml │ │ ├── kube-state-metrics-clusterRoleBinding.yaml │ │ ├── kube-state-metrics-deployment.yaml │ │ ├── kube-state-metrics-role.yaml │ │ ├── kube-state-metrics-roleBinding.yaml │ │ ├── kube-state-metrics-service.yaml │ │ ├── kube-state-metrics-serviceAccount.yaml │ │ ├── 
kube-state-metrics-serviceMonitor.yaml │ │ ├── node-exporter-clusterRole.yaml │ │ ├── node-exporter-clusterRoleBinding.yaml │ │ ├── node-exporter-daemonset.yaml │ │ ├── node-exporter-service.yaml │ │ ├── node-exporter-serviceAccount.yaml │ │ ├── node-exporter-serviceMonitor.yaml │ │ ├── prometheus-adapter-apiService.yaml │ │ ├── prometheus-adapter-clusterRole.yaml │ │ ├── prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml │ │ ├── prometheus-adapter-clusterRoleBinding.yaml │ │ ├── prometheus-adapter-clusterRoleBindingDelegator.yaml │ │ ├── prometheus-adapter-clusterRoleServerResources.yaml │ │ ├── prometheus-adapter-configMap.yaml │ │ ├── prometheus-adapter-deployment.yaml │ │ ├── prometheus-adapter-roleBindingAuthReader.yaml │ │ ├── prometheus-adapter-service.yaml │ │ ├── prometheus-adapter-serviceAccount.yaml │ │ ├── prometheus-clusterRole.yaml │ │ ├── prometheus-clusterRoleBinding.yaml │ │ ├── prometheus-operator-serviceMonitor.yaml │ │ ├── prometheus-prometheus.yaml │ │ ├── prometheus-roleBindingConfig.yaml │ │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ │ ├── prometheus-roleConfig.yaml │ │ ├── prometheus-roleSpecificNamespaces.yaml │ │ ├── prometheus-rules.yaml │ │ ├── prometheus-service.yaml │ │ ├── prometheus-serviceAccount.yaml │ │ ├── prometheus-serviceMonitor.yaml │ │ ├── prometheus-serviceMonitorApiserver.yaml │ │ ├── prometheus-serviceMonitorCoreDNS.yaml │ │ ├── prometheus-serviceMonitorKubeControllerManager.yaml │ │ ├── prometheus-serviceMonitorKubeScheduler.yaml │ │ ├── prometheus-serviceMonitorKubelet.yaml │ │ └── setup │ │ ├── 0namespace-namespace.yaml │ │ ├── prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0podmonitorCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0prometheusCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ │ ├── prometheus-operator-clusterRole.yaml │ │ ├── prometheus-operator-clusterRoleBinding.yaml │ │ ├── prometheus-operator-deployment.yaml │ │ ├── prometheus-operator-service.yaml │ │ └── prometheus-operator-serviceAccount.yaml │ ├── kube-prometheus │ ├── alertmanager │ │ ├── alertmanager-alertmanager.yaml │ │ ├── alertmanager-secret.yaml │ │ ├── alertmanager-service.yaml │ │ ├── alertmanager-serviceAccount.yaml │ │ └── alertmanager-serviceMonitor.yaml │ └── prometheus-operator │ │ ├── 0namespace-namespace.yaml │ │ ├── prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0podmonitorCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0prometheusCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ │ ├── prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ │ ├── prometheus-operator-clusterRole.yaml │ │ ├── prometheus-operator-clusterRoleBinding.yaml │ │ ├── prometheus-operator-deployment.yaml │ │ ├── prometheus-operator-service.yaml │ │ └── prometheus-operator-serviceAccount.yaml │ ├── loadbalance-pro │ └── cdn-release-web-prod-loadbalance-slb.yaml │ ├── rocketmq-pro │ ├── backup │ │ └── dledger │ │ │ ├── broker-pv.yaml │ │ │ ├── namesrv-pv.yaml │ │ │ ├── rocketmq-c3-broker-dledger-a-configmap.yml │ │ │ ├── rocketmq-c3-broker-dledger-a-statefulset.yml │ │ │ ├── rocketmq-c3-namesrv-service.yml │ │ │ └── rocketmq-c3-namesrv-statefulset.yml │ └── rocketmq-ms-cluster-pro │ │ ├── README.md │ │ ├── images │ │ └── rocketmq-console.png │ │ ├── 
rocketmq-broker-configmap.yaml │ │ ├── rocketmq-broker-pv.yaml │ │ ├── rocketmq-broker-statefulset.yaml │ │ ├── rocketmq-console-ingress.yaml │ │ ├── rocketmq-console-pv.yaml │ │ ├── rocketmq-console-pvc.yaml │ │ ├── rocketmq-console-service.yaml │ │ ├── rocketmq-console-statefulset.yaml │ │ ├── rocketmq-namespace.yaml │ │ ├── rocketmq-namesrv-pv.yaml │ │ ├── rocketmq-namesrv-service.yaml │ │ └── rocketmq-namesrv-statefulset.yaml │ ├── sentinel-dashboard-pro │ ├── ingress-nginx-sentinel-dashboard.yaml │ ├── sentinel-dashboard-configmap.yaml │ ├── sentinel-dashboard-service.yaml │ ├── sentinel-dashboard-statefulset.yaml │ └── sentinel-namespace.yaml │ ├── skywalking-pro │ ├── README.md │ ├── nohup.out │ ├── skywalking-1:链路skywalking-3.2.6性能压测与报告.doc │ ├── skywalking-c0-configmap.yaml │ ├── skywalking-c0-namespace.yaml │ ├── skywalking-c0-oap-deployment.yaml │ ├── skywalking-c0-oap-service.yaml │ ├── skywalking-c0-ui-deployment.yaml │ ├── skywalking-c0-ui-ingress.yaml │ └── skywalking-c0-ui-service.yaml │ └── traefik-pro │ ├── 01_config.yml │ ├── 02_cluster-role.yml │ ├── 03_CRD.yml │ ├── 03_middleware.yml │ ├── 04_service.yml │ ├── 05_deployment.yml │ ├── 06_ingress-routes.yml │ ├── create-namespace.sh │ ├── ingress.yaml │ ├── traefik-pv.yaml │ └── traefik-pvc.yaml ├── wayne └── min-cluster-allinone │ └── apollo-min │ ├── deploy.sh │ ├── readme.MD │ ├── service-apollo-admin-server-dev.yaml │ ├── service-apollo-config-server-dev.yaml │ └── service-apollo-portal-server.yaml └── yaml ├── init.sh └── min-cluster-allinone ├── apollo-min ├── apollo-configservice-transition-ingress-dev.yaml ├── apollo-configservice-transition-service-dev.yaml ├── demo-microservice │ └── prod-configservice.yaml ├── deploy.sh ├── readme.MD ├── service-apollo-admin-server-dev.yaml ├── service-apollo-config-server-dev.yaml └── service-apollo-portal-server.yaml ├── consul-min ├── consul-min-pv-local.yaml ├── consul-min-service.yaml └── readme.MD ├── dubbo-admin ├── README.MD ├── dubbo-admin-deployment.yaml ├── dubbo-admin-service.yaml └── ingress-nginx-dubbo-admin.yaml ├── es-min ├── deploy.sh ├── es-min-data-statefulset.yaml ├── es-min-data-storageclass-local.yaml ├── es-min-data0-pv-local.yaml ├── es-min-ingest-statefulset.yaml ├── es-min-ingest-storageclass-local.yaml ├── es-min-ingest0-pv-local.yaml ├── es-min-master-statefulset.yaml ├── es-min-master-storageclass-local.yaml ├── es-min-master0-pv-local.yaml ├── es-min-pvc.yaml ├── es-min-service.yaml └── readme.MD ├── gitlab ├── backup │ ├── gitlab-rc.yml │ ├── gitlab-svc.yml │ ├── postgresql-rc.yml │ ├── postgresql-svc.yml │ ├── redis-rc.yml │ └── redis-svc.yml ├── deploy.sh ├── gitlab-rc.yml ├── gitlab-svc.yml ├── postgresql-rc.yml ├── postgresql-svc.yml ├── readme.Md ├── redis-rc.yml ├── redis-svc.yml └── teardown.sh ├── ingress-nginx-min ├── backup │ ├── deploy.sh │ ├── nginx-ingress-min-backend-deployment.yaml │ ├── nginx-ingress-min-backend-service.yaml │ ├── nginx-ingress-min-clusterrole.yaml │ ├── nginx-ingress-min-clusterrolebinding.yaml │ ├── nginx-ingress-min-configmap.yaml │ ├── nginx-ingress-min-controller-deployment.yaml │ ├── nginx-ingress-min-controller-service.yaml │ ├── nginx-ingress-min-role.yaml │ ├── nginx-ingress-min-rolebinding.yaml │ └── nginx-ingress-min-serviceaccount.yaml ├── deploy.sh ├── ingress-nginx-backend-deployment.yaml ├── ingress-nginx-backend-service.yaml ├── ingress-nginx-deployoment.yaml ├── ingress-nginx-service.yaml ├── proxy │ ├── deploy.sh │ ├── ingress-nginx-apollo-config.yaml │ ├── ingress-nginx-apollo-portal.yaml │ ├── 
ingress-nginx-consul.yaml │ ├── ingress-nginx-es-min-kibana.yaml │ ├── ingress-nginx-es-min.yaml │ ├── ingress-nginx-gitlab.yaml │ ├── ingress-nginx-grafana-k8s.yaml │ ├── ingress-nginx-jenkins.yaml │ ├── ingress-nginx-nexus.yaml │ ├── ingress-nginx-prometheus-k8s.yaml │ ├── ingress-nginx-rocketmq-min-c0-console.yaml │ ├── ingress-nginx-skywalking-ui.yaml │ └── ingress-nginx-wayne.yaml └── readme.MD ├── istio ├── istio-1.4.2 │ ├── istio-demo-grafana-ingress.yaml │ ├── istio-demo-jaeger-ingress.yaml │ └── istio-demo-kiali-ingress.yaml └── istio-1.8.3 │ ├── istio-demo-grafana-ingress.yaml │ ├── istio-demo-jaeger-ingress.yaml │ ├── istio-demo-kiali-ingress.yaml │ ├── istio-demo-prometheus-ingress.yaml │ └── istio-demo-zipkin-ingress.yaml ├── jenkins ├── deploy.sh ├── jenkins-deployment.yaml ├── jenkins-slave-pv-local.yaml ├── jenkins-slave-pvc.yaml └── readme.MD ├── kafka-min ├── kafka-deployment.yaml ├── kafka-namespace.yaml └── kafka-service.yaml ├── kibana-min ├── backup │ ├── kibana-min-deployment.yaml │ ├── kibana-min-service.yaml │ └── readme.MD ├── deploy.sh ├── kibana-deployment.yaml └── kibana-service.yaml ├── kube-prometheus ├── backup │ ├── prometheus-min-alertmanager.yaml │ ├── prometheus-min-clusterrolebinding.yaml │ ├── prometheus-min-configmap.yaml │ ├── prometheus-min-kube-state-metrics.yaml │ ├── prometheus-min-node-exporter.yaml │ ├── prometheus-min-pushgateway.yaml │ ├── prometheus-min-pv-local.yaml │ ├── prometheus-min-pvc.yaml │ ├── prometheus-min-server.yaml │ ├── prometheus-min-service.yaml │ ├── prometheus-min-serviceaccount.yaml │ └── prometheus-min-serviceaccount.yaml.backup ├── deploy.sh ├── kube-prometheus-manifests │ ├── 00namespace-namespace.yaml │ ├── 0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ ├── 0prometheus-operator-0prometheusCustomResourceDefinition.yaml │ ├── 0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ ├── 0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ ├── 0prometheus-operator-clusterRole.yaml │ ├── 0prometheus-operator-clusterRoleBinding.yaml │ ├── 0prometheus-operator-deployment.yaml │ ├── 0prometheus-operator-service.yaml │ ├── 0prometheus-operator-serviceAccount.yaml │ ├── 0prometheus-operator-serviceMonitor.yaml │ ├── alertmanager-alertmanager.yaml │ ├── alertmanager-secret.yaml │ ├── alertmanager-service.yaml │ ├── alertmanager-serviceAccount.yaml │ ├── alertmanager-serviceMonitor.yaml │ ├── grafana-dashboardDatasources.yaml │ ├── grafana-dashboardDefinitions.yaml │ ├── grafana-dashboardSources.yaml │ ├── grafana-deployment.yaml │ ├── grafana-service.yaml │ ├── grafana-serviceAccount.yaml │ ├── grafana-serviceMonitor.yaml │ ├── kube-state-metrics-clusterRole.yaml │ ├── kube-state-metrics-clusterRoleBinding.yaml │ ├── kube-state-metrics-deployment.yaml │ ├── kube-state-metrics-role.yaml │ ├── kube-state-metrics-roleBinding.yaml │ ├── kube-state-metrics-service.yaml │ ├── kube-state-metrics-serviceAccount.yaml │ ├── kube-state-metrics-serviceMonitor.yaml │ ├── node-exporter-clusterRole.yaml │ ├── node-exporter-clusterRoleBinding.yaml │ ├── node-exporter-daemonset.yaml │ ├── node-exporter-service.yaml │ ├── node-exporter-serviceAccount.yaml │ ├── node-exporter-serviceMonitor.yaml │ ├── prometheus-adapter-apiService.yaml │ ├── prometheus-adapter-clusterRole.yaml │ ├── prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml │ ├── prometheus-adapter-clusterRoleBinding.yaml │ ├── prometheus-adapter-clusterRoleBindingDelegator.yaml │ ├── prometheus-adapter-clusterRoleServerResources.yaml │ 
├── prometheus-adapter-configMap.yaml │ ├── prometheus-adapter-deployment.yaml │ ├── prometheus-adapter-roleBindingAuthReader.yaml │ ├── prometheus-adapter-service.yaml │ ├── prometheus-adapter-serviceAccount.yaml │ ├── prometheus-clusterRole.yaml │ ├── prometheus-clusterRoleBinding.yaml │ ├── prometheus-prometheus.yaml │ ├── prometheus-roleBindingConfig.yaml │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ ├── prometheus-roleConfig.yaml │ ├── prometheus-roleSpecificNamespaces.yaml │ ├── prometheus-rules.yaml │ ├── prometheus-service.yaml │ ├── prometheus-serviceAccount.yaml │ ├── prometheus-serviceMonitor.yaml │ ├── prometheus-serviceMonitorApiserver.yaml │ ├── prometheus-serviceMonitorCoreDNS.yaml │ ├── prometheus-serviceMonitorKubeControllerManager.yaml │ ├── prometheus-serviceMonitorKubeScheduler.yaml │ └── prometheus-serviceMonitorKubelet.yaml ├── quickstart │ ├── bundle.yaml │ └── deploy.sh └── readme.MD ├── mysql-min ├── deploy.sh ├── mysql-min-deployment.yaml ├── mysql-min-pv-local.yaml ├── mysql-min-pv-nfs.yaml ├── mysql-min-pvc.yaml ├── mysql-min-secret.yaml ├── mysql-min-service.yaml ├── mysql-min-storageclass-local.yaml └── readme.MD ├── nacos ├── create-namespace.sh ├── ingress-nginx-nacos.yaml ├── mysql-master-local.yaml ├── mysql-slave-local.yaml └── nacos-quick-start.yaml ├── nexus ├── deploy.sh └── repo-nexus-ns.yaml ├── nginx ├── conf.d │ ├── dev.apollo-config.future.com.conf │ ├── dev.apollo-portal.future.com.conf │ ├── es-min-admin.future.com.conf │ ├── es-min-kibana.future.com.conf │ ├── gitlab.future.com.conf │ ├── grafana-k8s.future.conf │ ├── jenkins.future.com.conf │ ├── pro-rocketmq-min-c0.console.future.com.conf │ ├── prometheus-app.future.conf │ ├── prometheus-k8s.future.conf │ ├── repo-nexus.future.com.conf │ ├── skywalking-ui.future.com.conf │ └── wayne.future.com.conf ├── nginx.conf └── nginx.conf.desc ├── openresty-min ├── openresty-configmap.yaml ├── openresty-deployment.yaml ├── openresty-pv.yaml ├── openresty-pvc.yaml └── openresty-storageclass.yaml ├── redis-cluster-min ├── backup-1 │ ├── deploy.sh │ ├── readme.MD │ ├── redis-cluster-min-namespace.yaml │ ├── redis-cluster-min-pv-local.yaml │ ├── redis-cluster-min-service.yaml │ ├── redis-cluster-min-statefulset.yaml │ └── redis.conf ├── backup-2 │ ├── readme.MD │ ├── redis-cluster-min-configmap.yaml │ ├── redis-cluster-min-master.yaml │ ├── redis-cluster-min-namespace.yaml │ ├── redis-cluster-min-pv-local.yaml │ ├── redis-cluster-min-secret.yaml │ ├── redis-cluster-min-service.yaml │ └── redis-cluster-min-slave.yaml ├── backup-3 │ ├── deploy.sh │ ├── readme.MD │ ├── redis-cluster-min-namespace.yaml │ ├── redis-cluster-min-pv-local.yaml │ ├── redis-cluster-min-service.yaml │ ├── redis-cluster-min-statefulset.yaml │ └── redis.conf ├── deploy.sh ├── readme.MD ├── redis-configmap.yaml ├── redis-pv-local.yaml ├── redis-service.yaml └── redis-statefulset.yaml ├── redis-ha-min ├── deploy.sh ├── redis-ha-min-configmap.yaml ├── redis-ha-min-namespace.yaml ├── redis-ha-min-pv-local.yaml ├── redis-ha-min-serviceAccount.yaml ├── redis-ha-min-statefulset.yaml └── redis-ha-min-svc.yaml ├── rocketmq-min ├── deploy.sh ├── readme.md ├── rocketmq-min-c0-broker-master-prod.yaml ├── rocketmq-min-c0-broker-master0-pv-local.yaml ├── rocketmq-min-c0-broker-slave-prod.yaml ├── rocketmq-min-c0-broker-slave0-pv-local.yaml ├── rocketmq-min-c0-console-ng-prod.yaml └── rocketmq-min-c0-namesrv-prod.yaml ├── sentinel-dashboard ├── deploy.sh ├── sentinel-dashboard-deployment.yaml ├── sentinel-dashboard-ingress.yaml └── 
sentinel-dashboard-service.yaml ├── skywalking-min ├── deploy.sh ├── skywalking-min-oap-configmap.yaml ├── skywalking-min-oap-deployment.yaml ├── skywalking-min-oap-namespace.yaml ├── skywalking-min-oap-service.yaml ├── skywalking-min-oap-serviceaccount.yaml ├── skywalking-min-ui-deployment.yaml └── skywalking-min-ui-service.yaml ├── tekton ├── dashboard │ ├── make-and-push-private-image.sh │ ├── tekton-dashboard-ingress.yaml │ └── tekton-dashboard-release.yaml └── pipeline │ ├── pull-docker-image.sh │ ├── pull-from-hub.sh │ ├── push-to-hub.sh │ ├── release.yaml │ ├── release.yaml.origin │ └── retag-docker-image.sh ├── tidb-cluster-min ├── tidb-cluster-pd-pv-local.yaml ├── tidb-cluster-tikv-pv-local.yaml ├── values-tidb-cluster.yaml └── values-tidb-operator.yaml ├── traefik ├── create-namespace.sh ├── tls.crt ├── tls.key ├── traefik-crd.yaml ├── traefik-ingress.yaml ├── traefik-ingressrouter.yaml ├── traefik-rbac.yaml ├── traefik-tlsoption.yaml ├── traefik-traefikservices.yaml └── traefik.yaml ├── wayne-min ├── configmap.yaml ├── deploy-dependency.sh ├── deploy.sh ├── deployment.yaml ├── rabbitmq.yaml └── service.yaml └── zookeeper-min ├── backup ├── zookeeper-min-pv.yaml └── zookeeper-min-statefulset.yaml ├── deploy.sh ├── readme.MD ├── zookeeper-min-pv.yaml └── zookeeper-min-statefulset.yaml /helm/min-cluster-allinone/apollo-min/readme.MD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/helm/min-cluster-allinone/apollo-min/readme.MD -------------------------------------------------------------------------------- /helm/min-cluster-allinone/consul-min/consul-min-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: consul-min-pv-local 5 | namespace: consul 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | persistentVolumeReclaimPolicy: Retain 13 | local: 14 | path: /datavip/k8s-data/consul-min-pv-local 15 | nodeAffinity: 16 | required: 17 | nodeSelectorTerms: 18 | - matchExpressions: 19 | - key: kubernetes.io/hostname 20 | operator: In 21 | values: 22 | - future 23 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/consul-min/readme.MD: -------------------------------------------------------------------------------- 1 | helm install --name consul --namespace=consul stable/consul --set 'Replicas=1,StorageClass=consul-data-pv' 2 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/backup/loadbalance-es-min-c0.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.kubernetes.io/alicloud-loadbalancer-address-type: intranet 6 | service.beta.kubernetes.io/alicloud-loadbalancer-force-override-listeners: "true" 7 | service.beta.kubernetes.io/alicloud-loadbalancer-backend-label: "app=es-min-c0-ingest" 8 | labels: 9 | app: es-min-c0-loadbalance 10 | name: es-min-c0-loadbalance 11 | namespace: elasticsearch 12 | spec: 13 | ports: 14 | - name: http 15 | port: 9200 16 | protocol: TCP 17 | targetPort: 9200 18 | - name: transport 19 | port: 9300 20 | targetPort: 9300 21 | protocol: TCP 22 | selector: 23 | app: es-min-c0-ingest 24 | sessionAffinity: None 25 | type: 
LoadBalancer 26 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/backup/pv-nfs-es-min-c0-data.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-es-min-c0-data0 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: "pv-es-min-c0-data" 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | path: /es/es-min-c0/pv-es-min-c0-data0 15 | server: xx.nas.aliyuncs.com 16 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/backup/pv-nfs-es-min-c0-ingest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-es-min-c0-ingest0 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: "pv-es-min-c0-ingest" 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | path: /es/es-min-c0/pv-es-min-c0-ingest0 15 | server: xx.nas.aliyuncs.com 16 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/backup/pv-nfs-es-min-c0-master.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-es-min-c0-master0 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: "pv-es-min-c0-master" 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | path: /es/es-min-c0/pv-es-min-c0-master0 15 | server: xx.nas.aliyuncs.com 16 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/backup/service-es-min-c0.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: es-min-c0-ingest 5 | namespace: elasticsearch 6 | labels: 7 | app: es-min-c0-ingest-service 8 | annotations: 9 | service.beta.kubernetes.io/alicloud-loadbalancer-address-type: intranet 10 | service.beta.kubernetes.io/alicloud-loadbalancer-force-override-listeners: "true" 11 | spec: 12 | ports: 13 | - name: http 14 | port: 9200 15 | protocol: TCP 16 | targetPort: 9200 17 | - name: transport 18 | port: 9300 19 | protocol: TCP 20 | targetPort: 9300 21 | selector: 22 | app: es-min-c0-ingest 23 | chart: elasticsearch-6.4.3 24 | heritage: Tiller 25 | release: es-min-c0-ingest 26 | type: ClusterIP 27 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/backup/service-nodeport-es-min-c0-ingest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: es-min-c0-ingest 5 | namespace: elasticsearch 6 | labels: 7 | app: es-min-c0-ingest #service name (must match the k8s-app label defined in the corresponding xxxx-rc.yaml) 8 | kubernetes.io/name: es-min-c0-ingest #"service name" 9 | spec: 10 | type: NodePort 11 | selector: 12 | app: es-min-c0-ingest #service name 13 | ports: 14 | - name: transport #port name: service name + "-sv" 15 | port: 9300 #port exposed by the Service itself 16 | targetPort: 9300 #port on the pod that traffic is forwarded to 17 | #nodePort: 30964 (port on the node; if not set, Kubernetes assigns one automatically) 18 | --------------------------------------------------------------------------------
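The NodePort manifest above distinguishes three port fields that its inline comments previously conflated. A minimal annotated sketch of how they relate (same Service, comments only; the 30964 value is just the example from the file above):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: es-min-c0-ingest
  namespace: elasticsearch
spec:
  type: NodePort
  selector:
    app: es-min-c0-ingest   # matched against pod labels, not the Service's own labels
  ports:
    - name: transport
      port: 9300            # cluster-internal Service port: es-min-c0-ingest.elasticsearch:9300
      targetPort: 9300      # containerPort on the selected pods that traffic is forwarded to
      nodePort: 30964       # opened on every node; omit to let Kubernetes pick from 30000-32767
```

Traffic flows node:30964 -> Service:9300 -> pod:9300; `port` and `targetPort` may differ, as the apollo Services later in this repo show (port 80 -> targetPort 8080/8090/8070).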
/helm/min-cluster-allinone/es-min/es-min-data-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: es-min-data-pv-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/es-min-data0-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-min-data0-pv-local 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "es-min-data-pv-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/es-min-data0-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/es-min-ingest-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: es-min-ingest-pv-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/es-min-ingest0-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-min-ingest0-pv-local 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "es-min-ingest-pv-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/es-min-ingest0-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/es-min-master-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: es-min-master-pv-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/es-min/es-min-master0-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-min-master0-pv-local 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "es-min-master-pv-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/es-min-master0-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | 
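The StorageClasses above use `provisioner: kubernetes.io/no-provisioner` with `volumeBindingMode: WaitForFirstConsumer`, so nothing is provisioned dynamically: each pre-created local PV waits for a claim with the same storageClassName, and binding is deferred until a pod using the claim is scheduled, which is what makes the `nodeAffinity` pin to node `future` effective. A minimal sketch of a claim that would bind es-min-data0-pv-local — the PVC name here is hypothetical, not a file in this repo (the helm chart normally generates the claims via volumeClaimTemplates):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: es-min-data0-pvc                     # hypothetical name, for illustration only
  namespace: elasticsearch
spec:
  accessModes:
    - ReadWriteOnce                          # must be satisfiable by the PV's accessModes
  storageClassName: "es-min-data-pv-local"   # selects the matching no-provisioner class
  resources:
    requests:
      storage: 1Gi                           # must not exceed the PV's 1Gi capacity
```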
-------------------------------------------------------------------------------- /helm/min-cluster-allinone/kibana-min/readme.MD: -------------------------------------------------------------------------------- 1 | # (1). Pull the image 2 | 3 | docker pull kibana:6.4.3 4 | 5 | Retag the image as: docker.elastic.co/kibana/kibana:6.4.3 6 | docker images |grep kibana |awk '{print "docker tag ",$1":"$2,$1":"$2}' |sed -e 's#kibana#docker\.elastic\.co\/kibana\/kibana#2' |sh -x 7 | 8 | Add the elastic helm charts repo 9 | helm repo add elastic https://helm.elastic.co 10 | 11 | 12 | # (2). Deploy with helm 13 | helm install --name es-min-kibana elastic/kibana --namespace es-min-kibana --version 6.4.3 --set elasticsearchHosts=http://es-min-ingest.es-min:9200,elasticsearchURL=http://es-min-ingest.es-min:9200 14 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/min-cluster-redis/pv-local-redis-min-c0-master.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-local-redis-min-c0-master0 5 | namespace: redis 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "pv-local-redis-min-c0-master" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/pv-local-redis-min-c0-master0 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/min-cluster-redis/pv-local-redis-min-c0-slave.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-local-redis-min-c0-slave0 5 | namespace: redis 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "pv-local-redis-min-c0-slave" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/pv-local-redis-min-c0-slave0 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/min-cluster-redis/storageclass-local-redis-min-c0-master.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: pv-local-redis-min-c0-master 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/min-cluster-redis/storageclass-local-redis-min-c0-slave.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: pv-local-redis-min-c0-slave 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/mysql-min/mysql-min-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
PersistentVolume 3 | metadata: 4 | name: mysql-min-pv-local 5 | namespace: mysql 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "mysql-min-storageclass-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/mysql-min-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/mysql-min/mysql-min-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: mysql-min-storageclass-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/mysql-min/readme.MD: -------------------------------------------------------------------------------- 1 | Deployment commands: 2 | 3 | ``` 4 | kubectl apply -f mysql-min-pv-local.yaml 5 | kubectl apply -f mysql-min-storageclass-local.yaml 6 | 7 | helm install --name mysql-min stable/mysql --namespace=mysql-min --set "persistence.size=1Gi,persistence.storageClass=mysql-min-storageclass-local,mysqlUser=apollo,mysqlPassword=admin,mysqlRootPassword=root" 8 | ``` 9 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/nginx-ingress-min/ingress-es-min-c0-kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | # nginx.ingress.kubernetes.io/ssl-redirect: "false" 6 | # nginx.ingress.kubernetes.io/rewrite-target: / 7 | # kubernetes.io/ingress.class: biz 8 | labels: 9 | app: kibana 10 | name: es-min-c0-kibana 11 | namespace: kibana 12 | spec: 13 | rules: 14 | - host: es-min-c0-kibana.future.coohua.com 15 | #- host: es-min-c0-kibana 16 | http: 17 | paths: 18 | - backend: 19 | serviceName: es-min-c0-kibana 20 | servicePort: 5601 21 | path: / 22 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/nginx-ingress-min/ingress-es-min-c0.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: elasticsearch 10 | name: es-min-c0 11 | namespace: elasticsearch 12 | spec: 13 | rules: 14 | - host: es-min-c0-admin.future.coohua.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: es-min-c0-ingest 19 | servicePort: 9200 20 | path: / 21 | 22 | - host: es-min-c0.future.coohua.com 23 | http: #Ingress rules only support http; ES transport (9300) is raw TCP, see the tcp-services ConfigMap in service-ingress.yaml.backup 24 | paths: 25 | - backend: 26 | serviceName: es-min-c0-ingest 27 | servicePort: 9300 28 | path: / 29 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/nginx-ingress-min/readme.MD: -------------------------------------------------------------------------------- 1 | Pull the image: 2 | docker pull ibmcom/defaultbackend 3 | Retag the image as: k8s.gcr.io/defaultbackend:1.4 4 | docker images |grep defaultbackend |awk '{print "docker tag ",$1":"$2,$1":"$2}' |sed -e 
's#ibmcom\/defaultbackend#k8s\.gcr\.io\/defaultbackend#2' |sh -x 5 | 6 | (1).nginx-ingress deploy: 7 | helm install --name ingress-min --namespace ingress --set "controller.service.externalIPs[0]=47.92.123.228,controller.service.type=NodePort" stable/nginx-ingress -------------------------------------------------------------------------------- /helm/min-cluster-allinone/nginx-ingress-min/service-ingress.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-min 5 | namespace: ingress 6 | labels: 7 | app.kubernetes.io/name: ingress-nginx 8 | app.kubernetes.io/part-of: ingress-nginx 9 | spec: 10 | type: LoadBalancer 11 | ports: 12 | - name: http 13 | port: 80 14 | targetPort: 80 15 | protocol: TCP 16 | - name: https 17 | port: 443 18 | targetPort: 443 19 | protocol: TCP 20 | - name: 9300-tcp 21 | port: 9000 22 | targetPort: 9300 23 | protocol: TCP 24 | selector: 25 | app.kubernetes.io/name: ingress-nginx 26 | app.kubernetes.io/part-of: ingress-nginx 27 | 28 | #--- 29 | #apiVersion: v1 30 | #kind: ConfigMap 31 | #metadata: 32 | # name: tcp-services 33 | # namespace: ingress 34 | #data: 35 | # 9300: "elasticsearch/es-min-c0-ingest:9300" 36 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/redis-ha-min/readme.MD: -------------------------------------------------------------------------------- 1 | helm install stable/redis-ha --name redis-ha-min --namespace redis-ha-min --set 'replicas=1' 2 | -------------------------------------------------------------------------------- /helm/min-cluster-allinone/skywalking-min/readme.MD: -------------------------------------------------------------------------------- 1 | (1). Deploy skywalking oap: 2 | 3 | Note: 4 | es-min must be installed first; skywalking oap's ES endpoint points at the es-min service. 5 | 6 | helm install c7n/skywalking-oap \ 7 | --set elasticsearch.host=es-min-ingest.es-min:9200 \ 8 | --set env.JAVA_OPTS="-Xms1024M -Xmx1024M" \ 9 | --version 0.1.0 \ 10 | --name skywalking-oap \ 11 | --namespace skywalking-min 12 | 13 | (2). Install skywalking ui 14 | 15 | Note: 16 | the ingress controller must be installed first. 17 | 18 | helm install c7n/skywalking-ui \ 19 | --set service.enabled=true \ 20 | --set ingress.enabled=true \ 21 | --set ingress."hosts[0]"=skywalking.future.com \ 22 | --set env.JAVA_OPTS="-Xms1024M -Xmx1024M" \ 23 | --version 0.1.1 \ 24 | --name skywalking-ui \ 25 | --namespace skywalking-min 26 | -------------------------------------------------------------------------------- /images/K8S.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/images/K8S.png -------------------------------------------------------------------------------- /images/千里行走.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/images/千里行走.jpg -------------------------------------------------------------------------------- /other/elk/pro-cluster-demo/ingress-es-c1-skywalking-kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 6 | #nginx.ingress.kubernetes.io/rewrite-target: / 7 | kubernetes.io/ingress.class: biz 8 | labels: 9 | app: kibana 10 | name: 
kibana-c1-admin-skywalking 11 | namespace: elasticsearch 12 | spec: 13 | rules: 14 | - host: kibana-c1-admin-skywalking.biz.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: kibana-kibana 19 | servicePort: 5601 20 | path: / 21 | -------------------------------------------------------------------------------- /other/elk/pro-cluster-demo/ingress-es-c1-skywalking.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 6 | #nginx.ingress.kubernetes.io/rewrite-target: / 7 | kubernetes.io/ingress.class: biz 8 | labels: 9 | app: es-c1-skywalking 10 | name: es-c1-skywalking 11 | namespace: elasticsearch 12 | spec: 13 | rules: 14 | - host: es-c1-skywalking-admin 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: elasticsearch-master 19 | servicePort: 9200 20 | path: / 21 | - host: es-c1-skywalking 22 | http: 23 | paths: 24 | - backend: 25 | serviceName: elasticsearch-master 26 | servicePort: 9300 27 | path: / 28 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-adminservice/apollo-adminservice-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: apollo-adminservice 5 | namespace: inc 6 | labels: 7 | inc-app: apollo-adminservice 8 | data: 9 | application-github.properties: | 10 | spring.datasource.url=jdbc:mysql://mysql-min.mysql-min:3306/DevApolloConfigDB?characterEncoding=utf8 11 | spring.datasource.username=apollo 12 | spring.datasource.password=admin 13 | spring.jpa.database-platform=org.hibernate.dialect.MySQL5Dialect 14 | eureka.service.url=http://apollo-configservice/eureka/ 15 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-adminservice/apollo-adminservice-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: apollo-adminservice 3 | labels: 4 | inc-app: apollo-adminservice 5 | namespace: inc 6 | spec: 7 | selector: 8 | inc-app: apollo-adminservice 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8090 12 | port: 80 13 | name: apollo-adminservice-80 14 | type: ClusterIP 15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-configservice/apollo-configservice-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: apollo-configservice 5 | namespace: inc 6 | labels: 7 | inc-app: apollo-configservice 8 | data: 9 | application-github.properties: | 10 | spring.datasource.url=jdbc:mysql://mysql-min.mysql-min:3306/DevApolloConfigDB?characterEncoding=utf8 11 | spring.datasource.username=apollo 12 | spring.datasource.password=admin 13 | spring.jpa.database-platform=org.hibernate.dialect.MySQL5Dialect 14 | eureka.service.url=http://apollo-configservice 15 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-configservice/apollo-configservice-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: apollo-configservice 3 | labels: 4 | inc-app: 
apollo-configservice 5 | namespace: inc 6 | spec: 7 | selector: 8 | inc-app: apollo-configservice 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8080 12 | port: 80 13 | name: apollo-configservice-80 14 | type: ClusterIP 15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-portal/apollo-portal-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: apollo-portal 5 | namespace: inc 6 | labels: 7 | inc-app: apollo-portal 8 | data: 9 | application-github.properties: | 10 | spring.datasource.url=jdbc:mysql://mysql-min.mysql-min:3306/ApolloPortalDB?characterEncoding=utf8 11 | spring.datasource.username=apollo 12 | spring.datasource.password=admin 13 | spring.jpa.database-platform=org.hibernate.dialect.MySQL5Dialect 14 | eureka.service.url=http://apollo-configservice/eureka/ 15 | apollo-env.properties: 'pro.meta=http://apollo-configservice' 16 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-portal/apollo-portal-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: apollo-portal 5 | namespace: inc 6 | labels: 7 | inc-app: apollo-portal 8 | #pro: enable to attach the load balancer 9 | #annotations: 10 | # kubernetes.io/ingress.class: inc 11 | annotations: 12 | nginx.ingress.kubernetes.io/affinity: cookie 13 | spec: 14 | tls: [] 15 | rules: 16 | - host: pro-apollo-portal.inc.com 17 | http: 18 | paths: 19 | - backend: 20 | serviceName: apollo-portal 21 | servicePort: 80 22 | path: / 23 | -------------------------------------------------------------------------------- /product/standard/apollo-pro/apollo-portal/apollo-portal-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: apollo-portal 3 | labels: 4 | inc-app: apollo-portal 5 | namespace: inc 6 | spec: 7 | selector: 8 | inc-app: apollo-portal 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8070 12 | port: 80 13 | name: apollo-portal-80 14 | type: ClusterIP 15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/apollo-skywalking-pro/apollo-adminservice/apollo-adminservice-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: apollo-adminservice 3 | labels: 4 | inc-app: apollo-adminservice 5 | namespace: apollo 6 | spec: 7 | selector: 8 | inc-app: apollo-adminservice 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8090 12 | port: 80 13 | name: apollo-adminservice-80 14 | type: ClusterIP 15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/apollo-skywalking-pro/apollo-configservice/apollo-configservice-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: apollo-configservice 3 | labels: 4 | inc-app: apollo-configservice 5 | namespace: apollo 6 | spec: 7 | selector: 8 | inc-app: apollo-configservice 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8080 12 | port: 80 13 | name: apollo-configservice-80 14 | type: ClusterIP 
15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/apollo-skywalking-pro/apollo-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: apollo 5 | spec: 6 | finalizers: 7 | - kubernetes 8 | 9 | -------------------------------------------------------------------------------- /product/standard/apollo-skywalking-pro/apollo-portal/apollo-portal-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: apollo-portal 5 | namespace: apollo 6 | labels: 7 | inc-app: apollo-portal 8 | #pro: enable to attach the load balancer 9 | #annotations: 10 | # kubernetes.io/ingress.class: inc 11 | annotations: 12 | nginx.ingress.kubernetes.io/affinity: cookie 13 | spec: 14 | tls: [] 15 | rules: 16 | - host: pro-apollo-portal.inc.com 17 | http: 18 | paths: 19 | - backend: 20 | serviceName: apollo-portal 21 | servicePort: 80 22 | path: / 23 | -------------------------------------------------------------------------------- /product/standard/apollo-skywalking-pro/apollo-portal/apollo-portal-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: apollo-portal 3 | labels: 4 | inc-app: apollo-portal 5 | namespace: apollo 6 | spec: 7 | selector: 8 | inc-app: apollo-portal 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8070 12 | port: 80 13 | name: apollo-portal-80 14 | type: ClusterIP 15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/apollo-skywalking-pro/images/apollo-skywalking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/apollo-skywalking-pro/images/apollo-skywalking.png -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/.gitlab-rc.yml.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/cicd-pro/gitlab-pro/.gitlab-rc.yml.swp -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace gitlab 2 | kubectl apply -f gitlab-rc.yml 3 | kubectl apply -f gitlab-svc.yml 4 | kubectl apply -f postgresql-rc.yml 5 | kubectl apply -f postgresql-svc.yml 6 | kubectl apply -f redis-rc.yml 7 | kubectl apply -f redis-svc.yml 8 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/gitlab-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab 5 | namespace: gitlab 6 | labels: 7 | name: gitlab 8 | spec: 9 | type: LoadBalancer 10 | ports: 11 | - name: http 12 | port: 80 13 | targetPort: http 14 | - name: ssh 15 | port: 22 16 | targetPort: ssh 17 | selector: 18 | name: gitlab 19 | 
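gitlab-svc.yml above uses named targetPorts (`http`, `ssh`), which Kubernetes resolves against port names declared in the pod template that gitlab-rc.yml manages. A sketch of a pod this Service could route to, assuming the conventional GitLab container ports (80 and 22); the pod name, image tag, and port numbers here are assumptions for illustration, not values copied from gitlab-rc.yml:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gitlab-example        # hypothetical; in the repo, gitlab-rc.yml creates the pods
  namespace: gitlab
  labels:
    name: gitlab              # matches the selector in gitlab-svc.yml
spec:
  containers:
    - name: gitlab
      image: sameersbn/gitlab:latest   # image tag assumed
      ports:
        - name: http          # resolved by gitlab-svc.yml targetPort: http
          containerPort: 80
        - name: ssh           # resolved by gitlab-svc.yml targetPort: ssh
          containerPort: 22
```

Named targetPorts let the container port change without touching the Service definition.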
-------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/ingress-nginx-gitlab.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | name: gitlab 10 | name: gitlab 11 | namespace: gitlab 12 | spec: 13 | rules: 14 | - host: gitlab.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: gitlab 19 | servicePort: 80 20 | path: / 21 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/postgresql-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: postgresql 5 | labels: 6 | name: postgresql 7 | namespace: gitlab 8 | spec: 9 | ports: 10 | - name: postgres 11 | port: 5432 12 | targetPort: postgres 13 | selector: 14 | name: postgresql 15 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/readme.Md: -------------------------------------------------------------------------------- 1 | References: 2 | 1. Alibaba Cloud Kubernetes in Action 1 – Cluster Setup and Service Exposure 3 | https://juejin.im/entry/5bcd7648f265da0ab719e6cb 4 | 2. gitlab official github: 5 | https://github.com/sameersbn/docker-gitlab 6 | 3. Deploying GitLab on Kubernetes 7 | https://cloud.tencent.com/info/135fc564b2298d0dfebe4d6aa09669c2.html 8 | 9 | The yaml files come from: 10 | https://github.com/sameersbn/docker-gitlab/tree/master/kubernetes 11 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/redis-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | namespace: gitlab 6 | labels: 7 | name: redis 8 | spec: 9 | ports: 10 | - name: redis 11 | port: 6379 12 | targetPort: redis 13 | selector: 14 | name: redis 15 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/gitlab-pro/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -o pipefail 4 | 5 | if ! command -v kubectl > /dev/null; then 6 | echo "kubectl command not installed" 7 | exit 1 8 | fi 9 | 10 | # delete the services 11 | for svc in *-svc.yml 12 | do 13 | echo -n "Deleting $svc... " 14 | kubectl -f $svc delete 15 | done 16 | 17 | # delete the replication controllers 18 | for rc in *-rc.yml 19 | do 20 | echo -n "Deleting $rc... 
" 21 | kubectl -f $rc delete 22 | done 23 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/jenkins-pro/ingress-nginx-jenkins.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: jenkins 10 | name: jenkins 11 | namespace: abcdocker 12 | spec: 13 | rules: 14 | - host: jenkins.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: jenkins 19 | servicePort: 8080 20 | path: / 21 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/jenkins-pro/jenkins_pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: opspv 5 | spec: 6 | capacity: 7 | storage: 10Gi 8 | accessModes: 9 | - ReadWriteMany 10 | persistentVolumeReclaimPolicy: Delete 11 | nfs: 12 | server: 172.26.237.196 13 | path: /data1/k8s-vloume 14 | 15 | 16 | --- 17 | kind: PersistentVolumeClaim 18 | apiVersion: v1 19 | metadata: 20 | name: opspvc 21 | namespace: abcdocker 22 | spec: 23 | accessModes: 24 | - ReadWriteMany 25 | resources: 26 | requests: 27 | storage: 10Gi 28 | 29 | #Note: adjust the NFS mount path and the NFS server address 30 | ##the path under nfs: is the NFS export path, not a host directory, because the pod runs mount -t ip:/data1/ against it 31 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/jenkins-pro/jenkins_svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: jenkins 5 | namespace: abcdocker 6 | labels: 7 | app: jenkins 8 | spec: 9 | selector: 10 | app: jenkins 11 | type: NodePort 12 | ports: 13 | - name: web 14 | port: 8080 15 | targetPort: web 16 | nodePort: 30002 17 | - name: agent 18 | port: 50000 19 | targetPort: agent 20 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/nexus-pro/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f repo-nexus-ns.yaml 2 | -------------------------------------------------------------------------------- /product/standard/cicd-pro/nexus-pro/ingress-nginx-nexus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | name: repo-nexus 9 | namespace: repo-nexus 10 | spec: 11 | rules: 12 | - host: repo-nexus.future.com 13 | http: 14 | paths: 15 | - backend: 16 | serviceName: repo-nexus 17 | servicePort: 8081 18 | path: / 19 | -------------------------------------------------------------------------------- /product/standard/elasticsearch-pro/es-c0-data-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: es-c0-data-headless-prod 6 | namespace: elasticsearch 7 | labels: 8 | wayne-app: es-c0 9 | wayne-ns: elasticsearch 10 | app: es-c0-data-headless-prod 11 | spec: 12 | selector: 13 | app: es-c0-data-prod 14 | ports: 15 | - name: http 16 | port: 9200 17 | 
protocol: TCP 18 | targetPort: 9200 19 | type: ClusterIP 20 | clusterIP: None 21 | sessionAffinity: None 22 | 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: es-c0-data-prod 28 | namespace: elasticsearch 29 | labels: 30 | wayne-app: es-c0 31 | wayne-ns: elasticsearch 32 | app: es-c0-data-prod 33 | spec: 34 | selector: 35 | app: es-c0-data-prod 36 | ports: 37 | - name: http 38 | port: 9200 39 | protocol: TCP 40 | targetPort: 9200 41 | - name: transport 42 | port: 9300 43 | protocol: TCP 44 | targetPort: 9300 45 | type: ClusterIP 46 | sessionAffinity: None 47 | -------------------------------------------------------------------------------- /product/standard/elasticsearch-pro/es-c0-ingest-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: es-c0-ingest-headless-prod 6 | namespace: elasticsearch 7 | labels: 8 | wayne-app: es-c0 9 | wayne-ns: elasticsearch 10 | app: es-c0-ingest-headless-prod 11 | spec: 12 | selector: 13 | app: es-c0-ingest-prod 14 | ports: 15 | - name: http 16 | port: 9200 17 | protocol: TCP 18 | targetPort: 9200 19 | type: ClusterIP 20 | clusterIP: None 21 | sessionAffinity: None 22 | 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: es-c0-ingest-prod 28 | namespace: elasticsearch 29 | labels: 30 | wayne-app: es-c0 31 | wayne-ns: elasticsearch 32 | app: es-c0-ingest-prod 33 | spec: 34 | selector: 35 | app: es-c0-ingest-prod 36 | ports: 37 | - name: http 38 | port: 9200 39 | protocol: TCP 40 | targetPort: 9200 41 | - name: transport 42 | port: 9300 43 | protocol: TCP 44 | targetPort: 9300 45 | type: ClusterIP 46 | sessionAffinity: None 47 | -------------------------------------------------------------------------------- /product/standard/elasticsearch-pro/es-c0-kibana-ingerss.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: es-c0-kibana-prod 6 | namespace: elasticsearch 7 | labels: 8 | wayne-app: es-c0 9 | wayne-ns: elasticsearch 10 | app: es-c0-kibana-prod 11 | #Production-grade setting, used to attach the cloud vendor's SLB load balancer 12 | #annotations: 13 | # kubernetes.io/ingress.class: inc 14 | spec: 15 | tls: [] 16 | rules: 17 | - host: es-c0-kibana-prod.inc-inc.com 18 | http: 19 | paths: 20 | - backend: 21 | serviceName: es-c0-kibana-prod 22 | servicePort: 5601 23 | path: / 24 | -------------------------------------------------------------------------------- /product/standard/elasticsearch-pro/es-c0-kibana-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: es-c0-kibana-prod 6 | namespace: elasticsearch 7 | labels: 8 | wayne-app: es-c0 9 | wayne-ns: elasticsearch 10 | app: es-c0-kibana-prod 11 | spec: 12 | selector: 13 | app: es-c0-kibana-prod 14 | ports: 15 | - name: http 16 | port: 5601 17 | targetPort: 5601 18 | protocol: TCP 19 | type: ClusterIP 20 | -------------------------------------------------------------------------------- /product/standard/elasticsearch-pro/es-c0-master-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: es-c0-master-headless-prod 6 | labels: 7 | wayne-app: es-c0 8 | wayne-ns: elasticsearch 9 | app: es-c0-master-headless-prod 10 | namespace: elasticsearch 11 | spec: 12 | selector: 13 | app:
es-c0-master-prod 14 | ports: 15 | - name: http 16 | port: 9200 17 | protocol: TCP 18 | targetPort: 9200 19 | type: ClusterIP 20 | clusterIP: None 21 | sessionAffinity: None 22 | 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: es-c0-master-prod 28 | labels: 29 | wayne-app: es-c0 30 | wayne-ns: elasticsearch 31 | app: es-c0-master-prod 32 | namespace: elasticsearch 33 | spec: 34 | selector: 35 | app: es-c0-master-prod 36 | ports: 37 | - name: http 38 | port: 9200 39 | protocol: TCP 40 | targetPort: 9200 41 | - name: transport 42 | port: 9300 43 | protocol: TCP 44 | targetPort: 9300 45 | type: ClusterIP 46 | sessionAffinity: None 47 | -------------------------------------------------------------------------------- /product/standard/elasticsearch-pro/es-c0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: elasticsearch 5 | spec: 6 | finalizers: 7 | - kubernetes 8 | 9 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/alertmanager/alertmanager-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pv-alertmanager 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | storageClassName: "" 10 | resources: 11 | requests: 12 | storage: 100Gi 13 | 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/alertmanager/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: alertmanager 5 | namespace: monitoring 6 | labels: 7 | app: alertmanager 8 | spec: 9 | ports: 10 | - port: 9093 11 | name: http 12 | type: NodePort 13 | selector: 14 | app: alertmanager 15 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/alertmanager/backup/alertmanager-dingtalk.yaml.backup: -------------------------------------------------------------------------------- 1 | global: 2 | resolve_timeout: 1m 3 | 4 | route: 5 | group_by: ['alertname'] 6 | group_wait: 5s 7 | group_interval: 10s 8 | repeat_interval: 1m 9 | receiver: 'webhook' 10 | routes: 11 | - match: 12 | debug: true 13 | receiver: dingtalk2 14 | group_by: [fullurl, host] 15 | 16 | receivers: 17 | - name: 'webhook' 18 | webhook_configs: 19 | - send_resolved: false 20 | url: http://localhost:8060/dingtalk/webhook1/send #webhook1 is the profile name the dingtalk webhook service starts with 21 | - name: 'dingtalk2' 22 | webhook_configs: 23 | - send_resolved: false 24 | url: http://localhost:8061/dingtalk/webhook2/send #webhook2 is the profile name the dingtalk webhook service starts with 25 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/README.md: --------------------------------------------------------------------------------
1 | # Base image 2 | 3 | A rocketmq-exporter implemented in Python; it mainly provides monitoring of the unconsumed (backlogged) message count. 4 | 5 | https://github.com/hepyu/hpy-rocketmq-exporter 6 | 7 | Our production environment actually monitors many other dimensions as well, but those are tied to our framework, so they are not listed here. 8 | 9 | The final result looks like the images below: 10 | 11 | 12 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/images/mesage-unconsumed-count.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/images/mesage-unconsumed-count.jpg -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/images/rocketmq-first-resend-message-result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/images/rocketmq-first-resend-message-result.jpg -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/images/rocketmq-first-send-message-result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/grafana-prometheus-pro/exporter-mq-rocketmq/images/rocketmq-first-send-message-result.jpg -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/grafana-prometheus-image-repo-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | .dockerconfigjson: <your registry's base64-encoded credentials> 4 | kind: Secret 5 | metadata: 6 | name: inc 7 | namespace: monitoring 8 | type: kubernetes.io/dockerconfigjson -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/grafana-prometheus-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/grafana/grafana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: grafana-biz 6 | name: grafana-biz 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: monitor-basicservice.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: grafana-biz 15 | servicePort: 3000 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/grafana/grafana-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-metrics-grafana 5 | namespace: monitoring 6 | spec: 7 | capacity: 8 | storage: 10Gi 9 | accessModes: 10 | - ReadWriteOnce 11 | persistentVolumeReclaimPolicy: Retain 12 | #local means local storage is used 13 | local: 14 | path: /datavip/k8s-data/pv-metrics-grafana 15 | #When using a local PV you must define nodeAffinity; the Kubernetes scheduler relies on the PV's nodeAffinity to place the pod on the node that actually holds the local volume. 16 | #Before creating the local PV, make sure the corresponding storageClass has already been created. 17 | nodeAffinity: 18 | required: 19 | nodeSelectorTerms: 20 | - matchExpressions: 21 | - key: kubernetes.io/hostname 22 | operator: In 23 | values: 24 | #future is the hostname of the node the pod must be scheduled to; the local-pv resource is provisioned on this host. 25 | - future 26 | #Production uses cloud storage instead 27 | #nfs: 28 | # server: <your storage server address> 29 | # path: /pv-metrics-grafana 30 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/grafana/grafana-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pv-metrics-grafana 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: "" 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/grafana/grafana-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana-biz 5 | namespace: monitoring 6 | labels: 7 | app: grafana-biz 8 | spec: 9 | ports: 10 | - port: 3000 11 | name: http 12 | type: NodePort 13 | selector: 14 | app: grafana-biz 15 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/images/caf-cdn方案.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/grafana-prometheus-pro/images/caf-cdn方案.jpg -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/images/grafana-prometheus生产级实践.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/grafana-prometheus-pro/images/grafana-prometheus生产级实践.jpg -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-mq-rocketmq/prometheus-mq-rocketmq-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pv-metrics-mq-rocketmq-promethues 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: "metrics-mq-rocketmq-promethues" 10 | resources: 11 | requests: 12 | storage: 100Gi 13 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-mq-rocketmq/prometheus-mq-rocketmq-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-mq-rocketmq 5 | namespace: monitoring 6 | subjects: 7 | - kind: ServiceAccount 8 | name: default 9 | namespace: monitoring 10 | roleRef: 11 | kind: ClusterRole 12 | name: admin 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-mq-rocketmq/prometheus-mq-rocketmq-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind:
Service 3 | metadata: 4 | name: prometheus-mq-rocketmq 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-mq-rocketmq 8 | spec: 9 | ports: 10 | - port: 9090 11 | name: prom-graph 12 | type: NodePort 13 | selector: 14 | app: prometheus-mq-rocketmq 15 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-redis/prometheus-redis-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus-redis 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-redis 8 | data: 9 | prometheus.yml: > 10 | global: 11 | scrape_interval: 60s 12 | evaluation_interval: 60s 13 | 14 | scrape_configs: 15 | 16 | - job_name: 'redis-monitor' 17 | kubernetes_sd_configs: 18 | - role: pod 19 | 20 | relabel_configs: 21 | - source_labels: [__meta_kubernetes_pod_label_exporter, __meta_kubernetes_namespace] 22 | action: keep 23 | regex: redis;monitoring 24 | - source_labels: [__meta_kubernetes_pod_container_port_number] 25 | action: keep 26 | regex: "9121" 27 | 28 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-redis/prometheus-redis-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: prometheus-redis 6 | name: prometheus-redis 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: prometheus-redis.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: prometheus-redis 15 | servicePort: 9090 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-redis/prometheus-redis-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pv-metrics-redis-promethues 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: "metrics-redis-promethues" 10 | resources: 11 | requests: 12 | storage: 100Gi 13 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-redis/prometheus-redis-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-redis 5 | namespace: monitoring 6 | subjects: 7 | - kind: ServiceAccount 8 | name: default 9 | namespace: monitoring 10 | roleRef: 11 | kind: ClusterRole 12 | name: admin 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-redis/prometheus-redis-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-redis 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-redis 8 | spec: 9 | ports: 10 | - port: 9090 11 | name: prom-graph 12 | type: NodePort 13 | selector: 14 | app: prometheus-redis 15 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-saf/prometheus-saf-ingress.yaml:
-------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: prometheus-saf 6 | name: prometheus-saf 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: prometheus-saf.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: prometheus-saf 15 | servicePort: 9090 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-saf/prometheus-saf-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pv-metrics-saf-promethues 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: "metrics-saf-promethues" 10 | resources: 11 | requests: 12 | storage: 100Gi 13 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-saf/prometheus-saf-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-saf 5 | namespace: monitoring 6 | subjects: 7 | - kind: ServiceAccount 8 | name: default 9 | namespace: monitoring 10 | roleRef: 11 | kind: ClusterRole 12 | name: admin 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-saf/prometheus-saf-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-saf 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-saf 8 | spec: 9 | ports: 10 | - port: 9090 11 | name: prom-graph 12 | type: NodePort 13 | selector: 14 | app: prometheus-saf 15 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-storage-mysql/prometheus-storage-mysql-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus-storage-mysql 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-storage-mysql 8 | data: 9 | prometheus.yml: > 10 | global: 11 | scrape_interval: 60s 12 | evaluation_interval: 60s 13 | 14 | scrape_configs: 15 | 16 | - job_name: 'mysql-monitor' 17 | kubernetes_sd_configs: 18 | - role: pod 19 | 20 | relabel_configs: 21 | - source_labels: [__meta_kubernetes_pod_label_exporter, __meta_kubernetes_namespace] 22 | action: keep 23 | regex: mysql;monitoring 24 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-storage-mysql/prometheus-storage-mysql-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: prometheus-storage-mysql 6 | name: prometheus-storage-mysql 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: prometheus-storage-mysql.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: prometheus-storage-mysql 15 | servicePort: 9090 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- 
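Note: the relabel_configs in prometheus-storage-mysql-configmap.yaml above (and in the redis and traefik configmaps) join the listed source labels with Prometheus's default ';' separator and keep only targets matching the regex, e.g. mysql;monitoring. A minimal sketch of the pod-template metadata an exporter must carry to be scraped (illustrative only; the real labels are set in exporter-deployment-storage-mysql-apollo.yaml, which is not shown in this listing):

    # Hypothetical pod-template metadata matching the 'mysql-monitor' keep rule:
    # __meta_kubernetes_pod_label_exporter = "mysql", __meta_kubernetes_namespace = "monitoring"
    metadata:
      namespace: monitoring
      labels:
        exporter: mysql

Pods without that label, or in another namespace, are dropped by the keep action and never scraped.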
/product/standard/grafana-prometheus-pro/prometheus-storage-mysql/prometheus-storage-mysql-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pv-metrics-storage-mysql-promethues 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: "metrics-storage-mysql-promethues" 10 | resources: 11 | requests: 12 | storage: 100Gi 13 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-storage-mysql/prometheus-storage-mysql-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-storage-mysql 5 | namespace: monitoring 6 | subjects: 7 | - kind: ServiceAccount 8 | name: default 9 | namespace: monitoring 10 | roleRef: 11 | kind: ClusterRole 12 | name: admin 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-storage-mysql/prometheus-storage-mysql-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-storage-mysql 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-storage-mysql 8 | spec: 9 | ports: 10 | - port: 9090 11 | name: prom-graph 12 | type: NodePort 13 | selector: 14 | app: prometheus-storage-mysql 15 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-traefik/prometheus-traefik-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus-traefik 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-traefik 8 | data: 9 | prometheus.yml: > 10 | global: 11 | scrape_interval: 60s 12 | evaluation_interval: 60s 13 | 14 | alerting: 15 | alertmanagers: 16 | - static_configs: 17 | - targets: 18 | #Alertmanager address that alerts are sent to 19 | - alertmanager:9093 20 | 21 | rule_files: 22 | #- "traefik-alert-rule.yaml" 23 | 24 | scrape_configs: 25 | 26 | - job_name: 'traefik-monitor' 27 | kubernetes_sd_configs: 28 | - role: pod 29 | 30 | relabel_configs: 31 | - source_labels: [__meta_kubernetes_pod_label_appType, __meta_kubernetes_namespace] 32 | action: keep 33 | regex: traefik;traefik 34 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-traefik/prometheus-traefik-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: prometheus-traefik 6 | name: prometheus-traefik 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: prometheus-traefik.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: prometheus-traefik 15 | servicePort: 9090 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-traefik/prometheus-traefik-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name:
pv-metrics-traefik-promethues 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: "metrics-traefik-promethues" 10 | resources: 11 | requests: 12 | storage: 100Gi 13 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-traefik/prometheus-traefik-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-traefik 5 | namespace: monitoring 6 | subjects: 7 | - kind: ServiceAccount 8 | name: default 9 | namespace: monitoring 10 | roleRef: 11 | kind: ClusterRole 12 | name: admin 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /product/standard/grafana-prometheus-pro/prometheus-traefik/prometheus-traefik-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-traefik 5 | namespace: monitoring 6 | labels: 7 | app: prometheus-traefik 8 | spec: 9 | ports: 10 | - port: 9090 11 | name: prom-graph 12 | type: NodePort 13 | selector: 14 | app: prometheus-traefik 15 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/README.md: -------------------------------------------------------------------------------- 1 | [Containerized deployment guide](https://mp.weixin.qq.com/s?__biz=Mzg4MDEzMDM4MA==&mid=2247484368&idx=1&sn=9e4fa149db49326d5fc8b3975aa6ffe8&chksm=cf78a3f3f80f2ae5f3fee861916ee30ee98f878a44231fe05d4a06be5fe1be7be08c3c1291d1&token=1272019437&lang=zh_CN#rd) 2 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/init.sh: -------------------------------------------------------------------------------- 1 | mkdir -p /datavip/k8s-data/prometheus-k8s-0 2 | mkdir -p /datavip/k8s-data/prometheus-k8s-1 3 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | baseImage: quay.io/prometheus/alertmanager 10 | nodeSelector: 11 | #kubernetes.io/os: linux 12 | node.type: monitoring 13 | replicas: 3 14 | securityContext: 15 | fsGroup: 2000 16 | runAsNonRoot: true 17 | runAsUser: 1000 18 | serviceAccountName: alertmanager-main 19 | version: v0.18.0 20 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | alertmanager.yaml: Imdsb2JhbCI6CiAgInJlc29sdmVfdGltZW91dCI6ICI1bSIKInJlY2VpdmVycyI6Ci0gIm5hbWUiOiAibnVsbCIKInJvdXRlIjoKICAiZ3JvdXBfYnkiOgogIC0gImpvYiIKICAiZ3JvdXBfaW50ZXJ2YWwiOiAiNW0iCiAgImdyb3VwX3dhaXQiOiAiMzBzIgogICJyZWNlaXZlciI6ICJudWxsIgogICJyZXBlYXRfaW50ZXJ2YWwiOiAiMTJoIgogICJyb3V0ZXMiOgogIC0gIm1hdGNoIjoKICAgICAgImFsZXJ0bmFtZSI6ICJXYXRjaGRvZyIKICAgICJyZWNlaXZlciI6ICJudWxsIg== 4 | kind:
Secret 5 | metadata: 6 | name: alertmanager-main 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/custom_by_hepy/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | resolve_timeout: 5m 3 | route: 4 | group_by: ['job'] 5 | group_wait: 30s 6 | group_interval: 5m 7 | repeat_interval: 12h 8 | receiver: webhook 9 | receivers: 10 | - name: 'webhook' 11 | webhook_configs: 12 | - url: 'http://webhook-dingtalk.monitoring.svc.cluster.local:8060/dingtalk/default-webhook-dingtalk/send' 13 | send_resolved: true 14 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/custom_by_hepy/grafana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: monitor-kubernetes.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: grafana 15 | servicePort: 3000 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/custom_by_hepy/prometheus-k8s-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: prometheus-k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | rules: 10 | - host: prometheus-k8s.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: prometheus-k8s 15 | servicePort: 9090 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- 
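Note: the alertmanager.yaml value in alertmanager-secret.yaml above is base64-encoded; it decodes to the stock kube-prometheus routing config, which routes everything (including the always-firing Watchdog alert) to a null receiver:

    "global":
      "resolve_timeout": "5m"
    "receivers":
    - "name": "null"
    "route":
      "group_by":
      - "job"
      "group_interval": "5m"
      "group_wait": "30s"
      "receiver": "null"
      "repeat_interval": "12h"
      "routes":
      - "match":
          "alertname": "Watchdog"
        "receiver": "null"

custom_by_hepy/alertmanager.yaml above is evidently the dingtalk-webhook replacement for this payload; to activate it, base64-encode that file and substitute the result into the Secret's alertmanager.yaml field.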
/product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "", 9 | "name": "0", 10 | "options": { 11 | "path": "/grafana-dashboard-definitions/0" 12 | }, 13 | "orgId": 1, 14 | "type": "file" 15 | } 16 | ] 17 | } 18 | kind: ConfigMap 19 | metadata: 20 | name: grafana-dashboards 21 | namespace: monitoring 22 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 3000 12 | targetPort: http 13 | selector: 14 | app: grafana 15 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - interval: 15s 9 | port: http 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kube-state-metrics 9 | subjects: 10 | - kind: ServiceAccount 11 | name: kube-state-metrics 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- 
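Note: datasources.yaml in grafana-dashboardDatasources.yaml above is likewise base64-encoded; it decodes to a single provisioned, non-editable Prometheus datasource pointing at the prometheus-k8s Service in the monitoring namespace:

    {
        "apiVersion": 1,
        "datasources": [
            {
                "access": "proxy",
                "editable": false,
                "name": "prometheus",
                "orgId": 1,
                "type": "prometheus",
                "url": "http://prometheus-k8s.monitoring.svc:9090",
                "version": 1
            }
        ]
    }

This is why the grafana-dashboardSources.yaml provider can assume dashboards under /grafana-dashboard-definitions/0 query a datasource named "prometheus".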
/product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/kube-state-metrics-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - apiGroups: 14 | - extensions 15 | resourceNames: 16 | - kube-state-metrics 17 | resources: 18 | - deployments 19 | verbs: 20 | - get 21 | - update 22 | - apiGroups: 23 | - apps 24 | resourceNames: 25 | - kube-state-metrics 26 | resources: 27 | - deployments 28 | verbs: 29 | - get 30 | - update 31 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/kube-state-metrics-roleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: kube-state-metrics 10 | subjects: 11 | - kind: ServiceAccount 12 | name: kube-state-metrics 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-main 12 | port: 8443 13 | targetPort: https-main 14 | - name: https-self 15 | port: 9443 16 | targetPort: https-self 17 | selector: 18 | app: kube-state-metrics 19 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/kube-state-metrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | port: https-main 14 | relabelings: 15 | - action: labeldrop 16 | regex: (pod|service|endpoint|namespace) 17 | scheme: https 18 | scrapeTimeout: 30s 19 | tlsConfig: 20 | insecureSkipVerify: true 21 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 22 | interval: 30s 23 | port: https-self 24 | scheme: https 25 | tlsConfig: 26 | insecureSkipVerify: true 27 | jobLabel: k8s-app 28 | selector: 29 | matchLabels: 30 | k8s-app: kube-state-metrics 31 | -------------------------------------------------------------------------------- 
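Note: the kube-state-metrics Service above exposes two named ports, https-main (the cluster-state metrics) and https-self (the exporter's own telemetry), and the ServiceMonitor scrapes both, using labeldrop on the main endpoint to discard the pod/service/endpoint/namespace target labels and keep series cardinality down. The named targetPorts must match container-port names in the kube-state-metrics deployment, which is not included in this listing; a minimal sketch of the assumed declarations (in stock kube-prometheus these sit on two kube-rbac-proxy sidecars):

    # Hypothetical container ports in the kube-state-metrics pod that the
    # Service's targetPort names (https-main, https-self) resolve against.
    ports:
    - name: https-main
      containerPort: 8443
    - name: https-self
      containerPort: 9443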
/product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: node-exporter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: node-exporter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: node-exporter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https 12 | port: 9100 13 | targetPort: https 14 | selector: 15 | app: node-exporter 16 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/node-exporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https 13 | relabelings: 14 | - action: replace 15 | regex: (.*) 16 | replacement: $1 17 | sourceLabels: 18 | - __meta_kubernetes_pod_node_name 19 | targetLabel: instance 20 | scheme: https 21 | tlsConfig: 22 | insecureSkipVerify: true 23 | jobLabel: k8s-app 24 | selector: 25 | matchLabels: 26 | k8s-app: node-exporter 27 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | group: metrics.k8s.io 7 | groupPriorityMinimum: 100 8 | insecureSkipTLSVerify: true 9 | service: 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | version: v1beta1 13 | versionPriority: 100 14 | 
-------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-adapter 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes 10 | - namespaces 11 | - pods 12 | - services 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 6 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | name: system:aggregated-metrics-reader 9 | rules: 10 | - apiGroups: 11 | - metrics.k8s.io 12 | resources: 13 | - pods 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-adapter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-adapter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: resource-metrics:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-clusterRoleServerResources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: resource-metrics-server-resources 5 | rules: 6 | - apiGroups: 7 | - metrics.k8s.io 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: resource-metrics-auth-reader 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: 
extension-apiserver-authentication-reader 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-adapter 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 6443 13 | selector: 14 | name: prometheus-adapter 15 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-adapter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - honorLabels: true 13 | port: http 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: controller 17 | app.kubernetes.io/name: prometheus-operator 18 | app.kubernetes.io/version: v0.34.0 19 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: prometheus-k8s-config 10 | subjects: 11 | - kind: ServiceAccount 12 
| name: prometheus-k8s 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: coredns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-serviceMonitorKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | metricRelabelings: 12 | - action: drop 13 | regex: etcd_(debugging|disk|request|server).* 14 | sourceLabels: 15 | - __name__ 16 | port: http-metrics 17 | jobLabel: k8s-app 18 | namespaceSelector: 19 | matchNames: 20 | - kube-system 21 | selector: 22 | matchLabels: 
23 | k8s-app: kube-controller-manager 24 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/prometheus-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: http-metrics 12 | jobLabel: k8s-app 13 | namespaceSelector: 14 | matchNames: 15 | - kube-system 16 | selector: 17 | matchLabels: 18 | k8s-app: kube-scheduler 19 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/setup/0namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/setup/prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/setup/prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: http 14 | port: 8080 15 | targetPort: http 16 | selector: 17 | app.kubernetes.io/component: controller 18 | app.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus-pro/kube-prometheus-pro-0.3.0/manifests/setup/prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/alertmanager/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | baseImage: 
quay.io/prometheus/alertmanager 10 | #nodeSelector: 11 | # kubernetes.io/os: linux 12 | #production configuration: 3 replicas 13 | #replicas: 3 14 | replicas: 2 15 | securityContext: 16 | fsGroup: 2000 17 | runAsNonRoot: true 18 | runAsUser: 1000 19 | serviceAccountName: alertmanager-main 20 | version: v0.18.0 21 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/alertmanager/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | alertmanager.yaml: Imdsb2JhbCI6CiAgInJlc29sdmVfdGltZW91dCI6ICI1bSIKInJlY2VpdmVycyI6Ci0gIm5hbWUiOiAibnVsbCIKInJvdXRlIjoKICAiZ3JvdXBfYnkiOgogIC0gImpvYiIKICAiZ3JvdXBfaW50ZXJ2YWwiOiAiNW0iCiAgImdyb3VwX3dhaXQiOiAiMzBzIgogICJyZWNlaXZlciI6ICJudWxsIgogICJyZXBlYXRfaW50ZXJ2YWwiOiAiMTJoIgogICJyb3V0ZXMiOgogIC0gIm1hdGNoIjoKICAgICAgImFsZXJ0bmFtZSI6ICJXYXRjaGRvZyIKICAgICJyZWNlaXZlciI6ICJudWxsIg== 4 | kind: Secret 5 | metadata: 6 | name: alertmanager-main 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/alertmanager/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/alertmanager/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/alertmanager/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/prometheus-operator/0namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/prometheus-operator/prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | 
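A note on alertmanager-secret.yaml above: the alertmanager.yaml value is base64-encoded Alertmanager configuration. A minimal sketch for inspecting and regenerating it, assuming kubectl and base64 on the client and that these manifests are applied to the cluster (the --dry-run=client form needs a recent kubectl; older clients use plain --dry-run):

```
# Decode the current Alertmanager config out of the Secret (the dot in the key is escaped in jsonpath).
kubectl -n monitoring get secret alertmanager-main -o jsonpath='{.data.alertmanager\.yaml}' | base64 -d

# After editing a local alertmanager.yaml, re-encode it and update the Secret in place.
kubectl -n monitoring create secret generic alertmanager-main \
  --from-file=alertmanager.yaml=alertmanager.yaml \
  --dry-run=client -o yaml | kubectl apply -f -
```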
-------------------------------------------------------------------------------- /product/standard/kube-prometheus/prometheus-operator/prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: http 14 | port: 8080 15 | targetPort: http 16 | selector: 17 | app.kubernetes.io/component: controller 18 | app.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /product/standard/kube-prometheus/prometheus-operator/prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.34.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/backup/dledger/rocketmq-c3-broker-dledger-a-configmap.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: rocketmq-c3-broker-prod-dledger-a 5 | labels: 6 | wayne-app: rocketmq-c3-broker-prod 7 | wayne-ns: coohua 8 | app: rocketmq-c3-broker-prod-dledger-a 9 | data: 10 | application-github.properties: | 11 | brokerClusterName = RaftCluster 12 | brokerName=RaftNode00 13 | listenPort=30931 14 | namesrvAddr=127.0.0.1:9876 15 | storePathRootDir=/tmp/rmqstore/node02 16 | storePathCommitLog=/tmp/rmqstore/node02/commitlog 17 | enableDLegerCommitLog=true 18 | dLegerGroup=RaftNode00 19 | dLegerPeers=n0-127.0.0.1:40911;n1-127.0.0.1:40912;n2-127.0.0.1:40913 20 | ## must be unique 21 | dLegerSelfId=n2 22 | sendMessageThreadPoolNums=16 23 | -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/backup/dledger/rocketmq-c3-namesrv-service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rocketmq-c3-namesrv-prod-server 5 | labels: 6 | wayne-app: rocketmq-c3-namesrv-prod 7 | wayne-ns: coohua 8 | app: rocketmq-c3-namesrv-prod-server 9 | spec: 10 | selector: 11 | app: rocketmq-c3-namesrv-prod-server 12 | ports: 13 | - protocol: TCP 14 | port: 9876 15 | targetPort: 9876 16 | name: rocketmq-c3-namesrv-prod-server-9876 17 | type: ClusterIP 18 | clusterIP: None 19 | sessionAffinity: ClientIP -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/images/rocketmq-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hepyu/k8s-app-config/2ecc4219c7942f1706e25195a384b4cebb370eca/product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/images/rocketmq-console.png -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/rocketmq-console-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: 
rocketmq-c4-console-prod-server 5 | namespace: inc 6 | labels: 7 | wayne-app: rocketmq-c4-console-prod 8 | wayne-ns: inc 9 | app: rocketmq-c4-console-prod-server 10 | #production-grade configuration: attach the cloud provider's load balancer 11 | #annotations: 12 | # kubernetes.io/ingress.class: biz 13 | spec: 14 | tls: [] 15 | rules: 16 | - host: pro-rocketmq-c4-k8s.inc.com 17 | http: 18 | paths: 19 | - backend: 20 | serviceName: rocketmq-c4-console-prod-server 21 | servicePort: 8080 22 | path: / 23 | -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/rocketmq-console-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: rocketmq-c4-console-prod-server 5 | namespace: inc 6 | labels: 7 | app: rocketmq-c4-console-prod-server 8 | wayne-app: rocketmq-c4-console-prod 9 | wayne-ns: inc 10 | spec: 11 | accessModes: 12 | - ReadWriteMany 13 | storageClassName: rocketmq-c4-console 14 | resources: 15 | requests: 16 | storage: 10Gi 17 | selector: 18 | matchLabels: {} 19 | -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/rocketmq-console-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rocketmq-c4-console-prod-server 5 | namespace: inc 6 | labels: 7 | wayne-app: rocketmq-c4-console-prod 8 | wayne-ns: inc 9 | app: rocketmq-c4-console-prod-server 10 | spec: 11 | selector: 12 | app: rocketmq-c4-console-prod-server 13 | ports: 14 | - port: 8080 15 | targetPort: 8080 16 | name: rocketmq-c4-console-prod-server-8080 17 | type: ClusterIP 18 | clusterIP: None 19 | sessionAffinity: ClientIP 20 | -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/rocketmq-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: inc 5 | spec: 6 | finalizers: 7 | - kubernetes 8 | -------------------------------------------------------------------------------- /product/standard/rocketmq-pro/rocketmq-ms-cluster-pro/rocketmq-namesrv-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rocketmq-c4-namesrv-prod-server 5 | namespace: inc 6 | labels: 7 | wayne-app: rocketmq-c4-namesrv-prod 8 | wayne-ns: inc 9 | app: rocketmq-c4-namesrv-prod-server 10 | spec: 11 | selector: 12 | app: rocketmq-c4-namesrv-prod-server 13 | ports: 14 | - protocol: TCP 15 | port: 9876 16 | targetPort: 9876 17 | name: rocketmq-c4-namesrv-prod-server-9876 18 | type: ClusterIP 19 | clusterIP: None 20 | sessionAffinity: ClientIP 21 | -------------------------------------------------------------------------------- /product/standard/sentinel-dashboard-pro/ingress-nginx-sentinel-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | inc-app: sentinel-dashboard 10 | name: sentinel-dashboard 11 | namespace: sentinel 12 | spec: 13 | rules: 14 | - host: sentinel-dashboard.future.com 15 | 
http: 16 | paths: 17 | - backend: 18 | serviceName: sentinel-dashboard 19 | servicePort: 80 20 | path: / 21 | -------------------------------------------------------------------------------- /product/standard/sentinel-dashboard-pro/sentinel-dashboard-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: sentinel-dashboard 5 | namespace: sentinel 6 | labels: 7 | inc-app: sentinel-dashboard 8 | data: 9 | JAVA_OPTS: >- 10 | -Dserver.port=8080 -Dcsp.sentinel.dashboard.server=localhost:8080 -Dproject.name=sentinel-dashboard -Djava.security.egd=file:/dev/./urandom -Dcsp.sentinel.api.port=8719 11 | -Duser.timezone=Asia/Shanghai -Dclient.encoding.override=UTF-8 -Dfile.encoding=UTF-8 12 | -Dlogging.file=/data/logs/sentinel-dashboard-1.7.1.log -XX:HeapDumpPath=/data/logs/ 13 | -------------------------------------------------------------------------------- /product/standard/sentinel-dashboard-pro/sentinel-dashboard-service.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | name: sentinel-dashboard 3 | labels: 4 | inc-app: sentinel-dashboard 5 | namespace: sentinel 6 | spec: 7 | selector: 8 | inc-app: sentinel-dashboard 9 | ports: 10 | - protocol: TCP 11 | targetPort: 8080 12 | port: 80 13 | name: sentinel-dashboard-80 14 | type: ClusterIP 15 | sessionAffinity: ClientIP 16 | apiVersion: v1 17 | kind: Service 18 | -------------------------------------------------------------------------------- /product/standard/sentinel-dashboard-pro/sentinel-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: sentinel 5 | spec: 6 | finalizers: 7 | - kubernetes 8 | 9 | -------------------------------------------------------------------------------- /product/standard/skywalking-pro/nohup.out: -------------------------------------------------------------------------------- 1 | 6.4.0: Pulling from apache/skywalking-oap-server 2 | 4fe2ade4980c: Already exists 3 | 6fc58a8d4ae4: Already exists 4 | 819f4a45746c: Already exists 5 | 5338c48afff4: Pulling fs layer 6 | 3f2779143228: Pulling fs layer 7 | 3f2779143228: Download complete 8 | 5338c48afff4: Retrying in 5 seconds 9 | 5338c48afff4: Retrying in 4 seconds 10 | 5338c48afff4: Retrying in 3 seconds 11 | 5338c48afff4: Retrying in 2 seconds 12 | 5338c48afff4: Retrying in 1 second 13 | 5338c48afff4: Verifying Checksum 14 | 5338c48afff4: Download complete 15 | 5338c48afff4: Pull complete 16 | 3f2779143228: Pull complete 17 | Digest: sha256:68c5210e9e1607ccab1e2bce5e587ec582abd0574a9aa537555d75f17a2d3fbe 18 | Status: Image is up to date for apache/skywalking-oap-server:6.4.0 19 | docker.io/apache/skywalking-oap-server:6.4.0 20 | -------------------------------------------------------------------------------- /product/standard/skywalking-pro/skywalking-c0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: skywalking 5 | spec: 6 | finalizers: 7 | - kubernetes 8 | 9 | -------------------------------------------------------------------------------- /product/standard/skywalking-pro/skywalking-c0-oap-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: 
skywalking-c0-oap 6 | labels: 7 | wayne-app: skywalking-c0 8 | wayne-ns: skywalking 9 | app: skywalking-c0-oap 10 | namespace: skywalking 11 | spec: 12 | selector: 13 | app: skywalking-c0-oap 14 | ports: 15 | - name: rest 16 | port: 12800 17 | protocol: TCP 18 | targetPort: 12800 19 | - name: grpc 20 | port: 11800 21 | protocol: TCP 22 | targetPort: 11800 23 | type: ClusterIP 24 | sessionAffinity: None 25 | 26 | -------------------------------------------------------------------------------- /product/standard/skywalking-pro/skywalking-c0-ui-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: skywalking-c0-ui 6 | namespace: skywalking 7 | labels: 8 | wayne-app: skywalking-c0 9 | wayne-ns: skywalking 10 | app: skywalking-c0-ui 11 | #production configuration: a third-party cloud provider's load balancer can be attached 12 | #annotations: 13 | # kubernetes.io/ingress.class: inc 14 | spec: 15 | tls: [] 16 | rules: 17 | - host: skywalking-c0-ui.inc-inc.com 18 | http: 19 | paths: 20 | - backend: 21 | serviceName: skywalking-c0-ui 22 | servicePort: 8080 23 | path: / 24 | -------------------------------------------------------------------------------- /product/standard/skywalking-pro/skywalking-c0-ui-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: skywalking-c0-ui 6 | labels: 7 | wayne-app: skywalking-c0 8 | wayne-ns: skywalking 9 | app: skywalking-c0-ui 10 | namespace: skywalking 11 | spec: 12 | selector: 13 | choerodon.io/infra: skywalking-c0-ui 14 | choerodon.io/release: skywalking-c0-ui 15 | ports: 16 | - name: http 17 | port: 8080 18 | protocol: TCP 19 | targetPort: 8080 20 | type: ClusterIP 21 | sessionAffinity: None 22 | -------------------------------------------------------------------------------- /product/standard/traefik-pro/03_middleware.yml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: admin-auth 5 | spec: 6 | basicAuth: 7 | secret: authsecret 8 | 9 | --- 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: authsecret 14 | namespace: traefik 15 | 16 | data: 17 | users: |2 18 | dGVzdDokYXByMSRINnVza2trVyRJZ1hMUDZld1RyU3VCa1RycUU4d2ovCnRlc3QyOiRhcHIxJGQ5 19 | aHI5SEJCJDRIeHdnVWlyM0hQNEVzZ2dQL1FObzAK 20 | -------------------------------------------------------------------------------- /product/standard/traefik-pro/04_service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: traefik 6 | namespace: traefik 7 | spec: 8 | ports: 9 | - protocol: TCP 10 | name: web 11 | port: 80 12 | - protocol: TCP 13 | name: websecure 14 | port: 443 15 | selector: 16 | app: traefik 17 | type: LoadBalancer 18 | 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: traefik-admin 24 | namespace: traefik 25 | spec: 26 | ports: 27 | - protocol: TCP 28 | name: admin 29 | port: 8080 30 | selector: 31 | app: traefik 32 | 33 | --- 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | name: whoami 38 | namespace: traefik 39 | spec: 40 | ports: 41 | - protocol: TCP 42 | name: web 43 | port: 80 44 | selector: 45 | app: whoami 46 | -------------------------------------------------------------------------------- /product/standard/traefik-pro/create-namespace.sh: 
-------------------------------------------------------------------------------- 1 | kubectl create namespace traefik 2 | -------------------------------------------------------------------------------- /product/standard/traefik-pro/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: traefik-web-ui-v2.1 6 | namespace: traefik 7 | spec: 8 | rules: 9 | - host: traefik-v2.1-admin.inc-inc.com 10 | http: 11 | paths: 12 | - path: / 13 | backend: 14 | serviceName: traefik-admin 15 | servicePort: 8080 16 | -------------------------------------------------------------------------------- /product/standard/traefik-pro/traefik-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: traefik 5 | namespace: traefik 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: traefik 13 | persistentVolumeReclaimPolicy: Retain 14 | #indicates local storage is used 15 | local: 16 | path: /datavip/k8s-data/traefik 17 | #nodeAffinity is mandatory for local PVs: the Kubernetes Scheduler uses the PV's nodeAffinity to guarantee the Pod is scheduled onto a Node that has the corresponding local volume. 18 | #before creating a local PV, first make sure the corresponding StorageClass has been created. 19 | nodeAffinity: 20 | required: 21 | nodeSelectorTerms: 22 | - matchExpressions: 23 | - key: kubernetes.io/hostname 24 | operator: In 25 | values: 26 | #future is the hostname the Pod must be scheduled to; this host provides the local-pv resource. 27 | - future 28 | -------------------------------------------------------------------------------- /product/standard/traefik-pro/traefik-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | #when the PVC protection alpha feature is enabled, a PVC still in use by a pod is not deleted immediately; its deletion is postponed until the PVC is no longer used by any pod. 7 | #you can see that while the PVC's status is Terminating, the PVC is protected and its Finalizers list contains kubernetes.io/pvc-protection: 8 | finalizers: 9 | - kubernetes.io/pvc-protection 10 | name: pvc-tmp 11 | namespace: traefik 12 | spec: 13 | #a PV has three access modes (accessModes): 14 | #ReadWriteOnce (RWO): the most basic mode, read-write, but mountable by only a single Pod. 15 | #ReadOnlyMany (ROX): mountable read-only by multiple Pods. 16 | #ReadWriteMany (RWX): storage that can be shared read-write by multiple Pods. 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 1Gi 22 | storageClassName: traefik 23 | #indicates a local disk; real production setups generally use NFS. 24 | volumeMode: Filesystem 25 | volumeName: traefik 26 | # status: 27 | # accessModes: 28 | # - ReadWriteOnce 29 | # capacity: 30 | # storage: 1Gi 31 | kind: List 32 | -------------------------------------------------------------------------------- /wayne/min-cluster-allinone/apollo-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f service-apollo-admin-server-dev.yaml 2 | kubectl apply -f service-apollo-config-server-dev.yaml 3 | kubectl apply -f service-apollo-portal-server.yaml 4 | -------------------------------------------------------------------------------- /wayne/min-cluster-allinone/apollo-min/readme.MD: -------------------------------------------------------------------------------- 1 | # (1).Prerequisites 2 | 3 | Apollo depends on MySQL; deploy MySQL to k8s first, following the link below. 4 | https://github.com/hepyu/k8s-app-config/blob/master/yaml/min-cluster-allinone/mysql-min/readme.MD 5 | 6 | If you use a MySQL instance outside the cluster, modify the configuration yourself. 7 | 8 | 9 | # (2).Apollo deployment 10 | 11 | This is the minimal Apollo deployment; config/admin/portal each have only 1 instance. 12 | 13 | ``` 14 | 
kubectl apply -f service-apollo-config-server-dev.yaml 15 | kubectl apply -f service-apollo-admin-server-dev.yaml 16 | kubectl apply -f service-apollo-portal-server.yaml 17 | ``` 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/apollo-min/apollo-configservice-transition-ingress-dev.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: pro-apollo-configservice001 5 | namespace: inc 6 | labels: 7 | wayne-app: pro-apollo 8 | wayne-ns: inc 9 | app: pro-apollo-configservice001 10 | #annotations: 11 | # kubernetes.io/ingress.class: inc 12 | spec: 13 | tls: [] 14 | rules: 15 | - host: dev-apollo-configservice 16 | http: 17 | paths: 18 | - backend: 19 | serviceName: dev-apollo-configservice 20 | servicePort: 80 21 | path: / 22 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/apollo-min/apollo-configservice-transition-service-dev.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | metadata: 3 | name: dev-apollo-configservice 4 | labels: 5 | inc-app: dev-apollo-configservice 6 | namespace: inc 7 | spec: 8 | selector: 9 | #inc-app: apollo-configservice 10 | app: pod-apollo-config-server-dev 11 | ports: 12 | - protocol: TCP 13 | targetPort: 8080 14 | port: 80 15 | name: dev-apollo-configservice 16 | type: ClusterIP 17 | sessionAffinity: ClientIP 18 | apiVersion: v1 19 | kind: Service 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/apollo-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace inc 2 | kubectl apply -f service-apollo-admin-server-dev.yaml 3 | kubectl apply -f service-apollo-config-server-dev.yaml 4 | kubectl apply -f service-apollo-portal-server.yaml 5 | kubectl apply -f apollo-configservice-transition-service-dev.yaml 6 | kubectl apply -f apollo-configservice-transition-ingress-dev.yaml 7 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/apollo-min/readme.MD: -------------------------------------------------------------------------------- 1 | # (1).Prerequisites 2 | 3 | Apollo depends on MySQL; deploy MySQL to k8s first, following the link below. 4 | https://github.com/hepyu/k8s-app-config/blob/master/yaml/min-cluster-allinone/mysql-min/readme.MD 5 | 6 | If you use a MySQL instance outside the cluster, modify the configuration yourself. 7 | 8 | 9 | # (2).Apollo deployment 10 | 11 | This is the minimal Apollo deployment; config/admin/portal each have only 1 instance. 12 | 13 | ``` 14 | kubectl apply -f service-apollo-config-server-dev.yaml 15 | kubectl apply -f service-apollo-admin-server-dev.yaml 16 | kubectl apply -f service-apollo-portal-server.yaml 17 | ``` 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/consul-min/consul-min-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: consul-min-pv-local 5 | namespace: consul 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | storageClassName: 'consul-data-pv' 11 | accessModes: 12 | - ReadWriteOnce 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/consul-min-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 
22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/consul-min/consul-min-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | chart: consul-3.7.1 6 | component: consul-consul 7 | heritage: Tiller 8 | release: consul 9 | name: consul-ui 10 | namespace: consul 11 | spec: 12 | ports: 13 | - name: http 14 | nodePort: 32744 15 | port: 8500 16 | protocol: TCP 17 | targetPort: 8500 18 | selector: 19 | component: consul-consul 20 | sessionAffinity: None 21 | type: NodePort 22 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/consul-min/readme.MD: -------------------------------------------------------------------------------- 1 | helm install --name consul --namespace=consul stable/consul --set 'Replicas=1,StorageClass=consul-data-pv' 2 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/dubbo-admin/README.MD: -------------------------------------------------------------------------------- 1 | Use the official image: 2 | 3 | https://hub.docker.com/r/apache/dubbo-admin 4 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/dubbo-admin/dubbo-admin-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: List 2 | apiVersion: v1 3 | items: 4 | - apiVersion: apps/v1beta1 5 | kind: Deployment 6 | metadata: 7 | name: dubbo-admin 8 | namespace: dubbo 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | name: dubbo-admin 14 | labels: 15 | app: dubbo-admin 16 | spec: 17 | containers: 18 | - image: apache/dubbo-admin 19 | name: dubbo-admin 20 | env: 21 | - name: admin.registry.address 22 | value: "zookeeper://zk-hs.zookeeper-min:2181" 23 | - name: admin.config-center 24 | value: "zookeeper://zk-hs.zookeeper-min:2181" 25 | - name: admin.metadata-report.address 26 | value: "zookeeper://zk-hs.zookeeper-min:2181" 27 | ports: 28 | - name: http 29 | containerPort: 8080 30 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/dubbo-admin/dubbo-admin-service.yaml: -------------------------------------------------------------------------------- 1 | kind: List 2 | apiVersion: v1 3 | items: 4 | - apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: dubbo-admin 8 | namespace: dubbo 9 | spec: 10 | ports: 11 | - name: http 12 | port: 8080 13 | targetPort: 8080 14 | selector: 15 | app: dubbo-admin 16 | type: ClusterIP 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/dubbo-admin/ingress-nginx-dubbo-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | # nginx.ingress.kubernetes.io/ssl-redirect: "false" 6 | # nginx.ingress.kubernetes.io/rewrite-target: / 7 | # kubernetes.io/ingress.class: biz 8 | labels: 9 | app: dubbo-admin 10 | name: dubbo-admin 11 | namespace: dubbo 12 | spec: 13 | rules: 14 | - host: dubbo-admin.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: dubbo-admin 19 | servicePort: 8080 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/deploy.sh: 
-------------------------------------------------------------------------------- 1 | kubectl create namespace es-min 2 | 3 | kubectl apply -f es-min-data-storageclass-local.yaml 4 | kubectl apply -f es-min-ingest-storageclass-local.yaml 5 | kubectl apply -f es-min-master-storageclass-local.yaml 6 | 7 | kubectl apply -f es-min-data0-pv-local.yaml 8 | kubectl apply -f es-min-ingest0-pv-local.yaml 9 | kubectl apply -f es-min-master0-pv-local.yaml 10 | 11 | kubectl apply -f es-min-data-statefulset.yaml 12 | kubectl apply -f es-min-ingest-statefulset.yaml 13 | kubectl apply -f es-min-master-statefulset.yaml 14 | 15 | kubectl apply -f es-min-pvc.yaml 16 | kubectl apply -f es-min-service.yaml 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/es-min-data-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: es-min-data-pv-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/es-min-data0-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-min-data0-pv-local 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "es-min-data-pv-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/es-min-data0-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/es-min-ingest-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: es-min-ingest-pv-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/es-min-ingest0-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-min-ingest0-pv-local 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "es-min-ingest-pv-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/es-min-ingest0-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/es-min-master-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: es-min-master-pv-local 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | 
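The three StorageClasses above use volumeBindingMode: WaitForFirstConsumer, so the local PVs only bind once an ES pod is actually scheduled, and each PV expects its host path to exist on node future beforehand. A minimal preparation-and-verification sketch; the paths and names are taken from the manifests above, and it assumes the PVCs land in the es-min namespace created by deploy.sh:

```
# On node "future": create the directories backing the local PVs.
mkdir -p /datavip/k8s-data/es-min-data0-pv-local \
         /datavip/k8s-data/es-min-ingest0-pv-local \
         /datavip/k8s-data/es-min-master0-pv-local

# From a kubectl client: the PVs stay Available until a consumer pod is scheduled.
kubectl get pv es-min-data0-pv-local es-min-ingest0-pv-local es-min-master0-pv-local
kubectl -n es-min get pvc,pod -o wide
```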
-------------------------------------------------------------------------------- /yaml/min-cluster-allinone/es-min/es-min-master0-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-min-master0-pv-local 5 | namespace: elasticsearch 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "es-min-master-pv-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | local: 15 | path: /datavip/k8s-data/es-min-master0-pv-local 16 | nodeAffinity: 17 | required: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/hostname 21 | operator: In 22 | values: 23 | - future 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/backup/gitlab-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab 5 | labels: 6 | name: gitlab 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - name: http 11 | port: 80 12 | targetPort: http 13 | - name: ssh 14 | port: 22 15 | targetPort: ssh 16 | selector: 17 | name: gitlab 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/backup/postgresql-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: postgresql 5 | labels: 6 | name: postgresql 7 | spec: 8 | ports: 9 | - name: postgres 10 | port: 5432 11 | targetPort: postgres 12 | selector: 13 | name: postgresql 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/backup/redis-rc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: redis 5 | spec: 6 | replicas: 1 7 | selector: 8 | name: redis 9 | template: 10 | metadata: 11 | name: redis 12 | labels: 13 | name: redis 14 | spec: 15 | containers: 16 | - name: redis 17 | image: sameersbn/redis 18 | ports: 19 | - name: redis 20 | containerPort: 6379 21 | volumeMounts: 22 | - mountPath: /var/lib/redis 23 | name: data 24 | livenessProbe: 25 | exec: 26 | command: 27 | - redis-cli 28 | - ping 29 | initialDelaySeconds: 30 30 | timeoutSeconds: 5 31 | readinessProbe: 32 | exec: 33 | command: 34 | - redis-cli 35 | - ping 36 | initialDelaySeconds: 5 37 | timeoutSeconds: 1 38 | volumes: 39 | - name: data 40 | emptyDir: {} 41 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/backup/redis-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | labels: 6 | name: redis 7 | spec: 8 | ports: 9 | - name: redis 10 | port: 6379 11 | targetPort: redis 12 | selector: 13 | name: redis 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace gitlab 2 | kubectl apply -f gitlab-rc.yml 3 | kubectl apply -f gitlab-svc.yml 4 | kubectl apply -f postgresql-rc.yml 5 | kubectl apply -f postgresql-svc.yml 6 | kubectl apply -f redis-rc.yml 7 | kubectl apply -f 
redis-svc.yml 8 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/gitlab-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab 5 | namespace: gitlab 6 | labels: 7 | name: gitlab 8 | spec: 9 | type: LoadBalancer 10 | ports: 11 | - name: http 12 | port: 80 13 | targetPort: http 14 | - name: ssh 15 | port: 22 16 | targetPort: ssh 17 | selector: 18 | name: gitlab 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/postgresql-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: postgresql 5 | labels: 6 | name: postgresql 7 | namespace: gitlab 8 | spec: 9 | ports: 10 | - name: postgres 11 | port: 5432 12 | targetPort: postgres 13 | selector: 14 | name: postgresql 15 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/readme.Md: -------------------------------------------------------------------------------- 1 | References: 2 | 1. Alibaba Cloud Kubernetes in Practice 1 – cluster setup and service exposure 3 | https://juejin.im/entry/5bcd7648f265da0ab719e6cb 4 | 2. GitLab's official GitHub: 5 | https://github.com/sameersbn/docker-gitlab 6 | 3. Deploying GitLab on Kubernetes 7 | https://cloud.tencent.com/info/135fc564b2298d0dfebe4d6aa09669c2.html 8 | 9 | The yaml files come from: 10 | https://github.com/sameersbn/docker-gitlab/tree/master/kubernetes 11 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/redis-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | namespace: gitlab 6 | labels: 7 | name: redis 8 | spec: 9 | ports: 10 | - name: redis 11 | port: 6379 12 | targetPort: redis 13 | selector: 14 | name: redis 15 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/gitlab/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -o pipefail 4 | 5 | if ! command -v kubectl > /dev/null; then 6 | echo "kubectl command not installed" 7 | exit 1 8 | fi 9 | 10 | # delete the services 11 | for svc in *-svc.yml 12 | do 13 | echo -n "Deleting $svc... " 14 | kubectl -f $svc delete 15 | done 16 | 17 | # delete the replication controllers 18 | for rc in *-rc.yml 19 | do 20 | echo -n "Deleting $rc... 
" 21 | kubectl -f $rc delete 22 | done 23 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/backup/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace nginx-ingress-min 2 | kubectl apply -f nginx-ingress-min-serviceaccount.yaml 3 | 4 | kubectl apply -f nginx-ingress-min-configmap.yaml 5 | kubectl apply -f nginx-ingress-min-role.yaml 6 | kubectl apply -f nginx-ingress-min-rolebinding.yaml 7 | 8 | kubectl apply -f nginx-ingress-min-clusterrolebinding.yaml 9 | kubectl apply -f nginx-ingress-min-clusterrole.yaml 10 | 11 | 12 | kubectl apply -f nginx-ingress-min-backend-deployment.yaml 13 | kubectl apply -f nginx-ingress-min-backend-service.yaml 14 | kubectl apply -f nginx-ingress-min-controller-deployment.yaml 15 | kubectl apply -f nginx-ingress-min-controller-service.yaml 16 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/backup/nginx-ingress-min-backend-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: nginx-ingress 6 | component: default-backend 7 | release: nginx-ingress-min 8 | name: nginx-ingress-min-default-backend 9 | namespace: nginx-ingress-min 10 | spec: 11 | ports: 12 | - name: http 13 | port: 80 14 | protocol: TCP 15 | targetPort: http 16 | selector: 17 | app: nginx-ingress 18 | component: default-backend 19 | release: nginx-ingress-min 20 | sessionAffinity: None 21 | type: ClusterIP 22 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/backup/nginx-ingress-min-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: ingress-controller-leader-nginx 7 | namespace: nginx-ingress-min 8 | - apiVersion: v1 9 | data: 10 | enable-vts-status: "false" 11 | kind: ConfigMap 12 | metadata: 13 | labels: 14 | app: nginx-ingress 15 | component: controller 16 | release: nginx-ingress-min 17 | name: nginx-ingress-min-controller 18 | namespace: nginx-ingress-min 19 | kind: List 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/backup/nginx-ingress-min-controller-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: nginx-ingress 6 | component: controller 7 | release: nginx-ingress-min 8 | name: nginx-ingress-min-controller 9 | namespace: nginx-ingress-min 10 | spec: 11 | externalTrafficPolicy: Cluster 12 | ports: 13 | - name: http 14 | nodePort: 31873 15 | port: 80 16 | protocol: TCP 17 | targetPort: http 18 | - name: https 19 | nodePort: 30235 20 | port: 443 21 | protocol: TCP 22 | targetPort: https 23 | selector: 24 | app: nginx-ingress 25 | component: controller 26 | release: nginx-ingress-min 27 | sessionAffinity: None 28 | type: LoadBalancer 29 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/backup/nginx-ingress-min-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: 
rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | labels: 7 | app: nginx-ingress 8 | release: nginx-ingress-min 9 | name: nginx-ingress-min 10 | namespace: nginx-ingress-min 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | name: nginx-ingress-min 15 | subjects: 16 | - kind: ServiceAccount 17 | name: nginx-ingress-min 18 | namespace: nginx-ingress-min 19 | kind: List 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/backup/nginx-ingress-min-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | labels: 7 | app: nginx-ingress 8 | release: nginx-ingress-min 9 | name: nginx-ingress-min 10 | namespace: nginx-ingress-min 11 | secrets: 12 | - name: nginx-ingress-min-token 13 | kind: List 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace ingress-nginx 2 | 3 | #backend deploy 4 | kubectl apply -f ingress-nginx-backend-deployment.yaml 5 | kubectl apply -f ingress-nginx-backend-service.yaml 6 | #controller deploy 7 | kubectl apply -f ingress-nginx-deployoment.yaml 8 | kubectl apply -f ingress-nginx-service.yaml 9 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/ingress-nginx-backend-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: ingress-nginx 6 | component: default-backend 7 | release: ingress-nginx 8 | name: ingress-nginx-default-backend 9 | namespace: ingress-nginx 10 | spec: 11 | ports: 12 | - name: http 13 | port: 80 14 | protocol: TCP 15 | targetPort: http 16 | selector: 17 | app: ingress-nginx 18 | component: default-backend 19 | release: ingress-nginx 20 | sessionAffinity: None 21 | type: ClusterIP 22 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/ingress-nginx-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: ingress-nginx 5 | namespace: ingress-nginx 6 | labels: 7 | app.kubernetes.io/name: ingress-nginx 8 | app.kubernetes.io/part-of: ingress-nginx 9 | spec: 10 | externalTrafficPolicy: Local 11 | #type: LoadBalancer 12 | type: NodePort 13 | externalIPs: 14 | - 39.98.43.48 15 | selector: 16 | app.kubernetes.io/name: ingress-nginx 17 | app.kubernetes.io/part-of: ingress-nginx 18 | ports: 19 | - name: http 20 | port: 80 21 | targetPort: http 22 | #targetPort: 80 23 | nodePort: 30834 24 | - name: https 25 | port: 443 26 | targetPort: https 27 | 28 | --- 29 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f ingress-nginx-apollo-config.yaml 2 | kubectl apply -f ingress-nginx-apollo-portal.yaml 3 | kubectl apply -f ingress-nginx-consul.yaml 4 | #kubectl apply -f ingress-nginx-es-min-kibana.yaml 5 | kubectl apply -f ingress-nginx-es-min.yaml 6 | kubectl apply -f 
ingress-nginx-gitlab.yaml 7 | #kubectl apply -f ingress-nginx-grafana-k8s.yaml 8 | #kubectl apply -f ingress-nginx-jenkins.yaml 9 | #kubectl apply -f ingress-nginx-nexus.yaml 10 | #kubectl apply -f ingress-nginx-prometheus-k8s.yaml 11 | kubectl apply -f ingress-nginx-rocketmq-min-c0-console.yaml 12 | kubectl apply -f ingress-nginx-skywalking-ui.yaml 13 | #kubectl apply -f ingress-nginx-wayne.yaml 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-apollo-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: service-apollo-config-server-dev 10 | name: service-apollo-config-server-dev 11 | namespace: inc 12 | spec: 13 | rules: 14 | - host: dev.apollo-config.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: service-apollo-config-server-dev 19 | servicePort: 8080 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-apollo-portal.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: service-apollo-portal-server 10 | name: service-apollo-portal-server 11 | namespace: inc 12 | spec: 13 | rules: 14 | - host: dev.apollo-portal.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: service-apollo-portal-server 19 | servicePort: 8070 20 | path: / 21 | 22 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-consul.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: consul 10 | name: consul 11 | namespace: consul 12 | spec: 13 | rules: 14 | - host: consul.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: consul-ui 19 | servicePort: 8500 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-es-min-kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | # nginx.ingress.kubernetes.io/ssl-redirect: "false" 6 | # nginx.ingress.kubernetes.io/rewrite-target: / 7 | # kubernetes.io/ingress.class: biz 8 | labels: 9 | app: kibana 10 | name: es-min-kibana 11 | namespace: es-min-kibana 12 | spec: 13 | rules: 14 | - host: es-min-kibana.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: kibana 19 | servicePort: 5601 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-es-min.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: elasticsearch 10 | name: es-min 11 | namespace: es-min 12 | spec: 13 | rules: 14 | - host: es-min-admin.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: es-min-ingest 19 | servicePort: 9200 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-gitlab.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | name: gitlab 10 | name: gitlab 11 | namespace: gitlab 12 | spec: 13 | rules: 14 | - host: gitlab.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: gitlab 19 | servicePort: 80 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-grafana-k8s.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: grafana 10 | name: grafana-k8s 11 | namespace: monitoring 12 | spec: 13 | rules: 14 | - host: grafana-k8s.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: grafana 19 | servicePort: 3000 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-jenkins.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: jenkins 10 | name: jenkins 11 | namespace: jenkins 12 | spec: 13 | rules: 14 | - host: jenkins.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: jenkins2 19 | servicePort: 8080 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-nexus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | name: repo-nexus 9 | namespace: repo-nexus 10 | spec: 11 | rules: 12 | - host: repo-nexus.future.com 13 | http: 14 | paths: 15 | - backend: 16 | serviceName: repo-nexus 17 | servicePort: 8081 18 | path: / 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-prometheus-k8s.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | 
#apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | prometheus: k8s 10 | name: prometheus-k8s 11 | namespace: monitoring 12 | spec: 13 | rules: 14 | - host: prometheus-k8s.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: prometheus-k8s 19 | servicePort: 9090 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-rocketmq-min-c0-console.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 6 | #nginx.ingress.kubernetes.io/rewrite-target: / 7 | labels: 8 | app: service-rocketmq-min-c0-console-ng-prod 9 | name: rocketmq-min-c0-console 10 | namespace: rocketmq-min 11 | spec: 12 | rules: 13 | - host: pro-rocketmq-min-c0.console.future.com 14 | http: 15 | paths: 16 | - backend: 17 | serviceName: service-rocketmq-min-c0-console-ng-prod 18 | servicePort: 8080 19 | path: / 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-skywalking-ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: skywalking-ui 10 | name: skywalking-ui 11 | namespace: skywalking-min 12 | spec: 13 | rules: 14 | - host: skywalking-ui.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: skywalking-ui 19 | servicePort: 8080 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/proxy/ingress-nginx-wayne.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: infra-wayne 10 | name: infra-wayne 11 | namespace: wayne 12 | spec: 13 | rules: 14 | - host: wayne.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: infra-wayne 19 | servicePort: 8080 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/ingress-nginx-min/readme.MD: -------------------------------------------------------------------------------- 1 | #backend deploy 2 | kubectl apply -f ingress-nginx-backend-deployment.yaml 3 | kubectl apply -f ingress-nginx-backend-service.yaml 4 | #controller deploy 5 | kubectl apply -f ingress-nginx-deployoment.yaml 6 | kubectl apply -f ingress-nginx-service.yaml 7 | #enter the ingress controller pod 8 | kubectl exec -it -n ingress-nginx nginx-ingress-controller-5c4b565896-6kk9n bash 9 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.4.2/istio-demo-grafana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: 
grafana-istio-demo 6 | name: grafana-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: grafana-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: grafana 15 | servicePort: 3000 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.4.2/istio-demo-jaeger-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: jaeger-istio-demo 6 | name: jaeger-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: jaeger-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: jaeger-query 15 | servicePort: 16686 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.4.2/istio-demo-kiali-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: kiali-istio-demo 6 | name: kiali-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: kiali-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: kiali 15 | servicePort: 20001 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.8.3/istio-demo-grafana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: grafana-istio-demo 6 | name: grafana-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: grafana-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: grafana 15 | servicePort: 3000 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.8.3/istio-demo-jaeger-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: jaeger-istio-demo 6 | name: jaeger-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: jaeger-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: tracing 15 | servicePort: 16686 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.8.3/istio-demo-kiali-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: kiali-istio-demo 6 | name: kiali-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: kiali-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: kiali 15 | servicePort: 20001 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.8.3/istio-demo-prometheus-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: prometheus-istio-demo 6 | name: prometheus-istio-demo 7 | namespace: istio-system 8 | 
spec: 9 | rules: 10 | - host: prometheus-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: prometheus 15 | servicePort: 9090 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/istio/istio-1.8.3/istio-demo-zipkin-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | labels: 5 | app: zipkin-istio-demo 6 | name: zipkin-istio-demo 7 | namespace: istio-system 8 | spec: 9 | rules: 10 | - host: zipkin-istio-demo.inc-inc.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: zipkin 15 | servicePort: 9411 16 | path: / 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/jenkins/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace jenkins 2 | kubectl apply -f jenkins-deployment.yaml 3 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/jenkins/jenkins-slave-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: slave-jnlp-pv-local 5 | namespace: mysql 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteMany 12 | storageClassName: "slave-jnlp-storageclass-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | #Use local storage 15 | local: 16 | path: /datavip/k8s-data/slave-jnlp-pv-local 17 | #nodeAffinity is mandatory for a local PV: the Kubernetes scheduler uses the PV's nodeAffinity to make sure the Pod is scheduled onto the node that actually holds the local volume. 18 | #Before creating a local PV, make sure the corresponding storageClass already exists. 19 | nodeAffinity: 20 | required: 21 | nodeSelectorTerms: 22 | - matchExpressions: 23 | - key: kubernetes.io/hostname 24 | operator: In 25 | values: 26 | #future is the hostname the pod must be scheduled to; the local-pv resource is provisioned on this host. 27 | - future 28 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/jenkins/readme.MD: -------------------------------------------------------------------------------- 1 | References: 2 | 1. Kubernetes from Beginner to Practice (kubernetes从入门到实践) 3 | https://www.kancloud.cn/huyipow/kubernetes/716441 4 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kafka-min/kafka-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kafka-min 6 | labels: 7 | app: kafka 8 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kibana-min/backup/kibana-min-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: kibana 6 | release: es-min-kibana 7 | name: es-min-kibana-kibana 8 | namespace: es-min-kibana 9 | spec: 10 | ports: 11 | - name: http 12 | port: 5601 13 | protocol: TCP 14 | targetPort: 5601 15 | selector: 16 | app: kibana 17 | release: es-min-kibana 18 | sessionAffinity: None 19 | type: ClusterIP 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kibana-min/backup/readme.MD: -------------------------------------------------------------------------------- 1 | # Containerization steps 2 | 3 | Run the deployment commands in order: 4 | ``` 5 | kubectl apply -f
kibana-min-deployment.yaml 6 | kubectl apply -f kibana-min-service.yaml 7 | ``` 8 | 9 | Or simply run the deploy.sh in this directory: 10 | ``` 11 | sh deploy.sh 12 | ``` 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kibana-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace es-min-kibana 2 | kubectl apply -f kibana-deployment.yaml 3 | kubectl apply -f kibana-service.yaml 4 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kibana-min/kibana-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: List 2 | apiVersion: v1 3 | items: 4 | - apiVersion: apps/v1beta1 5 | kind: Deployment 6 | metadata: 7 | name: kibana 8 | namespace: es-min-kibana 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | name: kibana 14 | labels: 15 | app: kibana 16 | spec: 17 | containers: 18 | - image: docker.elastic.co/kibana/kibana:6.4.3 19 | name: kibana 20 | env: 21 | - name: ELASTICSEARCH_URL 22 | value: "http://es-min-ingest.es-min:9200" 23 | ports: 24 | - name: http 25 | containerPort: 5601 26 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kibana-min/kibana-service.yaml: -------------------------------------------------------------------------------- 1 | kind: List 2 | apiVersion: v1 3 | items: 4 | - apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: kibana 8 | namespace: es-min-kibana 9 | spec: 10 | ports: 11 | - name: http 12 | port: 5601 13 | targetPort: 5601 14 | selector: 15 | app: kibana 16 | type: ClusterIP 17 | 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f kube-prometheus-manifests 2 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/00namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/0prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | apps.kubernetes.io/component: controller 6 | apps.kubernetes.io/name: prometheus-operator 7 | apps.kubernetes.io/version: v0.30.0 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/0prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | apps.kubernetes.io/component: controller 6 | apps.kubernetes.io/name: prometheus-operator 7 | apps.kubernetes.io/version: v0.30.0
8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: http 14 | port: 8080 15 | targetPort: http 16 | selector: 17 | apps.kubernetes.io/component: controller 18 | apps.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/0prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | apps.kubernetes.io/component: controller 6 | apps.kubernetes.io/name: prometheus-operator 7 | apps.kubernetes.io/version: v0.30.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/0prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | apps.kubernetes.io/component: controller 6 | apps.kubernetes.io/name: prometheus-operator 7 | apps.kubernetes.io/version: v0.30.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - honorLabels: true 13 | port: http 14 | selector: 15 | matchLabels: 16 | apps.kubernetes.io/component: controller 17 | apps.kubernetes.io/name: prometheus-operator 18 | apps.kubernetes.io/version: v0.30.0 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | baseImage: quay.io/prometheus/alertmanager 10 | nodeSelector: 11 | beta.kubernetes.io/os: linux 12 | replicas: 3 13 | securityContext: 14 | fsGroup: 2000 15 | runAsNonRoot: true 16 | runAsUser: 1000 17 | serviceAccountName: alertmanager-main 18 | version: v0.17.0 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | alertmanager.yaml: Imdsb2JhbCI6CiAgInJlc29sdmVfdGltZW91dCI6ICI1bSIKInJlY2VpdmVycyI6Ci0gIm5hbWUiOiAibnVsbCIKInJvdXRlIjoKICAiZ3JvdXBfYnkiOgogIC0gImpvYiIKICAiZ3JvdXBfaW50ZXJ2YWwiOiAiNW0iCiAgImdyb3VwX3dhaXQiOiAiMzBzIgogICJyZWNlaXZlciI6ICJudWxsIgogICJyZXBlYXRfaW50ZXJ2YWwiOiAiMTJoIgogICJyb3V0ZXMiOgogIC0gIm1hdGNoIjoKICAgICAgImFsZXJ0bmFtZSI6ICJXYXRjaGRvZyIKICAgICJyZWNlaXZlciI6ICJudWxsIg== 4 | kind: Secret 5 | metadata: 6 | name: alertmanager-main 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | 
targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "", 9 | "name": "0", 10 | "options": { 11 | "path": "/grafana-dashboard-definitions/0" 12 | }, 13 | "orgId": 1, 14 | "type": "file" 15 | } 16 | ] 17 | } 18 | kind: ConfigMap 19 | metadata: 20 | name: grafana-dashboards 21 | namespace: monitoring 22 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 3000 12 | targetPort: http 13 | selector: 14 | app: grafana 15 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- 
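The grafana-datasources Secret above carries its datasources.yaml payload base64-encoded under data:; decoded, it appears to be a single prometheus datasource pointing at http://prometheus-k8s.monitoring.svc:9090. For review purposes, an equivalent manifest (a sketch only, not a file from this repo) can use stringData, which lets the API server do the base64 encoding on write:

```
apiVersion: v1
kind: Secret
metadata:
  name: grafana-datasources
  namespace: monitoring
type: Opaque
stringData:
  #same payload as the base64 data: field above, just left human-readable
  datasources.yaml: |-
    {
        "apiVersion": 1,
        "datasources": [
            {
                "access": "proxy",
                "editable": false,
                "name": "prometheus",
                "orgId": 1,
                "type": "prometheus",
                "url": "http://prometheus-k8s.monitoring.svc:9090",
                "version": 1
            }
        ]
    }
```

Both forms end up stored identically; stringData is purely a write-time convenience.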
/yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - interval: 15s 9 | port: http 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kube-state-metrics 9 | subjects: 10 | - kind: ServiceAccount 11 | name: kube-state-metrics 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/kube-state-metrics-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - apiGroups: 14 | - extensions 15 | resourceNames: 16 | - kube-state-metrics 17 | resources: 18 | - deployments 19 | verbs: 20 | - get 21 | - update 22 | - apiGroups: 23 | - apps 24 | resourceNames: 25 | - kube-state-metrics 26 | resources: 27 | - deployments 28 | verbs: 29 | - get 30 | - update 31 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/kube-state-metrics-roleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: kube-state-metrics 10 | subjects: 11 | - kind: ServiceAccount 12 | name: kube-state-metrics 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-main 12 | port: 8443 13 | targetPort: https-main 14 | - name: https-self 15 | port: 9443 16 | targetPort: https-self 17 | selector: 18 | app: kube-state-metrics 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- 
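The grafana ServiceMonitor above also shows how Prometheus Operator discovers scrape targets: the ServiceMonitor's selector.matchLabels is matched against the labels of a Service (not of the pods), and endpoints[].port names one of that Service's named ports. Condensed from the grafana Service and ServiceMonitor already defined in these manifests, the wiring is:

```
apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: monitoring   #1. same namespace (the monitor sets no namespaceSelector)
  labels:
    app: grafana          #2. matched by the ServiceMonitor's selector
spec:
  ports:
  - name: http            #3. referenced by the monitor's endpoints[].port
    port: 3000
    targetPort: http
  selector:
    app: grafana
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: grafana
  namespace: monitoring   #1
spec:
  endpoints:
  - interval: 15s
    port: http            #3. the Service's port *name*, not a number
  selector:
    matchLabels:
      app: grafana        #2
```

If the labels or the port name drift apart, Prometheus simply scrapes nothing, so these pairs are the first thing to check when a target is missing.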
/yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/kube-state-metrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | port: https-main 14 | scheme: https 15 | scrapeTimeout: 30s 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 19 | interval: 30s 20 | port: https-self 21 | scheme: https 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | jobLabel: k8s-app 25 | selector: 26 | matchLabels: 27 | k8s-app: kube-state-metrics 28 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: node-exporter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: node-exporter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: node-exporter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https 12 | port: 9100 13 | targetPort: https 14 | selector: 15 | app: node-exporter 16 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/node-exporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | 
interval: 30s 12 | port: https 13 | scheme: https 14 | tlsConfig: 15 | insecureSkipVerify: true 16 | jobLabel: k8s-app 17 | selector: 18 | matchLabels: 19 | k8s-app: node-exporter 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | group: metrics.k8s.io 7 | groupPriorityMinimum: 100 8 | insecureSkipTLSVerify: true 9 | service: 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | version: v1beta1 13 | versionPriority: 100 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-adapter 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes 10 | - namespaces 11 | - pods 12 | - services 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 6 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | name: system:aggregated-metrics-reader 9 | rules: 10 | - apiGroups: 11 | - metrics.k8s.io 12 | resources: 13 | - pods 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-adapter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-adapter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: resource-metrics:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-clusterRoleServerResources.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: resource-metrics-server-resources 5 | rules: 6 | - apiGroups: 7 | - metrics.k8s.io 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: resource-metrics-auth-reader 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: extension-apiserver-authentication-reader 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-adapter 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 6443 13 | selector: 14 | name: prometheus-adapter 15 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-adapter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: k8s 7 | namespace: monitoring 8 | spec: 9 | alerting: 10 | alertmanagers: 11 | - name: alertmanager-main 12 | namespace: monitoring 13 | port: web 14 | baseImage: 
quay.io/prometheus/prometheus 15 | nodeSelector: 16 | beta.kubernetes.io/os: linux 17 | replicas: 2 18 | resources: 19 | requests: 20 | memory: 400Mi 21 | ruleSelector: 22 | matchLabels: 23 | prometheus: k8s 24 | role: alert-rules 25 | securityContext: 26 | fsGroup: 2000 27 | runAsNonRoot: true 28 | runAsUser: 1000 29 | serviceAccountName: prometheus-k8s 30 | serviceMonitorNamespaceSelector: {} 31 | serviceMonitorSelector: {} 32 | version: v2.10.0 33 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: prometheus-k8s-config 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-k8s 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: coredns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: 
/var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-serviceMonitorKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | metricRelabelings: 12 | - action: drop 13 | regex: etcd_(debugging|disk|request|server).* 14 | sourceLabels: 15 | - __name__ 16 | port: http-metrics 17 | jobLabel: k8s-app 18 | namespaceSelector: 19 | matchNames: 20 | - kube-system 21 | selector: 22 | matchLabels: 23 | k8s-app: kube-controller-manager 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/kube-prometheus-manifests/prometheus-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: http-metrics 12 | jobLabel: k8s-app 13 | namespaceSelector: 14 | matchNames: 15 | - kube-system 16 | selector: 17 | matchLabels: 18 | k8s-app: kube-scheduler 19 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/quickstart/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace prometheus-operator 2 | 3 | kubectl apply -f bundle.yaml 4 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/kube-prometheus/readme.MD: -------------------------------------------------------------------------------- 1 | official GitHub: 2 | 3 | https://github.com/coreos/prometheus-operator 4 | https://github.com/coreos/kube-prometheus 5 | 6 | The yaml files come from kube-prometheus/manifests/; after downloading, just run kubectl apply -f .
7 | 8 | docker pull siriuszg/addon-resizer:1.8.4 9 | 10 | docker images |grep addon-resizer |awk '{print "docker tag ",$1":"$2,$1":"$2}' |sed -e 's#siriuszg#k8s.gcr.io#2' |sh -x 11 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace mysql-min 2 | kubectl apply -f mysql-min-storageclass-local.yaml 3 | kubectl apply -f mysql-min-pv-local.yaml 4 | kubectl apply -f mysql-min-pvc.yaml 5 | kubectl apply -f mysql-min-secret.yaml 6 | kubectl apply -f mysql-min-service.yaml 7 | kubectl apply -f mysql-min-deployment.yaml 8 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/mysql-min-pv-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mysql-min-pv-local 5 | namespace: mysql 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "mysql-min-storageclass-local" 13 | persistentVolumeReclaimPolicy: Retain 14 | #Use local storage 15 | local: 16 | path: /datavip/k8s-data/mysql-min-pv-local 17 | #nodeAffinity is mandatory for a local PV: the Kubernetes scheduler uses the PV's nodeAffinity to make sure the Pod is scheduled onto the node that actually holds the local volume. 18 | #Before creating a local PV, make sure the corresponding storageClass already exists. 19 | nodeAffinity: 20 | required: 21 | nodeSelectorTerms: 22 | - matchExpressions: 23 | - key: kubernetes.io/hostname 24 | operator: In 25 | values: 26 | #future is the hostname the pod must be scheduled to; the local-pv resource is provisioned on this host. 27 | - future 28 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/mysql-min-pv-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mysql-min-pv-local 5 | namespace: mysql 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: "mysql-min-storageclass-nfs" 13 | persistentVolumeReclaimPolicy: Retain 14 | #Use NFS for storage 15 | nfs: 16 | path: /mysql-min 17 | server: xxx.nas.aliyuncs.com 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/mysql-min-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | #Store the passwords of all mysql users in the secret, managed in one place 4 | mysql-password: YWRtaW4= 5 | mysql-root-password: OVplTmswRGdoSA== 6 | kind: Secret 7 | metadata: 8 | labels: 9 | app: mysql-min 10 | release: mysql-min 11 | name: mysql-min 12 | namespace: mysql-min 13 | #There are three types of Secret: 14 | #Opaque: a base64-encoded Secret for storing passwords, keys, etc.; the original data can be recovered with base64 --decode, so the protection is very weak. 15 | #kubernetes.io/dockerconfigjson: stores credentials for a private docker registry. 16 | #kubernetes.io/service-account-token: referenced by a serviceaccount. Kubernetes creates the corresponding secret automatically when a serviceaccount is created; if a Pod uses the serviceaccount, the secret is auto-mounted into the Pod at /run/secrets/kubernetes.io/serviceaccount. 17 | type: Opaque 18 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/mysql-min-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mysql-min 6 | release: mysql-min 7 | name: mysql-min 8 |
namespace: mysql-min 9 | spec: 10 | ports: 11 | - name: mysql 12 | port: 3306 13 | protocol: TCP 14 | targetPort: mysql 15 | selector: 16 | app: mysql-min 17 | #sessionAffinity currently offers two settings, "None" and "ClientIP": 18 | #None: requests are round-robined across the Pods behind the Service. 19 | #ClientIP: requests from the same client IP are pinned to the same Pod. 20 | sessionAffinity: None 21 | type: ClusterIP 22 | #status: 23 | # loadBalancer: {} 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/mysql-min-storageclass-local.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: mysql-min-storageclass-local 5 | #Specify the provisioner of the storage class, e.g. aws, nfs, etc.; see the official docs for the exact values. 6 | #Every StorageClass has a provisioner field that determines which volume plugin the PV uses; it must be set. 7 | #Since this demo uses local storage, it is set to kubernetes.io/no-provisioner. 8 | provisioner: kubernetes.io/no-provisioner 9 | #volumeBindingMode: WaitForFirstConsumer delays PVC binding until a pod is scheduled. 10 | volumeBindingMode: WaitForFirstConsumer 11 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/mysql-min/readme.MD: -------------------------------------------------------------------------------- 1 | # (1). Preface 2 | 3 | In a real production environment, for stability and high availability (and a good night's sleep), we do not run mysql inside the k8s cluster; we typically use Alibaba Cloud RDS or build mysql ourselves on high-performance machines. 4 | 5 | For dev and test environments, however, we can absolutely deploy mysql into each k8s cluster; it is a big efficiency boost and also builds up k8s experience. 6 | 7 | # (2). exec order 8 | 9 | ``` 10 | kubectl apply -f mysql-min-storageclass-local.yaml 11 | kubectl apply -f mysql-min-pv-local.yaml 12 | kubectl apply -f mysql-min-pvc.yaml 13 | kubectl apply -f mysql-min-secret.yaml 14 | kubectl apply -f mysql-min-deployment.yaml 15 | kubectl apply -f mysql-min-service.yaml 16 | ``` 17 | 18 | If you use NFS storage, use mysql-min-pv-nfs.yaml instead of mysql-min-pv-local.yaml, and remember to change the storageClass in the PVC to the nfs identifier (no functional effect, just cleaner naming): 19 | 20 | kubectl apply -f mysql-min-pv-nfs.yaml 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nacos/create-namespace.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace nacos 2 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nacos/ingress-nginx-nacos.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | #apiVersion: v1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | #nginx.ingress.kubernetes.io/ssl-redirect: "false" 7 | #nginx.ingress.kubernetes.io/rewrite-target: / 8 | labels: 9 | app: nacos-headless 10 | name: nacos 11 | namespace: nacos 12 | spec: 13 | rules: 14 | - host: nacos.future.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: nacos-headless 19 | servicePort: 8848 20 | path: / 21 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nexus/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f repo-nexus-ns.yaml 2 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/dev.apollo-config.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name dev.apollo-config.future.com; 4 | 5 | location / { 6 | proxy_pass http://dev.apollo-config; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For
$proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/dev.apollo-config.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/dev.apollo-portal.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name dev.apollo-portal.future.com; 4 | 5 | location / { 6 | proxy_pass http://dev.apollo-portal; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/dev.apollo-portal.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/es-min-admin.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name es-min-admin.future.com; 4 | 5 | location / { 6 | proxy_pass http://es-min-admin; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/es-min-admin.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/es-min-kibana.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name es-min-kibana.future.com; 4 | 5 | location / { 6 | proxy_pass http://es-min-kibana; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/es-min-kibana.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/gitlab.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name gitlab.future.com; 4 | 5 | location / { 6 | proxy_pass http://gitlab; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/gitlab.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/grafana-k8s.future.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name grafana-k8s.future.com; 4 | 5 | location / { 6 | proxy_pass http://grafana-k8s; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/grafana-k8s.future.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/jenkins.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name jenkins.future.com; 4 | 5 | location / { 6 | proxy_pass http://jenkins; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For 
$proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/jenkins.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/pro-rocketmq-min-c0.console.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name pro-rocketmq-min-c0.console.future.com; 4 | 5 | location / { 6 | proxy_pass http://pro-rocketmq-min-c0.console; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/pro-rocketmq-min-c0.console.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/prometheus-app.future.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name prometheus-app.future.com; 4 | 5 | location / { 6 | proxy_pass http://prometheus-app; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/prometheus-app.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/prometheus-k8s.future.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name prometheus-k8s.future.com; 4 | 5 | location / { 6 | proxy_pass http://prometheus-k8s; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/prometheus-k8s.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/repo-nexus.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name repo-nexus.future.com; 4 | 5 | location / { 6 | proxy_pass http://repo-nexus; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/repo-nexus.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/skywalking-ui.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name skywalking-ui.future.com; 4 | 5 | location / { 6 | proxy_pass http://skywalking-ui; 7 | proxy_set_header Host $host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/skywalking-ui.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/nginx/conf.d/wayne.future.com.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name wayne.future.com; 4 | 5 | location / { 6 | proxy_pass http://wayne; 7 | proxy_set_header Host $host; 8 | proxy_set_header 
X-Real-IP $remote_addr; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | } 11 | 12 | access_log /data/logs/nginx/wayne.future.com.log access; 13 | } 14 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/openresty-min/openresty-storageclass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: StorageClass 3 | apiVersion: storage.k8s.io/v1 4 | metadata: 5 | name: openresty-lualib-storageclass-local 6 | #Specify the provisioner of the storage class, e.g. aws, nfs, etc.; see the official docs for the exact values. 7 | #Every StorageClass has a provisioner field that determines which volume plugin the PV uses; it must be set. 8 | #Since this demo uses local storage, it is set to kubernetes.io/no-provisioner. 9 | provisioner: kubernetes.io/no-provisioner 10 | #volumeBindingMode: WaitForFirstConsumer delays PVC binding until a pod is scheduled. 11 | volumeBindingMode: WaitForFirstConsumer 12 | --- 13 | kind: StorageClass 14 | apiVersion: storage.k8s.io/v1 15 | metadata: 16 | name: openresty-subconf-storageclass-local 17 | #Specify the provisioner of the storage class, e.g. aws, nfs, etc.; see the official docs for the exact values. 18 | #Every StorageClass has a provisioner field that determines which volume plugin the PV uses; it must be set. 19 | #Since this demo uses local storage, it is set to kubernetes.io/no-provisioner. 20 | provisioner: kubernetes.io/no-provisioner 21 | #volumeBindingMode: WaitForFirstConsumer delays PVC binding until a pod is scheduled. 22 | volumeBindingMode: WaitForFirstConsumer 23 | 24 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-1/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f redis-cluster-min-namespace.yaml 2 | kubectl create configmap redis-conf --namespace=redis-cluster-min --from-file=redis.conf 3 | 4 | kubectl apply -f redis-cluster-min-service.yaml 5 | #kubectl apply -f redis-cluster-min-secret.yaml 6 | kubectl apply -f redis-cluster-min-pv-local.yaml 7 | kubectl apply -f redis-cluster-min-statefulset.yaml 8 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-1/readme.MD: -------------------------------------------------------------------------------- 1 | kubectl run -i --tty centos --image=centos --restart=Never /bin/bash 2 | 3 | yum install centos-release-scl-rh -y 4 | yum install rh-ruby23 -y 5 | scl enable rh-ruby23 bash 6 | ruby -v 7 | 8 | gem install redis 9 | 10 | wget https://github.com/antirez/redis/archive/5.0.5.tar.gz 11 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-1/redis-cluster-min-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: redis-cluster-min 5 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-1/redis-cluster-min-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: redis-service 6 | namespace: redis-cluster-min 7 | labels: 8 | app: redis 9 | spec: 10 | ports: 11 | - name: redis-port 12 | port: 6379 13 | clusterIP: None 14 | selector: 15 | app: redis 16 | appCluster: redis-cluster 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-1/redis.conf: -------------------------------------------------------------------------------- 1 | #Enable Redis AOF persistence 2 | appendonly yes 3 | #Enable cluster mode 4 | cluster-enabled yes 5 | cluster-config-file
/var/lib/redis/nodes.conf 6 | #Node timeout in milliseconds 7 | cluster-node-timeout 5000 8 | #Directory where the AOF persistence files live 9 | dir /var/lib/redis 10 | #Listening port 11 | port 6379 12 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-2/readme.MD: -------------------------------------------------------------------------------- 1 | kubectl run -i --tty centos --image=centos --restart=Never /bin/bash 2 | 3 | yum install centos-release-scl-rh -y 4 | yum install rh-ruby23 -y 5 | scl enable rh-ruby23 bash 6 | ruby -v 7 | 8 | gem install redis 9 | 10 | wget https://github.com/antirez/redis/archive/5.0.5.tar.gz 11 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-2/redis-cluster-min-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: redis-cluster-min 5 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-2/redis-cluster-min-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | redis-password: cW5LSDFPQU5FZw== 4 | kind: Secret 5 | metadata: 6 | labels: 7 | app: redis 8 | release: redis-cluster-min 9 | name: redis-cluster-min 10 | namespace: redis-cluster-min 11 | type: Opaque 12 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-3/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f redis-cluster-min-namespace.yaml 2 | kubectl create configmap redis-conf --namespace=redis-cluster-min --from-file=redis.conf 3 | 4 | kubectl apply -f redis-cluster-min-service.yaml 5 | #kubectl apply -f redis-cluster-min-secret.yaml 6 | kubectl apply -f redis-cluster-min-pv-local.yaml 7 | kubectl apply -f redis-cluster-min-statefulset.yaml 8 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-3/readme.MD: -------------------------------------------------------------------------------- 1 | redis-cli --cluster-replicas 1 --cluster create 10.244.0.23:6379 10.244.0.37:6379 10.244.0.39:6379 10.244.0.40:6379 10.244.0.41:6379 10.244.0.42:6379 2 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-3/redis-cluster-min-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: redis-cluster-min 5 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-3/redis-cluster-min-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: redis-service 6 | namespace: redis-cluster-min 7 | labels: 8 | app: redis 9 | spec: 10 | ports: 11 | - name: redis-port 12 | port: 6379 13 | clusterIP: None 14 | selector: 15 | app: redis 16 | appCluster: redis-cluster 17 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/backup-3/redis.conf:
-------------------------------------------------------------------------------- 1 | #Enable Redis AOF persistence 2 | appendonly yes 3 | #Enable cluster mode 4 | cluster-enabled yes 5 | cluster-config-file /var/lib/redis/nodes.conf 6 | #Node timeout in milliseconds 7 | cluster-node-timeout 5000 8 | #Directory where the AOF persistence files live 9 | dir /var/lib/redis 10 | #Listening port 11 | port 6379 12 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl create namespace redis-cluster-min 2 | kubectl apply -f redis-configmap.yaml 3 | kubectl apply -f redis-service.yaml 4 | kubectl apply -f redis-statefulset.yaml 5 | kubectl apply -f redis-pv-local.yaml 6 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/readme.MD: -------------------------------------------------------------------------------- 1 | For detailed usage, see the article: 2 | 3 | kubernetes-20: containerizing redis-cluster (kubernetes-20:redis-cluster容器化) 4 | 5 | https://mp.weixin.qq.com/s?__biz=Mzg4MDEzMDM4MA==&mid=2247484309&idx=1&sn=ba82ec8a81fe74782c143a5b25ec5684&chksm=cf78a3b6f80f2aa0c0e27e0eeddf01bce9126b0c943484d05f35c8427e361ae797497d89b733&token=358824879&lang=zh_CN#rd 6 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/redis-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | namespace: redis-cluster-min 6 | name: redis-hkc 7 | labels: 8 | app: redis-hkc 9 | data: 10 | fix-ip.sh: | 11 | #!/bin/sh 12 | CLUSTER_CONFIG="/data/nodes.conf" 13 | if [ -f ${CLUSTER_CONFIG} ]; then 14 | if [ -z "${POD_IP}" ]; then 15 | echo "Unable to determine Pod IP address!" 16 | exit 1 17 | fi 18 | echo "Updating my IP to ${POD_IP} in ${CLUSTER_CONFIG}" 19 | sed -i.bak -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" ${CLUSTER_CONFIG} #rewrite this node's IP in nodes.conf, because the pod IP changes every time the pod restarts. 20 | fi 21 | exec "$@" 22 | redis.conf: |+ 23 | port 9720 24 | maxmemory 1000mb 25 | cluster-enabled yes 26 | cluster-require-full-coverage no 27 | cluster-node-timeout 15000 28 | cluster-config-file /data/nodes.conf 29 | cluster-migration-barrier 1 30 | appendonly yes 31 | protected-mode no 32 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-cluster-min/redis-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | namespace: redis-cluster-min 6 | name: redis-hkc 7 | labels: 8 | app: redis-hkc 9 | spec: 10 | ports: 11 | - port: 9720 12 | targetPort: 9720 13 | name: client 14 | - port: 19720 15 | targetPort: 19720 16 | name: gossip 17 | clusterIP: None #headless service 18 | selector: 19 | app: redis-hkc 20 | -------------------------------------------------------------------------------- /yaml/min-cluster-allinone/redis-ha-min/deploy.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f .
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/redis-ha-min/deploy.sh:
--------------------------------------------------------------------------------
kubectl apply -f .
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/redis-ha-min/redis-ha-min-namespace.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Namespace
metadata:
  name: redis-ha-min
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/redis-ha-min/redis-ha-min-serviceAccount.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: redis-ha-min
  name: redis-ha-min
  namespace: redis-ha-min
#secrets:
#- name: redis-ha-min-token-kjqqm
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/rocketmq-min/deploy.sh:
--------------------------------------------------------------------------------
kubectl create namespace rocketmq-min
kubectl apply -f rocketmq-min-c0-namesrv-prod.yaml
kubectl apply -f rocketmq-min-c0-broker-master0-pv-local.yaml
kubectl apply -f rocketmq-min-c0-broker-master-prod.yaml
kubectl apply -f rocketmq-min-c0-broker-slave0-pv-local.yaml
kubectl apply -f rocketmq-min-c0-broker-slave-prod.yaml
kubectl apply -f rocketmq-min-c0-console-ng-prod.yaml
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/rocketmq-min/readme.md:
--------------------------------------------------------------------------------
kubectl create secret docker-registry hpy --docker-server= --docker-username= --docker-password= --docker-email= -n rocketmq

(1). Containerize rocketmq

1. Create the PV storage
kubectl apply -f pv-rocketmq-min-c0-broker-master.yaml
kubectl apply -f pv-rocketmq-min-c0-broker-slave.yaml

2. Containerize rocketmq
kubectl apply -f rocketmq-min-c0-namesrv-prod.yaml
kubectl apply -f rocketmq-min-c0-broker-master-prod.yaml
kubectl apply -f rocketmq-min-c0-broker-slave-prod.yaml

3. Containerize rocketmq-console
kubectl apply -f rocketmq-min-c0-console-ng-prod.yaml

(2). Proxy rocketmq-console through ingress

kubectl apply -f ingress-rocketmq-min-c0-console.yaml
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/rocketmq-min/rocketmq-min-c0-broker-master0-pv-local.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: PersistentVolume
metadata:
  name: rocketmq-min-c0-broker-master0-pv-local
  namespace: rocketmq-min
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  storageClassName: "rocketmq-min-c0-broker-master"
  persistentVolumeReclaimPolicy: Retain
  # Use local storage
  local:
    path: /datavip/k8s-data/rocketmq-min-c0-broker-master0-pv-local
  # A local PV must define nodeAffinity; the Kubernetes scheduler uses it to place pods
  # on the node that actually holds the local volume.
  # Before creating a local PV, make sure the corresponding StorageClass already exists.
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          # "future" is the hostname the pod must be scheduled to; this host provides the local-pv path.
          - future
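The comments above require the StorageClass to exist before the PV is created; a minimal sketch for the two class names these PVs reference, assuming the standard local-volume setup (no dynamic provisioner, bind on first consumer):

kubectl apply -f - <<'EOF'
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rocketmq-min-c0-broker-master
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rocketmq-min-c0-broker-slave
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF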
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/rocketmq-min/rocketmq-min-c0-broker-slave0-pv-local.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: PersistentVolume
metadata:
  name: rocketmq-min-c0-broker-slave0-pv-local
  namespace: rocketmq-min
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  storageClassName: "rocketmq-min-c0-broker-slave"
  persistentVolumeReclaimPolicy: Retain
  # Use local storage
  local:
    path: /datavip/k8s-data/rocketmq-min-c0-broker-slave0-pv-local
  # A local PV must define nodeAffinity; the Kubernetes scheduler uses it to place pods
  # on the node that actually holds the local volume.
  # Before creating a local PV, make sure the corresponding StorageClass already exists.
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          # "future" is the hostname the pod must be scheduled to; this host provides the local-pv path.
          - future
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/sentinel-dashboard/deploy.sh:
--------------------------------------------------------------------------------
kubectl create namespace sentinel
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/sentinel-dashboard/sentinel-dashboard-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    #nginx.ingress.kubernetes.io/ssl-redirect: "false"
    #nginx.ingress.kubernetes.io/rewrite-target: /
  labels:
    app: sentinel-dashboard
  name: sentinel-dashboard
  namespace: sentinel
spec:
  rules:
  - host: sentinel-dashboard.future.com
    http:
      paths:
      - backend:
          serviceName: sentinel-dashboard
          servicePort: 80
        path: /
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/sentinel-dashboard/sentinel-dashboard-service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: sentinel-dashboard
  labels:
    app: sentinel-dashboard
  namespace: sentinel
spec:
  selector:
    app: sentinel-dashboard
  ports:
  - protocol: TCP
    targetPort: 8080
    port: 80
    name: dashboard
  - protocol: TCP
    targetPort: 8719
    port: 8719
    name: api
  type: ClusterIP
  sessionAffinity: ClientIP
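The extensions/v1beta1 Ingress API above was removed in Kubernetes 1.22; for newer clusters, a hedged equivalent on networking.k8s.io/v1 (same host, service, and port as the files above):

kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: sentinel-dashboard
  namespace: sentinel
  labels:
    app: sentinel-dashboard
spec:
  rules:
  - host: sentinel-dashboard.future.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: sentinel-dashboard
            port:
              number: 80
EOF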
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/skywalking-min/deploy.sh:
--------------------------------------------------------------------------------
kubectl apply -f .
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/skywalking-min/skywalking-min-oap-namespace.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Namespace
metadata:
  name: skywalking-min
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/skywalking-min/skywalking-min-oap-service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  labels:
    service: oap
  name: oap
  namespace: skywalking-min
spec:
  ports:
  - name: rest
    port: 12800
    protocol: TCP
    targetPort: 12800
  - name: grpc
    port: 11800
    protocol: TCP
    targetPort: 11800
  selector:
    app: oap
  sessionAffinity: None
  type: ClusterIP
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/skywalking-min/skywalking-min-oap-serviceaccount.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: skywalking-oap
  namespace: skywalking-min
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/skywalking-min/skywalking-min-ui-service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  labels:
    choerodon.io/infra: skywalking-ui
    choerodon.io/release: skywalking-ui
  name: skywalking-ui
  namespace: skywalking-min
spec:
  ports:
  - name: http
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    choerodon.io/infra: skywalking-ui
    choerodon.io/release: skywalking-ui
  sessionAffinity: None
  type: ClusterIP
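Applications report traces to the oap service's gRPC port (11800 above). A sketch of pointing a SkyWalking Java agent at it from inside the cluster; the agent jar path and app jar here are hypothetical:

# collector.backend_service is the SkyWalking agent's backend address setting
java -javaagent:/skywalking/agent/skywalking-agent.jar \
  -Dskywalking.collector.backend_service=oap.skywalking-min.svc.cluster.local:11800 \
  -jar app.jar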
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/tekton/dashboard/make-and-push-private-image.sh:
--------------------------------------------------------------------------------
docker pull gcr.io/tekton-releases/github.com/tektoncd/dashboard/cmd/dashboard
docker tag gcr.io/tekton-releases/github.com/tektoncd/dashboard/cmd/dashboard hpy253215039/tektoncd-dashboard-cmd-dashboard
docker push hpy253215039/tektoncd-dashboard-cmd-dashboard
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/tekton/dashboard/tekton-dashboard-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tekton-dashboard
  namespace: tekton-pipelines
  labels:
    app: tekton-dashboard
spec:
  tls: []
  rules:
  - host: tekton-dashboard
    http:
      paths:
      - backend:
          serviceName: tekton-dashboard
          servicePort: 9097
        path: /
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/tekton/pipeline/pull-docker-image.sh:
--------------------------------------------------------------------------------
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/controller:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/kubeconfigwriter:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/entrypoint:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/nop:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/imagedigestexporter:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/pullrequest-init:v0.20.1
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/vendor/github.com/googlecloudplatform/cloud-builders/gcs-fetcher/cmd/gcs-fetcher:v0.20.1
docker pull gcr.io/google.com/cloudsdktool/cloud-sdk
docker pull gcr.io/distroless/base
docker pull gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/webhook:v0.20.1
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/tekton/pipeline/pull-from-hub.sh:
--------------------------------------------------------------------------------
docker pull hpy253215039/tektoncd-pipeline-cmd-controller:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-cmd-kubeconfigwriter:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-cmd-git-init:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-cmd-entrypoint:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-cmd-nop:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-cmd-imagedigestexporter:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-cmd-pullrequest-init:v0.20.1
docker pull hpy253215039/tektoncd-pipeline-vendor-googlecloudplatform-cloud-builders-gcs-fetcher:v0.20.1
docker pull hpy253215039/google-cloudsdktool-cloud-sdk
docker pull hpy253215039/distroless-base
docker pull hpy253215039/tektoncd-pipeline-cmd-webhook:v0.20.1
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/tekton/pipeline/push-to-hub.sh:
--------------------------------------------------------------------------------
docker push hpy253215039/tektoncd-pipeline-cmd-controller:v0.20.1
docker push hpy253215039/tektoncd-pipeline-cmd-kubeconfigwriter:v0.20.1
docker push hpy253215039/tektoncd-pipeline-cmd-git-init:v0.20.1
docker push hpy253215039/tektoncd-pipeline-cmd-entrypoint:v0.20.1
docker push hpy253215039/tektoncd-pipeline-cmd-nop:v0.20.1
docker push hpy253215039/tektoncd-pipeline-cmd-imagedigestexporter:v0.20.1
docker push hpy253215039/tektoncd-pipeline-cmd-pullrequest-init:v0.20.1
docker push hpy253215039/tektoncd-pipeline-vendor-googlecloudplatform-cloud-builders-gcs-fetcher:v0.20.1
docker push hpy253215039/google-cloudsdktool-cloud-sdk
docker push hpy253215039/distroless-base
docker push hpy253215039/tektoncd-pipeline-cmd-webhook:v0.20.1
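The pull, retag, and push lists above repeat one image set; a sketch consolidating the eight cmd/* images into a single mirror loop (gcs-fetcher, cloud-sdk, and distroless/base do not fit the naming pattern and still need the individual commands above):

# Mirror each Tekton pipeline cmd image: pull from gcr.io, retag, push to Docker Hub.
for img in controller kubeconfigwriter git-init entrypoint nop imagedigestexporter pullrequest-init webhook; do
  src=gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/${img}:v0.20.1
  dst=hpy253215039/tektoncd-pipeline-cmd-${img}:v0.20.1
  docker pull ${src}
  docker tag ${src} ${dst}
  docker push ${dst}
done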
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/traefik/create-namespace.sh:
--------------------------------------------------------------------------------
kubectl create namespace cattle-system
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/traefik/traefik-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    # nginx.ingress.kubernetes.io/ssl-redirect: "false"
    # nginx.ingress.kubernetes.io/rewrite-target: /
    # kubernetes.io/ingress.class: biz
  #labels:
    #app: kibana
  name: traefik-admin
  namespace: kube-system
spec:
  rules:
  - host: traefik-admin.future.com
    http:
      paths:
      - backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
        path: /
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/traefik/traefik-tlsoption.yaml:
--------------------------------------------------------------------------------
apiVersion: traefik.containo.us/v1alpha1
kind: TLSOption
metadata:
  name: mytlsoption
  namespace: kube-system

spec:
  minVersion: VersionTLS12
  sniStrict: true
  cipherSuites:
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  - TLS_RSA_WITH_AES_256_GCM_SHA384
  - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
  - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
  - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
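A TLSOption takes effect only when a router references it; a hedged IngressRoute sketch wiring mytlsoption to the dashboard service used by traefik-ingress.yaml above (the websecure entry point is an assumption about the Traefik static configuration):

kubectl apply -f - <<'EOF'
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-admin-secure
  namespace: kube-system
spec:
  entryPoints:
  - websecure
  routes:
  - match: Host(`traefik-admin.future.com`)
    kind: Rule
    services:
    - name: traefik-ingress-service
      port: 8080
  tls:
    options:
      name: mytlsoption
EOF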
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/traefik/traefik-traefikservices.yaml:
--------------------------------------------------------------------------------
apiVersion: traefik.containo.us/v1alpha1
kind: TraefikService
metadata:
  name: traefik-webui-traefikservice
  namespace: kube-system

spec:
  weighted:
    services:
    - name: traefik-ingress-service
      weight: 1
      port: 8080

---
apiVersion: traefik.containo.us/v1alpha1
kind: TraefikService
metadata:
  name: rancher-traefikservice
  namespace: cattle-system

spec:
  weighted:
    services:
    - name: rancher
      weight: 1
      port: 80
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/wayne-min/deploy-dependency.sh:
--------------------------------------------------------------------------------
kubectl create namespace wayne
kubectl apply -f rabbitmq.yaml
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/wayne-min/deploy.sh:
--------------------------------------------------------------------------------
kubectl create namespace wayne
kubectl apply -f configmap.yaml
kubectl apply -f service.yaml
kubectl apply -f deployment.yaml
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/wayne-min/rabbitmq.yaml:
--------------------------------------------------------------------------------
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: rabbitmq-wayne
  namespace: wayne
  labels:
    app: rabbitmq-wayne
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rabbitmq-wayne
  template:
    metadata:
      labels:
        app: rabbitmq-wayne
    spec:
      containers:
      - name: rabbitmq
        image: 'rabbitmq:3.7.8-management'
        resources:
          limits:
            cpu: '0.1'
            memory: 1Gi
          requests:
            cpu: '0.1'
            memory: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rabbitmq-wayne
  name: rabbitmq-wayne
  namespace: wayne
spec:
  ports:
  - port: 5672
    protocol: TCP
    targetPort: 5672
  selector:
    app: rabbitmq-wayne
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/wayne-min/service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  labels:
    app: infra-wayne
  name: infra-wayne
  namespace: wayne
spec:
  type: NodePort
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: infra-wayne
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/zookeeper-min/deploy.sh:
--------------------------------------------------------------------------------
kubectl apply -f zookeeper-min-pv.yaml
kubectl apply -f zookeeper-min-statefulset.yaml

#kubectl delete pv zookeeper-min-data0-pv-local zookeeper-min-data1-pv-local zookeeper-min-data2-pv-local
--------------------------------------------------------------------------------
/yaml/min-cluster-allinone/zookeeper-min/readme.MD:
--------------------------------------------------------------------------------
References:
1. Running ZooKeeper, A Distributed System Coordinator
https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/
2. Running ZooKeeper, a CP distributed system (Chinese translation of the above)
https://kubernetes.io/zh/docs/tutorials/stateful-application/zookeeper/

Key reference article:
https://blog.csdn.net/ywq935/article/details/81748273
--------------------------------------------------------------------------------
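Following the tutorial linked above, the ensemble can be verified per pod once the StatefulSet is running; a sketch, assuming three replicas whose pod names follow zookeeper-min-statefulset.yaml (the names here are hypothetical):

# Ask each ZooKeeper server whether it is a leader or follower (pod names assumed)
for i in 0 1 2; do
  kubectl exec zookeeper-min-${i} -- zkServer.sh status
done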