├── CICD
├── EFK
│ ├── README.md
│ ├── es-service.yaml
│ ├── es-statefulset.yaml
│ ├── fluentd-es-configmap.yaml
│ ├── fluentd-es-ds.yaml
│ ├── kibana-deployment.yaml
│ └── kibana-service.yaml
├── README.md
├── elk
│ ├── README.md
│ ├── elasticsearch.yaml
│ ├── kibana.yaml
│ └── log-pilot.yaml
├── gitlab
│ ├── LICENSE
│ ├── README.md
│ ├── gitlab-ns.yaml
│ ├── gitlab-runner
│ │ ├── gitlab-runner-docker-configmap.yaml
│ │ └── gitlab-runner-docker-deployment.yaml
│ ├── gitlab
│ │ ├── gitlab-config-storage.yaml
│ │ ├── gitlab-deployment.yaml
│ │ ├── gitlab-storage.yaml
│ │ ├── gitlab-svc-nodeport.yaml
│ │ ├── gitlab-svc.yaml
│ │ ├── postgresql-deployment.yaml
│ │ ├── postgresql-storage.yaml
│ │ ├── postgresql-svc.yaml
│ │ ├── redis-deployment.yaml
│ │ ├── redis-storage.yaml
│ │ ├── redis-svc.yaml
│ │ └── storage.yaml
│ ├── ingress
│ │ ├── configmap.yaml
│ │ ├── default-backend-deployment.yaml
│ │ ├── default-backend-svc.yaml
│ │ ├── gitlab-ingress.yaml
│ │ ├── nginx-ingress-lb.yaml
│ │ └── nginx-settings-configmap.yaml
│ └── minio
│ │ ├── minio-deployment.yaml
│ │ └── minio-svc.yaml
├── i4_prometheus
│ ├── node-exporter
│ │ ├── README.md
│ │ └── prometheus-node-exporter.yaml
│ └── prometeheus
│ │ ├── README.md
│ │ ├── prometeheus-svc.yaml
│ │ ├── prometheus-rbac.yaml
│ │ ├── prometheus-volume.yaml
│ │ ├── prometheus.configmap.yaml
│ │ └── prometheus.deploy.yaml
├── istio
│ ├── deploy.md
│ └── helm部署istio.md
├── istio安装.md
├── jenkins.yaml
│ ├── README.md
│ ├── jenkins.yaml
│ ├── jenkins_svc.yaml
│ ├── jenkins_volume.yaml
│ └── rbac.yaml
├── jenkins
│ ├── README.md
│ ├── jenkins.yaml
│ ├── namespace
│ │ ├── build-ns.yml
│ │ ├── jenkins.yml
│ │ ├── my-k8s-job-ns.groovy
│ │ ├── my-k8s-job-yaml.groovy
│ │ ├── prod-ns.yml
│ │ └── test-k8s-job.groovy
│ └── nfs-pv.yaml
├── jenkins_CICD
│ ├── 1、Jenkins Slave.md.md
│ ├── 2、Jenkins Pipeline.md
│ └── 3、Jenkins BlueOcean.md
├── kafka
│ ├── README.md
│ ├── kafka-manager.yaml
│ ├── kafka.yaml
│ └── zookeeper.yaml
├── pinpoint
│ └── README.md
├── prometheus
│ ├── README.md
│ ├── alertmanager-config-configmap.yaml
│ ├── alertmanager-deploy.yaml
│ ├── alertmanager-service.yaml
│ ├── alertmanager-templates-configmap.yaml
│ ├── grafana-deploy.yaml
│ ├── grafana-ingress.yaml
│ ├── grafana-net-2-dashboard-batch.yaml
│ ├── grafana-net-2-dashboard-configmap.yaml
│ ├── grafana-service.yaml
│ ├── kube-state-metrics-ServiceAccount.yaml
│ ├── kube-state-metrics-deploy.yaml
│ ├── kube-state-metrics-service.yaml
│ ├── monitor-node-disk-daemonset.yaml
│ ├── node-exporter-daemonset.yaml
│ ├── node-exporter-service.yaml
│ ├── prometheus-config-configmap.yaml
│ ├── prometheus-deploy.yaml
│ ├── prometheus-k8s-ServiceAccount.yaml
│ ├── prometheus-ns.yaml
│ ├── prometheus-rules-configmap.yaml
│ ├── prometheus-secret.yaml
│ └── prometheus-service.yaml
├── tekton
│ └── readme.md
├── 其他
├── 各种方式部署prometheus
│ ├── README.md
│ ├── helm部署promtheus.md
│ ├── 二进制部署prometheus监控k8s.md
│ ├── 定制化grafana.md
│ ├── 自定义 Prometheus Operator 监控项.md
│ └── 解决prometheus部署报错问题.md
├── 基于 K8S 构建企业 Jenkins CICD 平台.md
└── 部署本地的jenkins的cicd.md
├── Docker和K8s 解决容器内时区不一致方案.md
├── Docker的内核知识.md
├── K8S Pod Eviction 机制.md
├── K8S多集群管理.md
├── K8S集群删除与添加节点.md
├── Kubernetes 映射外部服务到集群内部的场景.md
├── Kubernetes 调度GPU.md
├── Kubernetes之etcd操作.md
├── LXCFS.md
├── Pod跨namespace名称空间访问Service服务.md
├── RBAC
├── RBAC.md
├── RBAC权限管理.md
└── readme.md
├── README.md
├── Traefik.md
├── Velero安装.md
├── Zabbix通过api监控k8s
├── README.md
├── get_k8s
├── get_k8s.py
├── k8s模板.xml
└── k8s监控精简版.pdf
├── calico
├── README.md
├── allow-all-ingress.yaml
├── deny-all-egress.yaml
├── deny-all-ingress.yaml
├── egress-allow-some-destinations.yaml
├── image
│ ├── BGP1.png
│ ├── calico BGP实现.png
│ ├── calico2.png
│ └── calico3.png
├── ingress-allow-someclients.yaml
├── myapp-allow.yaml
├── namespace-internal-traffic.yaml
├── nginx-allow-all.yaml
├── ns-and-pods.yaml
├── testing-netpol-denyall.yaml
└── 安装使用.md
├── cert-manager.md
├── configmap.md
├── curl访问api.md
├── dashboard.md
├── deploy
├── CentOS7+docker+k8s(v1.15.3) 通过kubeadm部署.pdf
├── Rancher管理集群.md
├── haproxy高可用.md
├── image
│ ├── README.md
│ ├── kube4.png
│ ├── kube5.png
│ ├── kube6.png
│ ├── kuber1.png
│ ├── kuber2.png
│ └── kuber3.png
├── kubeadm 下载脚本
├── kubeadm部署kubernetes v1.14.1高可用集群.md
├── kubeasz
├── kubernetes 二进制安装.md
├── kubernetes1.13.1+etcd3.3.10+flanneld0.10集群部署.md
├── kubespray.md
├── kubespray安装kubernetes.md
├── nginx高可用.md
├── readme.md
├── 生产环境Kubernetes1.13.6集群部署.md
├── 纯手动部署二进制
├── 高可用k8s_kubeadm部署.md
└── 高可用k8s_二进制部署.md
├── etcd实践操作记录.md
├── etcd集群备份与恢复.md
├── flannel.md
├── helm
├── Helm部署文档.md
├── helm v2迁移到v3.md
├── helm2.0.md
├── helm3.0.md
├── helm安装和回滚.md
├── helm模板内置函数.md
├── jenkins
│ └── 自定义values部署jenkins
│ │ ├── README.md
│ │ ├── jenkins-values.yaml
│ │ ├── my-k8s-job-yaml.groovy
│ │ └── 垮名称空间
│ │ ├── build-ns.yml
│ │ ├── my-k8s-job-ns.groovy
│ │ └── prod-ns.yml
└── 自定义chart仓库.md
├── host文件的域名解析.md
├── hpa.md
├── ingress-nginx
├── Ingress-nginx获取真实客户端IP.md
├── README.md
├── ingress-myapp.yaml
├── mandatory.yaml
├── myapp.yaml
├── service-nodeport.yaml
└── tomcat
│ ├── README.md
│ ├── ingress-tomcat-tls.yaml
│ ├── ingress-tomcat.yaml
│ └── tomcat.yaml
├── ingress
├── README.md
├── canary
│ ├── ingress-common.yaml
│ ├── ingress-compose.yaml
│ ├── ingress-cookie.yaml
│ ├── ingress-header.yaml
│ ├── ingress-weight.yaml
│ ├── web-canary-a.yaml
│ └── web-canary-b.yaml
├── custom-header-global.yaml
├── custom-header-spec-ingress.yaml
├── ingress-session.yaml
├── nginx-config.yaml
├── tcp-config.yaml
└── tls
│ ├── gen-secret.sh
│ └── web-ingress.yaml
├── istio
├── Istio介绍
│ ├── 1. istio是什么.md
│ └── 2. istio的架构.md
├── istio的部署
│ ├── 1. istio在k8s中部署.md
│ └── 2. 应用案例部署.md
├── readme.md
├── uploads
│ ├── istio
│ │ └── images
│ │ │ ├── m_002a23141689031438a2bf3b79bdd57e_r.png
│ │ │ ├── m_04dadc0d0eb36cffb2011e3432f61b85_r.png
│ │ │ ├── m_0501058aa69fa3d40bb60d4544066105_r.png
│ │ │ ├── m_0986034bf92f784777699a208c5e0e39_r.png
│ │ │ ├── m_0a582cc549d38794501aca3a8463416a_r.png
│ │ │ ├── m_0af8d80faa215cd2200bdd1d2c0bfd00_r.png
│ │ │ ├── m_0be04515e35d04e35dd8c2bdd9cf3238_r.png
│ │ │ ├── m_0ce4e5e7e4dacb41c961df97cdfbdd26_r.png
│ │ │ ├── m_0f60886b3ea392e40b2bdf2d73bf1e99_r.png
│ │ │ ├── m_10b0928c098369a5a19f9f429e106861_r.png
│ │ │ ├── m_116505e375412bd0a19bb190c82ae06f_r.png
│ │ │ ├── m_14cf55cf52eb6492dc99c35589d356c0_r.png
│ │ │ ├── m_16b9eeffff8e7563035a9d9dca86f211_r.png
│ │ │ ├── m_1f81547fff73bbb8c6e59c8eeae41400_r.png
│ │ │ ├── m_22654d05023204d2d845806a2f9db49a_r.png
│ │ │ ├── m_2a4ab8dfbcc4507c815bc1e6115f3e6b_r.png
│ │ │ ├── m_2abe98f2f311391302fc893c467375cd_r.png
│ │ │ ├── m_38b960799d160d1dc64203ab7cb5dfd3_r.png
│ │ │ ├── m_3a206b313d8fb05bcbfab13fa2619079_r.png
│ │ │ ├── m_3caa8a514df15d157be71a4f8d15bbf1_r.png
│ │ │ ├── m_40118b60e5cc481b7370398eece3c960_r.png
│ │ │ ├── m_41aa0f362d38a30448324e257792539b_r.png
│ │ │ ├── m_448fccf6e01ac2632735c85dda6280ef_r.png
│ │ │ ├── m_488a67f580f9b85897b81e409690f61c_r.png
│ │ │ ├── m_5e5ec9cedc9fbcf6f72b25500222426c_r.png
│ │ │ ├── m_65bda16db28891547c45edcbe679a65e_r.png
│ │ │ ├── m_6716c81d0837294d630321803a742bdb_r.png
│ │ │ ├── m_67831fddf20acdd33b6e8062dfb01553_r.png
│ │ │ ├── m_697be29d1c4ddf999d7ff145bbd3ba7e_r.png
│ │ │ ├── m_6d949388e270a8d4de070696d235f8ab_r.png
│ │ │ ├── m_6eb7dd47854172edcf61dc942db77955_r.png
│ │ │ ├── m_7de4c1822f1178c2138303c3166fcdb3_r.png
│ │ │ ├── m_816f091cbc8b4c4b43a878d37d19eb46_r.png
│ │ │ ├── m_81e343eacd3d664259a85a00d4c14840_r.png
│ │ │ ├── m_93dd3360c6d8722572fac74c52d4ab8e_r.png
│ │ │ ├── m_99364e52c6af7a9b580dbd79f6d0ce05_r.png
│ │ │ ├── m_9e35f74168545a1c5cbee1aedc109b6a_r.png
│ │ │ ├── m_a3a04cdbb732c3278a949954e2229ef3_r.png
│ │ │ ├── m_b1d4054d60a2fae0cb290d94ad34733c_r.png
│ │ │ ├── m_b35a5e435ec3e294dd262bb96489ecd5_r.png
│ │ │ ├── m_b5e951e8e9a44f01967037f0ee2dbd11_r.png
│ │ │ ├── m_b65b9dda7c7cb761cc608329024dde6f_r.png
│ │ │ ├── m_b7217573ee7da48538ffb60ce25603e3_r.png
│ │ │ ├── m_b7290720de91626b9fecabdc04c0698f_r.png
│ │ │ ├── m_c6ed5d3bdf9f74dbec606ca193c57d89_r.png
│ │ │ ├── m_d33b4d30470394ebbf1c2d9ef3ce30eb_r.png
│ │ │ ├── m_d5e596750091f1d61c61d1ea6ca97fd9_r.png
│ │ │ ├── m_e4476b175dd201ba8ae4bf879fecfa45_r.png
│ │ │ ├── m_e6317a219391d7b6cbcac2a6ff97c081_r.png
│ │ │ ├── m_e7a9c485a2161eae50c7001d27e9b4f9_r.png
│ │ │ ├── m_f7ae9666322d68bc0af6ca222a1a90eb_r.png
│ │ │ ├── m_fd2508509cff3877a032f896cdc4f68a_r.png
│ │ │ └── m_fe293c28a82dec2b0b1bed9fea8cdc16_r.png
│ └── istioistio-1c57rplb7u6a1
│ │ └── images
│ │ ├── m_5609970a0e7667880f68ad5e30c6c86d_r.png
│ │ ├── m_5ab06b5b4c94f1edc950781a6daca29a_r.png
│ │ ├── m_c384791278a780157af5eab154c1e183_r.png
│ │ └── m_e81a1056aa7ae4424c10fcc965eb2d67_r.png
├── 安全
│ ├── 授权
│ │ ├── 1. 针对HTTP流量授权.md
│ │ ├── 2. 针对TCP流量的授权.md
│ │ ├── 3. 带有deny动作的授权策略.md
│ │ ├── 4. 在Ingress gateway上授权.md
│ │ └── 5. 授权策略信任域的迁移.md
│ ├── 认证
│ │ ├── 1. 认证原理篇.md
│ │ └── 2. 双向TLS迁移.md
│ └── 证书管理
│ │ ├── 1. 使用已存在的CA证书.md
│ │ ├── 2. Istio DNS证书管理
│ │ └── 2. Istio DNS证书管理.md
├── 服务网格概述
│ ├── 1. 服务网格历史.md
│ └── 2. 服务网格优势.md
├── 流量管理
│ ├── Egress gateway
│ │ ├── 1. 访问外部服务.md
│ │ ├── 2. 带有TLS源的出口.md
│ │ └── 3. 使用外部代理通信.md
│ ├── Ingress gateway
│ │ ├── 1. Ingress Gateway.md
│ │ ├── 2. Ingress 在k8s中的应用.md
│ │ ├── 3. 安全网关.md
│ │ └── 4. 不带TLS终结器的入口网关.md
│ ├── 流量管理原理
│ │ ├── 1. 虚拟服务.md
│ │ ├── 2. 目的规则.md
│ │ ├── 3. Gateway.md
│ │ ├── 4. 服务条目.md
│ │ ├── 5. sidecar.md
│ │ ├── 6. 网络可靠性和测试.md
│ │ └── 本章知识点.md
│ └── 流量管理案例篇
│ │ ├── 1. 请求的路由.md
│ │ ├── 2. 故障注入.md
│ │ ├── 3. 微服务金丝雀发布.md
│ │ ├── 4. TCP流量整形.md
│ │ ├── 5. 请求的超时.md
│ │ ├── 6. 断路器.md
│ │ └── 7. 流量的镜像.md
├── 观察和遥测
│ ├── 1. 从tcp服务收集度量.md
│ ├── 2. 从Prometheus查询度量.md
│ ├── 3. 从grafana中对度量可视化.md
│ ├── 4. 获得envoy的访问日志.md
│ ├── 5. Jager链路追踪.md
│ ├── 6. zipkin 链路追踪.md
│ ├── 7. 网格可视化.md
│ └── 8. 远程访问遥测组件.md
└── 配置详解
│ ├── DestinationRule详解.md
│ ├── ServiceEntry详解.md
│ ├── VirtualService资源详解.md
│ ├── gateway资源详解.md
│ └── readme.md
├── k8s命令自动补全.md
├── k8s添加role.md
├── k8s部署kafka.md
├── k8s集群GPU支持.md
├── kubeadm部署集群升级.md
├── kubectl.md
├── kubernetes yaml模板与示例.md
├── kubernetes之pause容器.md
├── label.md
├── metallb LoadBalancer.md
├── metrics-server.md
├── pod健康检查.md
├── pod升级回滚.md
├── pod污点和容忍度.md
├── pod的常见状态及生命周期.md
├── pod调度及亲和性.md
├── pod资源及控制器.md
├── resource限制.md
├── secret.md
├── service.md
├── volume
├── PV PVC.md
├── StorageClass
│ ├── K8S 使用Ceph存储.md
│ ├── K8S使用ceph-csi持久化存储之RBD.md
│ ├── local volume.md
│ ├── rbd.md
│ ├── readme.md
│ ├── 使用RBD作为StorageClass.md
│ ├── 使用glusterfs存储类创建pvc.md
│ └── 使用nfs作为动态供给.md
└── volume存储.md
├── 修改节点运行pod数.md
├── 基于Containerd实现Kubernetes-v1.25.0高可用集群.md
├── 基于Docker实现Kubernetes-v1.25.0高可用集群.md
├── 将pod运行在master节点.md
├── 故障处理.md
├── 服务发现
├── coreDNS安装.md
├── kubedns服务发现.md
├── kubedns部署.md
├── kubernetes官方部署coredns.md
├── 服务发现.md
└── 环境变量服务发现.md
└── 资料.md
/CICD/EFK/es-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: elasticsearch-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: elasticsearch-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | kubernetes.io/name: "Elasticsearch"
11 | spec:
12 | ports:
13 | - port: 9200
14 | protocol: TCP
15 | targetPort: db
16 | selector:
17 | k8s-app: elasticsearch-logging
18 |
--------------------------------------------------------------------------------
/CICD/EFK/fluentd-es-configmap.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: fluentd-es-config-v0.1.4
5 | namespace: kube-system
6 | labels:
7 | addonmanager.kubernetes.io/mode: Reconcile
8 | data:
9 |   system.conf: |-
10 |     <system>
11 |       root_dir /tmp/fluentd-buffers/
12 |     </system>
13 |   containers.input.conf: |-
14 |     <source>
15 |       @id fluentd-containers.log
16 |       @type tail
17 |       path /var/log/containers/*.log
18 |       pos_file /var/log/es-containers.log.pos
19 |       time_format %Y-%m-%dT%H:%M:%S.%NZ
20 |       tag raw.kubernetes.*
21 |       read_from_head true
22 |       <parse>
23 |         @type multi_format
24 |         <pattern>
25 |           format json
26 |           time_key time
27 |           time_format %Y-%m-%dT%H:%M:%S.%NZ
28 |         </pattern>
29 |         <pattern>
30 |           format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
31 |           time_format %Y-%m-%dT%H:%M:%S.%N%:z
32 |         </pattern>
33 |       </parse>
34 |     </source>
35 |     # Detect exceptions in the log output and forward them as one log entry.
36 |     <match raw.kubernetes.**>
37 |       @id raw.kubernetes
38 |       @type detect_exceptions
39 |       remove_tag_prefix raw
40 |       message log
41 |       stream stream
42 |       multiline_flush_interval 5
43 |       max_bytes 500000
44 |       max_lines 1000
45 |     </match>
46 |   output.conf: |-
47 |     # Enriches records with Kubernetes metadata
48 |     <filter kubernetes.**>
49 |       @type kubernetes_metadata
50 |     </filter>
51 |     <match **>
52 |       @id elasticsearch
53 |       @type elasticsearch
54 |       @log_level info
55 |       include_tag_key true
56 |       host elasticsearch-logging
57 |       port 9200
58 |       logstash_format true
59 |       <buffer>
60 |         @type file
61 |         path /var/log/fluentd-buffers/kubernetes.system.buffer
62 |         flush_mode interval
63 |         retry_type exponential_backoff
64 |         flush_thread_count 2
65 |         flush_interval 5s
66 |         retry_forever
67 |         retry_max_interval 30
68 |         chunk_limit_size 2M
69 |         queue_limit_length 8
70 |         overflow_action block
71 |       </buffer>
72 |     </match>
73 | 
--------------------------------------------------------------------------------
/CICD/EFK/kibana-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: kibana-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kibana-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | spec:
11 | replicas: 1
12 | selector:
13 | matchLabels:
14 | k8s-app: kibana-logging
15 | template:
16 | metadata:
17 | labels:
18 | k8s-app: kibana-logging
19 | annotations:
20 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
21 | spec:
22 | containers:
23 | - name: kibana-logging
24 | image: docker.elastic.co/kibana/kibana-oss:6.2.4
25 | resources:
26 | # need more cpu upon initialization, therefore burstable class
27 | limits:
28 | cpu: 1000m
29 | requests:
30 | cpu: 100m
31 | env:
32 | - name: ELASTICSEARCH_URL
33 | value: http://elasticsearch-logging:9200
34 | ports:
35 | - containerPort: 5601
36 | name: ui
37 | protocol: TCP
38 |
--------------------------------------------------------------------------------
/CICD/EFK/kibana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kibana-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kibana-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | kubernetes.io/name: "Kibana"
11 | spec:
12 | type: NodePort
13 | ports:
14 | - port: 5601
15 | protocol: TCP
16 | targetPort: ui
17 | selector:
18 | k8s-app: kibana-logging
19 |
--------------------------------------------------------------------------------
/CICD/README.md:
--------------------------------------------------------------------------------
1 | https://www.cnblogs.com/miaocunf/p/11694943.html
2 |
3 | https://www.yp14.cn/
4 |
5 | https://www.cnblogs.com/passzhang/
6 |
7 |
8 | https://www.cnblogs.com/yuhaohao/p/13292085.html
9 |
--------------------------------------------------------------------------------
/CICD/elk/README.md:
--------------------------------------------------------------------------------
1 | log-pilot official introduction
2 | GitHub: https://github.com/AliyunContainerService/log-pilot
3 | Official introduction: https://yq.aliyun.com/articles/674327
4 | Official setup guide: https://yq.aliyun.com/articles/674361?spm=a2c4e.11153940.0.0.21ae21c3mTKwWS
5 |
6 |
7 | Installation
8 | ```
9 | kubectl apply -f .
10 | # check elasticsearch
11 | kubectl get svc -n kube-system
12 | kubectl get statefulset -n kube-system
13 | # check log-pilot
14 | kubectl get ds -n kube-system
15 | # check kibana
16 | kubectl get deploy -n kube-system
17 |
18 | # add a hosts entry for the Ingress hostname, then access Kibana over HTTP
19 | ```
20 |
21 | Check the log-pilot logs
22 | ```
23 | docker ps | grep log-pilot
24 | docker logs -f 131b426829ac
25 | ```
26 |
--------------------------------------------------------------------------------
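A quick way to exercise the stack above (a minimal sketch, assuming log-pilot's documented `aliyun_logs_<name>` environment-variable convention; the pod name and index name `demo` are made up): declare log collection on an application pod and confirm the log-pilot DaemonSet is running.

```
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: demo
spec:
  containers:
  - name: demo
    image: nginx:1.19
    env:
    - name: aliyun_logs_demo      # log-pilot ships this container's stdout to Elasticsearch under the "demo" index
      value: "stdout"
EOF

# log-pilot pods (DaemonSet pod label from log-pilot.yaml)
kubectl -n kube-system get pods -l k8s-app=log-es -o wide
```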
/CICD/elk/kibana.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: kibana
6 | namespace: kube-system
7 | labels:
8 | component: kibana
9 | spec:
10 | selector:
11 | component: kibana
12 | ports:
13 | - name: http
14 | port: 80
15 | targetPort: http
16 | ---
17 | #ingress
18 | apiVersion: extensions/v1beta1
19 | kind: Ingress
20 | metadata:
21 | name: kibana
22 | namespace: kube-system
23 | spec:
24 | rules:
25 | - host: kibana.mooc.com
26 | http:
27 | paths:
28 | - path: /
29 | backend:
30 | serviceName: kibana
31 | servicePort: 80
32 | ---
33 | apiVersion: apps/v1beta1
34 | kind: Deployment
35 | metadata:
36 | name: kibana
37 | namespace: kube-system
38 | labels:
39 | component: kibana
40 | spec:
41 | replicas: 1
42 | selector:
43 | matchLabels:
44 | component: kibana
45 | template:
46 | metadata:
47 | labels:
48 | component: kibana
49 | spec:
50 | containers:
51 | - name: kibana
52 | image: registry.cn-hangzhou.aliyuncs.com/imooc/kibana:5.5.1
53 | env:
54 | - name: CLUSTER_NAME
55 | value: docker-cluster
56 | - name: ELASTICSEARCH_URL
57 | value: http://elasticsearch-api:9200/
58 | resources:
59 | limits:
60 | cpu: 1000m
61 | requests:
62 | cpu: 100m
63 | ports:
64 | - containerPort: 5601
65 | name: http
66 |
67 |
--------------------------------------------------------------------------------
/CICD/elk/log-pilot.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: DaemonSet
4 | metadata:
5 | name: log-pilot
6 | namespace: kube-system
7 | labels:
8 | k8s-app: log-pilot
9 | kubernetes.io/cluster-service: "true"
10 | spec:
11 | template:
12 | metadata:
13 | labels:
14 | k8s-app: log-es
15 | kubernetes.io/cluster-service: "true"
16 | version: v1.22
17 | spec:
18 | tolerations:
19 | - key: node-role.kubernetes.io/master
20 | effect: NoSchedule
21 | serviceAccountName: dashboard-admin
22 | containers:
23 | - name: log-pilot
24 | image: registry.cn-hangzhou.aliyuncs.com/imooc/log-pilot:0.9-filebeat
25 | resources:
26 | limits:
27 | memory: 200Mi
28 | requests:
29 | cpu: 100m
30 | memory: 200Mi
31 | env:
32 | - name: "FILEBEAT_OUTPUT"
33 | value: "elasticsearch"
34 | - name: "ELASTICSEARCH_HOST"
35 | value: "elasticsearch-api"
36 | - name: "ELASTICSEARCH_PORT"
37 | value: "9200"
38 | - name: "ELASTICSEARCH_USER"
39 | value: "elastic"
40 | - name: "ELASTICSEARCH_PASSWORD"
41 | value: "changeme"
42 | volumeMounts:
43 | - name: sock
44 | mountPath: /var/run/docker.sock
45 | - name: root
46 | mountPath: /host
47 | readOnly: true
48 | - name: varlib
49 | mountPath: /var/lib/filebeat
50 | - name: varlog
51 | mountPath: /var/log/filebeat
52 | securityContext:
53 | capabilities:
54 | add:
55 | - SYS_ADMIN
56 | terminationGracePeriodSeconds: 30
57 | volumes:
58 | - name: sock
59 | hostPath:
60 | path: /var/run/docker.sock
61 | - name: root
62 | hostPath:
63 | path: /
64 | - name: varlib
65 | hostPath:
66 | path: /var/lib/filebeat
67 | type: DirectoryOrCreate
68 | - name: varlog
69 | hostPath:
70 | path: /var/log/filebeat
71 | type: DirectoryOrCreate
72 |
73 |
--------------------------------------------------------------------------------
/CICD/gitlab/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016 Sergey Nuzhdin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab-ns.yaml:
--------------------------------------------------------------------------------
1 | kind: Namespace
2 | apiVersion: v1
3 | metadata:
4 | name: gitlab
5 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab-runner/gitlab-runner-docker-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: gitlab-runner-docker
5 | namespace: gitlab
6 | data:
7 | config.toml: |
8 | concurrent = 4
9 | check_interval = 1
10 |
11 | [[runners]]
12 | name = "gitlab-docker-runner"
13 | url = "http://gitlab.gitlab/ci"
14 | token = ""
15 | executor = "docker"
16 | [runners.docker]
17 | tls_verify = false
18 | image = "python:3.5"
19 | privileged = true
20 | disable_cache = false
21 | volumes = ["/cache"]
22 | [runners.cache]
23 | Type = "s3"
24 | ServerAddress = "http://minio.gitlab/"
25 | AccessKey = ""
26 | SecretKey = ""
27 | BucketName = "runner"
28 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab-runner/gitlab-runner-docker-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: gitlab-runner-docker
5 | namespace: gitlab
6 | spec:
7 | replicas: 1
8 | template:
9 | metadata:
10 | labels:
11 | name: docker-runner
12 | app: gitlab-runner
13 | spec:
14 | containers:
15 | - name: gitlab-runner-docker
16 | image: gitlab/gitlab-runner:v1.8.0
17 | imagePullPolicy: Always
18 | resources:
19 | limits:
20 | memory: 500Mi
21 | cpu: 600m
22 | requests:
23 | memory: 500Mi
24 | cpu: 600m
25 | volumeMounts:
26 | - name: config
27 | mountPath: /etc/gitlab-runner
28 | - name: var-run-docker-sock
29 | mountPath: /var/run/docker.sock
30 | volumes:
31 | - name: var-run-docker-sock
32 | hostPath:
33 | path: /var/run/docker.sock
34 | - name: config
35 | configMap:
36 | name: gitlab-runner-docker
37 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/gitlab-config-storage.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: gitlab-config-storage
5 | namespace: gitlab
6 | annotations:
7 | volume.beta.kubernetes.io/storage-class: fast
8 | spec:
9 | accessModes:
10 | - ReadWriteMany
11 | resources:
12 | requests:
13 | storage: 1Gi
14 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/gitlab-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: gitlab
5 | namespace: gitlab
6 | spec:
7 | replicas: 1
8 | template:
9 | metadata:
10 | labels:
11 | name: gitlab
12 | app: gitlab
13 | spec:
14 | containers:
15 | - name: gitlab
16 | image: gitlab/gitlab-ce:8.15.4-ce.1
17 | imagePullPolicy: Always
18 | env:
19 | - name: GITLAB_OMNIBUS_CONFIG
20 | value: |
21 | external_url "http://gitlab.example.com"
22 | postgresql['enable']=false
23 | gitlab_rails['db_host'] = 'gitlab-postgresql'
24 | gitlab_rails['db_password']='+BP52QIxpT/flVCMpL3KXA=='
25 | gitlab_rails['db_username']='gitlab'
26 | gitlab_rails['db_database']='gitlab_production'
27 | redis['enable'] = false
28 | gitlab_rails['redis_host']='gitlab-redis'
29 | manage_accounts['enable'] = true
30 | manage_storage_directories['manage_etc'] = false
31 | gitlab_shell['auth_file'] = '/gitlab-data/ssh/authorized_keys'
32 | git_data_dir '/gitlab-data/git-data'
33 | gitlab_rails['shared_path'] = '/gitlab-data/shared'
34 | gitlab_rails['uploads_directory'] = '/gitlab-data/uploads'
35 | gitlab_ci['builds_directory'] = '/gitlab-data/builds'
36 | ports:
37 | - name: http
38 | containerPort: 80
39 | - name: ssh
40 | containerPort: 22
41 | volumeMounts:
42 | - name: config
43 | mountPath: /etc/gitlab
44 | - name: data
45 | mountPath: /gitlab-data
46 | livenessProbe:
47 | httpGet:
48 | path: /help
49 | port: 80
50 | initialDelaySeconds: 180
51 | timeoutSeconds: 15
52 | readinessProbe:
53 | httpGet:
54 | path: /help
55 | port: 80
56 | initialDelaySeconds: 15
57 | timeoutSeconds: 1
58 | volumes:
59 | - name: data
60 | persistentVolumeClaim:
61 | claimName: gitlab-rails-storage
62 | - name: config
63 | persistentVolumeClaim:
64 | claimName: gitlab-config-storage
65 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/gitlab-storage.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: gitlab-rails-storage
5 | namespace: gitlab
6 | annotations:
7 | volume.beta.kubernetes.io/storage-class: fast
8 | spec:
9 | accessModes:
10 | - ReadWriteMany
11 | resources:
12 | requests:
13 | storage: 30Gi
14 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/gitlab-svc-nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: gitlab-nodeport
5 | namespace: gitlab
6 | labels:
7 | name: gitlab
8 | spec:
9 | type: NodePort
10 | selector:
11 | name: gitlab
12 | ports:
13 | - name: ssh
14 | port: 22
15 | targetPort: ssh
16 | - name: http
17 | port: 80
18 | targetPort: http
19 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/gitlab-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: gitlab
5 | namespace: gitlab
6 | labels:
7 | name: gitlab
8 | spec:
9 | type: LoadBalancer
10 | selector:
11 | name: gitlab
12 | ports:
13 | - name: http
14 | port: 80
15 | targetPort: http
16 | - name: ssh
17 | port: 1022
18 | targetPort: ssh
19 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/postgresql-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: gitlab-postgresql
5 | namespace: gitlab
6 | spec:
7 | replicas: 1
8 | template:
9 | metadata:
10 | labels:
11 | name: gitlab-postgresql
12 | spec:
13 | containers:
14 | - name: postgresql
15 | image: postgres:9.5.3
16 | imagePullPolicy: Always
17 | env:
18 | - name: POSTGRES_USER
19 | value: gitlab
20 | - name: POSTGRES_PASSWORD
21 | value: +BP52QIxpT/flVCMpL3KXA==
22 | - name: POSTGRES_DB
23 | value: gitlab_production
24 | - name: DB_EXTENSION
25 | value: pg_trgm
26 | ports:
27 | - name: postgres
28 | containerPort: 5432
29 | volumeMounts:
30 | - mountPath: /var/lib/postgresql
31 | name: data
32 | livenessProbe:
33 | exec:
34 | command:
35 | - pg_isready
36 | - -h
37 | - localhost
38 | - -U
39 | - postgres
40 | initialDelaySeconds: 30
41 | timeoutSeconds: 5
42 | readinessProbe:
43 | exec:
44 | command:
45 | - pg_isready
46 | - -h
47 | - localhost
48 | - -U
49 | - postgres
50 | initialDelaySeconds: 5
51 | timeoutSeconds: 1
52 | volumes:
53 | - name: data
54 | persistentVolumeClaim:
55 | claimName: gitlab-postgresql-storage
56 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/postgresql-storage.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: gitlab-postgresql-storage
5 | namespace: gitlab
6 | annotations:
7 | volume.beta.kubernetes.io/storage-class: fast
8 | spec:
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 30Gi
14 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/postgresql-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: gitlab-postgresql
5 | namespace: gitlab
6 | labels:
7 | name: gitlab-postgresql
8 | spec:
9 | ports:
10 | - name: postgres
11 | port: 5432
12 | targetPort: postgres
13 | selector:
14 | name: gitlab-postgresql
15 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/redis-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: gitlab-redis
5 | namespace: gitlab
6 | spec:
7 | replicas: 1
8 | template:
9 | metadata:
10 | labels:
11 | name: gitlab-redis
12 | spec:
13 | containers:
14 | - name: redis
15 | image: redis:3.2.4
16 | ports:
17 | - name: redis
18 | containerPort: 6379
19 | volumeMounts:
20 | - mountPath: /var/lib/redis
21 | name: data
22 | livenessProbe:
23 | exec:
24 | command:
25 | - redis-cli
26 | - ping
27 | initialDelaySeconds: 30
28 | timeoutSeconds: 5
29 | readinessProbe:
30 | exec:
31 | command:
32 | - redis-cli
33 | - ping
34 | initialDelaySeconds: 5
35 | timeoutSeconds: 1
36 | volumes:
37 | - name: data
38 | persistentVolumeClaim:
39 | claimName: gitlab-redis-storage
40 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/redis-storage.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: gitlab-redis-storage
5 | namespace: gitlab
6 | annotations:
7 | volume.beta.kubernetes.io/storage-class: fast
8 | spec:
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 5Gi
14 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/redis-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: gitlab-redis
5 | namespace: gitlab
6 | labels:
7 | name: gitlab-redis
8 | spec:
9 | selector:
10 | name: gitlab-redis
11 | ports:
12 | - name: redis
13 | port: 6379
14 | targetPort: redis
15 |
--------------------------------------------------------------------------------
/CICD/gitlab/gitlab/storage.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1beta1
2 | kind: StorageClass
3 | metadata:
4 | name: fast
5 | namespace: gitlab
6 | provisioner: kubernetes.io/gce-pd
7 | parameters:
8 | type: pd-ssd
9 |
--------------------------------------------------------------------------------
/CICD/gitlab/ingress/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: tcp-gitlab-configmap
5 | namespace: gitlab
6 | data:
7 | "1022": "gitlab/gitlab:1022"
8 |
--------------------------------------------------------------------------------
/CICD/gitlab/ingress/default-backend-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: default-http-backend
5 | namespace: gitlab
6 | spec:
7 | replicas: 1
8 | selector:
9 | app: default-http-backend
10 | template:
11 | metadata:
12 | labels:
13 | app: default-http-backend
14 | spec:
15 | terminationGracePeriodSeconds: 60
16 | containers:
17 | - name: default-http-backend
18 | # Any image is permissable as long as:
19 | # 1. It serves a 404 page at /
20 | # 2. It serves 200 on a /healthz endpoint
21 | image: gcr.io/google_containers/defaultbackend:1.0
22 | livenessProbe:
23 | httpGet:
24 | path: /healthz
25 | port: 8080
26 | scheme: HTTP
27 | initialDelaySeconds: 30
28 | timeoutSeconds: 5
29 | ports:
30 | - containerPort: 8080
31 | resources:
32 | limits:
33 | cpu: 10m
34 | memory: 20Mi
35 | requests:
36 | cpu: 10m
37 | memory: 20Mi
38 |
--------------------------------------------------------------------------------
/CICD/gitlab/ingress/default-backend-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: default-http-backend
5 | namespace: gitlab
6 | spec:
7 | ports:
8 | - port: 80
9 | targetPort: 8080
10 | protocol: TCP
11 | selector:
12 | app: default-http-backend
13 |
--------------------------------------------------------------------------------
/CICD/gitlab/ingress/gitlab-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: gitlab
5 | namespace: gitlab
6 | labels:
7 | name: gitlab
8 | spec:
9 | rules:
10 | - host: git.example.com
11 | http:
12 | paths:
13 | - path: /
14 | backend:
15 | serviceName: gitlab
16 | servicePort: 80
17 | - host: git-ssh.example.com
18 | http:
19 | paths:
20 | - path: /
21 | backend:
22 | serviceName: gitlab
23 | servicePort: 1022
24 |
--------------------------------------------------------------------------------
/CICD/gitlab/ingress/nginx-ingress-lb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 | name: nginx-ingress-lb
5 | spec:
6 | template:
7 | metadata:
8 | labels:
9 | name: nginx-ingress-lb
10 | spec:
11 | terminationGracePeriodSeconds: 60
12 | containers:
13 | - image: gcr.io/google_containers/nginx-ingress-controller:0.8.3
14 | name: nginx-ingress-lb
15 | imagePullPolicy: Always
16 | livenessProbe:
17 | httpGet:
18 | path: /healthz
19 | port: 10254
20 | scheme: HTTP
21 | initialDelaySeconds: 30
22 | timeoutSeconds: 5
23 | env:
24 | - name: POD_NAME
25 | valueFrom:
26 | fieldRef:
27 | fieldPath: metadata.name
28 | - name: POD_NAMESPACE
29 | valueFrom:
30 | fieldRef:
31 | fieldPath: metadata.namespace
32 | ports:
33 | - containerPort: 80
34 | hostPort: 80
35 | - containerPort: 443
36 | hostPort: 4443
37 | - containerPort: 1022
38 | hostPort: 1022
39 | args:
40 | - /nginx-ingress-controller
41 | - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
42 | - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-gitlab-configmap
43 | - --nginx-configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf
44 |
--------------------------------------------------------------------------------
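A quick connectivity check for the setup above (a sketch; `<node-ip>` stands for any node running this DaemonSet, and the hostname comes from gitlab-ingress.yaml):

```
# SSH reaches GitLab through hostPort 1022, proxied via the TCP services ConfigMap
ssh -T -p 1022 git@<node-ip>

# HTTP goes through the regular Ingress rule
curl -H 'Host: git.example.com' http://<node-ip>/
```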
/CICD/gitlab/ingress/nginx-settings-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: nginx-load-balancer-conf
5 | namespace: gitlab
6 | data:
7 | body-size: "0"
8 | proxy-connect-timeout: "10"
9 | proxy-read-timeout: "360"
10 | proxy-send-timeout: "360"
11 |
--------------------------------------------------------------------------------
/CICD/gitlab/minio/minio-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: minio
5 | namespace: gitlab
6 | spec:
7 | replicas: 1
8 | template:
9 | metadata:
10 | labels:
11 | name: minio
12 | app: minio
13 | spec:
14 | containers:
15 | - name: minio
16 | image: minio/minio:RELEASE.2016-08-21T02-44-47Z
17 | resources:
18 | limits:
19 | cpu: 100m
20 | memory: 100Mi
21 | requests:
22 | cpu: 100m
23 | memory: 100Mi
24 | volumeMounts:
25 | - name: data-store
26 | mountPath: /export
27 | ports:
28 | - containerPort: 9000
29 | name: http
30 | protocol: TCP
31 | volumes:
32 | - name: data-store
33 | emptyDir: {}
34 |
--------------------------------------------------------------------------------
/CICD/gitlab/minio/minio-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: minio
5 | namespace: gitlab
6 | labels:
7 | name: minio
8 | app: minio
9 | spec:
10 | selector:
11 | name: minio
12 | ports:
13 | - name: http
14 | port: 9000
15 | protocol: TCP
16 |
--------------------------------------------------------------------------------
/CICD/i4_prometheus/node-exporter/prometheus-node-exporter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 | name: node-exporter
5 | namespace: kube-system
6 | labels:
7 | name: node-exporter
8 | spec:
9 | template:
10 | metadata:
11 | labels:
12 | name: node-exporter
13 | spec:
14 | hostPID: true
15 | hostIPC: true
16 | hostNetwork: true
17 | containers:
18 | - name: node-exporter
19 | image: prom/node-exporter:v0.16.0
20 | ports:
21 | - containerPort: 9100
22 | resources:
23 | requests:
24 | cpu: 0.15
25 | securityContext:
26 | privileged: true
27 | args:
28 | - --path.procfs
29 | - /host/proc
30 | - --path.sysfs
31 | - /host/sys
32 | - --collector.filesystem.ignored-mount-points
33 | - '"^/(sys|proc|dev|host|etc)($|/)"'
34 | volumeMounts:
35 | - name: dev
36 | mountPath: /host/dev
37 | - name: proc
38 | mountPath: /host/proc
39 | - name: sys
40 | mountPath: /host/sys
41 | - name: rootfs
42 | mountPath: /rootfs
43 | tolerations:
44 | - key: "node-role.kubernetes.io/master"
45 | operator: "Exists"
46 | effect: "NoSchedule"
47 | volumes:
48 | - name: proc
49 | hostPath:
50 | path: /proc
51 | - name: dev
52 | hostPath:
53 | path: /dev
54 | - name: sys
55 | hostPath:
56 | path: /sys
57 | - name: rootfs
58 | hostPath:
59 | path: /
60 |
--------------------------------------------------------------------------------
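Because the DaemonSet above runs with `hostNetwork: true`, every node serves metrics directly on port 9100. A quick check (a sketch; `<node-ip>` is a placeholder for one of your node addresses):

```
kubectl -n kube-system get pods -l name=node-exporter -o wide
curl -s http://<node-ip>:9100/metrics | head
```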
/CICD/i4_prometheus/prometeheus/prometeheus-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: prometheus
5 | namespace: kube-system
6 | labels:
7 | app: prometheus
8 | spec:
9 | selector:
10 | app: prometheus
11 | type: NodePort
12 | ports:
13 | - name: web
14 | port: 9090
15 | targetPort: http
16 |
--------------------------------------------------------------------------------
/CICD/i4_prometheus/prometeheus/prometheus-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus
5 | namespace: kube-system
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRole
9 | metadata:
10 | name: prometheus
11 | rules:
12 | - apiGroups:
13 | - ""
14 | resources:
15 | - nodes
16 | - services
17 | - endpoints
18 | - pods
19 | - nodes/proxy
20 | verbs:
21 | - get
22 | - list
23 | - watch
24 | - apiGroups:
25 | - ""
26 | resources:
27 | - configmaps
28 | - nodes/metrics
29 | verbs:
30 | - get
31 | - nonResourceURLs:
32 | - /metrics
33 | verbs:
34 | - get
35 | ---
36 | apiVersion: rbac.authorization.k8s.io/v1beta1
37 | kind: ClusterRoleBinding
38 | metadata:
39 | name: prometheus
40 | roleRef:
41 | apiGroup: rbac.authorization.k8s.io
42 | kind: ClusterRole
43 | name: prometheus
44 | subjects:
45 | - kind: ServiceAccount
46 | name: prometheus
47 | namespace: kube-system
48 |
--------------------------------------------------------------------------------
/CICD/i4_prometheus/prometeheus/prometheus-volume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: prometheus
5 | spec:
6 | capacity:
7 | storage: 10Gi
8 | accessModes:
9 | - ReadWriteOnce
10 | persistentVolumeReclaimPolicy: Recycle
11 | nfs:
12 | server: 10.4.82.138
13 | path: /data/k8s
14 |
15 | ---
16 | apiVersion: v1
17 | kind: PersistentVolumeClaim
18 | metadata:
19 | name: prometheus
20 | namespace: kube-system
21 | spec:
22 | accessModes:
23 | - ReadWriteOnce
24 | resources:
25 | requests:
26 | storage: 10Gi
27 |
--------------------------------------------------------------------------------
/CICD/i4_prometheus/prometeheus/prometheus.deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: prometheus
5 | namespace: kube-system
6 | labels:
7 | app: prometheus
8 | spec:
9 | template:
10 | metadata:
11 | labels:
12 | app: prometheus
13 | spec:
14 | serviceAccountName: prometheus
15 | containers:
16 | - image: prom/prometheus:v2.4.3
17 | name: prometheus
18 | command:
19 | - "/bin/prometheus"
20 | args:
21 | - "--config.file=/etc/prometheus/prometheus.yml"
22 | - "--storage.tsdb.path=/prometheus"
23 | - "--storage.tsdb.retention=30d"
24 | - "--web.enable-admin-api" # enable the admin HTTP API, which includes features such as deleting time series
25 | - "--web.enable-lifecycle" # enable hot reload: hitting localhost:9090/-/reload applies config changes immediately
26 | ports:
27 | - containerPort: 9090
28 | protocol: TCP
29 | name: http
30 | volumeMounts:
31 | - mountPath: "/prometheus"
32 | subPath: prometheus
33 | name: data
34 | - mountPath: "/etc/prometheus"
35 | name: config-volume
36 | resources:
37 | requests:
38 | cpu: 100m
39 | memory: 512Mi
40 | limits:
41 | cpu: 100m
42 | memory: 512Mi
43 | securityContext:
44 | runAsUser: 0
45 | volumes:
46 | - name: data
47 | persistentVolumeClaim:
48 | claimName: prometheus
49 | - configMap:
50 | name: prometheus-config
51 | name: config-volume
52 |
--------------------------------------------------------------------------------
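Since the Deployment above enables `--web.enable-lifecycle`, Prometheus can reload its configuration without a restart. A minimal sketch of the workflow (assuming the prometheus-config ConfigMap and Deployment names used above; ConfigMap edits can take a minute or so to propagate into the mounted volume):

```
kubectl -n kube-system edit configmap prometheus-config

# forward the web port locally and POST to the reload endpoint
kubectl -n kube-system port-forward deploy/prometheus 9090:9090 &
curl -X POST http://localhost:9090/-/reload
```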
/CICD/istio/deploy.md:
--------------------------------------------------------------------------------
1 | Installation
2 | ---
3 | ```
4 | wget https://github.com/istio/istio/releases/download/1.4.2/istio-1.4.2-linux.tar.gz
5 | tar -zxvf istio-1.4.2-linux.tar.gz
6 | cd istio-1.4.2
7 | cp bin/istioctl /usr/bin/
8 | istioctl manifest apply --set profile=demo
9 | kubectl get pods -n istio-system
10 | kubectl get svc -n istio-system
11 | ```
12 |
13 | Uninstall
14 | ---
15 | ```
16 | istioctl manifest generate --set profile=demo | kubectl delete -f -
17 | ```
18 |
19 | Deploy the httpbin web sample:
20 | ---
21 | ```
22 | cd istio-1.4.2/samples/httpbin
23 | ```
24 |
25 | Manual sidecar injection
26 | ```
27 | kubectl apply -f <(istioctl kube-inject -f httpbin-nodeport.yaml)
28 | # or
29 | istioctl kube-inject -f httpbin-nodeport.yaml |kubectl apply -f -
30 | ```
31 |
32 | Automatic sidecar injection
33 | ```
34 | kubectl label namespace default istio-injection=enabled
35 |
36 | kubectl apply -f httpbin-gateway.yaml
37 | ```
38 |
39 | NodePort access URL
40 | http://IP:31928
41 |
--------------------------------------------------------------------------------
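A quick way to confirm that injection worked for the httpbin sample above (a sketch; the `app=httpbin` label comes from the Istio sample manifests):

```
kubectl get namespace default --show-labels   # expect istio-injection=enabled
kubectl get pods -l app=httpbin \
  -o jsonpath='{.items[*].spec.containers[*].name}'   # expect: httpbin istio-proxy
```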
/CICD/istio安装.md:
--------------------------------------------------------------------------------
1 | Official documentation
2 | https://istio.io/zh/docs/setup/kubernetes/install/helm/
3 |
4 | https://blog.csdn.net/chenleiking/article/details/79785493
5 |
--------------------------------------------------------------------------------
/CICD/jenkins.yaml/jenkins.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: jenkins
6 | namespace: kube-ops
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: jenkins
12 | spec:
13 | terminationGracePeriodSeconds: 10
14 | serviceAccountName: jenkins
15 | containers:
16 | - name: jenkins
17 | image: jenkins/jenkins:lts
18 | imagePullPolicy: IfNotPresent
19 | ports:
20 | - containerPort: 8080
21 | name: web
22 | protocol: TCP
23 | - containerPort: 50000
24 | name: agent
25 | protocol: TCP
26 | resources:
27 | limits:
28 | cpu: 1000m
29 | memory: 1Gi
30 | requests:
31 | cpu: 500m
32 | memory: 512Mi
33 | livenessProbe:
34 | httpGet:
35 | path: /login
36 | port: 8080
37 | initialDelaySeconds: 60
38 | timeoutSeconds: 5
39 | failureThreshold: 12 # ~2 minutes
40 | readinessProbe:
41 | httpGet:
42 | path: /login
43 | port: 8080
44 | initialDelaySeconds: 60
45 | timeoutSeconds: 5
46 | failureThreshold: 12 # ~2 minutes
47 | volumeMounts:
48 | - name: jenkinshome
49 | subPath: jenkins
50 | mountPath: /var/jenkins_home
51 | env:
52 | - name: LIMITS_MEMORY
53 | valueFrom:
54 | resourceFieldRef:
55 | resource: limits.memory
56 | divisor: 1Mi
57 | - name: JAVA_OPTS
58 | value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai
59 | securityContext:
60 | fsGroup: 1000
61 | volumes:
62 | - name: jenkinshome
63 | persistentVolumeClaim:
64 | claimName: opspvc
65 |
--------------------------------------------------------------------------------
/CICD/jenkins.yaml/jenkins_svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: jenkins
5 | namespace: kube-ops
6 | labels:
7 | app: jenkins
8 | spec:
9 | selector:
10 | app: jenkins
11 | type: NodePort
12 | ports:
13 | - name: web
14 | port: 8080
15 | targetPort: web
16 | nodePort: 30001
17 | - name: agent
18 | port: 50000
19 | targetPort: agent
20 |
--------------------------------------------------------------------------------
/CICD/jenkins.yaml/jenkins_volume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: opspv
5 | spec:
6 | capacity:
7 | storage: 20Gi
8 | accessModes:
9 | - ReadWriteMany
10 | persistentVolumeReclaimPolicy: Delete
11 | nfs:
12 | server: 10.151.30.57
13 | path: /data/k8s
14 |
15 | ---
16 | kind: PersistentVolumeClaim
17 | apiVersion: v1
18 | metadata:
19 | name: opspvc
20 | namespace: kube-ops
21 | spec:
22 | accessModes:
23 | - ReadWriteMany
24 | resources:
25 | requests:
26 | storage: 20Gi
27 |
--------------------------------------------------------------------------------
/CICD/jenkins.yaml/rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: jenkins
5 | namespace: kube-ops
6 |
7 | ---
8 |
9 | kind: ClusterRole
10 | apiVersion: rbac.authorization.k8s.io/v1beta1
11 | metadata:
12 | name: jenkins
13 | rules:
14 | - apiGroups: ["extensions", "apps"]
15 | resources: ["deployments"]
16 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
17 | - apiGroups: [""]
18 | resources: ["services"]
19 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
20 | - apiGroups: [""]
21 | resources: ["pods"]
22 | verbs: ["create","delete","get","list","patch","update","watch"]
23 | - apiGroups: [""]
24 | resources: ["pods/exec"]
25 | verbs: ["create","delete","get","list","patch","update","watch"]
26 | - apiGroups: [""]
27 | resources: ["pods/log"]
28 | verbs: ["get","list","watch"]
29 | - apiGroups: [""]
30 | resources: ["secrets"]
31 | verbs: ["get"]
32 |
33 | ---
34 | apiVersion: rbac.authorization.k8s.io/v1beta1
35 | kind: ClusterRoleBinding
36 | metadata:
37 | name: jenkins
38 | roleRef:
39 | apiGroup: rbac.authorization.k8s.io
40 | kind: ClusterRole
41 | name: jenkins
42 | subjects:
43 | - kind: ServiceAccount
44 | name: jenkins
45 | namespace: kube-ops
46 |
--------------------------------------------------------------------------------
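A quick sanity check of the ClusterRole and ClusterRoleBinding above: the jenkins ServiceAccount should be allowed to manage the pods that the Kubernetes plugin spawns as build agents.

```
kubectl auth can-i create pods --as=system:serviceaccount:kube-ops:jenkins
kubectl auth can-i create pods/exec --as=system:serviceaccount:kube-ops:jenkins
kubectl auth can-i get pods/log --as=system:serviceaccount:kube-ops:jenkins
```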
/CICD/jenkins/README.md:
--------------------------------------------------------------------------------
1 | I. Deployment
2 | 1. Deploy
3 | ``` kubectl apply -f jenkins.yaml ```
4 | 2. Web access
5 | ``` http://192.168.20.171:30080/jenkins ```
6 | 3. Get the initial admin password
7 | ``` kubectl -n jenkins exec jenkins-0 -it -- cat /var/jenkins_home/secrets/initialAdminPassword ```
8 |
9 | II. Configure CI
10 | 1. Install the Kubernetes plugin
11 | 
12 | 2. Open the system configuration page and scroll to the bottom
13 | 
14 | 3. Add a cloud
15 | 
16 | 4. Get the cluster API address and enter it in the Kubernetes URL field shown below
17 | 
18 | 5. Configure Jenkins to connect to the API server
19 | 
20 | 6. Add a test job
21 | 
22 | 7. Configure the pipeline script
23 | ```
24 | podTemplate(
25 | label: 'kubernetes',
26 | containers: [
27 | containerTemplate(name: 'maven', image: 'maven:alpine', ttyEnabled: true, command: 'cat'),
28 | containerTemplate(name: 'golang', image: 'golang:alpine', ttyEnabled: true, command: 'cat')
29 | ]
30 | ) {
31 | node('kubernetes') {
32 | container('maven') {
33 | stage('build') {
34 | sh 'mvn --version'
35 | }
36 | stage('unit-test') {
37 | sh 'java -version'
38 | }
39 | }
40 | container('golang') {
41 | stage('deploy') {
42 | sh 'go version'
43 | }
44 | }
45 | }
46 | }
47 | ```
48 | 
49 | 8. Build the job
50 | 
51 | 9. Open the console output
52 | 
53 | 10. The console output completes and the job succeeds
54 | 
55 | 11. While the job runs, kubectl shows an extra agent pod in the build namespace; it disappears once the job finishes
56 | 
57 | 12. Under the node management page an extra agent node appears; it also disappears after the job finishes
58 | 
59 |
--------------------------------------------------------------------------------
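For step 4 of the README above, the "Kubernetes URL" is the API server endpoint. A small sketch of how to find it (the in-cluster DNS name on the last line works when Jenkins itself runs inside the cluster):

```
kubectl cluster-info | head -n 1
kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
# in-cluster alternative: https://kubernetes.default.svc
```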
/CICD/jenkins/namespace/build-ns.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: go-demo-3-build
5 |
6 | ---
7 |
8 | apiVersion: v1
9 | kind: ServiceAccount
10 | metadata:
11 | name: build
12 | namespace: go-demo-3-build
13 |
14 | ---
15 |
16 | apiVersion: rbac.authorization.k8s.io/v1beta1
17 | kind: RoleBinding
18 | metadata:
19 | name: build
20 | namespace: go-demo-3-build
21 | roleRef:
22 | apiGroup: rbac.authorization.k8s.io
23 | kind: ClusterRole
24 | name: admin
25 | subjects:
26 | - kind: ServiceAccount
27 | name: build
28 |
29 |
30 |
--------------------------------------------------------------------------------
/CICD/jenkins/namespace/jenkins.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: jenkins-role-binding
5 | namespace: go-demo-3-build
6 | labels:
7 | app: jenkins
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: cluster-admin
12 | subjects:
13 | - kind: ServiceAccount
14 | name: jenkins
15 | namespace: jenkins
16 |
--------------------------------------------------------------------------------
/CICD/jenkins/namespace/my-k8s-job-ns.groovy:
--------------------------------------------------------------------------------
1 | podTemplate(
2 | label: "kubernetes",
3 | namespace: "go-demo-3-build",
4 | serviceAccount: "build",
5 | yaml: """
6 | apiVersion: v1
7 | kind: Pod
8 | spec:
9 | containers:
10 | - name: kubectl
11 | image: aishangwei/kubectl
12 | command: ["sleep"]
13 | args: ["100000"]
14 | - name: oc
15 | image: aishangwei/openshift-client
16 | command: ["sleep"]
17 | args: ["100000"]
18 | - name: golang
19 | image: golang:1.9
20 | command: ["sleep"]
21 | args: ["100000"]
22 | - name: helm
23 | image: aishangwei/helm:2.8.2
24 | command: ["sleep"]
25 | args: ["100000"]
26 | """
27 | ) {
28 | node("kubernetes") {
29 | container("kubectl") {
30 | stage("kubectl") {
31 | sh "kubectl version"
32 | }
33 | }
34 | container("oc") {
35 | stage("oc") {
36 | sh "oc version"
37 | }
38 | }
39 | container("golang") {
40 | stage("golang") {
41 | sh "go version"
42 | }
43 | }
44 | container("helm") {
45 | stage("helm") {
46 | sh "helm version --tiller-namespace go-demo-3-build"
47 | }
48 | }
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/CICD/jenkins/namespace/my-k8s-job-yaml.groovy:
--------------------------------------------------------------------------------
1 | podTemplate(label: "kubernetes", yaml: """
2 | apiVersion: v1
3 | kind: Pod
4 | spec:
5 | containers:
6 | - name: kubectl
7 | image: aishangwei/kubectl
8 | command: ["sleep"]
9 | args: ["100000"]
10 | - name: oc
11 | image: aishangwei/openshift-client
12 | command: ["sleep"]
13 | args: ["100000"]
14 | - name: golang
15 | image: golang:1.9
16 | command: ["sleep"]
17 | args: ["100000"]
18 | - name: helm
19 | image: aishangwei/helm:2.8.2
20 | command: ["sleep"]
21 | args: ["100000"]
22 | """
23 | ) {
24 | node("kubernetes") {
25 | container("kubectl") {
26 | stage("kubectl") {
27 | sh "kubectl version"
28 | }
29 | }
30 | container("oc") {
31 | stage("oc") {
32 | sh "oc version"
33 | }
34 | }
35 | container("golang") {
36 | stage("golang") {
37 | sh "go version"
38 | }
39 | }
40 | container("helm") {
41 | stage("helm") {
42 | sh "helm version"
43 | }
44 | }
45 | }
46 | }
47 |
48 |
--------------------------------------------------------------------------------
/CICD/jenkins/namespace/prod-ns.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: go-demo-3
5 |
6 | ---
7 |
8 | apiVersion: rbac.authorization.k8s.io/v1beta1
9 | kind: RoleBinding
10 | metadata:
11 | name: build
12 | namespace: go-demo-3
13 | roleRef:
14 | apiGroup: rbac.authorization.k8s.io
15 | kind: ClusterRole
16 | name: admin
17 | subjects:
18 | - kind: ServiceAccount
19 | name: build
20 | namespace: go-demo-3-build
21 |
22 | ---
23 |
24 | apiVersion: v1
25 | kind: LimitRange
26 | metadata:
27 | name: build
28 | namespace: go-demo-3
29 | spec:
30 | limits:
31 | - default:
32 | memory: 200Mi
33 | cpu: 0.2
34 | defaultRequest:
35 | memory: 100Mi
36 | cpu: 0.1
37 | max:
38 | memory: 500Mi
39 | cpu: 0.5
40 | min:
41 | memory: 10Mi
42 | cpu: 0.05
43 | type: Container
44 |
45 | ---
46 |
47 | apiVersion: v1
48 | kind: ResourceQuota
49 | metadata:
50 | name: build
51 | namespace: go-demo-3
52 | spec:
53 | hard:
54 | requests.cpu: 2
55 | requests.memory: 3Gi
56 | limits.cpu: 3
57 | limits.memory: 4Gi
58 | pods: 15
59 |
--------------------------------------------------------------------------------
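Once prod-ns.yml is applied, the LimitRange defaults and the quota ceilings for go-demo-3 can be verified directly:

```
kubectl -n go-demo-3 describe limitrange build
kubectl -n go-demo-3 describe resourcequota build
```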
/CICD/jenkins/namespace/test-k8s-job.groovy:
--------------------------------------------------------------------------------
1 | podTemplate(
2 | label: 'kubernetes',
3 | containers: [
4 | containerTemplate(name: 'maven', image: 'maven:alpine', ttyEnabled: true, command: 'cat'),
5 | containerTemplate(name: 'golang', image: 'golang:alpine', ttyEnabled: true, command: 'cat')
6 | ]
7 | ) {
8 | node('kubernetes') {
9 | container('maven') {
10 | stage('build') {
11 | sh 'mvn --version'
12 | }
13 | stage('unit-test') {
14 | sh 'java -version'
15 | }
16 | }
17 | container('golang') {
18 | stage('deploy') {
19 | sh 'go version'
20 | }
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/CICD/jenkins/nfs-pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv001
5 | labels:
6 | name: pv001
7 | spec:
8 | nfs:
9 | path: /data/nfs1
10 | server: 192.168.20.174
11 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
12 | capacity:
13 | storage: 2Gi
14 | ---
15 | apiVersion: v1
16 | kind: PersistentVolume
17 | metadata:
18 | name: pv002
19 | labels:
20 | name: pv002
21 | spec:
22 | nfs:
23 | path: /data/nfs2
24 | server: 192.168.20.174
25 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
26 | capacity:
27 | storage: 2Gi
28 | ---
29 | apiVersion: v1
30 | kind: PersistentVolume
31 | metadata:
32 | name: pv003
33 | labels:
34 | name: pv003
35 | spec:
36 | nfs:
37 | path: /data/nfs3
38 | server: 192.168.20.174
39 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
40 | capacity:
41 | storage: 2Gi
42 | ---
43 | apiVersion: v1
44 | kind: PersistentVolume
45 | metadata:
46 | name: pv004
47 | labels:
48 | name: pv004
49 | spec:
50 | nfs:
51 | path: /data/nfs4
52 | server: 192.168.20.174
53 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
54 | capacity:
55 | storage: 5Gi
56 | ---
57 | apiVersion: v1
58 | kind: PersistentVolume
59 | metadata:
60 | name: pv005
61 | labels:
62 | name: pv005
63 | spec:
64 | nfs:
65 | path: /data/nfs5
66 | server: 192.168.20.174
67 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
68 | capacity:
69 | storage: 5Gi
70 | ---
71 | apiVersion: v1
72 | kind: PersistentVolume
73 | metadata:
74 | name: pv006
75 | labels:
76 | name: pv006
77 | spec:
78 | nfs:
79 | path: /data/nfs6
80 | server: 192.168.20.174
81 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
82 | capacity:
83 | storage: 2Gi
84 | ---
85 | apiVersion: v1
86 | kind: PersistentVolume
87 | metadata:
88 | name: pv007
89 | labels:
90 | name: pv007
91 | spec:
92 | nfs:
93 | path: /data/nfs7
94 | server: 192.168.20.174
95 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
96 | capacity:
97 | storage: 2Gi
98 | ---
99 | apiVersion: v1
100 | kind: PersistentVolume
101 | metadata:
102 | name: pv008
103 | labels:
104 | name: pv008
105 | spec:
106 | nfs:
107 | path: /data/nfs8
108 | server: 192.168.20.174
109 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
110 | capacity:
111 | storage: 2Gi
112 | ---
113 | apiVersion: v1
114 | kind: PersistentVolume
115 | metadata:
116 | name: pv009
117 | labels:
118 | name: pv009
119 | spec:
120 | nfs:
121 | path: /data/nfs9
122 | server: 192.168.20.174
123 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
124 | capacity:
125 | storage: 8Gi
126 | ---
127 | apiVersion: v1
128 | kind: PersistentVolume
129 | metadata:
130 | name: pv010
131 | labels:
132 | name: pv010
133 | spec:
134 | nfs:
135 | path: /data/nfs10
136 | server: 192.168.20.174
137 | accessModes: ["ReadWriteMany","ReadWriteOnce"]
138 | capacity:
139 | storage: 8Gi
140 |
--------------------------------------------------------------------------------
/CICD/kafka/README.md:
--------------------------------------------------------------------------------
1 | Manifests for deploying Zookeeper, Kafka, and kafka-manager on Kubernetes
2 |
--------------------------------------------------------------------------------
/CICD/kafka/kafka-manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kafka-manager
5 | labels:
6 | app: kafka-manager
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: kafka
11 | port: 9000
12 | targetPort: 9000
13 | nodePort: 30900
14 | selector:
15 | app: kafka-manager
16 | ---
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | metadata:
20 | name: kafka-manager
21 | labels:
22 | app: kafka-manager
23 | spec:
24 | replicas: 1
25 | selector:
26 | matchLabels:
27 | app: kafka-manager
28 | template:
29 | metadata:
30 | labels:
31 | app: kafka-manager
32 | spec:
33 | containers:
34 | - name: kafka-manager
35 | image: zenko/kafka-manager:1.3.3.22
36 | imagePullPolicy: IfNotPresent
37 | ports:
38 | - name: kafka-manager
39 | containerPort: 9000
40 | protocol: TCP
41 | env:
42 | - name: ZK_HOSTS
43 | value: "zookeeper:2181"
44 | livenessProbe:
45 | httpGet:
46 | path: /api/health
47 | port: kafka-manager
48 | readinessProbe:
49 | httpGet:
50 | path: /api/health
51 | port: kafka-manager
52 | resources:
53 | limits:
54 | cpu: 500m
55 | memory: 512Mi
56 | requests:
57 | cpu: 250m
58 | memory: 256Mi
59 |
--------------------------------------------------------------------------------
/CICD/pinpoint/README.md:
--------------------------------------------------------------------------------
1 | Mainstream tracing/APM systems: Zipkin, SkyWalking, Pinpoint
2 |
3 | https://blog.csdn.net/sanyaoxu_2/article/details/88671043
4 |
5 |
6 | skywalking
7 | https://blog.csdn.net/qq924862077/article/details/89409746
8 |
9 | Docker deployment:
10 | ```
11 | git clone https://github.com/naver/pinpoint-docker.git
12 | cd pinpoint-docker
13 | docker-compose pull && docker-compose up -d
14 | ```
15 | Web UI:
16 | http://IP:8079
17 |
18 | Pinpoint agent setup
19 | tomcat
20 | ```
21 | # catalina.sh
22 | CATALINA_OPTS="$CATALINA_OPTS -javaagent:$AGENT_PATH/pinpoint-bootstrap-$VERSION.jar"
23 | CATALINA_OPTS="$CATALINA_OPTS -Dpinpoint.agentId=$AGENT_ID"
24 | CATALINA_OPTS="$CATALINA_OPTS -Dpinpoint.applicationName=$APPLICATION_NAME"
25 | ```
26 | jar
27 | ```
28 | java -jar -javaagent:$AGENT_PATH/pinpoint-bootstrap-$VERSION.jar -Dpinpoint.agentId=$AGENT_ID -Dpinpoint.applicationName=$APPLICATION_NAME xxx.jar
29 | ```
30 |
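In Kubernetes, one option is to inject the same `-javaagent` flags without touching the application's start command, via the standard `JAVA_TOOL_OPTIONS` environment variable. The sketch below assumes the agent files are baked into the image (or copied in by an init container) under `/pinpoint-agent`; the image name, `<VERSION>`, and the agentId are placeholders, and the collector address still has to be set in the agent's pinpoint.config.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: demo-app
  template:
    metadata:
      labels:
        app: demo-app
    spec:
      containers:
      - name: demo-app
        image: demo-app:latest            # placeholder image that ships /pinpoint-agent
        env:
        # JAVA_TOOL_OPTIONS is picked up automatically by the JVM at startup;
        # replace <VERSION> and keep pinpoint.agentId unique per instance.
        - name: JAVA_TOOL_OPTIONS
          value: >-
            -javaagent:/pinpoint-agent/pinpoint-bootstrap-<VERSION>.jar
            -Dpinpoint.agentId=demo-app-0
            -Dpinpoint.applicationName=demo-app
```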
--------------------------------------------------------------------------------
/CICD/prometheus/README.md:
--------------------------------------------------------------------------------
1 | ```
2 | deploy prometheus namespace
3 | > $ kubectl apply -f prometheus-ns.yaml
4 |
5 | deploy node-exporter
6 | > $ kubectl apply -f node-exporter-daemonset.yaml
7 | > $ kubectl apply -f node-exporter-service.yaml
8 |
9 | deploy kube-state-metrics
10 | > $ kubectl apply -f kube-state-metrics-ServiceAccount.yaml
11 | > $ kubectl apply -f kube-state-metrics-deploy.yaml
12 | > $ kubectl apply -f kube-state-metrics-service.yaml
13 |
14 | deploy disk monitor
15 | > $ kubectl apply -f monitor-node-disk-daemonset.yaml
16 |
17 | deploy prometheus
18 | > $ kubectl apply -f prometheus-config-configmap.yaml
19 | > $ kubectl apply -f prometheus-k8s-ServiceAccount.yaml
20 | > $ kubectl apply -f prometheus-rules-configmap.yaml
21 | > $ kubectl apply -f prometheus-secret.yaml
22 | > $ kubectl apply -f prometheus-deploy.yaml
23 | > $ kubectl apply -f prometheus-service.yaml
24 |
25 | deploy grafana
26 | > $ kubectl apply -f grafana-net-2-dashboard-configmap.yaml
27 | > $ kubectl apply -f grafana-deploy.yaml
28 | > $ kubectl apply -f grafana-service.yaml
29 | > $ kubectl apply -f grafana-net-2-dashboard-batch.yaml
30 | > $ kubectl apply -f grafana-ingress.yaml
31 |
32 | deploy alertmanager
33 | > $ kubectl apply -f alertmanager-config-configmap.yaml
34 | > $ kubectl apply -f alertmanager-templates-configmap.yaml
35 | > $ kubectl apply -f alertmanager-deploy.yaml
36 | > $ kubectl apply -f alertmanager-service.yaml
37 | ```
38 |
--------------------------------------------------------------------------------
/CICD/prometheus/alertmanager-config-configmap.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: alertmanager
5 | namespace: monitoring
6 | data:
7 | config.yml: |-
8 | global:
9 | smtp_smarthost: 'smtp.126.com:25'
10 | smtp_from: 'xxxxx@126.com'
11 | smtp_auth_username: 'xxxx@126.com'
12 | smtp_auth_password: 'xxxxxxx'
13 | smtp_require_tls: false
14 |
15 | route:
16 | group_by: ['instance']
17 | group_wait: 30s
18 | group_interval: 5m
19 | repeat_interval: 7m
20 | receiver: email
21 | routes:
22 | - match:
23 | severity: critical
24 | receiver: email
25 | - match_re:
26 | severity: ^(warning|critical)$
27 | receiver: support_team
28 |
29 | receivers:
30 | - name: 'email'
31 | email_configs:
32 | - to: 'xxxxxx@qq.com'
33 | - name: 'support_team'
34 | email_configs:
35 | - to: 'xxxxxx@qq.com'
36 | - name: 'pager'
37 | email_configs:
38 | - to: 'alert-pager@example.com'
39 |
--------------------------------------------------------------------------------
/CICD/prometheus/alertmanager-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: alertmanager
5 | namespace: monitoring
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: alertmanager
11 | template:
12 | metadata:
13 | name: alertmanager
14 | labels:
15 | app: alertmanager
16 | spec:
17 | containers:
18 | - name: alertmanager
19 | image: quay.io/prometheus/alertmanager:v0.7.1
20 | args:
21 | - '-config.file=/etc/alertmanager/config.yml'
22 | - '-storage.path=/alertmanager'
23 | ports:
24 | - name: alertmanager
25 | containerPort: 9093
26 | volumeMounts:
27 | - name: config-volume
28 | mountPath: /etc/alertmanager
29 | - name: templates-volume
30 | mountPath: /etc/alertmanager-templates
31 | - name: alertmanager
32 | mountPath: /alertmanager
33 | volumes:
34 | - name: config-volume
35 | configMap:
36 | name: alertmanager
37 | - name: templates-volume
38 | configMap:
39 | name: alertmanager-templates
40 | - name: alertmanager
41 | emptyDir: {}
42 |
--------------------------------------------------------------------------------
/CICD/prometheus/alertmanager-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | prometheus.io/scrape: 'true'
6 | prometheus.io/path: '/metrics'
7 | labels:
8 | name: alertmanager
9 | name: alertmanager
10 | namespace: monitoring
11 | spec:
12 | selector:
13 | app: alertmanager
14 | type: NodePort
15 | ports:
16 | - name: alertmanager
17 | protocol: TCP
18 | port: 9093
19 | targetPort: 9093
20 |
--------------------------------------------------------------------------------
/CICD/prometheus/grafana-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: grafana-core
5 | namespace: monitoring
6 | labels:
7 | app: grafana
8 | component: core
9 | spec:
10 | replicas: 1
11 | template:
12 | metadata:
13 | labels:
14 | app: grafana
15 | component: core
16 | spec:
17 | containers:
18 | - image: grafana/grafana:4.2.0
19 | name: grafana-core
20 | imagePullPolicy: IfNotPresent
21 | # env:
22 | resources:
23 | # keep request = limit to keep this container in guaranteed class
24 | limits:
25 | cpu: 100m
26 | memory: 100Mi
27 | requests:
28 | cpu: 100m
29 | memory: 100Mi
30 | env:
31 | # The following env variables set up basic auth with the default admin user and admin password.
32 | - name: GF_AUTH_BASIC_ENABLED
33 | value: "true"
34 | - name: GF_SECURITY_ADMIN_USER
35 | valueFrom:
36 | secretKeyRef:
37 | name: grafana
38 | key: admin-username
39 | - name: GF_SECURITY_ADMIN_PASSWORD
40 | valueFrom:
41 | secretKeyRef:
42 | name: grafana
43 | key: admin-password
44 | - name: GF_AUTH_ANONYMOUS_ENABLED
45 | value: "false"
46 | # - name: GF_AUTH_ANONYMOUS_ORG_ROLE
47 | # value: Admin
48 | # does not really work, because of template variables in exported dashboards:
49 | # - name: GF_DASHBOARDS_JSON_ENABLED
50 | # value: "true"
51 | readinessProbe:
52 | httpGet:
53 | path: /login
54 | port: 3000
55 | # initialDelaySeconds: 30
56 | # timeoutSeconds: 1
57 | volumeMounts:
58 | - name: grafana-persistent-storage
59 | mountPath: /var/lib/grafana
60 | volumes:
61 | - name: grafana-persistent-storage
62 | emptyDir: {}
63 |
--------------------------------------------------------------------------------
/CICD/prometheus/grafana-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | spec:
7 | rules:
8 | - host: www.node01.com
9 | http:
10 | paths:
11 | - path: /
12 | backend:
13 | serviceName: grafana
14 | servicePort: 3000
15 |
--------------------------------------------------------------------------------
/CICD/prometheus/grafana-net-2-dashboard-batch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: grafana-import-dashboards
5 | namespace: monitoring
6 | labels:
7 | app: grafana
8 | component: import-dashboards
9 | spec:
10 | template:
11 | metadata:
12 | name: grafana-import-dashboards
13 | labels:
14 | app: grafana
15 | component: import-dashboards
16 | spec:
17 | serviceAccountName: prometheus-k8s
18 | initContainers:
19 | - name: wait-for-grafana
20 | image: giantswarm/tiny-tools
21 | args:
22 | - /bin/sh
23 | - -c
24 | - >
25 | set -x;
26 | while [ $(curl -sw '%{http_code}' "http://grafana:3000" -o /dev/null) -ne 200 ]; do
27 | echo '.'
28 | sleep 15;
29 | done
30 | containers:
31 | - name: grafana-import-dashboards
32 | image: giantswarm/tiny-tools
33 | command: ["/bin/sh", "-c"]
34 | workingDir: /opt/grafana-import-dashboards
35 | args:
36 | - >
37 | for file in *-datasource.json ; do
38 | if [ -e "$file" ] ; then
39 | echo "importing $file" &&
40 | curl --silent --fail --show-error \
41 | --request POST http://${GF_ADMIN_USER}:${GF_ADMIN_PASSWORD}@grafana:3000/api/datasources \
42 | --header "Content-Type: application/json" \
43 | --data-binary "@$file" ;
44 | echo "" ;
45 | fi
46 | done ;
47 | for file in *-dashboard.json ; do
48 | if [ -e "$file" ] ; then
49 | echo "importing $file" &&
50 | ( echo '{"dashboard":'; \
51 | cat "$file"; \
52 | echo ',"overwrite":true,"inputs":[{"name":"DS_PROMETHEUS","type":"datasource","pluginId":"prometheus","value":"prometheus"}]}' ) \
53 | | jq -c '.' \
54 | | curl --silent --fail --show-error \
55 | --request POST http://${GF_ADMIN_USER}:${GF_ADMIN_PASSWORD}@grafana:3000/api/dashboards/import \
56 | --header "Content-Type: application/json" \
57 | --data-binary "@-" ;
58 | echo "" ;
59 | fi
60 | done
61 |
62 | env:
63 | - name: GF_ADMIN_USER
64 | valueFrom:
65 | secretKeyRef:
66 | name: grafana
67 | key: admin-username
68 | - name: GF_ADMIN_PASSWORD
69 | valueFrom:
70 | secretKeyRef:
71 | name: grafana
72 | key: admin-password
73 | volumeMounts:
74 | - name: config-volume
75 | mountPath: /opt/grafana-import-dashboards
76 | restartPolicy: Never
77 | volumes:
78 | - name: config-volume
79 | configMap:
80 | name: grafana-import-dashboards
81 |
--------------------------------------------------------------------------------
/CICD/prometheus/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | labels:
7 | app: grafana
8 | component: core
9 | spec:
10 | type: NodePort
11 | ports:
12 | - port: 3000
13 | selector:
14 | app: grafana
15 | component: core
16 |
--------------------------------------------------------------------------------
/CICD/prometheus/kube-state-metrics-ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1beta1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: kube-state-metrics
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: kube-state-metrics
10 | subjects:
11 | - kind: ServiceAccount
12 | name: kube-state-metrics
13 | namespace: monitoring
14 | ---
15 | apiVersion: rbac.authorization.k8s.io/v1beta1
16 | kind: ClusterRole
17 | metadata:
18 | name: kube-state-metrics
19 | rules:
20 | - apiGroups: [""]
21 | resources:
22 | - nodes
23 | - pods
24 | - services
25 | - resourcequotas
26 | - replicationcontrollers
27 | - limitranges
28 | verbs: ["list", "watch"]
29 | - apiGroups: ["extensions"]
30 | resources:
31 | - daemonsets
32 | - deployments
33 | - replicasets
34 | verbs: ["list", "watch"]
35 | ---
36 | apiVersion: v1
37 | kind: ServiceAccount
38 | metadata:
39 | name: kube-state-metrics
40 | namespace: monitoring
41 |
--------------------------------------------------------------------------------
/CICD/prometheus/kube-state-metrics-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: kube-state-metrics
5 | namespace: monitoring
6 | spec:
7 | replicas: 1
8 | template:
9 | metadata:
10 | labels:
11 | app: kube-state-metrics
12 | spec:
13 | serviceAccountName: kube-state-metrics
14 | containers:
15 | - name: kube-state-metrics
16 | image: gcr.io/google_containers/kube-state-metrics:v0.5.0
17 | ports:
18 | - containerPort: 8080
19 |
--------------------------------------------------------------------------------
/CICD/prometheus/kube-state-metrics-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | prometheus.io/scrape: 'true'
6 | name: kube-state-metrics
7 | namespace: monitoring
8 | labels:
9 | app: kube-state-metrics
10 | spec:
11 | ports:
12 | - name: kube-state-metrics
13 | port: 8080
14 | protocol: TCP
15 | selector:
16 | app: kube-state-metrics
17 |
--------------------------------------------------------------------------------
/CICD/prometheus/monitor-node-disk-daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 | name: node-directory-size-metrics
5 | namespace: monitoring
6 | annotations:
7 | description: |
8 | This `DaemonSet` provides metrics in Prometheus format about disk usage on the nodes.
9 | The container `read-du` reads in sizes of all directories below /mnt and writes that to `/tmp/metrics`. It only reports directories larger than `100M` for now.
10 | The other container `caddy` just hands out the contents of that file on request via `http` on `/metrics` at port `9102` which are the defaults for Prometheus.
11 | These are scheduled on every node in the Kubernetes cluster.
12 | To choose directories from the node to check, just mount them on the `read-du` container below `/mnt`.
13 | spec:
14 | template:
15 | metadata:
16 | labels:
17 | app: node-directory-size-metrics
18 | annotations:
19 | prometheus.io/scrape: 'true'
20 | prometheus.io/port: '9102'
21 | description: |
22 | This `Pod` provides metrics in Prometheus format about disk usage on the node.
23 | The container `read-du` reads in sizes of all directories below /mnt and writes that to `/tmp/metrics`. It only reports directories larger than `100M` for now.
24 | The other container `caddy` just hands out the contents of that file on request on `/metrics` at port `9102` which are the defaults for Prometheus.
25 | This `Pod` is scheduled on every node in the Kubernetes cluster.
26 | To choose directories from the node to check just mount them on `read-du` below `/mnt`.
27 | spec:
28 | containers:
29 | - name: read-du
30 | image: giantswarm/tiny-tools
31 | imagePullPolicy: Always
32 | # FIXME threshold via env var
33 | # The
34 | command:
35 | - fish
36 | - --command
37 | - |
38 | touch /tmp/metrics-temp
39 | while true
40 | for directory in (du --bytes --separate-dirs --threshold=100M /mnt)
41 | echo $directory | read size path
42 | echo "node_directory_size_bytes{path=\"$path\"} $size" \
43 | >> /tmp/metrics-temp
44 | end
45 | mv /tmp/metrics-temp /tmp/metrics
46 | sleep 300
47 | end
48 | volumeMounts:
49 | - name: host-fs-var
50 | mountPath: /mnt/var
51 | readOnly: true
52 | - name: metrics
53 | mountPath: /tmp
54 | - name: caddy
55 | image: dockermuenster/caddy:0.9.3
56 | command:
57 | - "caddy"
58 | - "-port=9102"
59 | - "-root=/var/www"
60 | ports:
61 | - containerPort: 9102
62 | volumeMounts:
63 | - name: metrics
64 | mountPath: /var/www
65 | volumes:
66 | - name: host-fs-var
67 | hostPath:
68 | path: /var
69 | - name: metrics
70 | emptyDir:
71 | medium: Memory
72 |
--------------------------------------------------------------------------------
/CICD/prometheus/node-exporter-daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 | name: prometheus-node-exporter
5 | namespace: monitoring
6 | labels:
7 | app: prometheus
8 | component: node-exporter
9 | spec:
10 | template:
11 | metadata:
12 | name: prometheus-node-exporter
13 | labels:
14 | app: prometheus
15 | component: node-exporter
16 | spec:
17 | containers:
18 | - image: prom/node-exporter:v0.14.0
19 | name: prometheus-node-exporter
20 | ports:
21 | - name: prom-node-exp
22 | #^ must be an IANA_SVC_NAME (at most 15 characters, ..)
23 | containerPort: 9100
24 | hostPort: 9100
25 | hostNetwork: true
26 | hostPID: true
27 |
--------------------------------------------------------------------------------
/CICD/prometheus/node-exporter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | prometheus.io/scrape: 'true'
6 | name: prometheus-node-exporter
7 | namespace: monitoring
8 | labels:
9 | app: prometheus
10 | component: node-exporter
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - name: prometheus-node-exporter
15 | port: 9100
16 | protocol: TCP
17 | selector:
18 | app: prometheus
19 | component: node-exporter
20 | type: ClusterIP
21 |
--------------------------------------------------------------------------------
/CICD/prometheus/prometheus-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: prometheus-core
5 | namespace: monitoring
6 | labels:
7 | app: prometheus
8 | component: core
9 | spec:
10 | replicas: 1
11 | template:
12 | metadata:
13 | name: prometheus-main
14 | labels:
15 | app: prometheus
16 | component: core
17 | spec:
18 | serviceAccountName: prometheus-k8s
19 | containers:
20 | - name: prometheus
21 | image: prom/prometheus:v1.7.0
22 | args:
23 | - '-storage.local.retention=12h'
24 | - '-storage.local.memory-chunks=500000'
25 | - '-config.file=/etc/prometheus/prometheus.yaml'
26 | - '-alertmanager.url=http://alertmanager:9093/'
27 | ports:
28 | - name: webui
29 | containerPort: 9090
30 | resources:
31 | requests:
32 | cpu: 500m
33 | memory: 500M
34 | limits:
35 | cpu: 500m
36 | memory: 500M
37 | volumeMounts:
38 | - name: config-volume
39 | mountPath: /etc/prometheus
40 | - name: rules-volume
41 | mountPath: /etc/prometheus-rules
42 | volumes:
43 | - name: config-volume
44 | configMap:
45 | name: prometheus-core
46 | - name: rules-volume
47 | configMap:
48 | name: prometheus-rules
49 |
--------------------------------------------------------------------------------
/CICD/prometheus/prometheus-k8s-ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: prometheus
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: prometheus
9 | subjects:
10 | - kind: ServiceAccount
11 | name: prometheus-k8s
12 | namespace: monitoring
13 | ---
14 | apiVersion: rbac.authorization.k8s.io/v1beta1
15 | kind: ClusterRole
16 | metadata:
17 | name: prometheus
18 | rules:
19 | - apiGroups: [""]
20 | resources:
21 | - nodes
22 | - nodes/proxy
23 | - services
24 | - endpoints
25 | - pods
26 | verbs: ["get", "list", "watch"]
27 | - apiGroups: [""]
28 | resources:
29 | - configmaps
30 | verbs: ["get"]
31 | - nonResourceURLs: ["/metrics"]
32 | verbs: ["get"]
33 | ---
34 | apiVersion: v1
35 | kind: ServiceAccount
36 | metadata:
37 | name: prometheus-k8s
38 | namespace: monitoring
39 |
--------------------------------------------------------------------------------
/CICD/prometheus/prometheus-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: monitoring
5 |
--------------------------------------------------------------------------------
/CICD/prometheus/prometheus-rules-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | creationTimestamp: null
5 | name: prometheus-rules
6 | namespace: monitoring
7 | data:
8 | cpu-usage.rules: |
9 | ALERT NodeCPUUsage
10 | IF (100 - (avg by (instance) (irate(node_cpu{name="node-exporter",mode="idle"}[5m])) * 100)) > 75
11 | FOR 2m
12 | LABELS {
13 | severity="page"
14 | }
15 | ANNOTATIONS {
16 | SUMMARY = "{{$labels.instance}}: High CPU usage detected",
17 | DESCRIPTION = "{{$labels.instance}}: CPU usage is above 75% (current value is: {{ $value }})"
18 | }
19 | instance-availability.rules: |
20 | ALERT InstanceDown
21 | IF up == 0
22 | FOR 1m
23 | LABELS { severity = "page" }
24 | ANNOTATIONS {
25 | summary = "Instance {{ $labels.instance }} down",
26 | description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute.",
27 | }
28 | low-disk-space.rules: |
29 | ALERT NodeLowRootDisk
30 | IF ((node_filesystem_size{mountpoint="/root-disk"} - node_filesystem_free{mountpoint="/root-disk"} ) / node_filesystem_size{mountpoint="/root-disk"} * 100) > 75
31 | FOR 2m
32 | LABELS {
33 | severity="page"
34 | }
35 | ANNOTATIONS {
36 | SUMMARY = "{{$labels.instance}}: Low root disk space",
37 | DESCRIPTION = "{{$labels.instance}}: Root disk usage is above 75% (current value is: {{ $value }})"
38 | }
39 |
40 | ALERT NodeLowDataDisk
41 | IF ((node_filesystem_size{mountpoint="/data-disk"} - node_filesystem_free{mountpoint="/data-disk"} ) / node_filesystem_size{mountpoint="/data-disk"} * 100) > 75
42 | FOR 2m
43 | LABELS {
44 | severity="page"
45 | }
46 | ANNOTATIONS {
47 | SUMMARY = "{{$labels.instance}}: Low data disk space",
48 | DESCRIPTION = "{{$labels.instance}}: Data disk usage is above 75% (current value is: {{ $value }})"
49 | }
50 | mem-usage.rules: |
51 | ALERT NodeSwapUsage
52 | IF (((node_memory_SwapTotal-node_memory_SwapFree)/node_memory_SwapTotal)*100) > 75
53 | FOR 2m
54 | LABELS {
55 | severity="page"
56 | }
57 | ANNOTATIONS {
58 | SUMMARY = "{{$labels.instance}}: Swap usage detected",
59 | DESCRIPTION = "{{$labels.instance}}: Swap usage is above 75% (current value is: {{ $value }})"
60 | }
61 |
62 | ALERT NodeMemoryUsage
63 | IF (((node_memory_MemTotal-node_memory_MemFree-node_memory_Cached)/(node_memory_MemTotal)*100)) > 75
64 | FOR 2m
65 | LABELS {
66 | severity="page"
67 | }
68 | ANNOTATIONS {
69 | SUMMARY = "{{$labels.instance}}: High memory usage detected",
70 | DESCRIPTION = "{{$labels.instance}}: Memory usage is above 75% (current value is: {{ $value }})"
71 | }
72 |
73 |
--------------------------------------------------------------------------------
/CICD/prometheus/prometheus-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | data:
4 | admin-password: YWRtaW4=
5 | admin-username: YWRtaW4=
6 | metadata:
7 | name: grafana
8 | namespace: monitoring
9 | type: Opaque
10 |
--------------------------------------------------------------------------------
/CICD/prometheus/prometheus-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: prometheus
5 | namespace: monitoring
6 | labels:
7 | app: prometheus
8 | component: core
9 | annotations:
10 | prometheus.io/scrape: 'true'
11 | spec:
12 | type: NodePort
13 | ports:
14 | - port: 9090
15 | protocol: TCP
16 | name: webui
17 | selector:
18 | app: prometheus
19 | component: core
20 |
--------------------------------------------------------------------------------
/CICD/tekton/readme.md:
--------------------------------------------------------------------------------
1 | https://github.com/tektoncd
2 |
3 | https://tekton.dev/
4 |
5 |
6 | https://github.com/tektoncd/cli/tree/main/docs/cmd
7 |
--------------------------------------------------------------------------------
/CICD/其他:
--------------------------------------------------------------------------------
1 | rook
2 | https://github.com/mykubernetes/rook/tree/master/cluster/examples/kubernetes/ceph
3 |
4 | prometheus
5 | https://github.com/mykubernetes/prometheus-operator/tree/master/contrib/kube-prometheus/manifests
6 |
7 | prometheus
8 | https://github.com/mykubernetes/prometheus/blob/master/manifests-all.yaml
9 |
10 | k8s plugins (incubator)
11 | https://github.com/kubernetes-incubator
12 |
--------------------------------------------------------------------------------
/CICD/各种方式部署prometheus/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/CICD/各种方式部署prometheus/定制化grafana.md:
--------------------------------------------------------------------------------
1 | Configure a template variable
2 | label_values(kube_pod_info{pod=~".*zookeeper.*"}, pod)
3 | (screenshot omitted)
4 |
5 | Use the variable in a query
6 | jvm_memory_bytes_used{pod="$pod"}
7 | (screenshot omitted)
8 |
9 | View the result
10 | (screenshot omitted)
11 |
--------------------------------------------------------------------------------
/Docker和K8s 解决容器内时区不一致方案.md:
--------------------------------------------------------------------------------
1 | Docker/K8s 解决容器内时区不一致方案
2 | ===================================
3 | 使用 docker 容器启动服务后,如果使用默认 Centos 系统作为基础镜像,就会出现系统时区不一致的问题,因为默认 Centos 系统时间为 UTC 协调世界时 (Universal Time Coordinated),一般本地所属时区为 CST(+8 时区,上海时间),时间上刚好相差 8 个小时。
4 |
5 | ```
6 | # Check the local time
7 | $ date
8 | Wed Mar 6 16:41:08 CST 2019
9 |
10 | # Check the default timezone inside a centos container
11 | $ docker run -it centos /bin/sh
12 | sh-4.2# date
13 | Wed Mar 6 08:41:45 UTC 2019
14 | ```
15 |
16 | 1. Fix it in the Dockerfile
17 | Modify the Dockerfile directly: when building the base image, or an application image on top of it, add the timezone configuration.
18 | ```
19 | $ cat Dockerfile.date
20 | FROM centos
21 |
22 | RUN rm -f /etc/localtime \
23 | && ln -sv /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
24 | && echo "Asia/Shanghai" > /etc/timezone
25 |
26 | # Build the image
27 | $ docker build -t centos7-date:test -f Dockerfile.date .
28 | Sending build context to Docker daemon 4.426GB
29 | Step 1/2 : FROM centos
30 | ---> 1e1148e4cc2c
31 | Step 2/2 : RUN rm -f /etc/localtime && ln -sv /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo "Asia/Shanghai" > /etc/timezone
32 | ---> Running in fe2e931c3cf2
33 | '/etc/localtime' -> '/usr/share/zoneinfo/Asia/Shanghai'
34 | Removing intermediate container fe2e931c3cf2
35 | ---> 2120143141c8
36 | Successfully built 2120143141c8
37 | Successfully tagged centos7-date:test
38 |
39 | $ docker run -it centos7-date:test /bin/sh
40 | sh-4.2# date
41 | Wed Mar 6 16:40:01 CST 2019
42 | ```
43 | The system time is now correct. This is the approach I recommend.
44 |
45 | 2. Fix it at container start
46 | Mount the host's timezone configuration into the container at startup, provided the host's own timezone files are correct.
47 | ```
48 | # Mount the host /etc/localtime into the container to override its config
49 | $ docker run -it -v /etc/localtime:/etc/localtime centos /bin/sh
50 | sh-4.2# date
51 | Wed Mar 6 16:42:38 CST 2019
52 |
53 | # Or mount the host /usr/share/zoneinfo/Asia/Shanghai into the container
54 | $ docker run -it -v /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime centos /bin/sh
55 | sh-4.2# date
56 | Wed Mar 6 16:42:52 CST 2019
57 | ```
58 | Both mounts work the same way: on CentOS, /usr/share/zoneinfo/Asia/Shanghai and /etc/localtime hold the same data; /etc/localtime is normally either a symlink to the zoneinfo file or a direct copy of it.
59 |
60 | 3. Fix it inside the running container
61 | You can also fix it from inside the container, but if the container is deleted and recreated you have to do it all over again, which is very inconvenient.
62 | ```
63 | # Configure it from inside the container
64 | $ docker run -it centos /bin/sh
65 | sh-4.2# date
66 | Wed Mar 6 08:43:29 UTC 2019
67 | sh-4.2# rm -f /etc/localtime && ln -sv /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
68 | '/etc/localtime' -> '/usr/share/zoneinfo/Asia/Shanghai'
69 | sh-4.2# date
70 | Wed Mar 6 16:43:54 CST 2019
71 | ```
72 |
73 | 4. Fixing container time in k8s
74 | Solve it by mounting the host's timezone configuration into the Pod:
75 | ```
76 | $ cat busy-box-test.yaml
77 | apiVersion: v1
78 | kind: Pod
79 | metadata:
80 | name: busy-box-test
81 | namespace: default
82 | spec:
83 | restartPolicy: OnFailure
84 | containers:
85 | - name: busy-box-test
86 | image: busybox
87 | imagePullPolicy: IfNotPresent
88 | volumeMounts:
89 | - name: date-config
90 | mountPath: /etc/localtime
91 | command: ["sleep", "60000"]
92 | volumes:
93 | - name: date-config
94 | hostPath:
95 | path: /etc/localtime
96 |
97 | ```
98 | Note: if the host /etc/localtime exists and has the correct timezone, mount it directly; if it is missing or wrong, mount /usr/share/zoneinfo/Asia/Shanghai onto /etc/localtime in the container instead (see the sketch at the end of this note). Either approach works.
99 |
100 |
101 |
102 |
103 |
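A minimal sketch of the variant mentioned in the note above, mounting the host's zoneinfo file directly onto /etc/localtime (this assumes the node has /usr/share/zoneinfo installed):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: busy-box-tz
  namespace: default
spec:
  restartPolicy: OnFailure
  containers:
  - name: busy-box-tz
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["sleep", "60000"]
    volumeMounts:
    - name: tz-config
      mountPath: /etc/localtime             # overrides the container's timezone
      readOnly: true
  volumes:
  - name: tz-config
    hostPath:
      path: /usr/share/zoneinfo/Asia/Shanghai   # host zoneinfo file instead of /etc/localtime
```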
--------------------------------------------------------------------------------
/K8S多集群管理.md:
--------------------------------------------------------------------------------
1 | In real production environments you often have to maintain several k8s clusters. Switching between environments and nodes by hand is slow and not very DevOps-like, so here we manage multiple k8s clusters from a single node.
2 |
3 | ## 1) Two example k8s clusters
4 |
5 | First k8s cluster:
6 | ```
7 | [root@k8smaster ~]# kubectl get nodes
8 | NAME STATUS ROLES VERSION
9 | k8smaster Ready controlplane v1.23.1
10 | k8slave Ready worker v1.23.1
11 | ```
12 | - k8smaster: 192.168.40.180
13 | - k8slave: 192.168.40.181
14 |
15 | Second k8s cluster:
16 | ```
17 | [root@k8smaster2]# kubectl get nodes
18 | NAME STATUS ROLES VERSION
19 | k8smaster2 Ready controlplane v1.23.1
20 | k8slave2 Ready worker v1.23.1
21 | ```
22 | - k8smaster2:192.168.40.185
23 | - k8slave2: 192.168.40.186
24 |
25 | ## 2) The kubeconfig file
26 |
27 | You can inspect the kubeconfig with the kubectl config command, or read /root/.kube/config (the default location) directly.
28 |
29 | k8smaster cluster:
30 | ```
31 | [root@k8smaster ~]#kubectl config view
32 | apiVersion: v1
33 | clusters:
34 | - cluster:
35 | certificate-authority-data: DATA+OMITTED
36 | server: https://192.168.40.180:6443
37 | name: kubernetes
38 | contexts:
39 | - context:
40 | cluster: kubernetes
41 | user: kubernetes-admin
42 | name: kubernetes-admin@kubernetes
43 | current-context: kubernetes-admin@kubernetes
44 | kind: Config
45 | preferences: {}
46 | users:
47 | - name: kubernetes-admin
48 | user:
49 | client-certificate-data: REDACTED
50 | client-key-data: REDACTED
51 | ```
52 |
53 | k8smaster2 cluster:
54 | ```
55 | [root@k8smaster2~]# kubectl config view
56 | apiVersion: v1
57 | clusters:
58 | - cluster:
59 | certificate-authority-data: DATA+OMITTED
60 | server: https://192.168.40.185:6443
61 | name: kubernetes
62 | contexts:
63 | - context:
64 | cluster: kubernetes
65 | user: kubernetes-admin
66 | name: kubernetes-admin@kubernetes
67 | current-context: kubernetes-admin@kubernetes
68 | kind: Config
69 | preferences: {}
70 | users:
71 | - name: kubernetes-admin
72 | user:
73 | client-certificate-data: REDACTED
74 | client-key-data: REDACTED
75 | ```
76 |
77 | ## 3) Configure k8smaster2's cluster, user, and context on k8smaster
78 |
79 | a. Add the cluster
80 | ```
81 | [root@k8smaster]# kubectl config set-cluster k8smaster2 --server=https://192.168.40.185:6443 --insecure-skip-tls-verify=true
82 | ```
83 |
84 | b. Add the user
85 | ```
86 | [root@k8smaster2]# kubeadm token create --print-join-command
87 | [root@k8smaster]# kubectl config set-credentials k8smaster2-user --token=clknqa.km25oi82urcuja9u
88 | ```
89 |
90 | c. Add the context
91 | ```
92 | [root@k8smaster]# kubectl config set-context k8smaster2-context --cluster=k8smaster2 --user=k8smaster2-user
93 | ```
94 |
95 | d. Switch contexts to manage the second cluster
96 | ```
97 | [root@k8smaster]#kubectl config use-context k8smaster2-context
98 | ```
99 |
100 | At this point the k8smaster node manages both k8s clusters. More clusters can be added the same way; you simply switch between them with different contexts (a sketch of the resulting kubeconfig follows).
101 |
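For reference, roughly what the merged /root/.kube/config on k8smaster looks like after step 3 (a sketch: certificate data is omitted and the token is the one created above):
```yaml
apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.40.180:6443
  name: kubernetes
- cluster:
    insecure-skip-tls-verify: true
    server: https://192.168.40.185:6443
  name: k8smaster2
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
- context:
    cluster: k8smaster2
    user: k8smaster2-user
  name: k8smaster2-context
current-context: k8smaster2-context
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
- name: k8smaster2-user
  user:
    token: clknqa.km25oi82urcuja9u
```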
--------------------------------------------------------------------------------
/K8S集群删除与添加节点.md:
--------------------------------------------------------------------------------
1 | I. Removing a node
2 | ---
3 | 1. First, check which pods are running on the node
4 | ```
5 | kubectl get pod -o wide
6 | ```
7 |
8 | 2. Drain the pods off the node
9 | ```
10 | # kubectl drain node02 --delete-local-data --force --ignore-daemonsets
11 | ```
12 |
13 | 3. Delete the node
14 | ```
15 | # kubectl delete nodes node02
16 | ```
17 |
18 | 4. Then run the following commands on node02 itself:
19 | ```
20 | kubeadm reset
21 | systemctl stop kubelet
22 | systemctl stop docker
23 | rm -rf /var/lib/cni/
24 | rm -rf /var/lib/kubelet/*
25 | rm -rf /etc/cni/
26 | ifconfig cni0 down
27 | ifconfig flannel.1 down
28 | ifconfig docker0 down
29 | ip link delete cni0
30 | ip link delete flannel.1
31 | systemctl start docker
32 | systemctl start kubelet
33 | ```
34 | Without the steps above, pods on this node will later fail to start with an error like: networkPlugin cni failed to set up pod "alertmanager-main-1_monitoring" network: failed to set bridge addr: "cni0" already has an IP address different from 10.244.5.1/24 — meaning the cni0 bridge still carries an address that no longer matches the cluster network, so the commands above are needed to reset the node's network state.
35 |
36 |
37 | II. Re-joining the node
38 | ---
39 | The join command has the form: kubeadm join <master-ip>:<port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>
40 |
41 | The default token is only valid for 24 hours; once it expires it can no longer be used. The fix is as follows:
42 |
43 | Generate a new token ==> kubeadm token create
44 |
45 | 1. List the current tokens
46 | ```
47 | kubeadm token list
48 | ```
49 |
50 | 2. Generate a new token
51 | ```
52 | kubeadm token create
53 | ```
54 |
55 | 3. List the tokens again
56 | ```
57 | kubeadm token list
58 | ```
59 |
60 | 4. Get the sha256 hash of the CA certificate
61 | ```
62 | openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
63 | ```
64 |
65 | 5. Join the node to the cluster
66 | ```
67 | kubeadm join 172.16.40.2:58443 --token 369tcl.oe4punpoj9gaijh7 (the new token) --discovery-token-ca-cert-hash sha256:7ae10591aa593c2c36fb965d58964a84561e9ccd416ffe7432550a0d0b7e4f90 (the CA cert sha256 hash)
68 | ```
69 | Check the nodes on the master again and the node has rejoined the cluster.
70 |
71 | 6. Or generate the complete join command directly when creating the token
72 | ```
73 | kubeadm token create --print-join-command
74 | ```
75 |
--------------------------------------------------------------------------------
/Kubernetes 调度GPU.md:
--------------------------------------------------------------------------------
1 | Managing GPU applications with Kubernetes
2 |
3 | Official documentation
4 | https://devblogs.nvidia.com/gpu-containers-runtime/
5 |
6 | https://www.cnblogs.com/breezey/p/11801122.html
7 | https://www.jianshu.com/p/8b84c597ce03
8 |
9 |
10 |
11 | https://github.com/NVIDIA/nvidia-docker
12 |
13 | https://github.com/NVIDIA/nvidia-container-runtime
14 |
15 | https://github.com/NVIDIA/k8s-device-plugin
16 |
17 | Using GPU resources in Kubernetes
18 |
19 | Installation steps
20 |
21 | 1. Install the NVIDIA driver on the node
22 | ```
23 | rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
24 | rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
25 | yum install -y kmod-nvidia
26 |
27 | Verify
28 | # nvidia-smi
29 | ```
30 |
31 |
32 | 2. Install nvidia-docker2  # note: not nvidia-container-toolkit
33 | ```
34 | distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
35 | curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.repo | sudo tee /etc/yum.repos.d/nvidia-docker.repo
36 |
37 | yum install -y nvidia-docker2
38 |
39 | pkill -SIGHUP dockerd
40 | ```
41 |
42 | 3. Edit the docker daemon config
43 | ```
44 | # vim /etc/docker/daemon.json
45 | {
46 | "default-runtime": "nvidia",
47 | "runtimes": {
48 | "nvidia": {
49 | "path": "/usr/bin/nvidia-container-runtime",
50 | "runtimeArgs": []
51 | }
52 | }
53 | }
54 | # systemctl restart docker
55 | ```
56 |
57 |
58 |
59 | 4. Install the NVIDIA device plugin
60 | ```
61 | kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/1.0.0-beta4/nvidia-device-plugin.yml
62 | # URL https://github.com/NVIDIA/k8s-device-plugin
63 |
64 | Verify the installation
65 | # kubectl get pod -n kube-system |grep nvidia
66 | nvidia-device-plugin-daemonset-76gm6 1/1 Running 2 20d
67 |
68 | ```
69 | 5. Verify the node recognizes GPU resources
70 | ```
71 | kubectl describe node nodeName
72 | ```
73 |
74 |
75 | Can multiple pods share one GPU?
76 | - No. Pods request GPUs at whole-card granularity, so a single card can only be assigned to one pod. A pod may contain several containers, though, and those containers can share all the GPUs assigned to that pod.
77 |
78 | Can multiple docker containers share one GPU?
79 | - Yes. Containers started with nvidia-docker can share a GPU: containers are just processes, so GPU allocation can go down to the level of GPU memory.
80 |
81 |
82 |
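Once the node advertises nvidia.com/gpu, a Pod requests a whole card through its resource limits. A minimal sketch (the CUDA image tag is only an example):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-test
spec:
  restartPolicy: OnFailure
  containers:
  - name: cuda-container
    image: nvidia/cuda:10.0-base        # example image with the CUDA userspace tools
    command: ["nvidia-smi"]             # prints the GPU assigned to this pod
    resources:
      limits:
        nvidia.com/gpu: 1               # request one whole GPU card
```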
--------------------------------------------------------------------------------
/Zabbix通过api监控k8s/get_k8s:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | /usr/bin/python3 /usr/local/zabbix/share/zabbix/externalscripts/k8s/get_k8s.py get_pod
3 | /usr/bin/python3 /usr/local/zabbix/share/zabbix/externalscripts/k8s/get_k8s.py get_health
4 | /usr/bin/python3 /usr/local/zabbix/share/zabbix/externalscripts/k8s/get_k8s.py get_node
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/Zabbix通过api监控k8s/k8s监控精简版.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/Zabbix通过api监控k8s/k8s监控精简版.pdf
--------------------------------------------------------------------------------
/calico/allow-all-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: allow-all-ingress
5 | spec:
6 | podSelector: {}
7 | policyTypes:
8 | - Ingress
9 | ingress:
10 | - {}
11 |
--------------------------------------------------------------------------------
/calico/deny-all-egress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: deny-all-egress
5 | spec:
6 | podSelector: {}
7 | policyTypes: ["Egress"]
8 |
--------------------------------------------------------------------------------
/calico/deny-all-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: deny-all-ingress
5 | spec:
6 | podSelector: {}
7 | policyTypes:
8 | - Ingress
9 |
--------------------------------------------------------------------------------
/calico/egress-allow-some-destinations.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: allow-tomcat-egress
5 | spec:
6 | podSelector:
7 | matchLabels:
8 | app: tomcat
9 | policyTypes: ["Egress"]
10 | egress:
11 | - to:
12 | - podSelector:
13 | matchLabels:
14 | app: nginx
15 | ports:
16 | - protocol: TCP
17 | port: 80
18 | - to:
19 | - podSelector:
20 | matchLabels:
21 | app: mysql
22 | ports:
23 | - protocol: TCP
24 | port: 3306
25 |
--------------------------------------------------------------------------------
/calico/image/BGP1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/calico/image/BGP1.png
--------------------------------------------------------------------------------
/calico/image/calico BGP实现.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/calico/image/calico BGP实现.png
--------------------------------------------------------------------------------
/calico/image/calico2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/calico/image/calico2.png
--------------------------------------------------------------------------------
/calico/image/calico3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/calico/image/calico3.png
--------------------------------------------------------------------------------
/calico/ingress-allow-someclients.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: allow-myapp-ingress
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | run: myapp
10 | policyTypes: ["Ingress"]
11 | ingress:
12 | - from:
13 | - ipBlock:
14 | cidr: 10.244.0.0/16
15 | except:
16 | - 10.244.3.0/24
17 | ports:
18 | - protocol: TCP
19 | port: 80
20 |
--------------------------------------------------------------------------------
/calico/myapp-allow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: myapp-allow
5 | namespace: testing
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: myapp
10 | ingress:
11 | - from:
12 | - podSelector:
13 | matchLabels:
14 | app: nginx
15 | ports:
16 | - port: 80
17 | - from:
18 | - namespaceSelector:
19 | matchLabels:
20 | ns: kube-system
21 | egress:
22 | - to:
23 | - podSelector:
24 | matchLabels:
25 | app: nginx
26 | - to:
27 | - namespaceSelector:
28 | matchLabels:
29 | ns: kube-system
30 | policyTypes:
31 | - Ingress
32 | - Egress
33 |
--------------------------------------------------------------------------------
/calico/namespace-internal-traffic.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: namespace-deny-all
5 | namespace: default
6 | spec:
7 | policyTypes: ["Ingress","Egress"]
8 | podSelector: {}
9 | ---
10 | apiVersion: networking.k8s.io/v1
11 | kind: NetworkPolicy
12 | metadata:
13 | name: namespace-internal-traffic
14 | namespace: default
15 | spec:
16 | policyTypes: ["Ingress","Egress"]
17 | podSelector: {}
18 | ingress:
19 | - from:
20 | - namespaceSelector:
21 | matchExpressions:
22 | - key: name
23 | operator: In
24 | values: ["default","kube-system"]
25 | egress:
26 | - to:
27 | - namespaceSelector:
28 | matchExpressions:
29 | - key: name
30 | operator: In
31 | values: ["default","kube-system"]
32 |
--------------------------------------------------------------------------------
/calico/nginx-allow-all.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: nginx-allow-all
5 | namespace: testing
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: nginx
10 | ingress:
11 | - ports:
12 | - port: 80
13 | - from:
14 | - namespaceSelector:
15 | matchLabels:
16 | ns: kube-system
17 | egress:
18 | - to:
19 | policyTypes:
20 | - Ingress
21 | - Egress
22 |
--------------------------------------------------------------------------------
/calico/ns-and-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: testing
5 | labels:
6 | env: testing
7 | ---
8 | apiVersion: v1
9 | kind: Service
10 | metadata:
11 | name: nginx
12 | namespace: testing
13 | spec:
14 | selector:
15 | app: nginx
16 | ---
17 | apiVersion: v1
18 | kind: Pod
19 | metadata:
20 | name: nginx
21 | namespace: testing
22 | labels:
23 | app: nginx
24 | spec:
25 | containers:
26 | - name: nginx
27 | image: nginx:alpine
28 | ports:
29 | - name: nginx
30 | containerPort: 80
31 | ---
32 | apiVersion: v1
33 | kind: Service
34 | metadata:
35 | name: myapp
36 | namespace: testing
37 | spec:
38 | selector:
39 | app: myapp
40 | ---
41 | apiVersion: v1
42 | kind: Pod
43 | metadata:
44 | name: myapp
45 | namespace: testing
46 | labels:
47 | app: myapp
48 | spec:
49 | containers:
50 | - name: myapp
51 | image: ikubernetes/myapp:v1
52 | ports:
53 | - name: myapp
54 | containerPort: 80
55 |
--------------------------------------------------------------------------------
/calico/testing-netpol-denyall.yaml:
--------------------------------------------------------------------------------
3 | apiVersion: networking.k8s.io/v1
4 | kind: NetworkPolicy
5 | metadata:
6 | name: deny-all-traffic
7 | namespace: testing
8 | spec:
9 | podSelector: {}
10 | policyTypes:
11 | - Ingress
12 | - Egress
13 |
--------------------------------------------------------------------------------
/calico/安装使用.md:
--------------------------------------------------------------------------------
1 | Official site
2 | https://docs.projectcalico.org
3 | Install Calico
4 | https://docs.projectcalico.org/v3.1/getting-started/kubernetes/
5 | Install Calico on top of flannel (canal)
6 | https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/flannel
7 |
8 | 1. Install
9 | ```
10 | kubectl apply -f \
11 | https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
12 |
13 | kubectl apply -f \
14 | https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml
15 | ```
16 | 2. Reference
17 | ``` # kubectl explain networkpolicy.spec ```
18 |
19 | 3. Testing policies
20 |
21 | 1) Create two test namespaces
22 | ```
23 | # kubectl create namespace dev
24 | # kubectl create namespace prod
25 | ```
26 | 2) Define an ingress policy
27 | ```
28 | # cat deny-all-ingress.yaml
29 | apiVersion: networking.k8s.io/v1
30 | kind: NetworkPolicy
31 | metadata:
32 | name: deny-all-ingress
33 | spec:
34 | podSelector: {}
35 | policyTypes:
36 | - Ingress
37 | ```
38 | 3) Define a test web pod
39 | ```
40 | # cat pod-a.yaml
41 | apiVersion: v1
42 | kind: Pod
43 | metadata:
44 | name: pod1
45 | spec:
46 | containers:
47 | - name: myapp
48 | image: ikubernetes/myapp:v1
49 | ```
50 | 4) Apply the policy to the dev namespace
51 | ``` # kubectl apply -f deny-all-ingress.yaml -n dev ```
52 | 5) Run the pod in both namespaces and test
53 | ```
54 | kubectl apply -f pod-a.yaml -n dev
55 | # curl 10.244.1.2
56 |
57 | # kubectl apply -f pod-a.yaml -n prod
58 | # curl 10.244.1.3
59 | Hello MyApp | Version: v1 | Pod Name
60 | ```
61 |
62 | 6) Allow all ingress
63 | ```
64 | # cat allow-all-ingress.yaml
65 | apiVersion: networking.k8s.io/v1
66 | kind: NetworkPolicy
67 | metadata:
68 | name: allow-all-ingress
69 | spec:
70 | podSelector: {}
71 | policyTypes:
72 | - Ingress
73 | ingress:
74 | - {}
75 |
76 | # kubectl apply -f allow-all-ingress.yaml -n dev
77 | # curl 10.244.1.2
78 | Hello MyApp | Version: v1 | Pod Name
79 | # curl 10.244.1.3
80 | Hello MyApp | Version: v1 | Pod Name
81 | ```
82 | 7) Allow access to a specific group of pods
83 | ```
84 | # kubectl label pods pod1 app=myapp -n dev
85 | # cat allow-netpol-demo.yaml
86 | apiVersion: networking.k8s.io/v1
87 | kind: NetworkPolicy
88 | metadata:
89 | name: allow-myapp-ingress
90 | spec:
91 | podSelector:
92 | matchLabels:
93 | app: myapp
94 | ingress:
95 | - from:
96 | - ipBlock:
97 | cidr: 10.244.0.0/16
98 | except:
99 | - 10.244.1.2/32
100 | ports:
101 | - protocol: TCP
102 | port: 80
103 | - protocol: TCP
104 | port: 443
105 |
106 | # kubectl apply -f allow-netpol-demo.yaml -n dev
107 |
108 | # curl 10.244.1.2
109 | Hello MyApp | Version: v1 | Pod Name
110 | # curl 10.244.1.3
111 | Hello MyApp | Version: v1 | Pod Name
112 | ```
113 |
--------------------------------------------------------------------------------
/cert-manager.md:
--------------------------------------------------------------------------------
1 | Official docs
2 | https://cert-manager.readthedocs.io/en/latest/getting-started/install/index.html
3 |
4 | Official chart
5 | https://github.com/jetstack/cert-manager
6 |
7 | Installation
8 | 1. Create the namespace
9 | ```
10 | # kubectl create namespace cert-manager
11 | ```
12 |
13 | 2. Label the cert-manager namespace so cert-manager skips resource validation for it
14 | ```
15 | # kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
16 | ```
17 |
18 | 3. Install the CRDs
19 | ```
20 | # kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.10/deploy/manifests/00-crds.yaml
21 | ```
22 |
23 | 4. Install cert-manager
24 | ```
25 | # helm install \
26 | --name cert-manager \
27 | --namespace cert-manager \
28 | --version v0.10.0 \
29 | jetstack/cert-manager
30 | ```
31 |
32 | 5. Check the installation
33 | ```
34 | # kubectl get pods --namespace cert-manager
35 |
36 | NAME READY STATUS RESTARTS AGE
37 | cert-manager-5c6866597-zw7kh 1/1 Running 0 2m
38 | cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m
39 | cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m
40 | ```
41 |
42 | https://cloud.tencent.com/developer/article/1326543
43 |
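After installation, certificates are requested through the CRDs installed in step 3. A minimal sketch of a ClusterIssuer for the v0.10 API group (certmanager.k8s.io/v1alpha1); the email and ingress class below are placeholders:
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # Let's Encrypt staging endpoint; switch to the production URL once this works
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: you@example.com                     # placeholder
    privateKeySecretRef:
      name: letsencrypt-staging-account-key
    solvers:
    - http01:
        ingress:
          class: nginx                         # placeholder ingress class
```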
--------------------------------------------------------------------------------
/deploy/CentOS7+docker+k8s(v1.15.3) 通过kubeadm部署.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/CentOS7+docker+k8s(v1.15.3) 通过kubeadm部署.pdf
--------------------------------------------------------------------------------
/deploy/image/README.md:
--------------------------------------------------------------------------------
1 | Images
2 |
--------------------------------------------------------------------------------
/deploy/image/kube4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/image/kube4.png
--------------------------------------------------------------------------------
/deploy/image/kube5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/image/kube5.png
--------------------------------------------------------------------------------
/deploy/image/kube6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/image/kube6.png
--------------------------------------------------------------------------------
/deploy/image/kuber1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/image/kuber1.png
--------------------------------------------------------------------------------
/deploy/image/kuber2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/image/kuber2.png
--------------------------------------------------------------------------------
/deploy/image/kuber3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/deploy/image/kuber3.png
--------------------------------------------------------------------------------
/deploy/kubeadm 下载脚本:
--------------------------------------------------------------------------------
1 | images=(
2 | kube-apiserver:v1.12.1
3 | kube-controller-manager:v1.12.1
4 | kube-scheduler:v1.12.1
5 | kube-proxy:v1.12.1
6 | pause:3.1
7 | etcd:3.2.24
8 | coredns:1.2.2
9 | )
10 |
11 | for imageName in ${images[@]} ; do
12 | docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
13 | docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
14 | docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
15 | done
16 |
17 |
18 | Or use the keveon mirror
19 | Example
20 | docker pull keveon/kube-apiserver:v1.12.1
21 | docker pull mirrorgooglecontainers/kube-apiserver-amd64:v1.13.2
22 |
23 | This repository periodically clones the Google images and renames them under anjia0532/google-containers.
24 | docker pull anjia0532/google-containers.kube-apiserver-amd64:${K8S_VERSION}
25 |
26 | http://mirror.azure.cn/help/gcr-proxy-cache.html
27 |
--------------------------------------------------------------------------------
/deploy/kubeasz:
--------------------------------------------------------------------------------
1 | https://github.com/easzlab/kubeasz
2 |
--------------------------------------------------------------------------------
/deploy/readme.md:
--------------------------------------------------------------------------------
1 | Inspect the public-key certificates
2 | ```
3 | # Self-signed CA (generates ca.key and ca.crt)
4 | openssl x509 -in ca.crt -noout -text
5 |
6 | # apiserver private key and certificate
7 | openssl x509 -in apiserver.crt -noout -text
8 | ```
9 |
10 |
11 |
12 | Installing Weave Scope
13 | ---
14 | - Weave Scope, a Kubernetes monitoring tool
15 | Official docs: https://www.weave.works/docs/scope/latest/installing/#k8s
16 |
17 |
18 | 1. Install Weave Scope
19 | ```
20 | kubectl apply --namespace weave -f "https://cloud.weave.works/k8s/scope.yaml?k8s-version=$(kubectl version | base64 | tr -d '\n')"
21 | namespace/weave created
22 | serviceaccount/weave-scope created
23 | clusterrole.rbac.authorization.k8s.io/weave-scope created
24 | clusterrolebinding.rbac.authorization.k8s.io/weave-scope created
25 | deployment.apps/weave-scope-app created
26 | service/weave-scope-app created
27 | deployment.apps/weave-scope-cluster-agent created
28 | daemonset.apps/weave-scope-agent created
29 | ```
30 |
31 | 2. Check the resources
32 | ```
33 | # kubectl get all -n weave
34 | NAME READY STATUS RESTARTS AGE
35 | pod/weave-scope-agent-hx4t2 1/1 Running 0 103s
36 | pod/weave-scope-agent-vmbqr 1/1 Running 0 103s
37 | pod/weave-scope-agent-zd8x7 1/1 Running 0 103s
38 | pod/weave-scope-app-b99fb9585-77rld 1/1 Running 0 104s
39 | pod/weave-scope-cluster-agent-58f5b5454-vnckm 1/1 Running 0 103s
40 |
41 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
42 | service/weave-scope-app ClusterIP 10.99.31.182 80/TCP 105s
43 |
44 | NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
45 | daemonset.apps/weave-scope-agent 3 3 3 3 0 104s
46 |
47 | NAME READY UP-TO-DATE AVAILABLE AGE
48 | deployment.apps/weave-scope-app 1/1 1 1 105s
49 | deployment.apps/weave-scope-cluster-agent 1/1 1 1 105s
50 |
51 | NAME DESIRED CURRENT READY AGE
52 | replicaset.apps/weave-scope-app-b99fb9585 1 1 1 105s
53 | replicaset.apps/weave-scope-cluster-agent-58f5b5454 1 1 1 105s
54 | ```
55 |
56 |
57 | 3. Expose it externally
58 | ```
59 | kubectl patch svc $(kubectl get svc -n weave |grep weave-scope-app |awk '{print $1}') -p '{"spec":{"type": "NodePort"}}' -n weave
60 | ```
61 |
62 | 4. Login URL: http://172.27.9.131:30022/
63 |
--------------------------------------------------------------------------------
/deploy/纯手动部署二进制:
--------------------------------------------------------------------------------
1 |
2 | https://www.kancloud.cn/huyipow/kubernetes/531982
3 |
4 |
5 | https://www.qikqiak.com/post/manual-install-high-available-kubernetes-cluster/#2-%E5%88%9B%E5%BB%BAca-%E8%AF%81%E4%B9%A6%E5%92%8C%E5%AF%86%E9%92%A5-a-id-create-ca-a
6 |
--------------------------------------------------------------------------------
/helm/Helm部署文档.md:
--------------------------------------------------------------------------------
1 | # Helm Deployment Guide
2 | **The node where you deploy Helm must already be able to run kubectl.**
3 | ### 1. Install the Helm client
4 | ##### Download
5 | Helm ships as a single binary; download it straight from the GitHub releases page:
6 | https://github.com/helm/helm/releases
7 | > If GitHub is unreachable from your network, the 2.13.1-linux-amd64 build is also mirrored on my netdisk.
8 | > Link: https://pan.baidu.com/s/1bu-cpjVaSVGVXuWvWoqHEw
9 | > Extraction code: 5wds
10 |
11 | ##### Install
12 | ```bash
13 | # Unpack
14 | $ tar -zxvf helm-v2.13.1-linux-amd64.tar.gz
15 | $ mv linux-amd64/helm /usr/local/bin/
16 |
17 | # If /usr/local/bin is not on your PATH yet, add it first
18 | $ export PATH=$PATH:/usr/local/bin/
19 |
20 | # Verify
21 | $ helm version
22 | ```
23 |
24 | ### 2. Install Tiller
25 | Tiller is deployed into the Kubernetes cluster as a Deployment. Because Helm by default pulls from storage.googleapis.com, we assume here that it cannot be reached:
26 | ```bash
27 | # Point the repositories at the Alibaba Cloud mirrors
28 | $ helm init --client-only --stable-repo-url https://aliacs-app-catalog.oss-cn-hangzhou.aliyuncs.com/charts/
29 | $ helm repo add incubator https://aliacs-app-catalog.oss-cn-hangzhou.aliyuncs.com/charts-incubator/
30 | $ helm repo update
31 |
32 | # The official image cannot be pulled, so use -i to specify an alternative image
33 | $ helm init --service-account tiller --upgrade -i registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.13.1 --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
34 |
35 | # Create a TLS-enabled Tiller server
36 | $ helm init --service-account tiller --upgrade -i registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.13.1 --tiller-tls-cert /etc/kubernetes/ssl/tiller001.pem --tiller-tls-key /etc/kubernetes/ssl/tiller001-key.pem --tls-ca-cert /etc/kubernetes/ssl/ca.pem --tiller-namespace kube-system --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
37 | ```
38 | ### 3. Grant Tiller permissions
39 | Helm's server side, Tiller, is a Deployment running in Kubernetes that talks to the API server to operate on the cluster. By default the Tiller Deployment has no authorized ServiceAccount, so its requests to the API server are denied. We therefore need to explicitly grant the Tiller Deployment permissions.
40 |
41 |
42 | ```bash
43 | # Create the ServiceAccount
44 | $ kubectl create serviceaccount --namespace kube-system tiller
45 | # Create the cluster role binding
46 | $ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
47 | ```
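If Tiller was already deployed before the ServiceAccount existed, the running Deployment can be patched instead of re-running helm init; a sketch, assuming the Deployment is named tiller-deploy as created by helm init (the same name used in the verification step below):
```bash
# Attach the tiller ServiceAccount to the existing Tiller Deployment
kubectl patch deploy --namespace kube-system tiller-deploy \
  -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
```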
48 |
49 |
50 |
51 | ### 4. Verify
52 | ```bash
53 | # Check Tiller's serviceAccount; it should match the one we created: tiller
54 | $ kubectl get deploy --namespace kube-system tiller-deploy -o yaml|grep serviceAccount
55 |
56 | # Verify the pod
57 | $ kubectl -n kube-system get pods|grep tiller
58 |
59 | # Verify the version
60 | $ helm version
61 | ```
62 |
63 |
--------------------------------------------------------------------------------
/helm/helm安装和回滚.md:
--------------------------------------------------------------------------------
1 | Deploy Jenkins with Helm
2 | =============
3 | 1. Create the Jenkins namespaces and RBAC objects
4 | ```
5 | # cat jenkins-ns.yaml
6 | apiVersion: v1
7 | kind: Namespace
8 | metadata:
9 | name: jenkins
10 |
11 | ---
12 |
13 | apiVersion: v1
14 | kind: Namespace
15 | metadata:
16 | name: build
17 |
18 | ---
19 |
20 | apiVersion: v1
21 | kind: ServiceAccount
22 | metadata:
23 | name: jenkins
24 | namespace: jenkins
25 |
26 | ---
27 |
28 | kind: Role
29 | apiVersion: rbac.authorization.k8s.io/v1beta1
30 | metadata:
31 | name: jenkins
32 | namespace: build
33 | rules:
34 | - apiGroups: [""]
35 | resources: ["pods", "pods/exec", "pods/log"]
36 | verbs: ["*"]
37 | - apiGroups: [""]
38 | resources: ["secrets"]
39 | verbs: ["get"]
40 |
41 | ---
42 |
43 | apiVersion: rbac.authorization.k8s.io/v1beta1
44 | kind: RoleBinding
45 | metadata:
46 | name: jenkins
47 | namespace: build
48 | roleRef:
49 | apiGroup: rbac.authorization.k8s.io
50 | kind: Role
51 | name: jenkins
52 | subjects:
53 | - kind: ServiceAccount
54 | name: jenkins
55 | namespace: jenkins
56 |
57 |
58 |
59 | # kubectl apply -f jenkins-ns.yaml
60 | ```
61 |
62 | 2. Search for the jenkins chart
63 | ```
64 | # helm search jenkins
65 | NAME CHART VERSION APP VERSION DESCRIPTION
66 | stable/jenkins 0.35.2 lts Open source continuous integration server. It supports mu...
67 | ```
68 |
69 | 3. Deploy Jenkins with the chart's default settings
70 | ``` helm install stable/jenkins --name jenkins --namespace jenkins ```
71 |
72 | 4. Access the web UI (the NodePort can be looked up as shown below)
73 | http://192.168.101.66:NodePort/jenkins
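The literal `NodePort` above must be replaced with the port Kubernetes assigned; a sketch for looking it up (the Service name `jenkins` is assumed here, matching the secret name used in step 5):
```
kubectl -n jenkins get svc jenkins -o jsonpath='{.spec.ports[0].nodePort}'; echo
```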
74 |
75 | 5. Get the admin password
76 | ``` kubectl -n jenkins get secret jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode; echo ```
77 |
78 | 6. Inspect the chart details
79 | ``` helm inspect stable/jenkins ```
80 |
81 | 7. Check the release status
82 | ``` helm status jenkins ```
83 |
84 | 8. Delete Jenkins
85 | 1) Delete without purging the release record
86 | ``` helm delete jenkins ```
87 | The status command still shows the jenkins release
88 | ``` helm status jenkins ```
89 | 2) Delete and purge the release record as well
90 | ``` helm delete jenkins --purge ```
91 | The status command no longer shows the jenkins release
92 | ``` helm status jenkins ```
93 |
94 | Customized install and rollback with Helm
95 | ============
96 | 1. Search for the jenkins chart
97 | ``` helm search jenkins ```
98 |
99 | 2. View the values file
100 | ``` helm inspect values stable/jenkins ```
101 |
102 | 3. View all chart information
103 | ``` helm inspect stable/jenkins ```
104 |
105 | 4. Based on the inspected information, deploy a customized Jenkins by overriding the image tag
106 | ``` helm install stable/jenkins --name jenkins --namespace jenkins --set Master.ImageTag=2.112-alpine ```
107 |
108 | 5. Access the web UI
109 | http://192.168.101.66:NodePort
110 |
111 | 6. Upgrade Jenkins to a newer version
112 | ``` helm upgrade jenkins stable/jenkins --set Master.ImageTag=2.116-alpine --reuse-values ```
113 |
114 | 7. Check the release revision after the upgrade
115 | ``` helm list ```
116 |
117 | 8. Roll back to the previous revision (see the history sketch below)
118 | ``` helm rollback jenkins 0 ```
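To choose an explicit revision to roll back to, the release's revision history can be listed first; a small sketch:
```
# Show every revision of the jenkins release with its status and chart version
helm history jenkins
```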
119 |
120 | 9. Check the current revision
121 | ``` helm list ```
122 |
--------------------------------------------------------------------------------
/helm/helm模板内置函数.md:
--------------------------------------------------------------------------------
1 | 1. Create a chart
2 | ```
3 | # helm create mychart
4 | Creating mychart
5 | # tree mychart/
6 | mychart/
7 | ├── charts
8 | ├── Chart.yaml
9 | ├── templates
10 | │ ├── deployment.yaml
11 | │ ├── _helpers.tpl
12 | │ ├── ingress.yaml
13 | │ ├── NOTES.txt
14 | │ └── service.yaml
15 | └── values.yaml
16 | 2 directories, 7 files
17 | ```
18 | - NOTES.txt: the chart's "help text", shown to the user when they run helm install
19 | - deployment.yaml: a basic manifest for creating a Kubernetes Deployment
20 | - service.yaml: a basic manifest for creating a Service for the Deployment
21 | - ingress.yaml: a resource manifest for creating an Ingress object
22 | - _helpers.tpl: a place to put template helpers that can be reused throughout the chart
23 |
24 |
25 | 2. Built-in objects (a quick way to inspect what they resolve to is shown after this list)
26 | - Release: this object describes the release itself. It contains several fields:
27 | - Release.Name: the release name
28 | - Release.Time: the time of the release
29 | - Release.Namespace: the namespace the release is in (if the manifest does not override it)
30 | - Release.Service: the name of the releasing service (always Tiller).
31 | - Release.Revision: the revision number of this release, starting at 1 and incremented on each upgrade.
32 | - Release.IsUpgrade: set to true if the current operation is an upgrade or rollback.
33 | - Release.IsInstall: set to true if the current operation is an install.
34 | - Values: values passed into the template from the values.yaml file and from user-supplied files. By default, Values is empty.
35 | - Chart: the contents of the Chart.yaml file. All Chart objects come from that file. The available fields are listed in the Charts Guide.
36 | - Files: provides access to all non-special files in the chart. It cannot be used to access templates, but it can access the chart's other files. See the "Accessing Files" section.
37 | - Files.Get is a function for getting a file by name (.Files.Get config.ini)
38 | - Files.GetBytes is a function for getting a file's contents as a byte array instead of a string. This is useful for things like images.
39 | - Capabilities: provides information about the capabilities supported by the Kubernetes cluster.
40 | - Capabilities.APIVersions is a set of version information.
41 | - Capabilities.APIVersions.Has $version indicates whether a version (e.g. batch/v1) is enabled on the cluster.
42 | - Capabilities.KubeVersion provides a way to look up the Kubernetes version. It has the following values: Major, Minor, GitVersion, GitCommit, GitTreeState, BuildDate, GoVersion, Compiler, and Platform.
43 | - Capabilities.TillerVersion provides a way to look up the Tiller version. It has the following values: SemVer, GitCommit, and GitTreeState.
44 | - Template: contains information about the current template being executed
45 | - Name: the file path of the current template (e.g. mychart/templates/mytemplate.yaml)
46 | - BasePath: the path of the current chart's templates directory (e.g. mychart/templates).
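A quick way to see what these objects resolve to for a concrete chart is to render the templates locally without installing anything; a minimal sketch (Helm 2 syntax, run from the directory containing mychart/, with a hypothetical release name):
```
# Render the chart and print the computed values plus the resulting manifests,
# without submitting anything to the cluster
helm install --dry-run --debug ./mychart --name test-release
```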
47 |
--------------------------------------------------------------------------------
/helm/jenkins/自定义values部署jenkins/README.md:
--------------------------------------------------------------------------------
1 | 1. Set the web access address
2 | ``` # JENKINS_ADDR=www.jenkins.com ```
3 |
4 | 2. Deploy
5 | ``` # helm install stable/jenkins --name jenkins --namespace jenkins --values helm/jenkins-values.yml --set Master.HostName=$JENKINS_ADDR ```
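For $JENKINS_ADDR to resolve during testing, it has to point at a node running the ingress controller; a throwaway sketch (the address 192.168.101.66 is the node IP used in step 3 and is environment-specific):
```
# Map the Jenkins hostname to the ingress node, for local testing only
echo "192.168.101.66  www.jenkins.com" | sudo tee -a /etc/hosts
```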
6 |
7 | 3. Access the web UI
8 | http://192.168.101.66/jenkins
9 |
10 | 4. Get the admin password
11 | ```
12 | # JENKINS_PASS=$(kubectl -n jenkins get secret jenkins \
13 | -o jsonpath="{.data.jenkins-admin-password}" \
14 | | base64 --decode; echo)
15 |
16 | # echo $JENKINS_PASS
17 | ```
18 |
--------------------------------------------------------------------------------
/helm/jenkins/自定义values部署jenkins/jenkins-values.yaml:
--------------------------------------------------------------------------------
1 | Master:
2 | ImageTag: "2.138.2"
3 | Cpu: "500m"
4 | Memory: "500Mi"
5 | ServiceType: ClusterIP
6 | ServiceAnnotations:
7 | service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
8 | InstallPlugins:
9 | - durable-task:1.26
10 | - blueocean:1.9.0
11 | - credentials:2.1.18
12 | - git:3.9.1
13 | - git-client:2.7.3
14 | - kubernetes:1.12.4
15 | - pipeline-utility-steps:2.1.0
16 | - ssh-slaves:1.28.1
17 | - ssh-agent:1.17
18 | - jdk-tool:1.1
19 | Ingress:
20 | Annotations:
21 | kubernetes.io/ingress.class: "nginx"
22 | nginx.ingress.kubernetes.io/ssl-redirect: "false"
23 | nginx.ingress.kubernetes.io/proxy-body-size: 50m
24 | nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
25 | ingress.kubernetes.io/ssl-redirect: "false"
26 | ingress.kubernetes.io/proxy-body-size: 50m
27 | ingress.kubernetes.io/proxy-request-buffering: "off"
28 | HostName: jenkins.aishangwei.net
29 | rbac:
30 | install: true
31 | roleBindingKind: RoleBinding
32 |
--------------------------------------------------------------------------------
/helm/jenkins/自定义values部署jenkins/my-k8s-job-yaml.groovy:
--------------------------------------------------------------------------------
1 | podTemplate(label: "kubernetes", yaml: """
2 | apiVersion: v1
3 | kind: Pod
4 | spec:
5 | containers:
6 | - name: kubectl
7 | image: aishangwei/kubectl
8 | command: ["sleep"]
9 | args: ["100000"]
10 | - name: oc
11 | image: aishangwei/openshift-client
12 | command: ["sleep"]
13 | args: ["100000"]
14 | - name: golang
15 | image: golang:1.9
16 | command: ["sleep"]
17 | args: ["100000"]
18 | - name: helm
19 | image: aishangwei/helm:2.8.2
20 | command: ["sleep"]
21 | args: ["100000"]
22 | """
23 | ) {
24 | node("kubernetes") {
25 | container("kubectl") {
26 | stage("kubectl") {
27 | sh "kubectl version"
28 | }
29 | }
30 | container("oc") {
31 | stage("oc") {
32 | sh "oc version"
33 | }
34 | }
35 | container("golang") {
36 | stage("golang") {
37 | sh "go version"
38 | }
39 | }
40 | container("helm") {
41 | stage("helm") {
42 | sh "helm version"
43 | }
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/helm/jenkins/自定义values部署jenkins/垮名称空间/build-ns.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: go-demo-3-build
5 |
6 | ---
7 |
8 | apiVersion: v1
9 | kind: ServiceAccount
10 | metadata:
11 | name: build
12 | namespace: go-demo-3-build
13 |
14 | ---
15 |
16 | apiVersion: rbac.authorization.k8s.io/v1beta1
17 | kind: RoleBinding
18 | metadata:
19 | name: build
20 | namespace: go-demo-3-build
21 | roleRef:
22 | apiGroup: rbac.authorization.k8s.io
23 | kind: ClusterRole
24 | name: admin
25 | subjects:
26 | - kind: ServiceAccount
27 | name: build
28 |
29 |
30 |
--------------------------------------------------------------------------------
/helm/jenkins/自定义values部署jenkins/垮名称空间/my-k8s-job-ns.groovy:
--------------------------------------------------------------------------------
1 | podTemplate(
2 | label: "kubernetes",
3 | namespace: "go-demo-3-build",
4 | serviceAccount: "build",
5 | yaml: """
6 | apiVersion: v1
7 | kind: Pod
8 | spec:
9 | containers:
10 | - name: kubectl
11 | image: aishangwei/kubectl
12 | command: ["sleep"]
13 | args: ["100000"]
14 | - name: oc
15 | image: aishangwei/openshift-client
16 | command: ["sleep"]
17 | args: ["100000"]
18 | - name: golang
19 | image: golang:1.9
20 | command: ["sleep"]
21 | args: ["100000"]
22 | - name: helm
23 | image: aishangwei/helm:2.8.2
24 | command: ["sleep"]
25 | args: ["100000"]
26 | """
27 | ) {
28 | node("kubernetes") {
29 | container("kubectl") {
30 | stage("kubectl") {
31 | sh "kubectl version"
32 | }
33 | }
34 | container("oc") {
35 | stage("oc") {
36 | sh "oc version"
37 | }
38 | }
39 | container("golang") {
40 | stage("golang") {
41 | sh "go version"
42 | }
43 | }
44 | container("helm") {
45 | stage("helm") {
46 | sh "helm version --tiller-namespace go-demo-3-build"
47 | }
48 | }
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/helm/jenkins/自定义values部署jenkins/垮名称空间/prod-ns.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: go-demo-3
5 |
6 | ---
7 |
8 | apiVersion: rbac.authorization.k8s.io/v1beta1
9 | kind: RoleBinding
10 | metadata:
11 | name: build
12 | namespace: go-demo-3
13 | roleRef:
14 | apiGroup: rbac.authorization.k8s.io
15 | kind: ClusterRole
16 | name: admin
17 | subjects:
18 | - kind: ServiceAccount
19 | name: build
20 | namespace: go-demo-3-build
21 |
22 | ---
23 |
24 | apiVersion: v1
25 | kind: LimitRange
26 | metadata:
27 | name: build
28 | namespace: go-demo-3
29 | spec:
30 | limits:
31 | - default:
32 | memory: 200Mi
33 | cpu: 0.2
34 | defaultRequest:
35 | memory: 100Mi
36 | cpu: 0.1
37 | max:
38 | memory: 500Mi
39 | cpu: 0.5
40 | min:
41 | memory: 10Mi
42 | cpu: 0.05
43 | type: Container
44 |
45 | ---
46 |
47 | apiVersion: v1
48 | kind: ResourceQuota
49 | metadata:
50 | name: build
51 | namespace: go-demo-3
52 | spec:
53 | hard:
54 | requests.cpu: 2
55 | requests.memory: 3Gi
56 | limits.cpu: 3
57 | limits.memory: 4Gi
58 | pods: 15
59 |
--------------------------------------------------------------------------------
/helm/自定义chart仓库.md:
--------------------------------------------------------------------------------
1 | Install a ChartMuseum repository
2 | ================
3 | A chart repository can be served by ChartMuseum, nginx, any plain HTTP server, and so on
4 | https://jimmysong.io/posts/manage-kubernetes-native-app-with-helm/
5 | 1. Install ChartMuseum
6 | Netdisk link: https://pan.baidu.com/s/15kczilfAGHGFeTOWNw5Pww  extraction code: alp8
7 | ```
8 | # wget https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum
9 | # chmod +x chartmuseum
10 | # mv chartmuseum /usr/local/bin/
11 | ```
12 |
13 | 2. Start the chartmuseum service
14 | ``` # chartmuseum --debug --port=8089 --storage="local" --storage-local-rootdir="./chartstorage" --basic-auth-user admin --basic-auth-pass admin123 ```
15 |
16 | 3. Check the health endpoint
17 | ``` # curl http://192.168.101.67:8089/health ```
18 |
19 | 4. Query the ChartMuseum repository index
20 | ``` # curl -u admin:admin123 http://192.168.101.67:8089/index.yaml ```
21 |
22 | 5. Run it in the background
23 | ``` # nohup chartmuseum --debug --port=8089 --storage="local" --storage-local-rootdir="./chartstorage" --basic-auth-user admin --basic-auth-pass admin123 > ./chartmuseum.out 2>&1 & ```
24 |
25 |
26 |
27 | Add the custom chart repository
28 | ===============
29 |
30 | 1. Add the custom repository
31 | ``` # helm repo add chartmuseum http://192.168.101.67:8089 --username admin --password admin123 ```
32 |
33 | 2. Install the push plugin used for uploading charts
34 | ``` # helm plugin install https://github.com/chartmuseum/helm-push ```
35 |
36 | 3. Push a local chart to the chartmuseum repository
37 | ``` # helm push jenkins chartmuseum --username admin --password admin123 ```
38 | Note: jenkins here is a local chart directory
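If the push plugin is not available, a packaged chart can also be uploaded through ChartMuseum's HTTP API; a sketch, assuming the package version is 0.1.0 (the version deleted in step 9 below):
```
# Package the chart directory, then upload the resulting archive via the ChartMuseum API
helm package jenkins
curl --data-binary "@jenkins-0.1.0.tgz" -u admin:admin123 http://192.168.101.67:8089/api/charts
```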
39 |
40 | 4. Check the uploaded chart in the repository index
41 | ``` # curl http://192.168.101.67:8089/index.yaml -u admin:admin123 ```
42 |
43 | 5. Search the chartmuseum repository; it appears empty, so the local repository cache needs to be updated
44 | ```# helm search chartmuseum/ ```
45 |
46 | 6. Update the repositories
47 | ``` # helm repo update ```
48 |
49 | 7. Search again
50 | ``` # helm search chartmuseum/ ```
51 |
52 | 8. Inspect the chart details
53 | ``` # helm inspect chartmuseum/jenkins ```
54 |
55 | 9. Delete a chart version
56 | ``` # curl -XDELETE "http://192.168.101.67:8089/api/charts/jenkins/0.1.0" -u admin:admin123 ```
57 |
--------------------------------------------------------------------------------
/host文件的域名解析.md:
--------------------------------------------------------------------------------
1 | 1. Configure the hosts file inside the Pod
2 | ```
3 | $ vim centos-deployment.yaml
4 | kind: Deployment
5 | apiVersion: apps/v1
6 | metadata:
7 | name: centos7
8 | labels:
9 | app: centos7
10 | spec:
11 | replicas: 1
12 | selector:
13 | matchLabels:
14 | app: centos7
15 | template:
16 | metadata:
17 | labels:
18 | app: centos7
19 | spec:
20 | #-------------------------------------------
21 | hostAliases: # add entries to the Pod's hosts file
22 | - ip: "192.168.2.1" # the IP address to resolve to
23 | hostnames:
24 | - "www.baidu.com" # the hostname(s) mapped to that IP
25 | #-------------------------------------------
26 | containers:
27 | - name: service-provider
28 | image: centos:7.7.1908
29 | command:
30 | - "/bin/sh"
31 | args:
32 | - "-c"
33 | - "while true; do sleep 999999; done"
34 | ```
35 |
36 | 2. Deploy
37 | ```
38 | $ kubectl apply -f centos-deployment.yaml
39 | ```
40 |
41 | 3. Find the deployed CentOS Pod
42 | ```
43 | $ kubectl get pod | grep centos7
44 | centos7-585dd57b95-qsx2c 1/1 Running 0 5m30s
45 | ```
46 |
47 | 4. Exec into the Pod
48 | ```
49 | $ kubectl exec -it centos7-585dd57b95-qsx2c -- /bin/bash
50 | ```
51 |
52 | 5. View the hosts file inside the Pod
53 | ```
54 | $ cat /etc/hosts
55 | # Kubernetes-managed hosts file.
56 | 127.0.0.1 localhost
57 | ::1 localhost ip6-localhost ip6-loopback
58 | fe00::0 ip6-localnet
59 | fe00::0 ip6-mcastprefix
60 | fe00::1 ip6-allnodes
61 | fe00::2 ip6-allrouters
62 | 10.244.39.240 centos7-585dd57b95-qsx2c
63 | # Entries added by HostAliases.
64 | 192.168.2.1 www.baidu.com
65 | ```
66 |
67 | 6. Test
68 | ```
69 | $ ping www.baidu.com
70 | PING www.baidu.com (192.168.2.1) 56(84) bytes of data.
71 | 64 bytes from www.baidu.com (192.168.2.1): icmp_seq=1 ttl=127 time=0.248 ms
72 | 64 bytes from www.baidu.com (192.168.2.1): icmp_seq=2 ttl=127 time=0.274 ms
73 | 64 bytes from www.baidu.com (192.168.2.1): icmp_seq=3 ttl=127 time=0.294 ms
74 | ```
75 |
--------------------------------------------------------------------------------
/ingress-nginx/ingress-myapp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-myapp
5 | namespace: default
6 | annotations:
7 | kubernetes.io/ingress.class: "nginx"
8 | spec:
9 | rules:
10 | - host: www.myapp.com
11 | http:
12 | paths:
13 | - path:
14 | backend:
15 | serviceName: myapp
16 | servicePort: 80
17 |
--------------------------------------------------------------------------------
/ingress-nginx/myapp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: myapp
5 | namespace: default
6 | spec:
7 | selector:
8 | app: myapp
9 | release: canary
10 | ports:
11 | - name: http
12 | targetPort: 80
13 | port: 80
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: myapp-deploy
19 | namespace: default
20 | spec:
21 | replicas: 3
22 | selector:
23 | matchLabels:
24 | app: myapp
25 | release: canary
26 | template:
27 | metadata:
28 | labels:
29 | app: myapp
30 | release: canary
31 | spec:
32 | containers:
33 | - name: myapp
34 | image: ikubernetes/myapp:v2
35 | ports:
36 | - name: http
37 | containerPort: 80
38 |
--------------------------------------------------------------------------------
/ingress-nginx/service-nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ingress-nginx
5 | namespace: ingress-nginx
6 | labels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/part-of: ingress-nginx
9 | spec:
10 | type: NodePort
11 | ports:
12 | - name: http
13 | port: 80
14 | targetPort: 80
15 | protocol: TCP
16 | nodePort: 30080
17 | - name: https
18 | port: 443
19 | targetPort: 443
20 | protocol: TCP
21 | nodePort: 30443
22 | selector:
23 | app.kubernetes.io/name: ingress-nginx
24 | app.kubernetes.io/part-of: ingress-nginx
25 |
26 | ---
27 |
--------------------------------------------------------------------------------
/ingress-nginx/tomcat/README.md:
--------------------------------------------------------------------------------
1 | 1. Create the HTTPS certificate and secret
2 | ``` openssl genrsa -out tls.key 2048 ```
3 | ``` openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=Beijing/L=Beijing/O=devOps/CN=www.tomcat.com ```
4 | ``` kubectl create secret tls tomcat-ingress-secret --cert=tls.crt --key=tls.key ```
5 | 2. Deploy the tomcat service
6 | tomcat.yaml
7 | 3. Deploy the plain-HTTP ingress for tomcat
8 | ingress-tomcat.yaml
9 | 4. Deploy the HTTPS ingress for tomcat
10 | ingress-tomcat-tls.yaml
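Once the three manifests are applied, the rules can be smoke-tested from outside the cluster; a sketch, assuming the nginx ingress controller is exposed on NodePorts 30080/30443 (as in service-nodeport.yaml) and NODE_IP is one of your node addresses:
```
NODE_IP=192.168.101.66   # hypothetical; replace with a real node IP

# Plain-HTTP rule (ingress-tomcat.yaml)
curl -H "Host: www.tomcat.com" http://$NODE_IP:30080/

# TLS rule (ingress-tomcat-tls.yaml); -k skips verification of the self-signed certificate
curl -k --resolve www.tomcat.com:30443:$NODE_IP https://www.tomcat.com:30443/
```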
11 |
--------------------------------------------------------------------------------
/ingress-nginx/tomcat/ingress-tomcat-tls.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-tomcat-tls
5 | namespace: default
6 | annotations:
7 | kubernetes.io/ingress.class: "nginx"
8 | spec:
9 | tls:
10 | - hosts:
11 | - www.tomcat.com
12 | secretName: tomcat-ingress-secret
13 | rules:
14 | - host: www.tomcat.com
15 | http:
16 | paths:
17 | - path:
18 | backend:
19 | serviceName: tomcat
20 | servicePort: 8080
21 |
--------------------------------------------------------------------------------
/ingress-nginx/tomcat/ingress-tomcat.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-tomcat
5 | namespace: default
6 | annotations:
7 | kubernetes.io/ingress.class: "nginx"
8 | spec:
9 | rules:
10 | - host: www.tomcat.com
11 | http:
12 | paths:
13 | - path:
14 | backend:
15 | serviceName: tomcat
16 | servicePort: 8080
17 |
--------------------------------------------------------------------------------
/ingress-nginx/tomcat/tomcat.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: tomcat
5 | namespace: default
6 | spec:
7 | selector:
8 | app: tomcat
9 | release: canary
10 | ports:
11 | - name: http
12 | targetPort: 8080
13 | port: 8080
14 | - name: ajp
15 | targetPort: 8009
16 | port: 8009
17 | ---
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | name: tomcat-deploy
22 | namespace: default
23 | spec:
24 | replicas: 3
25 | selector:
26 | matchLabels:
27 | app: tomcat
28 | release: canary
29 | template:
30 | metadata:
31 | labels:
32 | app: tomcat
33 | release: canary
34 | spec:
35 | containers:
36 | - name: tomcat
37 | image: tomcat:8.5.32-jre8-alpine
38 | ports:
39 | - name: http
40 | containerPort: 8080
41 | - name: ajp
42 | containerPort: 8009
43 |
--------------------------------------------------------------------------------
/ingress/canary/ingress-common.yaml:
--------------------------------------------------------------------------------
1 | #ingress
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: web-canary-a
6 | namespace: canary
7 | spec:
8 | rules:
9 | - host: canary.mooc.com
10 | http:
11 | paths:
12 | - path: /
13 | backend:
14 | serviceName: web-canary-a
15 | servicePort: 80
16 |
--------------------------------------------------------------------------------
/ingress/canary/ingress-compose.yaml:
--------------------------------------------------------------------------------
1 | #ingress
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: web-canary-b
6 | namespace: canary
7 | annotations:
8 | nginx.ingress.kubernetes.io/canary: "true"
9 | nginx.ingress.kubernetes.io/canary-by-header: "web-canary"
10 | nginx.ingress.kubernetes.io/canary-by-cookie: "web-canary"
11 | nginx.ingress.kubernetes.io/canary-weight: "90"
12 | spec:
13 | rules:
14 | - host: canary.mooc.com
15 | http:
16 | paths:
17 | - path: /
18 | backend:
19 | serviceName: web-canary-b
20 | servicePort: 80
21 |
--------------------------------------------------------------------------------
/ingress/canary/ingress-cookie.yaml:
--------------------------------------------------------------------------------
1 | #ingress
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: web-canary-b
6 | namespace: canary
7 | annotations:
8 | nginx.ingress.kubernetes.io/canary: "true"
9 | nginx.ingress.kubernetes.io/canary-by-cookie: "web-canary"
10 | spec:
11 | rules:
12 | - host: canary.mooc.com
13 | http:
14 | paths:
15 | - path: /
16 | backend:
17 | serviceName: web-canary-b
18 | servicePort: 80
19 |
--------------------------------------------------------------------------------
/ingress/canary/ingress-header.yaml:
--------------------------------------------------------------------------------
1 | #ingress
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: web-canary-b
6 | namespace: canary
7 | annotations:
8 | nginx.ingress.kubernetes.io/canary: "true"
9 | nginx.ingress.kubernetes.io/canary-by-header: "web-canary"
10 | spec:
11 | rules:
12 | - host: canary.mooc.com
13 | http:
14 | paths:
15 | - path: /
16 | backend:
17 | serviceName: web-canary-b
18 | servicePort: 80
19 |
--------------------------------------------------------------------------------
/ingress/canary/ingress-weight.yaml:
--------------------------------------------------------------------------------
1 | #ingress
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: web-canary-b
6 | namespace: canary
7 | annotations:
8 | nginx.ingress.kubernetes.io/canary: "true"
9 | nginx.ingress.kubernetes.io/canary-weight: "90"
10 | spec:
11 | rules:
12 | - host: canary.mooc.com
13 | http:
14 | paths:
15 | - path: /
16 | backend:
17 | serviceName: web-canary-b
18 | servicePort: 80
19 |
--------------------------------------------------------------------------------
/ingress/canary/web-canary-a.yaml:
--------------------------------------------------------------------------------
1 | #deploy
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: web-canary-a
6 | namespace: canary
7 | spec:
8 | strategy:
9 | rollingUpdate:
10 | maxSurge: 25%
11 | maxUnavailable: 25%
12 | type: RollingUpdate
13 | selector:
14 | matchLabels:
15 | app: web-canary-a
16 | replicas: 1
17 | template:
18 | metadata:
19 | labels:
20 | app: web-canary-a
21 | spec:
22 | containers:
23 | - name: web-canary-a
24 | image: hub.mooc.com/kubernetes/web:v1
25 | ports:
26 | - containerPort: 8080
27 | livenessProbe:
28 | tcpSocket:
29 | port: 8080
30 | initialDelaySeconds: 20
31 | periodSeconds: 10
32 | failureThreshold: 3
33 | successThreshold: 1
34 | timeoutSeconds: 5
35 | readinessProbe:
36 | httpGet:
37 | path: /hello?name=test
38 | port: 8080
39 | scheme: HTTP
40 | initialDelaySeconds: 20
41 | periodSeconds: 10
42 | failureThreshold: 1
43 | successThreshold: 1
44 | timeoutSeconds: 5
45 | ---
46 | #service
47 | apiVersion: v1
48 | kind: Service
49 | metadata:
50 | name: web-canary-a
51 | namespace: canary
52 | spec:
53 | ports:
54 | - port: 80
55 | protocol: TCP
56 | targetPort: 8080
57 | selector:
58 | app: web-canary-a
59 | type: ClusterIP
60 |
61 |
--------------------------------------------------------------------------------
/ingress/canary/web-canary-b.yaml:
--------------------------------------------------------------------------------
1 | #deploy
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: web-canary-b
6 | namespace: canary
7 | spec:
8 | strategy:
9 | rollingUpdate:
10 | maxSurge: 25%
11 | maxUnavailable: 25%
12 | type: RollingUpdate
13 | selector:
14 | matchLabels:
15 | app: web-canary-b
16 | replicas: 1
17 | template:
18 | metadata:
19 | labels:
20 | app: web-canary-b
21 | spec:
22 | containers:
23 | - name: web-canary-b
24 | image: hub.mooc.com/kubernetes/springboot-web:v1
25 | ports:
26 | - containerPort: 8080
27 | livenessProbe:
28 | tcpSocket:
29 | port: 8080
30 | initialDelaySeconds: 20
31 | periodSeconds: 10
32 | failureThreshold: 3
33 | successThreshold: 1
34 | timeoutSeconds: 5
35 | readinessProbe:
36 | httpGet:
37 | path: /hello?name=test
38 | port: 8080
39 | scheme: HTTP
40 | initialDelaySeconds: 20
41 | periodSeconds: 10
42 | failureThreshold: 1
43 | successThreshold: 1
44 | timeoutSeconds: 5
45 | ---
46 | #service
47 | apiVersion: v1
48 | kind: Service
49 | metadata:
50 | name: web-canary-b
51 | namespace: canary
52 | spec:
53 | ports:
54 | - port: 80
55 | protocol: TCP
56 | targetPort: 8080
57 | selector:
58 | app: web-canary-b
59 | type: ClusterIP
60 |
61 |
--------------------------------------------------------------------------------
/ingress/custom-header-global.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | data:
4 | proxy-set-headers: "ingress-nginx/custom-headers"
5 | metadata:
6 | name: nginx-configuration
7 | namespace: ingress-nginx
8 | labels:
9 | app.kubernetes.io/name: ingress-nginx
10 | app.kubernetes.io/part-of: ingress-nginx
11 | ---
12 | apiVersion: v1
13 | kind: ConfigMap
14 | data:
15 | X-Different-Name: "true"
16 | X-Request-Start: t=${msec}
17 | X-Using-Nginx-Controller: "true"
18 | metadata:
19 | name: custom-headers
20 | namespace: ingress-nginx
21 |
--------------------------------------------------------------------------------
/ingress/custom-header-spec-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | annotations:
5 | nginx.ingress.kubernetes.io/configuration-snippet: |
6 | more_set_headers "Request-Id: $req_id";
7 | name: web-demo
8 | namespace: dev
9 | spec:
10 | rules:
11 | - host: web-dev.mooc.com
12 | http:
13 | paths:
14 | - backend:
15 | serviceName: web-demo
16 | servicePort: 80
17 | path: /
18 |
--------------------------------------------------------------------------------
/ingress/ingress-session.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | annotations:
5 | nginx.ingress.kubernetes.io/affinity: cookie
6 | nginx.ingress.kubernetes.io/session-cookie-hash: sha1
7 | nginx.ingress.kubernetes.io/session-cookie-name: route
8 | name: web-demo
9 | namespace: dev
10 | spec:
11 | rules:
12 | - host: web-dev.mooc.com
13 | http:
14 | paths:
15 | - backend:
16 | serviceName: web-demo
17 | servicePort: 80
18 | path: /
19 |
--------------------------------------------------------------------------------
/ingress/nginx-config.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: nginx-configuration
5 | namespace: ingress-nginx
6 | labels:
7 | app: ingress-nginx
8 | data:
9 | proxy-body-size: "64m"
10 | proxy-read-timeout: "180"
11 | proxy-send-timeout: "180"
12 |
--------------------------------------------------------------------------------
/ingress/tcp-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: tcp-services
5 | namespace: ingress-nginx
6 | data:
7 | "30000": dev/web-demo:80
8 |
--------------------------------------------------------------------------------
/ingress/tls/gen-secret.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout mooc.key -out mooc.crt -subj "/CN=*.mooc.com/O=*.mooc.com"
4 |
5 | kubectl create secret tls mooc-tls --key mooc.key --cert mooc.crt
6 |
--------------------------------------------------------------------------------
/ingress/tls/web-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: web-demo
5 | namespace: dev
6 | spec:
7 | rules:
8 | - host: web-dev.mooc.com
9 | http:
10 | paths:
11 | - backend:
12 | serviceName: web-demo
13 | servicePort: 80
14 | path: /
15 | tls:
16 | - hosts:
17 | - web-dev.mooc.com
18 | secretName: mooc-tls
19 |
--------------------------------------------------------------------------------
/istio/Istio介绍/1. istio是什么.md:
--------------------------------------------------------------------------------
1 | #### 1. Why use Istio
2 | Using capabilities such as load balancing, service-to-service authentication, and monitoring, Istio makes it easy to create a network of deployed services with few or no code changes in the services themselves. You add Istio support to services by deploying a special sidecar proxy throughout the environment that intercepts all network communication between microservices; you then configure and manage Istio through its control plane functionality, which includes:
3 |
4 | - Automatic load balancing for HTTP, gRPC, WebSocket, and TCP traffic.
5 | - Fine-grained control of traffic behavior with rich routing rules, retries, failovers, and fault injection.
6 | - A pluggable policy layer and configuration API supporting access control, rate limiting, and quotas.
7 | - Automatic metrics, logs, and traces for all traffic within a cluster, including cluster ingress and egress.
8 | - Secure service-to-service communication in a cluster with strong identity-based authentication and authorization.
9 |
10 | #### 2. Istio's core features
11 |
12 | ##### 2.1 Traffic management
13 | Istio's simple rule configuration and traffic routing let you control the flow of traffic and API calls between services. Istio simplifies the configuration of service-level properties such as circuit breakers, timeouts, and retries, and makes it easy to set up important tasks like A/B testing, canary rollouts, and percentage-based staged rollouts.
14 |
15 | With better visibility into traffic and out-of-the-box failure recovery, you can catch problems before they cause issues, making calls more reliable and the network more robust.
16 |
17 | ##### 2.2 Security
18 | Istio's security capabilities free developers to focus on security at the application level. Istio provides the underlying secure communication channel and manages authentication, authorization, and encryption of service communication at scale. With Istio, service communication is secured by default, letting you enforce policies consistently across different protocols and runtimes, all with little or no application change.
19 |
20 | While Istio is platform independent, using it together with Kubernetes (or infrastructure) network policies brings even greater benefit, including the ability to secure pod-to-pod or service-to-service communication at both the network and application layers.
21 |
22 | ##### 2.3 Observability
23 | Istio's robust tracing, monitoring, and logging give you deep insight into your service mesh deployment. With Istio's monitoring features you can truly understand how service performance affects things upstream and downstream, while its custom dashboards provide visibility into the performance of all your services and show how that performance affects your other processes.
24 |
25 | ##### 2.4 Platform support
26 | Istio is platform independent and designed to run in a variety of environments, including across clouds, on premises, Kubernetes, Mesos, and more. You can deploy Istio on Kubernetes or on Nomad. Istio currently supports:
27 |
28 | - Service deployment on Kubernetes
29 |
30 | - Services registered with Consul
31 |
32 | - Services running on individual virtual machines
--------------------------------------------------------------------------------
/istio/Istio介绍/2. istio的架构.md:
--------------------------------------------------------------------------------
1 | The Istio service mesh is logically split into a data plane and a control plane.
2 |
3 | - The data plane is composed of a set of intelligent proxies (Envoy) deployed as sidecars. These proxies mediate and control all network communication between microservices. They also collect and report telemetry on all mesh traffic.
4 |
5 | - The control plane manages and configures the proxies to route traffic.
6 |
7 | The following diagram shows the different components that make up each plane (diagram from the official documentation):
8 |
9 | 
10 |
11 | Traffic in Istio is categorized as data plane traffic and control plane traffic. Data plane traffic refers to the messages that the business logic of the workloads sends and receives. Control plane traffic refers to the configuration and control messages sent between Istio components to program the behavior of the mesh. Traffic management in Istio refers exclusively to data plane traffic.
12 |
13 | #### 1. Components
14 |
15 | ##### 1.1 Envoy
16 | Istio uses an extended version of the Envoy proxy. Envoy is a high-performance proxy developed in C++ that mediates all inbound and outbound traffic for all services in the service mesh. Envoy proxies are the only Istio components that interact with data plane traffic.
17 |
18 | Envoy proxies are deployed as sidecars to services, logically augmenting the services with many of Envoy's built-in features:
19 |
20 | - Dynamic service discovery
21 |
22 | - Load balancing
23 |
24 | - TLS termination
25 |
26 | - HTTP/2 and gRPC proxies
27 |
28 | - Circuit breakers
29 |
30 | - Health checks
31 |
32 | - Staged rollouts with %-based traffic split
33 |
34 | - Fault injection
35 |
36 | - Rich metrics
37 |
38 | This sidecar deployment allows Istio to extract a wealth of signals about traffic behavior as attributes. Istio can use these attributes to enforce policy decisions and send them to monitoring systems to provide information about the behavior of the entire mesh.
39 |
40 | The sidecar proxy model also lets you add Istio capabilities to an existing deployment without re-architecting or rewriting code.
41 |
42 | Some of the Istio features and tasks enabled by Envoy proxies include:
43 |
44 | - Traffic control features: enforce fine-grained traffic control with rich routing rules for HTTP, gRPC, WebSocket, and TCP traffic.
45 |
46 | - Network resiliency features: setup retries, failovers, circuit breakers, and fault injection.
47 |
48 | - Security and authentication features: enforce security policies and enforce access control and rate limiting defined through the configuration API.
49 |
50 | - Pluggable extensions model based on WebAssembly that allows for custom policy enforcement and telemetry generation for mesh traffic.
51 |
52 | ##### 1.2 Pilot
53 | Pilot provides service discovery for the Envoy sidecars, traffic management capabilities for intelligent routing (for example, A/B tests and canary rollouts), and resiliency features such as timeouts, retries, and circuit breakers.
54 |
55 | Pilot converts high-level routing rules that control traffic behavior into Envoy-specific configurations and propagates them to the sidecars at runtime. Pilot abstracts away platform-specific service discovery mechanisms and synthesizes them into a standard format that any sidecar conforming to the Envoy API can consume.
56 |
57 | The following diagram shows how the platform adapters and Envoy proxies interact:
58 |
59 | 
60 |
61 | 1. The platform starts a new instance of a service, which notifies its platform adapter.
62 |
63 | 2. The platform adapter registers the instance with Pilot's abstract model.
64 |
65 | 3. Pilot distributes traffic rules and configuration to the Envoy proxies to account for the change.
66 |
67 | This loose coupling allows Istio to run on platforms such as Kubernetes, Consul, or Nomad.
68 |
69 | ##### 1.3 Citadel
70 | Citadel enables strong service-to-service and end-user authentication with built-in identity and credential management. You can use Citadel to upgrade unencrypted traffic in the service mesh. With Citadel, operators can enforce policies based on service identity rather than on relatively unstable layer 3 or layer 4 network identifiers. Starting with release 0.5, you can use Istio's authorization feature to control who can access your services.
71 |
72 | ##### 1.4 Galley
73 | Galley is Istio's configuration validation, ingestion, processing, and distribution component. It is responsible for insulating the rest of the Istio components from the details of obtaining user configuration from the underlying platform (for example, Kubernetes).
74 |
75 |
76 | #### 2. Design goals
77 | - Maximize transparency: to adopt Istio, an operator or developer should have to do as little work as possible to get real value from the system. To this end, Istio can automatically inject itself into all the network paths between services. Istio uses sidecar proxies to capture traffic and, where possible, automatically programs the networking layer to route traffic through those proxies without any changes to the deployed application code. In Kubernetes, the proxies are injected into pods and traffic is captured by programming iptables rules. Once the sidecar proxies are injected and traffic routing is in place, Istio can mediate all traffic. This principle also applies to performance: when Istio is applied to a deployment, operators should see only a minimal increase in resource cost for the capabilities provided. Components and APIs must be designed with performance and scalability in mind.
78 |
79 | - Extensibility: as operators and developers come to depend on the capabilities Istio provides, the system must grow with their needs. While we continue to add new features, the greatest needs are the ability to extend the policy system, to integrate with other sources of policy and control, and to propagate signals about mesh behavior to other systems for analysis. The policy runtime supports a standard extension mechanism for plugging in other services.
80 |
81 | - Portability: the ecosystem that uses Istio differs in many dimensions. Istio must run in any cloud or on-premises environment with minimal effort. The task of porting Istio-based services to new environments must be trivial. With Istio you can operate a single service deployed into multiple environments; for example, it can be deployed redundantly across multiple clouds.
82 |
83 | - Policy uniformity: applying policy to API calls between services provides a great deal of control over mesh behavior. However, it is equally important to apply policy to resources that are not necessarily expressed at the API level. For example, applying a quota to the amount of CPU consumed by an ML training job is more useful than applying a quota to the call that initiated the job. For this reason, Istio maintains the policy system as a distinct service with its own API, rather than baking it into the sidecar proxy, allowing services to integrate with it directly as needed.
--------------------------------------------------------------------------------
/istio/istio的部署/2. 应用案例部署.md:
--------------------------------------------------------------------------------
1 | #### 1. Deploy the sample application
2 |
3 | 1. Deploy the Bookinfo sample application
4 |
5 | ```shell
6 | $ kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
7 | ```
8 |
9 | 2. As each pod becomes ready, its Istio sidecar is deployed alongside it.
10 |
11 | ```shell
12 | $ kubectl get service
13 | $ kubectl get pods
14 | ```
15 |
16 | 3. Verify that everything is working
17 |
18 | ```shell
19 | $ kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o "<title>.*</title>"
20 | ```
21 |
22 | #### 2. Open the application to outside traffic
23 | 1. The Bookinfo application is deployed, but it is not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of the mesh.
24 |
25 | ```shell
26 | $ kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
27 | ```
28 |
29 | 2. View the created gateway
30 |
31 | ```shell
32 | $ kubectl get gateway
33 | ```
34 |
35 | #### 3. Determine the ingress IP and port
36 |
37 | 1. Check the IP and ports of the istio-ingressgateway service
38 |
39 | ```shell
40 | $ kubectl get svc istio-ingressgateway -n istio-system
41 |
42 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
43 | istio-ingressgateway LoadBalancer 10.96.29.94 192.168.20.88 15020:30714/TCP,80:32518/TCP,443:32721/TCP,15029:31316/TCP,15030:30930/TCP,15031:32012/TCP,15032:31724/TCP,31400:31016/TCP,15443:31582/TCP 71m
44 | ```
45 |
46 | >[info] The output above shows that the service's external IP is `192.168.20.88`. Next, test access to the application we deployed.
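If the cluster has no external load balancer (EXTERNAL-IP stays `<pending>`), the gateway can be reached through its NodePorts instead; a minimal sketch, assuming the standard port names from the demo profile and using the first node's internal IP:

```shell
# Derive a usable ingress address from the istio-ingressgateway NodePorts
export INGRESS_HOST=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
echo "http://$INGRESS_HOST:$INGRESS_PORT/productpage"
```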
47 |
48 | 2. Open `http://192.168.20.88/productpage` in a browser to verify access
49 |
50 | 
51 |
52 | #### 4. View the installed application in the dashboards
53 |
54 | The demo installation of Istio installs several optional dashboards. The Kiali dashboard helps you understand the structure of your service mesh by displaying the topology and indicating the health of the mesh.
55 |
56 | 1. Change the kiali Service type to LoadBalancer.
57 |
58 | ```shell
59 | # Run the following command to edit the kiali service
60 | [root@c72082 istio-1.6.0]# kubectl edit svc kiali -n istio-system
61 |
62 | # Change type: ClusterIP to LoadBalancer
63 | ```
64 | 
65 |
66 | 2. Check the service again
67 |
68 | ```shell
69 | $ kubectl get svc kiali -n istio-system
70 |
71 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
72 | kiali LoadBalancer 10.101.155.85 192.168.20.89 20001:30445/TCP 80m
73 | ```
74 |
75 | 3. Open `http://192.168.20.89:20001/kiali/` in a browser; the result looks like this:
76 | 
77 |
78 | - The initial username and password are both `admin`
79 | 
80 |
81 | #### 5. Uninstall
82 |
83 | - The Istio uninstall deletes the RBAC permissions and all resources under the istio-system namespace hierarchically. It is safe to ignore errors for resources that do not exist, because they may have already been removed as part of the hierarchy.
84 |
85 | ```shell
86 | $ istioctl manifest generate --set profile=demo | kubectl delete -f -
87 | ```
88 |
89 | - The `istio-system` namespace is not deleted by default; if it is no longer needed, run the following command to remove it.
90 |
91 | ```shell
92 | $ kubectl delete namespace istio-system
93 | ```
--------------------------------------------------------------------------------
/istio/readme.md:
--------------------------------------------------------------------------------
1 | https://istio.io/latest/zh/docs/examples/bookinfo/
2 |
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_002a23141689031438a2bf3b79bdd57e_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_002a23141689031438a2bf3b79bdd57e_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_04dadc0d0eb36cffb2011e3432f61b85_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_04dadc0d0eb36cffb2011e3432f61b85_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0501058aa69fa3d40bb60d4544066105_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0501058aa69fa3d40bb60d4544066105_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0986034bf92f784777699a208c5e0e39_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0986034bf92f784777699a208c5e0e39_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0a582cc549d38794501aca3a8463416a_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0a582cc549d38794501aca3a8463416a_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0af8d80faa215cd2200bdd1d2c0bfd00_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0af8d80faa215cd2200bdd1d2c0bfd00_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0be04515e35d04e35dd8c2bdd9cf3238_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0be04515e35d04e35dd8c2bdd9cf3238_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0ce4e5e7e4dacb41c961df97cdfbdd26_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0ce4e5e7e4dacb41c961df97cdfbdd26_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_0f60886b3ea392e40b2bdf2d73bf1e99_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_0f60886b3ea392e40b2bdf2d73bf1e99_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_10b0928c098369a5a19f9f429e106861_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_10b0928c098369a5a19f9f429e106861_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_116505e375412bd0a19bb190c82ae06f_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_116505e375412bd0a19bb190c82ae06f_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_14cf55cf52eb6492dc99c35589d356c0_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_14cf55cf52eb6492dc99c35589d356c0_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_16b9eeffff8e7563035a9d9dca86f211_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_16b9eeffff8e7563035a9d9dca86f211_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_1f81547fff73bbb8c6e59c8eeae41400_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_1f81547fff73bbb8c6e59c8eeae41400_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_22654d05023204d2d845806a2f9db49a_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_22654d05023204d2d845806a2f9db49a_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_2a4ab8dfbcc4507c815bc1e6115f3e6b_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_2a4ab8dfbcc4507c815bc1e6115f3e6b_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_2abe98f2f311391302fc893c467375cd_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_2abe98f2f311391302fc893c467375cd_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_38b960799d160d1dc64203ab7cb5dfd3_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_38b960799d160d1dc64203ab7cb5dfd3_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_3a206b313d8fb05bcbfab13fa2619079_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_3a206b313d8fb05bcbfab13fa2619079_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_3caa8a514df15d157be71a4f8d15bbf1_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_3caa8a514df15d157be71a4f8d15bbf1_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_40118b60e5cc481b7370398eece3c960_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_40118b60e5cc481b7370398eece3c960_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_41aa0f362d38a30448324e257792539b_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_41aa0f362d38a30448324e257792539b_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_448fccf6e01ac2632735c85dda6280ef_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_448fccf6e01ac2632735c85dda6280ef_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_488a67f580f9b85897b81e409690f61c_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_488a67f580f9b85897b81e409690f61c_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_5e5ec9cedc9fbcf6f72b25500222426c_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_5e5ec9cedc9fbcf6f72b25500222426c_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_65bda16db28891547c45edcbe679a65e_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_65bda16db28891547c45edcbe679a65e_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_6716c81d0837294d630321803a742bdb_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_6716c81d0837294d630321803a742bdb_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_67831fddf20acdd33b6e8062dfb01553_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_67831fddf20acdd33b6e8062dfb01553_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_697be29d1c4ddf999d7ff145bbd3ba7e_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_697be29d1c4ddf999d7ff145bbd3ba7e_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_6d949388e270a8d4de070696d235f8ab_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_6d949388e270a8d4de070696d235f8ab_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_6eb7dd47854172edcf61dc942db77955_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_6eb7dd47854172edcf61dc942db77955_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_7de4c1822f1178c2138303c3166fcdb3_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_7de4c1822f1178c2138303c3166fcdb3_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_816f091cbc8b4c4b43a878d37d19eb46_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_816f091cbc8b4c4b43a878d37d19eb46_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_81e343eacd3d664259a85a00d4c14840_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_81e343eacd3d664259a85a00d4c14840_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_93dd3360c6d8722572fac74c52d4ab8e_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_93dd3360c6d8722572fac74c52d4ab8e_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_99364e52c6af7a9b580dbd79f6d0ce05_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_99364e52c6af7a9b580dbd79f6d0ce05_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_9e35f74168545a1c5cbee1aedc109b6a_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_9e35f74168545a1c5cbee1aedc109b6a_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_a3a04cdbb732c3278a949954e2229ef3_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_a3a04cdbb732c3278a949954e2229ef3_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_b1d4054d60a2fae0cb290d94ad34733c_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_b1d4054d60a2fae0cb290d94ad34733c_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_b35a5e435ec3e294dd262bb96489ecd5_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_b35a5e435ec3e294dd262bb96489ecd5_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_b5e951e8e9a44f01967037f0ee2dbd11_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_b5e951e8e9a44f01967037f0ee2dbd11_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_b65b9dda7c7cb761cc608329024dde6f_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_b65b9dda7c7cb761cc608329024dde6f_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_b7217573ee7da48538ffb60ce25603e3_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_b7217573ee7da48538ffb60ce25603e3_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_b7290720de91626b9fecabdc04c0698f_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_b7290720de91626b9fecabdc04c0698f_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_c6ed5d3bdf9f74dbec606ca193c57d89_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_c6ed5d3bdf9f74dbec606ca193c57d89_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_d33b4d30470394ebbf1c2d9ef3ce30eb_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_d33b4d30470394ebbf1c2d9ef3ce30eb_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_d5e596750091f1d61c61d1ea6ca97fd9_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_d5e596750091f1d61c61d1ea6ca97fd9_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_e4476b175dd201ba8ae4bf879fecfa45_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_e4476b175dd201ba8ae4bf879fecfa45_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_e6317a219391d7b6cbcac2a6ff97c081_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_e6317a219391d7b6cbcac2a6ff97c081_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_e7a9c485a2161eae50c7001d27e9b4f9_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_e7a9c485a2161eae50c7001d27e9b4f9_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_f7ae9666322d68bc0af6ca222a1a90eb_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_f7ae9666322d68bc0af6ca222a1a90eb_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_fd2508509cff3877a032f896cdc4f68a_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_fd2508509cff3877a032f896cdc4f68a_r.png
--------------------------------------------------------------------------------
/istio/uploads/istio/images/m_fe293c28a82dec2b0b1bed9fea8cdc16_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istio/images/m_fe293c28a82dec2b0b1bed9fea8cdc16_r.png
--------------------------------------------------------------------------------
/istio/uploads/istioistio-1c57rplb7u6a1/images/m_5609970a0e7667880f68ad5e30c6c86d_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istioistio-1c57rplb7u6a1/images/m_5609970a0e7667880f68ad5e30c6c86d_r.png
--------------------------------------------------------------------------------
/istio/uploads/istioistio-1c57rplb7u6a1/images/m_5ab06b5b4c94f1edc950781a6daca29a_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istioistio-1c57rplb7u6a1/images/m_5ab06b5b4c94f1edc950781a6daca29a_r.png
--------------------------------------------------------------------------------
/istio/uploads/istioistio-1c57rplb7u6a1/images/m_c384791278a780157af5eab154c1e183_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istioistio-1c57rplb7u6a1/images/m_c384791278a780157af5eab154c1e183_r.png
--------------------------------------------------------------------------------
/istio/uploads/istioistio-1c57rplb7u6a1/images/m_e81a1056aa7ae4424c10fcc965eb2d67_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mykubernetes/kubernetes/b003c6c32e9a6680c1578b520f7b2dbb6e3d10ec/istio/uploads/istioistio-1c57rplb7u6a1/images/m_e81a1056aa7ae4424c10fcc965eb2d67_r.png
--------------------------------------------------------------------------------
/istio/安全/证书管理/1. 使用已存在的CA证书.md:
--------------------------------------------------------------------------------
1 | This task shows how to configure the Istio certificate authority (CA) with an existing root certificate, signing certificate and key.
2 |
3 | By default, Istio's CA generates a self-signed root certificate and key and uses them to sign workload certificates. Istio's CA can also use an administrator-specified certificate and key to sign workload certificates.
4 |
5 | #### 1. Plug in the existing certificates and key
6 |
7 | The following step turns the certificate and key files ca-cert.pem, ca-key.pem, root-cert.pem and cert-chain.pem into a Kubernetes secret.
8 |
9 | ```shell
10 | $ kubectl create namespace istio-system
11 | $ kubectl create secret generic cacerts -n istio-system --from-file=samples/certs/ca-cert.pem \
12 | --from-file=samples/certs/ca-key.pem --from-file=samples/certs/root-cert.pem \
13 | --from-file=samples/certs/cert-chain.pem
14 | ```
15 |
16 | 2. Deploy Istio with the demo configuration profile
17 |
18 | Istio's CA will read the certificates and key from the mounted secret files.
19 |
20 | ```shell
21 | $ istioctl manifest apply --set profile=demo
22 | ```
23 |
24 | #### 2. Deploy the example services
25 |
26 | 1. Deploy the httpbin and sleep sample services
27 |
28 | ```shell
29 | $ kubectl create ns foo
30 | $ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
31 | $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
32 | ```
33 |
34 | 2. Deploy a policy for the workloads in the foo namespace so that they only accept mutual TLS traffic
35 |
36 | ```shell
37 | $ kubectl apply -n foo -f - < httpbin-proxy-cert.txt
54 | ```
55 |
56 | 2. Parse the certificates on the certificate chain
57 |
58 | ```shell
59 | $ sed -n '/-----BEGIN CERTIFICATE-----/{:start /-----END CERTIFICATE-----/!{N;b start};/.*/p}' httpbin-proxy-cert.txt > certs.pem
60 | awk 'BEGIN {counter=0;} /BEGIN CERT/{counter++} { print > "proxy-cert-" counter ".pem"}' < certs.pem
61 |
62 | ```
63 |
64 | 3. Verify the root certificate
65 |
66 | ```shell
67 | $ openssl x509 -in samples/certs/root-cert.pem -text -noout > /tmp/root-cert.crt.txt
68 | $ openssl x509 -in ./proxy-cert-3.pem -text -noout > /tmp/pod-root-cert.crt.txt
69 | $ diff -s /tmp/root-cert.crt.txt /tmp/pod-root-cert.crt.txt
70 | ```
71 |
72 | 4. Verify the CA certificate
73 |
74 | ```shell
75 | $ openssl x509 -in samples/certs/ca-cert.pem -text -noout > /tmp/ca-cert.crt.txt
76 | $ openssl x509 -in ./proxy-cert-2.pem -text -noout > /tmp/pod-cert-chain-ca.crt.txt
77 | $ diff -s /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt
78 | ```
79 |
80 | 5. Verify the certificate chain from the root certificate to the workload certificate
81 |
82 | ```shell
83 | $ openssl verify -CAfile <(cat samples/certs/ca-cert.pem samples/certs/root-cert.pem) ./proxy-cert-1.pem
84 | ```
85 |
86 | 6. Clean up this experiment
87 |
88 | ```shell
89 | $ kubectl delete secret cacerts -n istio-system
90 | $ kubectl delete ns foo istio-system
91 | ```
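92 |
93 | If the bundled samples/certs files are not at hand, roughly equivalent root and intermediate CA files can be produced with plain openssl. This is only a minimal sketch with assumed subject names and validity periods, not the official generation procedure:
94 |
95 | ```shell
96 | # Self-signed root CA -> root-cert.pem / root-key.pem
97 | $ openssl req -x509 -newkey rsa:4096 -nodes -sha256 -days 3650 \
98 |     -subj "/O=example Inc./CN=Root CA" \
99 |     -keyout root-key.pem -out root-cert.pem
100 | # Intermediate CA key and CSR -> ca-key.pem / ca-csr.pem
101 | $ openssl req -newkey rsa:4096 -nodes -sha256 \
102 |     -subj "/O=example Inc./CN=Intermediate CA" \
103 |     -keyout ca-key.pem -out ca-csr.pem
104 | # Sign the intermediate with the root -> ca-cert.pem (CA:TRUE is required)
105 | $ openssl x509 -req -sha256 -days 1825 -in ca-csr.pem \
106 |     -CA root-cert.pem -CAkey root-key.pem -CAcreateserial \
107 |     -extfile <(printf "basicConstraints=critical,CA:TRUE\nkeyUsage=critical,keyCertSign") \
108 |     -out ca-cert.pem
109 | # Chain file expected by the cacerts secret
110 | $ cat ca-cert.pem root-cert.pem > cert-chain.pem
111 | ```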
--------------------------------------------------------------------------------
/istio/安全/证书管理/2. Istio DNS证书管理.md:
--------------------------------------------------------------------------------
1 | This section demonstrates how to manage DNS certificates with Chiron, a lightweight component linked with Istiod that signs certificates using the Kubernetes CA APIs instead of maintaining its own private key. Using this feature has the following advantages.
2 |
3 | - Unlike istiod, this feature does not require maintaining a private signing key, which enhances security.
4 |
5 | - It simplifies root certificate distribution to TLS clients: clients no longer need to wait for istiod to generate and distribute its CA certificate.
6 |
7 | #### 1. Prerequisites
8 |
9 | 1. Specify the DNS certificate configuration when installing Istio
10 |
11 | ```shell
12 | $ cat <<EOF > ./istio.yaml
13 | apiVersion: install.istio.io/v1alpha1
14 | kind: IstioOperator
15 | spec:
16 | values:
17 | global:
18 | certificates:
19 | - secretName: dns.example1-service-account
20 | dnsNames: [example1.istio-system.svc, example1.istio-system]
21 | - secretName: dns.example2-service-account
22 | dnsNames: [example2.istio-system.svc, example2.istio-system]
23 | EOF
24 | $ istioctl manifest apply -f ./istio.yaml
25 |
26 | ```
27 |
28 | #### 2. DNS certificate provisioning and management
29 |
30 | Istio provisions DNS names and secret names for the DNS certificates based on the configuration you provide. The provisioned DNS certificates are signed by the Kubernetes CA and stored in the secrets following your configuration. Istio also manages the lifecycle of the DNS certificates, including their rotation and regeneration.
31 |
32 | #### 3. Configure DNS certificates
33 |
34 | In the istioctl manifest apply command above, the IstioOperator custom resource used to configure Istio contains an example DNS certificate configuration. In it, the dnsNames field specifies the DNS names in a certificate, and the secretName field specifies the name of the Kubernetes secret used to store the certificate and its key.
35 |
36 | #### 4. Check the provisioning of DNS certificates
37 |
38 | After configuring Istio to generate DNS certificates and store them in the secrets of your choice, you can verify that the certificates were provisioned and check that they work.
39 |
40 | To check that Istio generated the `dns.example1-service-account` DNS certificate in this example, and that the certificate contains the configured DNS names, you need to get the secret from Kubernetes, parse it, and decode it; view its contents with the following command.
41 |
42 | ```shell
43 | $ kubectl get secret dns.example1-service-account -n istio-system -o jsonpath="{.data['cert-chain\.pem']}" | base64 --decode | openssl x509 -in /dev/stdin -text -noout
44 | ```
45 |
46 | The text output should include:
47 |
48 | ```shell
49 | X509v3 Subject Alternative Name:
50 | DNS:example1.istio-system.svc, DNS:example1.istio-system
51 | ```
52 |
53 | #### 5. Regenerate a DNS certificate
54 |
55 | If a certificate is deleted by mistake, Istio can regenerate it. The following steps demonstrate this.
56 |
57 | 1. Delete the secret that stores the DNS certificate
58 |
59 | ```shell
60 | $ kubectl delete secret dns.example1-service-account -n istio-system
61 | ```
62 |
63 | 2. Then use the following command to view the regenerated certificate
64 |
65 | ```shell
66 | $ sleep 10; kubectl get secret dns.example1-service-account -n istio-system -o jsonpath="{.data['cert-chain\.pem']}" | base64 --decode | openssl x509 -in /dev/stdin -text -noout
67 | ```
68 |
69 | 3. The output should include:
70 |
71 | ```shell
72 | X509v3 Subject Alternative Name:
73 | DNS:example1.istio-system.svc, DNS:example1.istio-system
74 | ```
75 |
76 | #### 6. Clean up this experiment
77 |
78 | ```shell
79 | $ kubectl delete ns istio-system
80 | ```
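81 |
82 | Beyond checking the SAN list, the same secret can be inspected for issuer, serial number and validity dates; a minimal sketch using only kubectl and openssl, with the same jsonpath key as above:
83 |
84 | ```shell
85 | $ kubectl get secret dns.example1-service-account -n istio-system \
86 |     -o jsonpath="{.data['cert-chain\.pem']}" | base64 --decode \
87 |     | openssl x509 -noout -issuer -serial -dates
88 | ```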
--------------------------------------------------------------------------------
/istio/服务网格概述/1. 服务网格历史.md:
--------------------------------------------------------------------------------
1 | To discuss service mesh, we have to mention microservices. Since it was proposed in 2012, the microservice architecture has inherited the foundation of traditional SOA and formed
2 | new standards in theory and engineering practice; its popularity keeps rising, and it even shows signs of becoming the default software architecture.
3 |
4 | #### 1. Characteristics a microservice architecture should have:
5 |
6 | - Structurally, components that used to be split along technical lines are upgraded to independently running services split along business lines; each service has its own implementation platform and owns its own data, and services communicate with each other via smart endpoints and dumb pipes.
7 |
8 | - In engineering, design from the perspective of a product rather than a project, emphasizing iteration, automation, and design for failure.
9 |
10 |
11 | #### 2. Pros and cons of microservices
12 |
13 | ##### 2.1 Pros
14 |
15 | - Better application scalability
16 |
17 | - Easier collaboration between departments or business teams
18 |
19 | - A higher degree of automation and less overhead
20 |
21 | ##### 2.2 Cons
22 |
23 | - The number of instances grows sharply, which places higher demands on deployment and operations automation
24 |
25 | - APIs are called over the network, so the dependency on the network is much stronger
26 |
27 | - Call chains become longer, so distributed tracing becomes a must
28 |
29 | - Logs are scattered, making tracing and analysis harder
30 |
31 | - Services are scattered and more exposed to attack
32 |
33 | - Auto scaling, routing management, failure control, shared storage and so on are all required.
34 |
35 | >[info] This is why Kubernetes appeared, solving some of the problems created by the microservice architecture by providing deployment, scheduling, scaling, monitoring, logging and so on for microservices at the process level. But communication and the relationships between services became more complex, and observability and quality-of-service guarantees became the weak point of microservice solutions, which is where service mesh comes in.
36 |
37 | #### 3. The development history of Service Mesh
38 |
39 | ##### 3.1 Spring Cloud
40 | Spring Cloud was born in 2015. It defined a set of standard features such as intelligent routing, circuit breaking, and service registration and discovery, and provided the corresponding libraries and components to implement them.
41 |
42 | Spring Cloud also has drawbacks, as listed below:
43 |
44 | - Users need to learn the "language" of each component and operate each of them separately, which raises the barrier to adoption.
45 |
46 | - Components are controlled at the code level, so cooperation across languages is not possible.
47 |
48 | - It has no built-in support for scheduling, resources, or DevOps.
49 |
50 | ##### 3.2 Linkerd
51 |
52 | In early 2016, two Twitter engineers started the Linkerd project under the slogan "The services must mesh", becoming some of the first evangelists of Service Mesh.
53 |
54 | Linkerd integrates well with the capabilities provided by Kubernetes. On that basis, a Linkerd instance is deployed on every Kubernetes
55 | Node, and the communication of Pods that join the mesh is redirected, proxy style,
56 | to Linkerd, so that Linkerd can control and monitor the communication along the path.
57 |
58 | 
59 |
60 | Compared with Spring Cloud described above, Linkerd achieves the following:
61 |
62 | - Communication is monitored and managed directly, without intruding into the workload's code.
63 |
64 | - A unified configuration style is provided for managing service-to-service and edge communication.
65 |
66 | - Kubernetes is supported, along with other underlying platforms.
67 |
68 | ##### 3.3 Istio
69 |
70 | In May 2017, Google, IBM, and Lyft announced the birth of Istio. Istio uses Envoy as its data plane, running Envoy alongside the business container as a sidecar and hijacking its communication, while accepting unified management from the control plane. On this basis it provides rich connectivity, control, observability, and security features for service-to-service communication.
--------------------------------------------------------------------------------
/istio/服务网格概述/2. 服务网格优势.md:
--------------------------------------------------------------------------------
1 | #### 1. Definition of a service mesh
2 | A service mesh is an infrastructure layer dedicated to handling service-to-service communication; it is responsible for reliably delivering requests through the complex service topology that makes up a modern cloud-native application.
3 |
4 |
5 | 1. Characteristics of a service mesh:
6 |
7 | - Lightweight network proxies
8 |
9 | - Transparent to the application
10 |
11 | - Traffic between applications is taken over by the service mesh
12 |
13 | - Work such as timeouts, retries, monitoring, and tracing for service-to-service calls is pushed down into the service mesh layer.
14 |
15 | As shown in the figure below: the dark boxes represent applications, and the light gray boxes represent the lightweight network proxies in the mesh. The proxies communicate with each other, while communication between applications goes entirely through the proxies. Looking only at the proxies, you can see a mesh-like structure, which is where the name service mesh comes from.
16 | 
17 |
18 | A mesh generally consists of a data plane and a control plane. The data plane deploys a request proxy, called a sidecar, alongside each service; the control plane is responsible for the interaction between the request proxies, as well as the interaction between users and the request proxies.
19 |
20 | #### 2. Advantages of a service mesh
21 | As the number of services grows, every service has to manage increasingly complex service-to-service network communication on its own, which is a headache for developers and becomes harder and harder to manage. This requires service governance to cover many functions, for example service discovery, load balancing, failover, and collection and monitoring of service metrics.
--------------------------------------------------------------------------------
/istio/流量管理/Ingress gateway/2. Ingress 在k8s中的应用.md:
--------------------------------------------------------------------------------
1 | This task describes how to configure Istio using a Kubernetes Ingress resource to expose a service outside of the service mesh cluster.
2 |
3 | >[warning] Using an `Istio Gateway` rather than Ingress is recommended, mainly because the Gateway lets you use the full set of features Istio provides, such as rich traffic management and security features.
4 |
5 | #### 1. Prerequisites
6 |
7 | 1. Deploy a service
8 |
9 | ```shell
10 | $ kubectl apply -f samples/httpbin/httpbin.yaml
11 | ```
12 |
13 | 2. Determine the ingress IP and ports
14 |
15 | ```shell
16 | $ kubectl get svc istio-ingressgateway -n istio-system
17 | $ export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
18 | $ export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
19 | $ export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
20 | $ export TCP_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="tcp")].port}')
21 |
22 | ```
23 |
24 | #### 2. Configure ingress using an Ingress resource
25 |
26 | A Kubernetes Ingress resource exposes HTTP and HTTPS routes to the outside of the cluster so they can be accessed externally.
27 |
28 | 1. Create the Istio `Gateway`
29 |
30 | ```shell
31 | $ kubectl apply -f - <.:9411` installation option.
12 |
13 | 2. Deploy the Bookinfo example
14 |
15 | ```shell
16 | $ kubectl label namespace default istio-injection=enabled
17 | $ kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
18 | $ kubectl get services
19 | $ kubectl get pods
20 | $ kubectl exec -it "$(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}')" -c ratings -- curl productpage:9080/productpage | grep -o ".*"
21 | ```
22 |
23 | #### 2. Access the dashboard
24 |
25 | For testing, you may want to use port forwarding, assuming you deployed Jaeger to the istio-system namespace.
26 |
27 | ```shell
28 | $ istioctl dashboard jaeger
29 | ```
30 |
31 | #### 3. Generate traces using the Bookinfo example
32 |
33 | 1. With the Bookinfo application running, access `http://$GATEWAY_URL/productpage` one or more times to generate trace information
34 |
35 | To see trace data, you must send requests to your service. The number of requests needed depends on Istio's trace sampling rate, which is set when Istio is installed; the default rate is 1%, so you need to send at least 100 requests before the first trace becomes visible. Use the following command to send 100 requests to the productpage service.
36 |
37 | ```shell
38 | $ for i in `seq 1 100`; do curl -s -o /dev/null http://$GATEWAY_URL/productpage; done
39 | ```
40 |
41 | 2. Then view the traces in the dashboard, as shown below
42 | 
43 |
44 | 3. Click the most recent trace at the top to see the details corresponding to the latest request to /productpage
45 |
46 | 
47 |
48 | 4. The trace is comprised of a set of spans, where each span corresponds to a Bookinfo service invoked during the execution of the /productpage request, or to an internal Istio component, for example istio-ingressgateway.
49 |
50 | #### 4. Cleanup
51 |
52 | 1. Remove any istioctl processes
53 |
54 | ```shell
55 | $ killall istioctl
56 | ```
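57 |
58 | The `$GATEWAY_URL` used above can be set from the ingress gateway service; a minimal sketch reusing the jsonpath commands shown earlier in this section (a LoadBalancer-type ingress gateway is assumed):
59 |
60 | ```shell
61 | $ export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
62 | $ export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
63 | $ export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
64 | ```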
--------------------------------------------------------------------------------
/istio/观察和遥测/6. zipkin 链路追踪.md:
--------------------------------------------------------------------------------
1 | After completing this task, you will understand how to have your application participate in tracing with Zipkin, regardless of the language, framework, or platform used to build it.
2 |
3 | This task uses the Bookinfo sample as the example application.
4 |
5 | To learn how Istio handles tracing, see the overview for this task.
6 |
7 | #### 1. Prerequisites
8 |
9 | - In a demo/test environment, set the `--set values.tracing.enabled=true` and `--set values.tracing.provider=zipkin` installation options to enable tracing out of the box.
10 |
11 | - In production, reference an existing Zipkin instance and set the `--set values.global.tracer.zipkin.address=.:9411` installation option.
12 |
13 | 2. Deploy the Bookinfo example
14 |
15 |
16 | #### 2. Access the dashboard
17 |
18 | ```shell
19 | $ istioctl dashboard zipkin
20 | ```
21 |
22 | #### 3. Generate traces using the Bookinfo example
23 |
24 | 1. With the Bookinfo application running, access `http://$GATEWAY_URL/productpage` one or more times to generate trace information
25 |
26 | To see trace data, you must send requests to your service. The number of requests needed depends on Istio's trace sampling rate, which is set when Istio is installed; the default rate is 1%, so you need to send at least 100 requests before the first trace becomes visible. Use the following command to send 100 requests to the productpage service.
27 |
28 | ```shell
29 | $ for i in `seq 1 100`; do curl -s -o /dev/null http://$GATEWAY_URL/productpage; done
30 | ```
31 |
32 | 2. View the dashboard as shown below:
33 |
34 | 
35 |
36 | 3. Click the most recent trace at the top to view the details of the latest request to /productpage
37 |
38 | 
39 |
40 |
41 | 4. The trace is comprised of a set of spans, where each span corresponds to a Bookinfo service invoked during the execution of the /productpage request, or to an internal Istio component, for example `istio-ingressgateway`
42 |
43 | 5. Clean up this experiment
44 |
45 | - Remove any istioctl processes
46 |
47 | ```shell
48 | $ killall istioctl
49 | ```
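50 |
51 | The default 1% sampling rate mentioned above can be raised at install time. A minimal sketch, assuming the `values.pilot.traceSampling` install option is available in the Istio release being used (only sensible for demo/test clusters):
52 |
53 | ```shell
54 | $ istioctl manifest apply --set profile=demo \
55 |     --set values.pilot.traceSampling=100
56 | ```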
--------------------------------------------------------------------------------
/istio/配置详解/readme.md:
--------------------------------------------------------------------------------
1 | https://blog.csdn.net/hxpjava1
2 |
--------------------------------------------------------------------------------
/k8s命令自动补全.md:
--------------------------------------------------------------------------------
1 | Before k8s 1.3, kubectl command auto-completion was set up in the following way
2 | ```
3 | source ./contrib/completions/bash/kubectl
4 | ```
5 |
6 | Since version 1.3, kubectl ships a completion command, which can be used for auto-completion
7 | ```
8 | source <(kubectl completion bash)
9 | ```
10 |
11 | Set up k8s command auto-completion permanently
12 | ```
13 | yum install -y bash-completion
14 | source /usr/share/bash-completion/bash_completion
15 | source <(kubectl completion bash)
16 | echo "source <(kubectl completion bash)" >> ~/.bashrc
17 | ```
18 |
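19 | A common convenience on top of this is a short `k` alias that keeps completion working; a minimal sketch, assuming bash and that `kubectl completion bash` has already been sourced as above (it defines the `__start_kubectl` completion function)
20 | ```
21 | echo 'alias k=kubectl' >> ~/.bashrc
22 | echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
23 | source ~/.bashrc
24 | ```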
--------------------------------------------------------------------------------
/k8s添加role.md:
--------------------------------------------------------------------------------
1 | Add roles to k8s nodes
2 | ```
3 | 1. View the roles
4 | # kubectl get nodes
5 | NAME         STATUS     ROLES    AGE     VERSION
6 | k8s-node01   NotReady   <none>   2m9s    v1.13.1
7 | k8s-node02   NotReady   <none>   2m16s   v1.13.1
8 |
9 | 2. Add roles
10 | kubectl label node k8s-node01 node-role.kubernetes.io/master=
11 | kubectl label node k8s-node01 node-role.kubernetes.io/node=
12 |
13 | 3. View the roles again
14 | # kubectl get nodes
15 | NAME         STATUS     ROLES         AGE     VERSION
16 | k8s-node01   NotReady   master,node   2m9s    v1.13.1
17 | k8s-node02   NotReady   <none>        2m16s   v1.13.1
18 | ```
19 |
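20 | To undo a role, the same kubectl label command with a trailing `-` after the key removes the label; a minimal sketch using the node from above:
21 | ```
22 | kubectl label node k8s-node01 node-role.kubernetes.io/node-
23 | ```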
--------------------------------------------------------------------------------
/kubeadm部署集群升级.md:
--------------------------------------------------------------------------------
1 | Upgrading a Kubernetes cluster
2 | ===
3 | Upgrade the master nodes
4 | ---
5 |
6 | 1. List all available kubeadm versions
7 | ``` # yum list --showduplicates kubeadm --disableexcludes=kubernetes ```
8 |
9 | 2. Upgrade kubeadm to the target version (1.14.2 here)
10 | ``` # yum upgrade -y kubeadm-1.14.2-0 --disableexcludes=kubernetes ```
11 |
12 | 3. Check the kubeadm version
13 | ``` # kubeadm version ```
14 |
15 | 4. View the kubeadm upgrade plan
16 | ``` # kubeadm upgrade plan ```
17 |
18 | 5. Run the upgrade command suggested in the kubeadm upgrade plan output
19 | ``` # kubeadm upgrade apply v1.14.2 ```
20 |
21 |
22 | 6. Upgrade the kubelet and kubectl packages
23 | ``` # yum upgrade -y kubelet-1.14.2-0 kubectl-1.14.2-0 --disableexcludes=kubernetes ```
24 |
25 | 7. Fetch the kubelet configuration for the new version
26 | ``` # kubeadm upgrade node config --kubelet-version v1.14.2 ```
27 |
28 | 8. Restart kubelet
29 | ```
30 | # systemctl daemon-reload
31 | # systemctl restart kubelet
32 | ```
33 |
34 | 9. Upgrade the second master
35 | ``` # yum install -y kubeadm-1.14.2-0 kubelet-1.14.2-0 kubectl-1.14.2-0 --disableexcludes=kubernetes ```
36 |
37 | 10. Upgrade the control plane
38 | ``` # kubeadm upgrade node experimental-control-plane ```
39 |
40 | 11. Update the kubelet configuration
41 | ``` # kubeadm upgrade node config --kubelet-version v1.14.2 ```
42 |
43 | 12. Restart kubelet
44 | ```
45 | # systemctl daemon-reload
46 | # systemctl restart kubelet
47 | ```
48 |
49 | Upgrade the worker nodes
50 | ---
51 | 1. On the master, drain the pods from the node01 node that will be upgraded
52 | ``` # kubectl drain node01 --ignore-daemonsets --force ```
53 |
54 | 2. On the master, check that node01 is now unschedulable
55 | ```
56 | # kubectl get nodes
57 | NAME     STATUS                     ROLES    AGE   VERSION
58 | node01   Ready,SchedulingDisabled   <none>   26d   v1.14.1
59 | ```
60 |
61 | 3. On node01, upgrade the three packages to the target version
62 | ``` # yum upgrade -y kubeadm-1.14.2-0 kubelet-1.14.2-0 kubectl-1.14.2-0 --disableexcludes=kubernetes ```
63 |
64 | 4. On the node, update the kubelet configuration
65 | ``` # kubeadm upgrade node config --kubelet-version v1.14.2 ```
66 |
67 | 5. On the node, restart kubelet
68 | ```
69 | # systemctl daemon-reload
70 | # systemctl restart kubelet
71 | ```
72 |
73 | 6. On the master, make node01 schedulable again
74 | ``` # kubectl uncordon node01 ```
75 |
76 | 7. On the master, check that the worker node is schedulable again
77 | ```
78 | # kubectl get nodes
79 | NAME     STATUS   ROLES    AGE   VERSION
80 | node01   Ready    <none>   26d   v1.14.2
81 | ```
82 |
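83 | After both the masters and the workers are upgraded, it is worth confirming that every component ended up on the same version; a minimal sketch using only standard commands:
84 | ```
85 | # kubectl get nodes -o wide
86 | # kubeadm version -o short
87 | # kubelet --version
88 | # kubectl version --short
89 | ```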
--------------------------------------------------------------------------------
/label.md:
--------------------------------------------------------------------------------
1 | List all pods with their labels
2 | ```
3 | # kubectl get pods --show-labels
4 | ```
5 |
6 | Show the values of the app and run labels as extra columns
7 | ```
8 | # kubectl get pods -L app,run
9 | ```
10 |
11 | Get pods that have the app label
12 | ```
13 | # kubectl get pods -l app
14 | ```
15 |
16 | Get pods that have the app label and show all of their labels
17 | ```
18 | # kubectl get pods -l app --show-labels
19 | ```
20 |
21 | Get pods with the label release=canary
22 | ```
23 | # kubectl get pods -l release=canary
24 | ```
25 |
26 | Get pods with the label release!=canary
27 | ```
28 | # kubectl get pods -l release!=canary
29 | ```
30 |
31 | Set-based selectors (match a key against a set of values)
32 | ```
33 | # key in/notin (value1,value2,...)
34 | # kubectl get pods -l "release in (canary,beta,alpha)"
35 | # kubectl get pods -l "release notin (canary,beta,alpha)"
36 | ```
37 |
38 | Label a pod
39 | ```
40 | # kubectl label pods pod-demon release=canary
41 | ```
42 |
43 | Overwrite a pod label
44 | ```
45 | # kubectl label pods pod-demon release=stable --overwrite
46 | ```
47 |
48 | Label a node
49 | ```
50 | # kubectl label node node01 disktype=ssd
51 | ```
52 |
53 | View node labels
54 | ```
55 | # kubectl get nodes --show-labels
56 | ```
57 |
58 |
59 | ```
60 | #deploy
61 | apiVersion: apps/v1
62 | kind: Deployment
63 | metadata:
64 | name: web-demo
65 | namespace: dev
66 | spec:
67 | selector:
68 | matchLabels:
69 | app: web-demo
70 | matchExpressions:
71 | - {key: group, operator: In, values: [dev,test]}
72 | replicas: 1
73 | template:
74 | metadata:
75 | labels:
76 | group: dev
77 | app: web-demo
78 | spec:
79 | containers:
80 | - name: web-demo
81 | image: hub.mooc.com/kubernetes/web:v1
82 | ports:
83 | - containerPort: 8080
84 | nodeSelector:
85 | disktype: ssd
86 | ---
87 | #service
88 | apiVersion: v1
89 | kind: Service
90 | metadata:
91 | name: web-demo
92 | namespace: dev
93 | spec:
94 | ports:
95 | - port: 80
96 | protocol: TCP
97 | targetPort: 8080
98 | selector:
99 | app: web-demo
100 | type: ClusterIP
101 |
102 | ---
103 | #ingress
104 | apiVersion: extensions/v1beta1
105 | kind: Ingress
106 | metadata:
107 | name: web-demo
108 | namespace: dev
109 | spec:
110 | rules:
111 | - host: web-dev.mooc.com
112 | http:
113 | paths:
114 | - path: /
115 | backend:
116 | serviceName: web-demo
117 | servicePort: 80
118 | ```
119 |
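120 | Two related operations that use the same selector syntax; a minimal sketch reusing the names from above (a trailing `-` after a key removes that label):
121 | ```
122 | # kubectl label pods pod-demon release-
123 | # kubectl delete pods -l release=canary
124 | ```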
--------------------------------------------------------------------------------
/pod升级回滚.md:
--------------------------------------------------------------------------------
1 |
2 | Pause a rollout
3 | ```
4 | kubectl rollout pause deploy web-rollingupdate -n dev
5 | ```
6 | Resume a rollout
7 | ```
8 | kubectl rollout resume deploy web-rollingupdate -n dev
9 | ```
10 | Roll back to the previous revision
11 | ```
12 | kubectl rollout undo deploy web-rollingupdate -n dev
13 | ```
14 | View the rollout history
15 | ```
16 | kubectl rollout history deploy web-rollingupdate -n dev
17 | ```
18 | View the current rollout status
19 | ```
20 | kubectl rollout status deploy web-rollingupdate -n dev
21 | ```
22 |
23 | Test
24 | ---
25 | 1. Deploy a simple Nginx application
26 | ```
27 | # cat nginx-deployment.yaml
28 | apiVersion: extensions/v1beta1
29 | kind: Deployment
30 | metadata:
31 | name: nginx-deployment
32 | spec:
33 | replicas: 3
34 | template:
35 | metadata:
36 | labels:
37 | app: nginx
38 | spec:
39 | containers:
40 | - name: nginx
41 | image: nginx:1.7.9
42 | ports:
43 | - containerPort: 80
44 | ```
45 |
46 | ```
47 | # kubectl create -f https://kubernetes.io/docs/user-guide/nginx-deployment.yaml --record=true
48 | ```
49 | - The --record flag records the command, which makes it easy to see what changed in each revision
50 |
51 | 2. Scale out
52 | ```
53 | # kubectl scale deployment nginx-deployment --replicas 10
54 | ```
55 |
56 | 3. Set up automatic scaling for the Deployment with horizontal pod autoscaling
57 | ```
58 | # kubectl autoscale deployment nginx-deployment --min=10 --max=15 --cpu-percent=80
59 | ```
60 |
61 | 4. Update the image
62 | ```
63 | # Syntax: kubectl set image deployment/nginx-deployment container-name=image -n namespace
64 | # kubectl set image deployment/nginx-deployment nginx=nginx:1.9.1
65 | deployment.extensions/nginx-deployment image updated
66 | ```
67 |
68 | 5. Roll back
69 | ```
70 | # kubectl rollout undo deployment/nginx-deployment
71 | deployment.extensions/nginx-deployment rolled back
72 | ```
73 |
74 | 6. Check the rollback status
75 | ```
76 | # kubectl rollout status deployment/nginx-deployment
77 | ```
78 |
79 | 7. View the update history
80 | ```
81 | # kubectl rollout history deployment/nginx-deployment
82 | deployment.extensions/nginx-deployment
83 | REVISION CHANGE-CAUSE
84 | 1 kubectl apply --filename=nginx-deployment.yaml --record=true
85 | ```
86 |
87 | 8. Roll back to the previous revision or to a specific revision
88 | ```
89 | # kubectl rollout undo deployment/nginx-deployment
90 | # kubectl rollout undo deployment/nginx-deployment --to-revision=2 # the --to-revision flag selects a specific historical revision
91 | ```
92 |
93 | 9. Pause the rollout
94 | ```
95 | # kubectl rollout pause deployment/nginx-deployment
96 | ```
97 |
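98 | The rollout behaviour itself is controlled by the Deployment's update strategy; a minimal sketch of the relevant fields (the values are arbitrary example choices) that could be added to the nginx-deployment spec above:
99 | ```
100 | spec:
101 |   revisionHistoryLimit: 10    # how many old ReplicaSets to keep for rollbacks
102 |   strategy:
103 |     type: RollingUpdate
104 |     rollingUpdate:
105 |       maxSurge: 1             # at most 1 extra pod above the desired replica count
106 |       maxUnavailable: 1       # at most 1 pod may be unavailable during the update
107 | ```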
--------------------------------------------------------------------------------
/修改节点运行pod数.md:
--------------------------------------------------------------------------------
1 | Kubernetes pod scheduling failure (Insufficient pods)
2 |
3 |
4 | By default a Kubernetes node runs at most 110 pods. Once every node has reached 110 pods, no more can be scheduled and the following error appears
5 | ```
6 | 0/3 nodes are available: 1 node(s) had taints that the pod didn't tolerate, 2 Insufficient pods
7 | ```
8 |
9 | Solution:
10 |
11 | Edit the /etc/sysconfig/kubelet configuration file, add the --max-pods setting, and then restart the kubelet service. After the change the file looks like this
12 | ```
13 | # vim /etc/sysconfig/kubelet
14 | KUBELET_EXTRA_ARGS="--fail-swap-on=false --max-pods=300"
15 | ```
16 |
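17 | After restarting kubelet, the new per-node capacity can be confirmed; a minimal sketch (replace k8s-node01 with an actual node name):
18 | ```
19 | # systemctl restart kubelet
20 | # kubectl get node k8s-node01 -o jsonpath='{.status.capacity.pods}'
21 | ```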
--------------------------------------------------------------------------------
/将pod运行在master节点.md:
--------------------------------------------------------------------------------
1 | 1. Remove the taint
2 | ---
3 | Check the node scheduling status
4 | ```
5 | [root@k8s001 ~]# kubectl get node
6 | NAME STATUS ROLES AGE VERSION
7 | 172.16.33.22 Ready master 3d v1.13.5
8 | 172.16.33.23 Ready node 3d v1.13.5
9 | 172.16.33.24 Ready node 3d v1.13.5
10 | [root@k8s001 ~]# kubectl describe node 172.16.33.22
11 | ......
12 | Events:
13 | Type Reason Age From Message
14 | ---- ------ ---- ---- -------
15 | Normal NodeNotSchedulable 11s (x2 over 3d) kubelet, 172.16.33.22 Node 172.16.33.22 status is now: NodeNotSchedulable
16 | ......
17 | ```
18 | Make the node schedulable again (uncordon clears the SchedulingDisabled state; removing the master taint itself is sketched at the end of this file)
19 | ```
20 | [root@k8s001 ~]# kubectl uncordon 172.16.33.22
21 | ```
22 |
23 | 2. Add a toleration for the taint
24 | ---
25 | ```
26 | [root@k8s001 ~]# cat pod.yaml
27 | ......
28 | spec:
29 | tolerations:
30 | - key: node-role.kubernetes.io/master
31 | operator: Exists
32 | effect: NoSchedule
33 | containers:
34 | - name: nginx-pod
35 | ......
36 | ```
37 |
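38 | A minimal sketch of actually removing the master taint, so that ordinary pods can be scheduled there without tolerations (the node name is the one from above; a trailing `-` deletes the taint):
39 | ```
40 | [root@k8s001 ~]# kubectl taint node 172.16.33.22 node-role.kubernetes.io/master-
41 | ```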
--------------------------------------------------------------------------------
/服务发现/服务发现.md:
--------------------------------------------------------------------------------
1 | 1. Service discovery inside the cluster
2 | 2. Service discovery for access from outside the cluster to services inside it
3 | 3. Service discovery for access from inside the cluster to external services
4 |
5 | 1) Service discovery inside the cluster
6 | With DNS + ClusterIP, podA's DNS queries go to the CoreDNS service, which resolves the name to a Service resource; the Service is bound to one or more podB endpoints
7 |
8 | With a Headless Service resource, podA's DNS queries go to CoreDNS, which resolves the name directly to the matching podB addresses, without going through a Service ClusterIP
9 |
10 | 2) Access from inside the cluster to the outside
11 | Hard-code the address of the external service, for example the MySQL ip+port, directly inside the cluster
12 |
13 | Or configure a Service resource together with an Endpoints resource that pins the MySQL address, so that podA resolves the Service by DNS (see the sketch at the end of this file)
14 |
15 | 3) Access from outside the cluster to the inside
16 | Expose a port on a Service with nodePort so that traffic from outside the cluster can reach services inside it directly
17 |
18 | With hostPort, the service runs on a particular host, and a port is opened on that host to listen for traffic
19 |
20 | Via Ingress
21 |
22 |
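23 | A minimal sketch of the Service + Endpoints approach for an external MySQL (the name and IP below are made-up examples):
24 | ```
25 | apiVersion: v1
26 | kind: Service
27 | metadata:
28 |   name: external-mysql
29 | spec:
30 |   ports:
31 |   - port: 3306
32 |     targetPort: 3306
33 | ---
34 | apiVersion: v1
35 | kind: Endpoints
36 | metadata:
37 |   name: external-mysql      # must match the Service name
38 | subsets:
39 | - addresses:
40 |   - ip: 192.168.10.100      # external MySQL address (example)
41 |   ports:
42 |   - port: 3306
43 | ```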
--------------------------------------------------------------------------------
/服务发现/环境变量服务发现.md:
--------------------------------------------------------------------------------
1 | Kubernetes also uses environment variables for service discovery: when a Pod starts, the IP and port of every existing Service are injected as environment variables, so applications in the Pod can read these variables to obtain the addresses of the services they depend on. This approach is simple to use, but it has a major drawback: a service must already exist before the Pod starts, otherwise its variables will not be injected.
2 |
3 |
4 | 1. First create an nginx service for testing
5 | ```
6 | # cat test-nginx.yaml
7 | apiVersion: apps/v1beta1
8 | kind: Deployment
9 | metadata:
10 | name: nginx-deploy
11 | labels:
12 | k8s-app: nginx-demo
13 | spec:
14 | replicas: 2
15 | template:
16 | metadata:
17 | labels:
18 | app: nginx
19 | spec:
20 | containers:
21 | - name: nginx
22 | image: nginx:1.7.9
23 | ports:
24 | - containerPort: 80
25 |
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: nginx-service
31 | labels:
32 | name: nginx-service
33 | spec:
34 | ports:
35 | - port: 5000
36 | targetPort: 80
37 | selector:
38 | app: nginx
39 | ```
40 |
41 | Create the nginx service
42 | ```
43 | # kubectl create -f test-nginx.yaml
44 | deployment.apps "nginx-deploy" created
45 | service "nginx-service" created
46 |
47 |
48 | # kubectl get pod
49 | NAME READY STATUS RESTARTS AGE
50 | nginx-deploy-76bf4969df-2z22n 1/1 Running 0 8m27s
51 | nginx-deploy-76bf4969df-srt9d 1/1 Running 0 8m27s
52 |
53 | # kubectl get svc
54 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
55 | kubernetes      ClusterIP   10.96.0.1       <none>        443/TCP    170d
56 | nginx-service   ClusterIP   10.107.190.90   <none>        5000/TCP   8m57s
57 | ```
58 |
59 | 2. Create an ordinary Pod and check whether its environment variables contain the nginx-service information from above
60 | ```
61 | # cat test-pod.yaml
62 | apiVersion: v1
63 | kind: Pod
64 | metadata:
65 | name: test-pod
66 | spec:
67 | containers:
68 | - name: test-service-pod
69 | image: busybox
70 | command: ["/bin/sh", "-c", "env"]
71 | ```
72 |
73 | Then create the test Pod
74 | ```
75 | # kubectl create -f test-pod.yaml
76 | pod "test-pod" created
77 | ```
78 |
79 | Once the Pod has been created, check its log output
80 | ```
81 | # kubectl logs test-pod
82 | KUBERNETES_SERVICE_PORT=443
83 | KUBERNETES_PORT=tcp://10.96.0.1:443
84 | HOSTNAME=test-pod
85 | SHLVL=1
86 | HOME=/root
87 | NGINX_SERVICE_PORT_5000_TCP_ADDR=10.107.190.90
88 | NGINX_SERVICE_PORT_5000_TCP_PORT=5000
89 | NGINX_SERVICE_PORT_5000_TCP_PROTO=tcp
90 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
91 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
92 | NGINX_SERVICE_SERVICE_HOST=10.107.190.90
93 | KUBERNETES_PORT_443_TCP_PORT=443
94 | NGINX_SERVICE_PORT_5000_TCP=tcp://10.107.190.90:5000
95 | KUBERNETES_PORT_443_TCP_PROTO=tcp
96 | NGINX_SERVICE_PORT=tcp://10.107.190.90:5000
97 | NGINX_SERVICE_SERVICE_PORT=5000
98 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
99 | KUBERNETES_SERVICE_PORT_HTTPS=443
100 | KUBERNETES_SERVICE_HOST=10.96.0.1
101 | PWD=/
102 | ```
103 | You can see that many environment variables are printed, including those for the nginx-service we just created (HOST, PORT, PROTO, ADDR, and so on), as well as the variables of other Services that already exist. To access nginx-service from this Pod you can simply use NGINX_SERVICE_SERVICE_HOST and NGINX_SERVICE_SERVICE_PORT. However, if nginx-service had not been started yet when this Pod started up, these variables would not appear in the environment. You could use something like an initContainer to make sure nginx-service starts before the Pod, but that adds complexity to Pod startup, so this is not the best approach.
104 |
105 |
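106 | For comparison, DNS-based discovery does not have this ordering problem; a minimal sketch that resolves the same Service from a throwaway busybox pod:
107 | ```
108 | # kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup nginx-service
109 | ```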
--------------------------------------------------------------------------------
/资料.md:
--------------------------------------------------------------------------------
1 | | Kubernetes books and resources | URL |
2 | |---------------|------|
3 | | kubernetes handbook | https://jimmysong.io/book/ |
4 | | Follow me to install a kubernetes cluster step by step | https://github.com/opsnull/follow-me-install-kubernetes-cluster |
5 | | From Docker to Kubernetes, advanced | https://www.qikqiak.com/k8s-book/ |
6 | | Kubernetes Chinese documentation | https://www.kubernetes.org.cn/k8s |
7 | | Kubernetes Chinese guide / cloud-native application architecture handbook | https://jimmysong.io/kubernetes-handbook/ |
8 | | Kubernetes study notes | https://www.huweihuang.com/kubernetes-notes/ |
9 | | Envoy official documentation (Chinese edition) | http://www.servicemesher.com/envoy/ |
10 | | kubernetes-handbook | https://github.com/feiskyer/kubernetes-handbook |
11 | | Docker: From Beginner to Practice | https://github.com/yeasy/docker_practice/blob/master/SUMMARY.md |
12 | | Blog | https://blog.csdn.net/qq_32641153 |
13 | | Xiaodouding tech stack | http://www.mydlq.club/about/menu/ |
14 | | Kubernetes official Chinese documentation | https://kubernetes.io/zh/docs/concepts/ |
15 | | Yuque | https://www.yuque.com/duduniao/k8s |
16 | | K8S training camp | https://www.qikqiak.com/k8strain/maintain/cluster/ |
17 | | Kubernetes from beginner to practice | https://www.kancloud.cn/huyipow/kubernetes/531982 |
18 | | etcd3 study notes | https://skyao.gitbooks.io/learning-etcd3/content/documentation/op-guide/ |
19 | | Etcd official documentation (Chinese edition) | https://www.bookstack.cn/read/etcd/documentation-op-guide-clustering.md |
20 | | etcd study notes | https://github.com/skyao/learning-etcd3 |
21 | | etcd leader election | http://www.xuyasong.com/?p=1983#i-7 |
22 | | Kube-bench | https://github.com/aquasecurity/kube-bench#download-and-install-binarie |
23 | | Blog | https://www.cnblogs.com/yuhaohao/ |
24 | | Blog | https://www.cuiliangblog.cn/detail/article/29 |
25 | | k8s-cluster | https://github.com/qjpoo/k8s-cluster |
26 | | k8s-application | https://github.com/happinesslijian/k8s-application |
27 | | Blog | https://blog.51cto.com/goome/p_1 |
28 | | Ops notes | https://github.com/miaocunfa/OpsNotes |
29 | | Blog | https://www.zhangzhuo.ltd/ |
30 | | Blog | https://mp.weixin.qq.com/mp/appmsgalbum?__biz=MzIwNDA3ODg3OQ==&action=getalbum&album_id=1835825486392770561&scene=173&from_msgid=2647999703&from_itemidx=1&count=3&nolastread=1#wechat_redirect |
31 | | Blog | https://blog.csdn.net/qq_42987484/category_9539473.html |
32 |
--------------------------------------------------------------------------------