├── .gitignore
├── elk [k8s kube]
├── readme.md
├── es-设置JVM参数.md
├── 01.single
│ ├── cm
│ │ ├── logstash
│ │ │ ├── logstash.yml
│ │ │ └── pipeline
│ │ │ │ ├── stdio.conf
│ │ │ │ └── nginx.conf
│ │ ├── kibana
│ │ │ └── kibana.yml
│ │ ├── es
│ │ │ └── elasticsearch.yml
│ │ └── nginx
│ │ │ └── nginx.conf
│ ├── 00.cmd.sh
│ ├── 01.svc.yaml
│ ├── readme.md
│ └── 03.ds.yaml
├── 02.single-xpack
│ ├── cm
│ │ ├── logstash
│ │ │ ├── logstash.yml
│ │ │ └── pipeline
│ │ │ │ ├── stdio.conf
│ │ │ │ └── nginx.conf
│ │ ├── kibana
│ │ │ ├── kibana.keystore
│ │ │ └── kibana.yml
│ │ ├── es
│ │ │ └── elasticsearch.yml
│ │ └── nginx
│ │ │ └── nginx.conf
│ ├── 00.cmd.sh
│ ├── readme.md
│ ├── 01.svc.yaml
│ └── 03.ds.yaml
├── 04.es-cluster-xpack [sts statefulset]
│ ├── cm
│ │ ├── logstash
│ │ │ ├── logstash.yml
│ │ │ └── pipeline
│ │ │ │ ├── stdio.conf
│ │ │ │ └── nginx.conf
│ │ ├── es
│ │ │ ├── elastic-certificates.p12
│ │ │ └── elasticsearch.yml
│ │ ├── kibana
│ │ │ └── kibana.yml
│ │ └── nginx
│ │ │ └── nginx.conf
│ ├── 00.cmd.sh
│ ├── 01.svc.yaml
│ ├── readme.md
│ ├── 03.deploy-kibana.yaml
│ └── 04.ds.yaml
├── 03.es-cluster [sts statefulset 7.x 7.2.0]
│ ├── cm
│ │ ├── logstash
│ │ │ ├── logstash.yml
│ │ │ └── pipeline
│ │ │ │ ├── stdio.conf
│ │ │ │ └── nginx.conf
│ │ ├── kibana
│ │ │ └── kibana.yml
│ │ ├── es
│ │ │ └── elasticsearch.yml
│ │ └── nginx
│ │ │ └── nginx.conf
│ ├── readme.md
│ ├── 00.cmd.sh
│ ├── 01.svc.yaml
│ ├── 03.deploy-kibana.yaml
│ └── 04.ds.yaml
├── 05.es-cluster-xpack-head [sts statefulset]
│ ├── cm
│ │ ├── logstash
│ │ │ ├── logstash.yml
│ │ │ └── pipeline
│ │ │ │ ├── stdio.conf
│ │ │ │ └── nginx.conf
│ │ ├── es
│ │ │ ├── elastic-certificates.p12
│ │ │ └── elasticsearch.yml
│ │ ├── kibana
│ │ │ └── kibana.yml
│ │ └── nginx
│ │ │ └── nginx.conf
│ ├── 00.cmd.sh
│ ├── 05.head.yaml
│ ├── readme.md
│ ├── 03.deploy-kibana.yaml
│ ├── 01.svc.yaml
│ └── 04.ds.yaml
├── 06.es-cluster [sts statefulset 5.x 5.5.0]
│ ├── readme.md
│ ├── cm
│ │ ├── kibana
│ │ │ └── kibana.yml
│ │ └── es
│ │ │ └── elasticsearch.yml
│ ├── 00.cmd.sh
│ ├── 03.kibana.yaml
│ └── 02.es-data.yaml
├── kibana-配置[映射 环境变量].md
├── es-配置.md
├── 问题处理
│ ├── es启动报错-Native controller process has stopped.md
│ └── kibana启动报错-Request Timeout after 30000ms.md
├── kibana使用方法.md
└── logstash-配置[映射 环境变量].md
├── harbor
├── config
│ ├── cert
│ │ ├── server.crt
│ │ └── server.key
│ ├── core
│ │ ├── key
│ │ ├── app.conf
│ │ └── env
│ ├── db
│ │ └── env
│ ├── registryctl
│ │ ├── env
│ │ └── config.yml
│ ├── jobservice
│ │ ├── env
│ │ └── config.yml
│ ├── log
│ │ └── logrotate.conf
│ ├── registry
│ │ ├── config.yml
│ │ └── root.crt
│ └── nginx
│ │ ├── server.crt
│ │ └── server.key
├── exports
├── kuber
│ ├── 08-portal.yaml
│ ├── 03-redis.yaml
│ ├── 02-db.yaml
│ ├── 10-ing.yaml
│ ├── 04-registry.yaml
│ ├── 01-cmd.sh
│ ├── 07-jobservice.yaml
│ ├── 09-proxy.yaml
│ ├── 05-registryctl.yaml
│ └── 06-core.yaml
└── readme.md
├── elk [dc]
├── 03.elk-filebeat
│ ├── logstash.md
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ └── pipeline
│ │ │ └── main.conf
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── filebeat
│ │ └── config
│ │ │ └── filebeat.yml
│ ├── readme.md
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ └── docker-compose.yaml
├── 02.elk-logstash-lb
│ ├── logstash.md
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ ├── pipeline01
│ │ │ └── nginx.conf
│ │ └── pipeline02
│ │ │ └── nginx.conf
│ ├── doc
│ │ └── src
│ │ │ └── 001.png
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── readme.md
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ └── docker-compose.yaml
├── 04.elk-filebeat-kafka
│ ├── logstash.md
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ └── pipeline
│ │ │ └── main.conf
│ ├── readme.md
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── filebeat
│ │ └── config
│ │ │ └── filebeat.yml
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ └── docker-compose.yaml
├── 07.elk-cluster-5.5.0
│ ├── readme.md
│ ├── data
│ │ └── logs
│ │ │ └── nginx
│ │ │ ├── access.log
│ │ │ ├── error.log
│ │ │ └── access_json.log
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── logstash
│ │ └── config
│ │ │ └── logstash.conf
│ ├── kibana 5.5.0 的配置.md
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ ├── logstash 5.5.0无法启动的问题.md
│ └── docker-compose.yaml
├── 05.elk-filebeat-kafka-logpilot
│ ├── logstash.md
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ └── pipeline
│ │ │ └── main.conf
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ └── readme.md
├── 01.elk-single
│ ├── readme.md
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ └── pipeline
│ │ │ ├── stdio.conf
│ │ │ └── nginx.conf
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ └── docker-compose.yaml
├── 08.elk-cluster-5.5.0-xpack
│ ├── data
│ │ └── logs
│ │ │ └── nginx
│ │ │ ├── access.log
│ │ │ ├── error.log
│ │ │ └── access_json.log
│ ├── readme.md
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── kibana-ERR_TOO_MANY_REDIRECTS 重定向次数过多.md
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── logstash
│ │ └── config
│ │ │ └── logstash.conf
│ └── nginx
│ │ └── config
│ │ └── nginx.conf
├── 06.elk-cluster-filebeat
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ └── pipeline
│ │ │ └── main.conf
│ ├── filebeat
│ │ └── config
│ │ │ └── filebeat.yml
│ ├── readme.md
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ └── docker-compose.yaml
├── 09.elk-cluster-logstash-grok
│ ├── logstash
│ │ ├── config
│ │ │ └── logstash.yml
│ │ ├── grok
│ │ │ └── patterns
│ │ ├── pipeline02
│ │ │ └── nginx.conf
│ │ └── pipeline01
│ │ │ └── nginx.conf
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── docker-compose.yaml
│ └── readme.md
├── 10.elk-filebeat-grok-pipeline
│ ├── kibana
│ │ └── config
│ │ │ └── kibana.yml
│ ├── nginx
│ │ └── config
│ │ │ └── nginx.conf
│ ├── elasticsearch
│ │ └── config
│ │ │ └── elasticsearch.yml
│ ├── filebeat
│ │ └── config
│ │ │ └── filebeat.yml
│ ├── docker-compose.yaml
│ └── readme.md
└── readme.md
├── python-rq
├── project
│ ├── jobs
│ │ ├── __init__.py
│ │ └── producer.py
│ ├── requirements.txt
│ └── main.py
├── producer.dockerfile
├── readme.md
└── docker-compose.yml
├── celery [python 分布式 dc]
├── project
│ ├── tasks
│ │ ├── __init__.py
│ │ └── task.py
│ ├── requirements.txt
│ ├── worker.py
│ └── producer.py
├── producer.dockerfile
├── readme.md
└── docker-compose.yml
├── redis-cluster
├── 00-cmd.sh
├── 03.svc.yaml
├── 01.headless.yaml
├── cm
│ └── redis.conf
└── 02.sts.yaml
├── rancher
├── import-to-rancher
│ ├── 01-ns.yaml
│ ├── 02-rbac.yaml
│ ├── 03-deploy.yaml
│ └── 04-ds.yaml
├── readme.md
└── RKE
│ ├── ubuntu下kuber环境准备.md
│ ├── readme.md
│ └── rke_config.txt
├── ansible [dc]
├── ansible
│ └── hosts
├── docker-compose.yml
├── dockerfile
└── readme.md
├── prometheus
├── kube-state-metrics
│ ├── readme.md
│ ├── 03.svc.yaml
│ ├── 02.deploy.yaml
│ └── 01.rbac.yaml
├── 00-cmd.sh
├── docs
│ ├── kube-state-metrics.md
│ ├── grafana使用.1.添加数据源.md
│ ├── grafana使用.2.添加仪表盘[dashboard].md
│ └── grafana ingress配置.md
├── 04.ingress.yaml
├── 01.rbac.yaml
├── config
│ ├── rules
│ │ ├── kubelet.yaml
│ │ ├── deploy.yaml
│ │ ├── prometheus.yaml
│ │ └── nodes.yaml
│ ├── blackbox-exporter.yaml
│ └── node-exporter.yaml
├── readme.md
├── 03.grafana.yaml
├── exporters
│ ├── 02-blackbox-exporter.yaml
│ └── 01-node-exporter.yaml
└── 02.deploy.yaml
├── readme.md
└── redis-cluster[dc]
├── config
└── nginx
│ └── nginx.conf
└── docker-compose.yaml
/.gitignore:
--------------------------------------------------------------------------------
1 | **/data
2 |
--------------------------------------------------------------------------------
/elk [k8s kube]/readme.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/harbor/config/cert/server.crt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/harbor/config/cert/server.key:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/logstash.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/harbor/config/core/key:
--------------------------------------------------------------------------------
1 | MBgtJvo3AxRlPewM
--------------------------------------------------------------------------------
/python-rq/project/jobs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/logstash.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/logstash.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/readme.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/project/tasks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/harbor/config/db/env:
--------------------------------------------------------------------------------
1 | POSTGRES_PASSWORD=123456
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/logstash.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [k8s kube]/es-设置JVM参数.md:
--------------------------------------------------------------------------------
1 | # es-设置JVM参数
2 |
3 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/data/logs/nginx/access.log:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | ## kibana configuration
4 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/data/logs/nginx/access.log:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/cm/logstash/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: 0.0.0.0
2 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/logstash/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: 0.0.0.0
2 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/logstash/config/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: "0.0.0.0"
--------------------------------------------------------------------------------
/redis-cluster/00-cmd.sh:
--------------------------------------------------------------------------------
1 | kubectl create configmap redis-conf --from-file=./cm/redis.conf
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/readme.md:
--------------------------------------------------------------------------------
1 | This example only demonstrates how x-pack is installed and configured; the plugin itself is not bundled directly (the images need to be rebuilt to include it), as sketched below.
2 |
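A minimal sketch of the plugin installation commands that a rebuilt image would need (for the 5.x line; run them inside the respective containers or in a Dockerfile `RUN` step, then commit/build a new image):

```
bin/elasticsearch-plugin install x-pack --batch   ## in the elasticsearch 5.x image
bin/kibana-plugin install x-pack                  ## in the kibana 5.x image
```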
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/logstash/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: 0.0.0.0
2 |
--------------------------------------------------------------------------------
/harbor/config/registryctl/env:
--------------------------------------------------------------------------------
1 | CORE_SECRET=ByKTSuHfWbQNX6rL
2 | JOBSERVICE_SECRET=avxVu42h9Ny5OhvY
3 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/cm/logstash/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: 0.0.0.0
2 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/logstash/logstash.yml:
--------------------------------------------------------------------------------
1 | http.host: 0.0.0.0
2 |
--------------------------------------------------------------------------------
/python-rq/project/requirements.txt:
--------------------------------------------------------------------------------
1 | requests>=2.21.0
2 | rq>=0.13.0
3 | redis>=3.0.1
4 | rq-dashboard>=0.3.12
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/project/requirements.txt:
--------------------------------------------------------------------------------
1 | celery>=4.2.1
2 | redis>=3.0.1
3 | requests>=2.21.0
4 | flower>=0.9.2
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/logstash/grok/patterns:
--------------------------------------------------------------------------------
1 | MYIP [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}
2 |
--------------------------------------------------------------------------------
/rancher/import-to-rancher/01-ns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: cattle-system
6 |
--------------------------------------------------------------------------------
/harbor/config/core/app.conf:
--------------------------------------------------------------------------------
1 | appname = Harbor
2 | runmode = dev
3 | enablegzip = true
4 |
5 | [dev]
6 | httpport = 8080
7 |
--------------------------------------------------------------------------------
/harbor/config/jobservice/env:
--------------------------------------------------------------------------------
1 | CORE_SECRET=ByKTSuHfWbQNX6rL
2 | JOBSERVICE_SECRET=avxVu42h9Ny5OhvY
3 | CORE_URL=http://core-svc:8080
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/logstash/pipeline/stdio.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 5000
4 | }
5 | }
6 | output{
7 | stdout{
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/doc/src/001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/generals-space/devops-cloud-stack/HEAD/elk [dc]/02.elk-logstash-lb/doc/src/001.png
--------------------------------------------------------------------------------
/ansible [dc]/ansible/hosts:
--------------------------------------------------------------------------------
1 | [all]
2 | slave-[01:05]
3 |
4 | [web]
5 | slave-01
6 | slave-02
7 | slave-03
8 |
9 | [db]
10 | slave-04
11 | slave-05
12 |
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/cm/logstash/pipeline/stdio.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 5000
4 | }
5 | }
6 | output{
7 | stdout{
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/prometheus/kube-state-metrics/readme.md:
--------------------------------------------------------------------------------
1 | The deployment manifests can be found under the `examples/standard` directory of the [kubernetes/kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) project.
2 |
3 |
--------------------------------------------------------------------------------
/rancher/readme.md:
--------------------------------------------------------------------------------
1 | [Rancher RKE 使用手册](https://www.bookstack.cn/books/rancher-rke)
2 | [Rancher v2.x 使用手册](https://www.bookstack.cn/books/rancher-v2.x)
3 |
4 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/logstash/pipeline/stdio.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 5000
4 | }
5 | }
6 | output{
7 | stdout{
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/elk [dc]/10.elk-filebeat-grok-pipeline/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts: [ "http://elasticsearch:9200" ]
4 |
--------------------------------------------------------------------------------
/harbor/config/registryctl/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | protocol: "http"
3 | port: 8080
4 | log_level: "INFO"
5 |
6 | #https_config:
7 | # cert: "server.crt"
8 | # key: "server.key"
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/logstash/pipeline/stdio.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 5000
4 | }
5 | }
6 | output{
7 | stdout{
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/cm/logstash/pipeline/stdio.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 5000
4 | }
5 | }
6 | output{
7 | stdout{
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/logstash/pipeline/stdio.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 5000
4 | }
5 | }
6 | output{
7 | stdout{
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/harbor/config/log/logrotate.conf:
--------------------------------------------------------------------------------
1 | /var/log/docker/*.log {
2 | rotate 50
3 | size 200M
4 | copytruncate
5 | compress
6 | missingok
7 | nodateext
8 | }
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | ```
6 | nginx01 -> filebeat01 ─┐
7 | ├─> kafka -> logstash -> es
8 | nginx02 -> filebeat02 ─┘
9 | ```
10 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | ELK: log collection system
2 |
3 | celery: distributed task scheduling system
4 |
5 | python-rq: distributed task scheduling system
6 |
7 | harbor: container image registry service
8 |
9 | prometheus: container cluster monitoring service
10 |
11 | redis-cluster: Redis cluster service
12 |
13 |
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/project/worker.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from tasks.task import app
4 |
5 | if __name__ == '__main__':
6 |     ## Run as a worker; roughly equivalent to `celery -A tasks.task worker`
7 | app.worker_main()
8 |
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts: [ "http://elasticsearch:9200" ]
4 |
5 | elasticsearch.username: elastic
6 | elasticsearch.password: "123456"
7 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.url: http://esc-master-0:9200
4 |
5 | elasticsearch.username: elastic
6 | elasticsearch.password: "123456"
7 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/readme.md:
--------------------------------------------------------------------------------
1 | 1. Decide the number of sts replicas in advance;
2 | 2. Give the headless service the same name as the sts, so that each node's node.name and discovery address can be derived (see the sketch below).
3 | 3. When changing the replicas count of the es sts, update the elasticsearch.yml configuration at the same time.
4 |
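A minimal illustration of the naming convention, assuming (hypothetically) a StatefulSet and headless service both named `es-cluster` with 3 replicas in the `default` namespace:

```
## Each pod gets a stable DNS name of the form
## <sts-name>-<ordinal>.<headless-svc>.<namespace>.svc.cluster.local,
## which is what node.name and the discovery addresses can be derived from.
nslookup es-cluster-0.es-cluster.default.svc.cluster.local
nslookup es-cluster-1.es-cluster.default.svc.cluster.local
nslookup es-cluster-2.es-cluster.default.svc.cluster.local
```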
--------------------------------------------------------------------------------
/elk [k8s kube]/06.es-cluster [sts statefulset 5.x 5.5.0]/readme.md:
--------------------------------------------------------------------------------
1 | 1. Decide the number of sts replicas in advance;
2 | 2. Give the headless service the same name as the sts, so that each node's node.name and discovery address can be derived.
3 | 3. When changing the replicas count of the es sts, update the elasticsearch.yml configuration at the same time.
4 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts: [ "http://elasticsearch:9200" ]
4 |
5 | elasticsearch.username: elastic
6 | elasticsearch.password: "123456"
7 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts: [ "http://elasticsearch:9200" ]
4 |
5 | elasticsearch.username: elastic
6 | elasticsearch.password: "123456"
7 |
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/es/elastic-certificates.p12:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/generals-space/devops-cloud-stack/HEAD/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/es/elastic-certificates.p12
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts: [ "http://elasticsearch:9200" ]
4 |
5 | elasticsearch.username: elastic
6 | elasticsearch.password: "123456"
7 |
--------------------------------------------------------------------------------
/elk [k8s kube]/06.es-cluster [sts statefulset 5.x 5.5.0]/cm/kibana/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: 0.0.0.0
3 | server.port: 5601
4 |
5 | elasticsearch.url: http://es-master-svc:9200
6 | elasticsearch.requestTimeout: 60000
7 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/filebeat/config/filebeat.yml:
--------------------------------------------------------------------------------
1 | filebeat:
2 | inputs:
3 | - type: log
4 | enabled: true
5 | paths:
6 | - /var/log/nginx/access_json.log
7 | output:
8 | logstash:
9 | hosts: ["logstash:5044"]
10 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | 1. [ELK + Filebeat 搭建日志系统](http://beckjin.com/2017/12/10/elk/)
6 |
7 | 2. [Plugins Inputs Beats](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html)
8 |
9 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/es/elastic-certificates.p12:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/generals-space/devops-cloud-stack/HEAD/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/es/elastic-certificates.p12
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts: [ "http://elasticsearch:9200" ]
4 |
5 | elasticsearch.username: elastic
6 | elasticsearch.password: "123456"
7 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/filebeat/config/filebeat.yml:
--------------------------------------------------------------------------------
1 | filebeat:
2 | inputs:
3 | - type: log
4 | enabled: true
5 | paths:
6 | - /var/log/nginx/access_json.log
7 | output:
8 | logstash:
9 | hosts: ["logstash:5044"]
10 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | 1. [ELK + Filebeat 搭建日志系统](http://beckjin.com/2017/12/10/elk/)
6 |
7 | 2. [Plugins Inputs Beats](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html)
8 |
9 |
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/cm/kibana/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: 0.0.0.0
3 | server.port: 5601
4 |
5 | #### Here `es` is the elasticsearch service name
6 | ## elasticsearch.hosts: [ "http://es:9200" ]
7 | elasticsearch.username: elastic
8 | elasticsearch.password: "123456"
9 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/kibana/kibana.keystore:
--------------------------------------------------------------------------------
1 | 1:OABHPIHvpI3ootPUAJBggtU7WEQc2v7HzuEs2PhOVVdbPIZIUItvcxD6ZUf2tKJtjr41uVqk3M9T+j2uwXA+MoQuf9ine0cjDIBYEYn5KDcY2gAwzwtX4jNqPdtdVU2AnJdS8D5apjfeflh/w71UXn1vtzYlcjdVVEaxHE8TdLEndJIHB3DuURLlqSrutE2oP/npwsTP9qAi1M7xKKId8Fwy
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/kibana/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: 0.0.0.0
3 | server.port: 5601
4 |
5 | #### Here `es` is the elasticsearch service name
6 | ## elasticsearch.hosts: [ "http://es:9200" ]
7 | elasticsearch.username: elastic
8 | elasticsearch.password: "123456"
9 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.url: http://esc-master-0:9200
4 |
5 | ## xpack.security.enabled: true
6 | ## elasticsearch.username: elastic
7 | ## elasticsearch.password: changeme
8 |
--------------------------------------------------------------------------------
/prometheus/00-cmd.sh:
--------------------------------------------------------------------------------
1 | k create cm prometheus-config -n monitoring --from-file=./config/prometheus.yaml
2 | k create cm prometheus-rules -n monitoring --from-file=./config/rules
3 | k create cm blackbox-exporter-config -n monitoring --from-file=config.yml=./config/blackbox-exporter.yaml
4 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/filebeat/config/filebeat.yml:
--------------------------------------------------------------------------------
1 | filebeat:
2 | inputs:
3 | - type: log
4 | enabled: true
5 | paths:
6 | - /var/log/nginx/access_json.log
7 | output:
8 | kafka:
9 | enabled: true
10 | hosts: ["kafka:9092"]
11 | topic: "nginx"
12 |
--------------------------------------------------------------------------------
/elk [k8s kube]/kibana-配置[映射 环境变量].md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [Install Kibana with Docker](https://www.elastic.co/guide/en/kibana/current/docker.html)
4 |    - conversion rules between configuration-file keys and environment variables (dots are replaced with underscores); see the sketch below
5 | 2. [Configure Kibana](https://www.elastic.co/guide/en/kibana/current/settings.html)
6 |    - detailed reference for the kibana configuration file
7 |
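A minimal sketch of that mapping, passing the settings as environment variables instead of mounting a kibana.yml (image tag and values are illustrative only):

```
## server.name -> SERVER_NAME, elasticsearch.hosts -> ELASTICSEARCH_HOSTS, etc.
docker run -d --name kibana \
    -e SERVER_NAME=kibana \
    -e SERVER_HOST=0.0.0.0 \
    -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \
    -p 5601:5601 \
    docker.elastic.co/kibana/kibana:7.2.0
```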
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/kibana/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: 0.0.0.0
3 | server.port: 5601
4 |
5 | #### Here `es` is the elasticsearch service name
6 | ## elasticsearch.hosts: [ "http://es:9200" ]
7 | elasticsearch.username: elastic
8 | elasticsearch.password: "123456"
9 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/cm/kibana/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: 0.0.0.0
3 | server.port: 5601
4 |
5 | #### Here `es` is the elasticsearch service name
6 | ## elasticsearch.hosts: [ "http://es:9200" ]
7 | elasticsearch.username: elastic
8 | elasticsearch.password: "123456"
9 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/kibana/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: 0.0.0.0
3 | server.port: 5601
4 |
5 | #### Here `es` is the elasticsearch service name
6 | ## elasticsearch.hosts: [ "http://es:9200" ]
7 | elasticsearch.username: elastic
8 | elasticsearch.password: "123456"
9 |
--------------------------------------------------------------------------------
/rancher/RKE/ubuntu下kuber环境准备.md:
--------------------------------------------------------------------------------
1 | Using rke still requires the prerequisites to be installed up front, including firewall settings, kernel modules and docker.
2 |
3 | For the kubectl apt source, refer to the official kubernetes documentation and pick the needed steps out of the kubeadm guide. The official source is quite slow, though; the same packages are also available from Alibaba Cloud's kubernetes mirror (see the sketch below).
4 |
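A minimal sketch of adding the Alibaba Cloud kubernetes apt source (the `kubernetes-xenial` codename is an assumption; pick the one matching your release):

```
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" \
    | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && sudo apt-get install -y kubectl
```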
5 | The docker installation steps for ubuntu can be found on Alibaba Cloud's mirror site. And don't forget to grant the ubuntu user permission to run docker.
6 |
7 | ```
8 | usermod -aG docker ubuntu
9 | ```
10 |
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | http.port: 9200
5 | ## path.data: /home/elasticsearch
6 | bootstrap.memory_lock: false
7 | bootstrap.system_call_filter: false
8 | cluster.initial_master_nodes: ["es-01"]
9 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | http.port: 9200
5 | ## path.data: /home/elasticsearch
6 | bootstrap.memory_lock: false
7 | bootstrap.system_call_filter: false
8 | cluster.initial_master_nodes: ["es-01"]
9 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | http.port: 9200
5 | ## path.data: /home/elasticsearch
6 | bootstrap.memory_lock: false
7 | bootstrap.system_call_filter: false
8 | cluster.initial_master_nodes: ["es-01"]
9 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts:
4 | - http://esc-master-0:9200
5 | - http://esc-master-1:9200
6 | - http://esc-master-2:9200
7 |
8 | elasticsearch.username: elastic
9 | elasticsearch.password: "123456"
10 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | http.port: 9200
5 | ## path.data: /home/elasticsearch
6 | bootstrap.memory_lock: false
7 | bootstrap.system_call_filter: false
8 | cluster.initial_master_nodes: ["es-01"]
9 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | server.name: kibana
2 | server.host: "0.0.0.0"
3 | elasticsearch.hosts:
4 | - http://esc-master-0:9200
5 | - http://esc-master-1:9200
6 | - http://esc-master-2:9200
7 |
8 | elasticsearch.username: elastic
9 | elasticsearch.password: "123456"
10 |
--------------------------------------------------------------------------------
/python-rq/producer.dockerfile:
--------------------------------------------------------------------------------
1 | ## docker build -f producer.dockerfile -t generals/rq-producer:1.0.1 .
2 | FROM generals/python3
3 |
4 | WORKDIR /project
5 | COPY ./project .
6 |
7 | RUN pip3 install --upgrade pip
8 | RUN pip3 install -r requirements.txt
9 |
10 | ## When using environment variables in CMD, the array (exec) form cannot be used
11 | CMD tail -f /etc/profile
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | http.port: 9200
5 | ## path.data: /home/elasticsearch
6 | bootstrap.memory_lock: false
7 | bootstrap.system_call_filter: false
8 | cluster.initial_master_nodes: ["es-01"]
9 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 8080;
3 | root /usr/share/nginx/html;
4 | access_log /var/log/nginx/access_json.log;
5 | location / {
6 | }
7 |
8 | error_page 404 /404.html;
9 | location = /40x.html {
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/elk [dc]/10.elk-filebeat-grok-pipeline/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 8080;
3 | root /usr/share/nginx/html;
4 | access_log /var/log/nginx/access_json.log json;
5 | location / {
6 | }
7 |
8 | error_page 404 /404.html;
9 | location = /40x.html {
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/redis-cluster/03.svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis-access-service
5 | labels:
6 | app: redis
7 | spec:
8 | ports:
9 | - name: redis-port
10 | protocol: "TCP"
11 | port: 6379
12 | targetPort: 6379
13 | selector:
14 | app: redis
15 | appCluster: redis-cluster
16 |
--------------------------------------------------------------------------------
/elk [dc]/10.elk-filebeat-grok-pipeline/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | http.port: 9200
5 | ## path.data: /home/elasticsearch
6 | bootstrap.memory_lock: false
7 | bootstrap.system_call_filter: false
8 | cluster.initial_master_nodes: ["es-01"]
9 |
10 |
--------------------------------------------------------------------------------
/redis-cluster/01.headless.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis-service
5 | labels:
6 | app: redis
7 | spec:
8 | ports:
9 | - name: redis-port
10 | port: 6379
11 |   ## This is what makes it a headless service
12 | clusterIP: None
13 | selector:
14 | app: redis
15 | appCluster: redis-cluster
16 |
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/producer.dockerfile:
--------------------------------------------------------------------------------
1 | ## docker build -f producer.dockerfile -t generals/celery-producer:1.0.1 .
2 | FROM generals/python36
3 |
4 | WORKDIR /project
5 | COPY ./project .
6 |
7 | RUN pip3 install --upgrade pip
8 | RUN pip3 install -r requirements.txt
9 |
10 | ## When using environment variables in CMD, the array (exec) form cannot be used
11 | CMD tail -f /etc/profile
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | 1. [Logstash filter 的使用](https://techlog.cn/article/list/10182917)
6 |
7 | Compared with the first example, this one deploys nginx on 2 nodes and also runs 2 logstash instances, each handling the logs of one of the nginx nodes, to simulate the multi-project, multi-node situation of real deployments.
8 |
9 | Operationally there is no difference from example 01; the only difference is in the pipeline directories, where the config files use a filter to add a `_nodename` field for each node, so that the two nodes can be told apart.
10 |
11 | 
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/project/producer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import time
4 | from tasks.task import write_into_redis
5 |
6 | ## Standard usage: dispatch the task. Since write_into_redis() is decorated by app.task (i.e. it is a task object),
7 | ## calling it via `delay` automatically sends it to the broker that was passed in when the app was instantiated.
8 | while True:
9 | time.sleep(5)
10 | write_into_redis.delay('https://www.baidu.com')
11 |
--------------------------------------------------------------------------------
/elk [k8s kube]/06.es-cluster [sts statefulset 5.x 5.5.0]/00.cmd.sh:
--------------------------------------------------------------------------------
1 | ## Essentially, a cm created with --from-file pointing at a single file is the same as one created from a directory that just happens to contain only that file.
2 | ## If a directory is given on the command line, every file under that directory is included.
3 | kubectl create configmap es-config --from-file=./cm/es/elasticsearch.yml
4 | kubectl create configmap kibana-config --from-file=./cm/kibana/kibana.yml
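## For example (hypothetical, not used by this example), the directory form would pick up every file under ./cm/es/ as a separate key:
## kubectl create configmap es-config --from-file=./cm/es/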
5 |
6 | ## kubectl delete cm es-config kibana-config
7 |
--------------------------------------------------------------------------------
/python-rq/project/jobs/producer.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from redis import Redis
3 |
4 | def count_words_at_url(url):
5 | resp = requests.get(url)
6 | return len(resp.text.split())
7 |
8 | redis_conn = Redis(host='redis-serv', port=6379, db=1)
9 |
10 | def write_into_redis(url):
11 | length = count_words_at_url(url)
12 | redis_conn.lpush('words_count', length)
13 |
--------------------------------------------------------------------------------
/prometheus/docs/kube-state-metrics.md:
--------------------------------------------------------------------------------
1 | # kube-state-metrics
2 |
3 | References
4 |
5 | 1. [容器监控实践—kube-state-metrics](https://www.jianshu.com/p/2c899452ab5a)
6 |    - the `prometheus.io/scrape: 'true'` annotation is what exposes the metrics to prometheus (see the sketch below)
7 |    - `metric-server`/`heapster` exist to provide the metrics that components such as HPA base their decisions on.
8 | 2. [kubernetes/kube-state-metrics](https://github.com/kubernetes/kube-state-metrics/tree/master/examples/standard)
9 |
10 |
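A minimal sketch of attaching the scrape annotation to a service (namespace and service names are just examples; whether it takes effect depends on the relabeling rules in the prometheus scrape config):

```
kubectl -n kube-system annotate service kube-state-metrics \
    prometheus.io/scrape='true' --overwrite
```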
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/cm/es/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | ## Port serving client requests
5 | http.port: 9200
6 | ## Port used for communication with other nodes inside the cluster
7 | transport.tcp.port: 9300
8 | ## path.data: /home/elasticsearch
9 | bootstrap.memory_lock: false
10 | bootstrap.system_call_filter: false
11 | ## The array members here are the node.name values of the nodes.
12 | cluster.initial_master_nodes: ["es-01"]
13 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/kibana-ERR_TOO_MANY_REDIRECTS 重定向次数过多.md:
--------------------------------------------------------------------------------
1 | # kibana-ERR_TOO_MANY_REDIRECTS 重定向次数过多
2 |
3 | References
4 |
5 | At first I installed the `x-pack` plugin following the official docs; after restarting the cluster and opening `kibana`, the browser showed the following error.
6 |
7 | 
8 |
9 | I tried several fixes found online and none of them helped; it turned out my config file contained two `username` fields and no `password` field at all...
10 |
11 | In short, when `kibana` runs into this error it is usually because the `es` username/password is incorrect.
12 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/logstash/pipeline/main.conf:
--------------------------------------------------------------------------------
1 | input {
2 | kafka {
3 | bootstrap_servers => "kafka:9092"
4 | topics => ["nginx"]
5 | codec => "json"
6 | }
7 | }
8 | output {
9 | elasticsearch {
10 | hosts => "elasticsearch:9200"
11 | user => "elastic"
12 | password => "123456"
13 | index => "nginx-log-%{+YYYY.MM.dd}"
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/logstash/pipeline/main.conf:
--------------------------------------------------------------------------------
1 | input {
2 | kafka {
3 | bootstrap_servers => "kafka:9092"
4 | topics => ["nginx"]
5 | codec => "json"
6 | }
7 | }
8 | output {
9 | elasticsearch {
10 | hosts => "elasticsearch:9200"
11 | user => "elastic"
12 | password => "123456"
13 | index => "nginx-log-%{+YYYY.MM.dd}"
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: esc
2 | network.host: 0.0.0.0
3 | http.port: 9200
4 | ## path.data: /home/elasticsearch
5 | bootstrap.memory_lock: false
6 | bootstrap.system_call_filter: false
7 | cluster.initial_master_nodes:
8 | - esc-master-0
9 | - esc-master-1
10 | - esc-master-2
11 | discovery.seed_hosts:
12 | - esc-master-0
13 | - esc-master-1
14 | - esc-master-2
15 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: esc
2 | network.host: 0.0.0.0
3 | http.port: 9200
4 | ## path.data: /home/elasticsearch
5 | bootstrap.memory_lock: false
6 | bootstrap.system_call_filter: false
7 | cluster.initial_master_nodes:
8 | - esc-master-0
9 | - esc-master-1
10 | - esc-master-2
11 | discovery.seed_hosts:
12 | - esc-master-0
13 | - esc-master-1
14 | - esc-master-2
15 |
--------------------------------------------------------------------------------
/ansible [dc]/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | master:
5 | image: generals/ansible-node
6 | volumes:
7 | - ./ansible/hosts:/etc/ansible/hosts
8 | slave-01:
9 | image: generals/ansible-node
10 | slave-02:
11 | image: generals/ansible-node
12 | slave-03:
13 | image: generals/ansible-node
14 | slave-04:
15 | image: generals/ansible-node
16 | slave-05:
17 | image: generals/ansible-node
18 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/logstash/pipeline/main.conf:
--------------------------------------------------------------------------------
1 | input {
2 | beats {
3 | port => 5044
4 | codec => "json"
5 |     ## logstash drops the connection when filebeat sends nothing for a while; this field sets that idle timeout, in seconds
6 | client_inactivity_timeout => 36000
7 | }
8 | }
9 | output {
10 | elasticsearch {
11 | hosts => "elasticsearch:9200"
12 | user => "elastic"
13 | password => "123456"
14 | index => "nginx-log-%{+YYYY.MM.dd}"
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/logstash/pipeline/main.conf:
--------------------------------------------------------------------------------
1 | input {
2 | beats {
3 | port => 5044
4 | codec => "json"
5 |     ## logstash drops the connection when filebeat sends nothing for a while; this field sets that idle timeout, in seconds
6 | client_inactivity_timeout => 36000
7 | }
8 | }
9 | output {
10 | elasticsearch {
11 | hosts => "elasticsearch:9200"
12 | user => "elastic"
13 | password => "123456"
14 | index => "nginx-log-%{+YYYY.MM.dd}"
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/es/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | node.name: es-01
3 | network.host: 0.0.0.0
4 | ## Port serving client requests
5 | http.port: 9200
6 | ## Port used for communication with other nodes inside the cluster
7 | transport.tcp.port: 9300
8 | ## path.data: /home/elasticsearch
9 | bootstrap.memory_lock: false
10 | bootstrap.system_call_filter: false
11 | ## The array members here are the node.name values of the nodes.
12 | cluster.initial_master_nodes: ["es-01"]
13 |
14 | ## This setting enables the x-pack authentication mechanism
15 | xpack.security.enabled: true
16 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: esc
2 | node.name: ${ES_NODE_NAME}
3 | ## defaults to false
4 | node.master: ${ES_NODE_MASTER:false}
5 | node.data: ${ES_NODE_DATA:false}
6 |
7 | network.host: 0.0.0.0
8 | http.port: 9200
9 | ## path.data: /home/elasticsearch
10 | bootstrap.memory_lock: false
11 | bootstrap.system_call_filter: false
12 | discovery.zen.ping.unicast.hosts:
13 | - esc-master-0
14 | - esc-master-1
15 | - esc-master-2
16 |
--------------------------------------------------------------------------------
/elk [dc]/10.elk-filebeat-grok-pipeline/filebeat/config/filebeat.yml:
--------------------------------------------------------------------------------
1 | filebeat:
2 | inputs:
3 | - type: log
4 | enabled: true
5 | paths:
6 | - /var/log/nginx/access_json.log
7 |     fields:
8 | log_topics: nginx-log
9 | output:
10 | elasticsearch:
11 | enabled: true
12 | hosts: ["elasticsearch:9200"]
13 |     ## The pipeline referenced here is created in es;
14 |     ## a different pipeline name can be specified for each input
15 | pipeline: '%{[log_topics]}'
16 |
17 | logging.level: debug
18 |
--------------------------------------------------------------------------------
/prometheus/04.ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: monitoring
5 | spec:
6 | rules:
7 | - host: grafana.kube.com
8 | http:
9 | paths:
10 | - path: /
11 | backend:
12 | serviceName: grafana
13 | servicePort: 3000
14 | - host: prometheus.kube.com
15 | http:
16 | paths:
17 | - path: /
18 | backend:
19 | serviceName: prometheus
20 | servicePort: 9090
21 |
--------------------------------------------------------------------------------
/elk [k8s kube]/es-配置.md:
--------------------------------------------------------------------------------
1 | # es-配置
2 |
3 | References
4 |
5 | 1. [Official docs: Configuring Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html)
6 |    - es has 3 configuration files (the latter two show that es is a typical java project):
7 |      1. `elasticsearch.yml` configures the behaviour of es itself
8 |      2. `jvm.options` configures the jvm parameters of es
9 |      3. `log4j2.properties` configures the log format of es
10 | 2. [Install Elasticsearch with Docker](https://www.elastic.co/guide/en/elasticsearch/reference/5.5/docker.html)
11 |    - conversion rules between configuration-file keys and environment variables (see the sketch below)
12 |
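A minimal sketch of driving those settings through the container environment instead of editing the files directly (image tag and values are illustrative only):

```
## Plain elasticsearch.yml settings can be passed as -e key=value;
## ES_JAVA_OPTS overrides the heap size that would otherwise come from jvm.options.
docker run -d --name es \
    -e cluster.name=my-cluster \
    -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
    -p 9200:9200 \
    docker.elastic.co/elasticsearch/elasticsearch:5.5.0
```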
--------------------------------------------------------------------------------
/prometheus/kube-state-metrics/03.svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.7
7 | name: kube-state-metrics
8 | namespace: kube-system
9 | spec:
10 | clusterIP: None
11 | ports:
12 | - name: http-metrics
13 | port: 8080
14 | targetPort: http-metrics
15 | - name: telemetry
16 | port: 8081
17 | targetPort: telemetry
18 | selector:
19 | app.kubernetes.io/name: kube-state-metrics
--------------------------------------------------------------------------------
/prometheus/01.rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: monitoring
5 |
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1beta1
8 | kind: ClusterRoleBinding
9 | metadata:
10 | name: monitoring-default-admin
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: ClusterRole
14 | name: cluster-admin
15 | subjects:
16 | - kind: ServiceAccount
17 |   ## Bind the default ServiceAccount in the monitoring namespace to the cluster-admin role, giving it full admin privileges.
18 | name: default
19 | namespace: monitoring
20 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/elasticsearch/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: esc
2 | node.name: ${ES_NODE_NAME}
3 | ## defaults to false
4 | node.master: ${ES_NODE_MASTER:false}
5 | node.data: ${ES_NODE_DATA:false}
6 |
7 | network.host: 0.0.0.0
8 | http.port: 9200
9 | ## path.data: /home/elasticsearch
10 | bootstrap.memory_lock: false
11 | bootstrap.system_call_filter: false
12 | discovery.zen.ping.unicast.hosts:
13 | - esc-master-0
14 | - esc-master-1
15 | - esc-master-2
16 |
17 | ## xpack.security.enabled: true
18 |
--------------------------------------------------------------------------------
/redis-cluster[dc]/config/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | user nginx;
2 |
3 | events {
4 | worker_connections 10240;
5 | }
6 | stream {
7 | upstream redis-server {
8 | least_conn;
9 | server mcp-redis:6379 max_fails=1 fail_timeout=10s;
10 | }
11 | server {
12 | listen 6379;
13 | proxy_pass redis-server;
14 | proxy_timeout 30m;
15 |         ## Note: this proxy_pass sits directly inside the server block,
16 |         ## and this is the stream block, not the http module.
17 |         ## Because the backend speaks https, only layer-4 forwarding can be used; otherwise the certificate cannot be passed through.
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/data/logs/nginx/error.log:
--------------------------------------------------------------------------------
1 | 2020/09/16 03:59:12 [error] 7#7: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 172.25.0.1, server: , request: "GET /favicon.ico HTTP/1.1", host: "localhost:9001", referrer: "http://localhost:9001/"
2 | 2020/09/16 03:59:12 [error] 7#7: *1 open() "/usr/share/nginx/html/404.html" failed (2: No such file or directory), client: 172.25.0.1, server: , request: "GET /favicon.ico HTTP/1.1", host: "localhost:9001", referrer: "http://localhost:9001/"
3 |
--------------------------------------------------------------------------------
/redis-cluster/cm/redis.conf:
--------------------------------------------------------------------------------
1 | # Enable Redis AOF persistence
2 | appendonly yes
3 | # Enable cluster mode
4 | cluster-enabled yes
5 | # Cluster node timeout
6 | cluster-node-timeout 5000
7 | # Directory where the AOF persistence files are stored
8 | dir /var/lib/redis
9 | # Listening port
10 | port 6379
11 | ## cluster-config-file: sets the path of the file that stores this node's cluster configuration.
12 | ## If the file does not exist, each node generates a new ID for itself at startup and saves it there;
13 | ## the instance keeps using that same ID, keeping a unique name within the cluster.
14 | ## Nodes record each other by ID rather than by IP or port, because in k8s IP addresses are not fixed,
15 | ## while this unique identifier stays the same for the node's whole lifetime.
16 | ## The node ID is what this file stores.
17 | cluster-config-file /var/lib/redis/nodes.conf
18 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/data/logs/nginx/error.log:
--------------------------------------------------------------------------------
1 | 2020/09/16 03:59:12 [error] 7#7: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 172.25.0.1, server: , request: "GET /favicon.ico HTTP/1.1", host: "localhost:9001", referrer: "http://localhost:9001/"
2 | 2020/09/16 03:59:12 [error] 7#7: *1 open() "/usr/share/nginx/html/404.html" failed (2: No such file or directory), client: 172.25.0.1, server: , request: "GET /favicon.ico HTTP/1.1", host: "localhost:9001", referrer: "http://localhost:9001/"
3 |
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/logstash/pipeline/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 |     codec => json ## nginx is configured to log in json format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | hosts => "elasticsearch:9200"
13 | user => "elastic"
14 | password => "123456"
15 | index => "nginx-log-%{+YYYY.MM.dd}"
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/logstash/config/logstash.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 |     codec => json ## nginx is configured to log in json format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | hosts => "esc-master-0:9200"
13 | user => "elastic"
14 | password => "123456"
15 | index => "nginx-log-%{+YYYY.MM.dd}"
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/python-rq/project/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from rq import Queue
4 | from redis import Redis
5 | import time
6 | from jobs.producer import write_into_redis, count_words_at_url
7 |
8 | redis_conn = Redis(host='redis-serv', port=6379)
9 |
10 | q = Queue(connection=redis_conn)
11 |
12 | while True:
13 | time.sleep(5)
14 | ## job = q.enqueue(count_words_at_url, 'https://www.baidu.com')
15 | job = q.enqueue(write_into_redis, 'https://www.baidu.com')
16 |
17 | ## Deleting this queue object also deletes every job in the queue.
18 | ## After that it can no longer be used unless it is re-instantiated.
19 | ## q.delete(delete_jobs=True)
20 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/logstash/config/logstash.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | hosts => "esc-master-0:9200"
13 | user => "elastic"
14 | password => "changeme"
15 | index => "nginx-log-%{+YYYY.MM.dd}"
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/cm/logstash/pipeline/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | ## `es` here is the name of the elasticsearch Service
13 | hosts => "es:9200"
14 | user => "elastic"
15 | password => "123456"
16 | index => "nginx-log-%{+YYYY.MM.dd}"
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/logstash/pipeline/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | ## `es` here is the name of the elasticsearch Service
13 | hosts => "es:9200"
14 | user => "elastic"
15 | password => "123456"
16 | index => "nginx-log-%{+YYYY.MM.dd}"
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/elk [dc]/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | The es container requires the host's `vm.max_map_count` to be greater than 262144; otherwise it exits during startup with the error below, so remember to set it in advance (see the commands at the end of this file).
4 |
5 | ```
6 | elasticsearch_1 | ERROR: [1] bootstrap checks failed
7 | elasticsearch_1 | [1]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
8 | ```
9 |
10 | ## 示例列表
11 |
12 | 1. [Minimal single-node ELK example](./01.elk-single/readme.md)
13 | 2. [Multi-node ELK with multiple logstash instances](./02.elk-logstash-lb/readme.md)
14 | 3. [ELK+filebeat](./03.elk-filebeat/readme.md)
15 | 4. [ELK+filebeat+kafka](./04.elk-filebeat-kafka/readme.md)
16 | 5. [ELK+filebeat+kafka+Aliyun log-pilot](./05.elk-filebeat-kafka-logpilot/readme.md)
17 |
18 |
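19 | A minimal sketch of setting the kernel parameter mentioned above (run on each Docker host, not inside the container; persisting via `/etc/sysctl.conf` is an assumption about how the host is managed):
20 |
21 | ```
22 | ## take effect immediately
23 | sysctl -w vm.max_map_count=262144
24 | ## persist across reboots
25 | echo 'vm.max_map_count=262144' >> /etc/sysctl.conf
26 | sysctl -p
27 | ```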
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/00.cmd.sh:
--------------------------------------------------------------------------------
1 | ## Essentially, a cm created with --from-file pointing at a single file is still treated like a directory, just one containing a single file.
2 | ## If a directory is given on the command line, every file under that directory is included.
3 | kubectl create configmap es-config --from-file=./cm/es/elasticsearch.yml
4 | kubectl create configmap kibana-config --from-file=./cm/kibana/kibana.yml
5 | kubectl create configmap logstash-config --from-file=./cm/logstash/logstash.yml
6 | kubectl create configmap logstash-pipeline-config --from-file=./cm/logstash/pipeline
7 | kubectl create configmap nginx-config --from-file=./cm/nginx/nginx.conf
8 |
9 | ## kubectl delete cm es-config kibana-config logstash-config logstash-pipeline-config nginx-config
10 |
--------------------------------------------------------------------------------
/harbor/exports:
--------------------------------------------------------------------------------
1 | ## The redis, database and registry directories are owned by root, the others by nfsnobody
2 | /mnt/nfsvol/harbor/redis 192.168.7.0/24(rw,sync,no_all_squash,no_root_squash)
3 | /mnt/nfsvol/harbor/database 192.168.7.0/24(rw,sync,no_all_squash,no_root_squash)
4 | /mnt/nfsvol/harbor/ca_download 192.168.7.0/24(rw,sync,no_all_squash,root_squash)
5 | /mnt/nfsvol/harbor/job_logs 192.168.7.0/24(rw,sync,no_all_squash,root_squash)
6 | /mnt/nfsvol/harbor/psc 192.168.7.0/24(rw,sync,no_all_squash,root_squash)
7 | /mnt/nfsvol/harbor/registry 192.168.7.0/24(rw,sync,no_all_squash,no_root_squash)
8 | /mnt/nfsvol/harbor/proxy_nginx 192.168.7.0/24(rw,sync,no_all_squash,root_squash)
9 |
--------------------------------------------------------------------------------
/prometheus/docs/grafana使用.1.添加数据源.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | 
4 |
5 | 
6 |
7 | Then fill in the information for the prometheus cluster.
8 |
9 | 
10 |
11 | Only the url under HTTP needs to be filled in here, since basic auth is not configured; the rest can wait. After saving, the prompt below indicates everything is working.
12 |
13 | 
14 |
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/logstash/pipeline/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | ## `es-cluster` here is the name of the elasticsearch Service
13 | hosts => "es-cluster:9200"
14 | user => "elastic"
15 | password => "123456"
16 | index => "nginx-log-%{+YYYY.MM.dd}"
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/cm/logstash/pipeline/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | ## `es-cluster` here is the name of the elasticsearch Service
13 | hosts => "es-cluster:9200"
14 | user => "elastic"
15 | password => "123456"
16 | index => "nginx-log-%{+YYYY.MM.dd}"
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/logstash/pipeline/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | output {
10 | if [type] == "nginx-log"{
11 | elasticsearch {
12 | ## `es-cluster` here is the name of the elasticsearch Service
13 | hosts => "es-cluster:9200"
14 | user => "elastic"
15 | password => "123456"
16 | index => "nginx-log-%{+YYYY.MM.dd}"
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/kibana 5.5.0 的配置.md:
--------------------------------------------------------------------------------
1 | # kibana 5.5.0 configuration
2 |
3 | Compared with 7.2.0, the kibana 5.5.0 UI offers far fewer features.
4 |
5 | After entering, the home page shows the following screen.
6 |
7 | 
8 |
9 | Following the `output` format configured in `logstash`, we create an index pattern prefixed with `nginx-log-*`.
10 |
11 | 
12 |
13 | Click "Create"; the following screen appears.
14 |
15 | 
16 |
17 | Click "Discover" in the left sidebar.
18 |
19 | 
20 |
21 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/00.cmd.sh:
--------------------------------------------------------------------------------
1 | ## Essentially, a cm created with --from-file pointing at a single file is still treated like a directory, just one containing a single file.
2 | ## If a directory is given on the command line, every file under that directory is included.
3 | kubectl create configmap es-config --from-file=./cm/es/elasticsearch.yml
4 | kubectl create configmap kibana-config --from-file=./cm/kibana/kibana.yml
5 | kubectl create configmap logstash-config --from-file=./cm/logstash/logstash.yml
6 | kubectl create configmap logstash-pipeline-config --from-file=./cm/logstash/pipeline
7 | kubectl create configmap nginx-config --from-file=./cm/nginx/nginx.conf
8 |
9 | ## kubectl delete cm es-config kibana-config logstash-config logstash-pipeline-config nginx-config
10 |
--------------------------------------------------------------------------------
/prometheus/config/rules/kubelet.yaml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: kubelet.rules
3 | rules:
4 | - alert: K8SKubeletDown
5 | expr: up{job="kubelet"} == 0
6 | for: 5m
7 | labels:
8 | severity: Warning
9 | annotations:
10 | description: "Node: {{$labels.kubernetes_io_hostname}} kubelet service is unreachable for 5 minutes."
11 | - alert: K8SKubeletTooManyPods
12 | expr: kubelet_running_pod_count > 100
13 | for: 3m
14 | labels:
15 | severity: Warning
16 | annotations:
17 | description: Kubelet {{$labels.instance}} is running {{$value}} pods, close to the limit of 110
18 | summary: Kubelet is close to pod limit
--------------------------------------------------------------------------------
/python-rq/readme.md:
--------------------------------------------------------------------------------
1 | This project sets up a python-rq cluster. The producer and worker images have to be built first; once the cluster is up, the `rq-dashboard` web UI is available at `localhost:8080`.
2 |
3 | Unlike what I had imagined, a scheduling system really only needs the **task** objects to be written and put into the queue by the **producer**; after that nothing more is required. The so-called worker is just the process started by `rq worker` and needs no code of its own: it pulls tasks off the queue and executes them directly, and any number of workers can be started (see the sketch at the end of this file).
4 |
5 | Note that on the worker node the **task function** to be executed must be **importable**, whether it lives on `sys.path` or is reached via a relative path from inside the project directory. I believe the path is resolved the same way it was imported when the job was enqueued.
6 |
7 | In fact, the celery official docs say as much:
8 |
9 | > The celery program can be used to start the worker (**you need to run the worker in the directory above project**)
10 |
11 | I tried it: a task defined in the producer that uses a redis connection also works when executed in the worker.
12 |
13 | The key points are the task return values and the handling of the various failure states. How failure states are stored in redis, their expiry, and the retry mechanism are left aside for now because the documentation is thin (the official docs are poorly organized); I will come back to this after comparing with celery.
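14 |
15 | A minimal sketch of the model described above, using the names from this project's `docker-compose.yml` (`redis-serv`, the `/project` mount): the producer only enqueues, while workers are started with the stock `rq` CLI from a directory where the task module is importable.
16 |
17 | ```
18 | ## start a worker from the project root so that jobs.producer can be imported
19 | cd /project && rq worker --url redis://redis-serv:6379/0
20 | ## inspect queues and workers from the command line
21 | rq info --url redis://redis-serv:6379/0
22 | ```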
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/readme.md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [python之celery使用详解一](https://www.cnblogs.com/cwp-bg/p/8759638.html)
4 |
5 | Reference 1 explains celery's architecture from a regular programming point of view, including how tasks are published and how workers are started; it is much better than the articles that basically copy the official docs.
6 |
7 | In celery the app object is very versatile: it can decorate ordinary functions into task objects, and the same code can also be run in the worker role. The parameters given at instantiation specify the broker address that tasks are sent to, and when acting as a worker it fetches tasks from that same broker.
8 |
9 | ------
10 |
11 | celery has both a task queue and a result queue, one more (the result queue) than rq. The result queue is disabled by default, which makes sense for asynchronous tasks where the result is not needed immediately; even if you do want results, the dispatched task should be persisted first and the result stored when the worker finishes, probably together with execution time, failure state and so on.
12 |
13 | Also, at first I did not understand what celery is actually for, mainly because I did not understand its scheduling model. The model I had in mind was the traditional producer + consumer + message queue structure, but when learning celery it seems to have no producer concept: you define task objects and then just start the consumers. That left me puzzled about where task objects come from - surely there has to be some loop creating them...
14 |
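15 | A minimal sketch of that flow, using the commands already present in this project's `docker-compose.yml` (`tasks.task` is the module that defines the Celery app):
16 |
17 | ```
18 | ## start a worker that registers the tasks defined in tasks/task.py
19 | celery -A tasks.task worker
20 | ## ask the running workers which tasks they have registered
21 | celery -A tasks.task inspect registered
22 | ```
23 |
24 | The "producer" is then just ordinary code calling `write_into_redis.delay(url)`; the worker picks the task up from the broker, which answers the question of where tasks come from.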
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/logstash/pipeline01/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | filter {
10 | mutate {
11 | ## use a filter to add the node name, so entries can be categorized
12 | add_field => {"_nodename" => "node-01"}
13 | }
14 | }
15 | output {
16 | if [type] == "nginx-log"{
17 | elasticsearch {
18 | hosts => "elasticsearch:9200"
19 | user => "elastic"
20 | password => "123456"
21 | index => "nginx-log-%{+YYYY.MM.dd}"
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/logstash/pipeline02/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | codec => json ## the nginx log is configured in JSON format, so the codec here is set to json.
5 | start_position => "beginning"
6 | type => "nginx-log"
7 | }
8 | }
9 | filter {
10 | mutate {
11 | ## use a filter to add the node name, so entries can be categorized
12 | add_field => {"_nodename" => "node-02"}
13 | }
14 | }
15 | output {
16 | if [type] == "nginx-log"{
17 | elasticsearch {
18 | hosts => "elasticsearch:9200"
19 | user => "elastic"
20 | password => "123456"
21 | index => "nginx-log-%{+YYYY.MM.dd}"
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/cm/es/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | ## the Pod name generated by the sts
3 | node.name: ${POD_NAME}
4 | network.host: 0.0.0.0
5 | ## port that serves client requests
6 | http.port: 9200
7 | ## port used for intra-cluster communication with other nodes
8 | transport.tcp.port: 9300
9 | ## path.data: /home/elasticsearch
10 | bootstrap.memory_lock: false
11 | bootstrap.system_call_filter: false
12 | ## the array members here are the node.name values of each node.
13 | cluster.initial_master_nodes:
14 | - ${CLUSTER_NAME}-0
15 | - ${CLUSTER_NAME}-1
16 | - ${CLUSTER_NAME}-2
17 | ## candidate addresses this node will contact; the port may be omitted and defaults to 9300.
18 | discovery.seed_hosts:
19 | - ${CLUSTER_NAME}-0.${SVC_NAME}
20 | - ${CLUSTER_NAME}-1.${SVC_NAME}
21 | - ${CLUSTER_NAME}-2.${SVC_NAME}
22 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/logstash/pipeline02/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | start_position => "beginning"
5 | type => "nginx-log"
6 | }
7 | }
8 | filter {
9 | grok {
10 | match => {
11 | "message" => "%{IP:client} - - \[%{HTTPDATE:timestamp}\] \"%{WORD:method} %{URIPATHPARAM:uri} HTTP/%{NUMBER:httpversion}\" %{NUMBER:status} %{NUMBER:bytes} \"-\" \"%{GREEDYDATA:agent}\""
12 | }
13 | }
14 | }
15 | output {
16 | if [type] == "nginx-log"{
17 | elasticsearch {
18 | hosts => "esc-master-0:9200"
19 | index => "nginx-log-%{+YYYY.MM.dd}"
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/harbor/kuber/08-portal.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: portal-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 80
11 | targetPort: 80
12 | selector:
13 | name: portal-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: portal
19 | namespace: harbor
20 | labels:
21 | name: portal
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: portal-apps
28 | spec:
29 | containers:
30 | - name: portal-app
31 | image: goharbor/harbor-portal:v1.8.2
32 | imagePullPolicy: IfNotPresent
33 |
--------------------------------------------------------------------------------
/elk [k8s kube]/问题处理/es启动报错-Native controller process has stopped.md:
--------------------------------------------------------------------------------
1 | # es startup error - Native controller process has stopped
2 | References
3 |
4 | 1. [Elasticsearch修改network后启动失败](https://www.cnblogs.com/phpper/p/9803934.html)
5 | 2. [elasticsearch启动报错: Native controller process has stopped - no new native processes can be started](https://blog.csdn.net/K_Lily/article/details/105320221)
6 |
7 | ```
8 | {"type": "server", "timestamp": "2020-06-21T09:17:24,362+0000", "level": "INFO", "component": "o.e.x.m.p.NativeController", "cluster.name": "elasticsearch", "node.name": "es-01", "message": "Native controller process has stopped - no new native processes can be started" }
9 | ```
10 |
11 | In practice, adding a single `sysctl -w vm.max_map_count=655300` in the initContainers is enough.
12 |
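13 | A quick sketch for applying and checking the fix (`es-0` is a placeholder pod name, not from this repo):
14 |
15 | ```
16 | ## on the node, or in a privileged initContainer
17 | sysctl -w vm.max_map_count=655300
18 | ## verify from inside the running es pod
19 | kubectl exec es-0 -- cat /proc/sys/vm/max_map_count
20 | ```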
--------------------------------------------------------------------------------
/python-rq/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | redis-serv:
5 | image: redis:4-alpine
6 | ports:
7 | - 6379:6379
8 | volumes:
9 | - ./data/redis:/data
10 | dashboard:
11 | image: generals/rq-producer
12 | ports:
13 | - 8080:8080
14 | command: rq-dashboard --bind 0.0.0.0 --port 8080 --redis-host=redis-serv --redis-port=6379
15 | producer-node:
16 | image: generals/rq-producer
17 | hostname: producer
18 | volumes:
19 | - ./project:/project
20 | command: ["python", "main.py"]
21 | worker-node:
22 | image: generals/rq-producer
23 | hostname: worker
24 | volumes:
25 | - ./project:/project
26 | command: rq worker --url redis://redis-serv:6379/0
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/00.cmd.sh:
--------------------------------------------------------------------------------
1 | ## Essentially, a cm created with --from-file pointing at a single file is still treated like a directory, just one containing a single file.
2 | ## If a directory is given on the command line, every file under that directory is included.
3 | kubectl create configmap es-config --from-file=./cm/es/elasticsearch.yml
4 | kubectl create configmap es-certs --from-file=./cm/es/elastic-certificates.p12
5 | kubectl create configmap kibana-config --from-file=./cm/kibana/kibana.yml
6 | kubectl create configmap logstash-config --from-file=./cm/logstash/logstash.yml
7 | kubectl create configmap logstash-pipeline-config --from-file=./cm/logstash/pipeline
8 | kubectl create configmap nginx-config --from-file=./cm/nginx/nginx.conf
9 |
10 | ## kubectl delete cm es-config kibana-config logstash-config logstash-pipeline-config nginx-config es-certs
11 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/00.cmd.sh:
--------------------------------------------------------------------------------
1 | ## Essentially, a cm created with --from-file pointing at a single file is still treated like a directory, just one containing a single file.
2 | ## If a directory is given on the command line, every file under that directory is included.
3 | kubectl create configmap es-config --from-file=./cm/es/elasticsearch.yml
4 | kubectl create configmap es-certs --from-file=./cm/es/elastic-certificates.p12
5 | kubectl create configmap kibana-config --from-file=./cm/kibana/kibana.yml
6 | kubectl create configmap logstash-config --from-file=./cm/logstash/logstash.yml
7 | kubectl create configmap logstash-pipeline-config --from-file=./cm/logstash/pipeline
8 | kubectl create configmap nginx-config --from-file=./cm/nginx/nginx.conf
9 |
10 | ## kubectl delete cm es-config kibana-config logstash-config logstash-pipeline-config nginx-config es-certs
11 |
--------------------------------------------------------------------------------
/prometheus/docs/grafana使用.2.添加仪表盘[dashboard].md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | Select the `Prometheus State` entry and click `Import`; this is the dashboard grafana ships for prometheus.
4 |
5 | 
6 |
7 | Once it succeeds, check it on the `Dashboard` management page.
8 |
9 | 
10 |
11 | Clicking it opens that dashboard directly.
12 |
13 | 
14 |
15 | Note that the home page is still empty at this point
16 |
17 | 
18 |
19 | Our `Prometheus State` panel shows up under `Recently Viewed` at the lower left.
20 |
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/project/tasks/task.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from celery import Celery
4 | from redis import Redis
5 | import requests
6 |
7 | app = Celery('celery-app', broker='redis://:celery_redis_pwd@redis-serv:6379/0')
8 | redis_conn = Redis(host='redis-serv', port=6379,password='celery_redis_pwd', db=1)
9 |
10 | def count_words_at_url(url):
11 | resp = requests.get(url)
12 | return len(resp.text.split())
13 |
14 | ## The task decorator wraps a regular function into a task object.
15 | ## It can then be registered in a worker (registering means this worker handles this kind of task),
16 | ## and it can also be called from your own system (via the task object's delay() method, which sends the task to the broker so a worker receives it).
17 | @app.task(name="write-count-to-redis")
18 | def write_into_redis(url):
19 | length = count_words_at_url(url)
20 | redis_conn.lpush('words_count', length)
21 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/logstash/pipeline01/nginx.conf:
--------------------------------------------------------------------------------
1 | input {
2 | file {
3 | path => "/var/log/nginx/access_json.log"
4 | start_position => "beginning"
5 | type => "nginx-log"
6 | }
7 | }
8 | filter {
9 | grok {
10 | match => {
11 | "message" => "%{IP:client} - - \[%{HTTPDATE:timestamp}\] \"%{WORD:method} %{URIPATHPARAM:uri} HTTP/%{NUMBER:httpversion}\" %{NUMBER:status} %{NUMBER:bytes} \"-\" \"%{GREEDYDATA:agent}\""
12 | }
13 | }
14 | }
15 | output {
16 | if [type] == "nginx-log"{
17 | elasticsearch {
18 | ## hosts => "esc-master-0:9200"
19 | hosts => ["esc-master-0:9200","esc-master-1:9200","esc-master-2:9200"]
20 | index => "nginx-log-%{+YYYY.MM.dd}"
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/05.head.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | ## pods generated by this deploy are also named es-head-xxx
6 | name: es-head
7 | labels:
8 | app: es-head
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | ## this label matches template -> metadata -> labels below,
14 | ## expressing the ownership relation
15 | app: es-head
16 | template:
17 | metadata:
18 | labels:
19 | app: es-head
20 | spec:
21 | containers:
22 | - name: es-head
23 | image: mobz/elasticsearch-head:5
24 | imagePullPolicy: IfNotPresent
25 | env:
26 | ## the value here is the name of the elasticsearch Service
27 | - name: ELASTICSEARCH_HOSTS
28 | value: http://es-cluster:9200
29 |
--------------------------------------------------------------------------------
/elk [k8s kube]/06.es-cluster [sts statefulset 5.x 5.5.0]/cm/es/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: ${CLUSTER_NAME}
2 | ## the Pod name generated by the sts
3 | node.name: ${POD_NAME}
4 | network.host: 0.0.0.0
5 | ## port that serves client requests
6 | http.port: 9200
7 | ## port used for intra-cluster communication with other nodes
8 | transport.tcp.port: 9300
9 |
10 | node.master: ${IS_MASTER}
11 | node.data: ${IS_DATA}
12 |
13 | ## path.data: /home/elasticsearch
14 | bootstrap.memory_lock: false
15 |
16 | discovery.zen.minimum_master_nodes: 2
17 | ## candidate addresses this node will contact; the port may be omitted and defaults to 9300.
18 | discovery.zen.ping.unicast.hosts:
19 | - ${MASTER_NAME}-0.${MASTER_SVC_NAME}:9300
20 | - ${MASTER_NAME}-1.${MASTER_SVC_NAME}:9300
21 | - ${MASTER_NAME}-2.${MASTER_SVC_NAME}:9300
22 |
23 | ## path.repo: /data/CLUSTER_NAME/${POD_NAME}/data/data_back
24 | ## path.logs: /data/CLUSTER_NAME/${POD_NAME}/log
25 | ## path.data: /data/CLUSTER_NAME/${POD_NAME}/data
26 | ##
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/cm/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/00.cmd.sh:
--------------------------------------------------------------------------------
1 | ## Essentially, a cm created with --from-file pointing at a single file is still treated like a directory, just one containing a single file.
2 | ## If a directory is given on the command line, every file under that directory is included.
3 | kubectl create configmap es-config --from-file=./cm/es/elasticsearch.yml
4 | kubectl create configmap kibana-config --from-file=./cm/kibana/kibana.yml
5 | kubectl create configmap logstash-config --from-file=./cm/logstash/logstash.yml
6 | kubectl create configmap logstash-pipeline-config --from-file=./cm/logstash/pipeline
7 | kubectl create configmap nginx-config --from-file=./cm/nginx/nginx.conf
8 |
9 | #### kibana keystore (encrypted password) configuration; its content is
10 | ## elasticsearch.username: elastic
11 | ## elasticsearch.password: "123456"
12 | ##
13 | ## kubectl create configmap kibana-keystore --from-file=./cm/kibana/kibana.keystore
14 |
15 | ## kubectl delete cm es-config kibana-config logstash-config logstash-pipeline-config nginx-config kibana-keystore
16 |
17 |
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/cm/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/nginx/config/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/cm/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format json '{"@timestamp":"$time_iso8601",'
2 | '"@version":"1",'
3 | '"client":"$remote_addr",'
4 | '"url":"$uri",'
5 | '"status":"$status",'
6 | '"domain":"$host",'
7 | '"host":"$server_addr",'
8 | '"size":$body_bytes_sent,'
9 | '"responsetime":$request_time,'
10 | '"referer": "$http_referer",'
11 | '"ua": "$http_user_agent"'
12 | '}';
13 |
14 | server {
15 | listen 8080;
16 | root /usr/share/nginx/html;
17 | access_log /var/log/nginx/access_json.log json;
18 | location / {
19 | }
20 |
21 | error_page 404 /404.html;
22 | location = /40x.html {
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/ansible [dc]/dockerfile:
--------------------------------------------------------------------------------
1 | ## docker build --no-cache=true -f dockerfile -t generals/ansible-node .
2 | FROM generals/python3
3 |
4 | ################################################################
5 | ## common docker image settings
6 | LABEL author=general
7 | LABEL email="generals.space@gmail.com"
8 | ## environment variable so the docker container supports Chinese
9 | ENV LANG C.UTF-8
10 |
11 | RUN yum install -y openssh-server openssh-clients \
12 | && yum clean all \
13 | && rm -rf /var/cache/yum
14 |
15 | RUN ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -P '' \
16 | && ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -P '' \
17 | && ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -P '' \
18 | && ssh-keygen -t rsa -f /root/.ssh/id_rsa -P '' \
19 | && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys \
20 | && echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config
21 |
22 | RUN pip3 install ansible
23 |
24 | CMD /sbin/sshd && tail -f /etc/profile
25 |
--------------------------------------------------------------------------------
/elk [k8s kube]/kibana使用方法.md:
--------------------------------------------------------------------------------
1 | ## Using kibana
2 |
3 | Once everything is in place, open the kibana web UI: http://localhost:5601.
4 |
5 | In a real deployment there are many nodes and many services sending logs to es; to pick out the service we care about, an index pattern has to be created for it first.
6 |
7 | On the main page, go to Management -> Elasticsearch[Index Management] on the left; you should see something like this
8 |
9 | 
10 |
11 | The indices follow the `nginx-log-%{+YYYY.MM.dd}` format configured in logstash.
12 |
13 | > Note: logstash needs read access to the nginx log directory; the shared directory created by kuber may have the wrong permissions, so it is best to manually set `/var/log/nginx` to `755` (see the quick checks at the end of this doc).
14 |
15 | Select Kibana[Index patterns] -> Create index pattern and enter `nginx-log-*` to build an index pattern that ignores the date.
16 |
17 | 
18 |
19 | 
20 |
21 | Then click Discover on the left.
22 |
23 | 
24 |
25 | The index pattern we created now shows up on the left; with several index patterns a drop-down lets the user pick one, each representing a different project.
26 |
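27 | If no `nginx-log-*` index shows up, a couple of quick checks (a sketch; `<node-ip>` and `<logstash-pod>` are placeholders, while the NodePort 31080 comes from this repo's svc definitions):
28 |
29 | ```
30 | ## generate some access log entries through the nginx NodePort
31 | curl http://<node-ip>:31080/
32 | ## make sure logstash can read the shared log directory
33 | kubectl exec <logstash-pod> -- ls -l /var/log/nginx
34 | kubectl exec <logstash-pod> -- chmod 755 /var/log/nginx
35 | ```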
--------------------------------------------------------------------------------
/elk [dc]/10.elk-filebeat-grok-pipeline/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | elasticsearch:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
11 | volumes:
12 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
13 | kibana:
14 | image: kibana:7.2.0
15 | ports:
16 | - 5601:5601
17 | volumes:
18 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
19 | nginx:
20 | image: nginx:1.12.0
21 | ports:
22 | - 9001:8080
23 | volumes:
24 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
25 | - ./data/logs/nginx:/var/log/nginx
26 | filebeat:
27 | image: elastic/filebeat:7.2.0
28 | volumes:
29 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
30 | - ./data/logs/nginx:/var/log/nginx
31 |
--------------------------------------------------------------------------------
/celery [python 分布式 dc]/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | redis-serv:
5 | image: redis:4-alpine
6 | ports:
7 | - 6379:6379
8 | volumes:
9 | - ./data/redis:/data
10 | command: ["redis-server", "--requirepass", "celery_redis_pwd"]
11 | flower: ## web ui
12 | image: generals/celery-producer
13 | hostname: producer
14 | ports:
15 | - 8080:8080
16 | ## using environment variables in command requires the string form, not the array form
17 | command: celery flower --port=8080 --broker=redis://:celery_redis_pwd@redis-serv:6379/0
18 | producer-node:
19 | image: generals/celery-producer
20 | hostname: producer
21 | volumes:
22 | - ./project:/project
23 | command: ["python", "producer.py"]
24 | worker-node:
25 | image: generals/celery-producer
26 | hostname: worker
27 | volumes:
28 | - ./project:/project
29 | ## command: ["python", "worker.py"]
30 | command: ["celery", "-A", "tasks.task", "worker"]
31 |
--------------------------------------------------------------------------------
/harbor/kuber/03-redis.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: redis-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 6379
11 | targetPort: 6379
12 | selector:
13 | name: redis-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: redis
19 | namespace: harbor
20 | labels:
21 | name: redis
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: redis-apps
28 | spec:
29 | containers:
30 | - name: redis-app
31 | image: goharbor/redis-photon:v1.8.2
32 | imagePullPolicy: IfNotPresent
33 | volumeMounts:
34 | - name: nfs-data-vol
35 | mountPath: /var/lib/redis
36 | volumes:
37 | - name: nfs-data-vol
38 | nfs:
39 | server: 192.168.7.14
40 | path: /mnt/nfsvol/harbor/redis
41 |
--------------------------------------------------------------------------------
/harbor/config/registry/config.yml:
--------------------------------------------------------------------------------
1 | version: 0.1
2 | log:
3 | level: info
4 | fields:
5 | service: registry
6 | storage:
7 | cache:
8 | layerinfo: redis
9 | filesystem:
10 | rootdirectory: /storage
11 | maintenance:
12 | uploadpurging:
13 | enabled: false
14 | delete:
15 | enabled: true
16 | redis:
17 | addr: redis-svc:6379
18 | password:
19 | db: 1
20 | http:
21 | addr: :5000
22 | secret: placeholder
23 | debug:
24 | addr: localhost:5001
25 | auth:
26 | token:
27 | issuer: harbor-token-issuer
28 | realm: https://harbor.generals.space/service/token
29 | rootcertbundle: /etc/registry/root.crt
30 | service: harbor-registry
31 | validation:
32 | disabled: true
33 | notifications:
34 | endpoints:
35 | - name: harbor
36 | disabled: false
37 | url: http://core-svc:8080/service/notifications
38 | timeout: 3000ms
39 | threshold: 5
40 | backoff: 1s
41 | compatibility:
42 | schema1:
43 | enabled: true
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/01.svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: kibana
6 | labels:
7 | app: kibana
8 | spec:
9 | ports:
10 | - port: 5601
11 | name: webui
12 | targetPort: 5601
13 | nodePort: 30601
14 | selector:
15 | ## Note: the service selector must point at
16 | ## Deployment -> spec -> template -> labels,
17 | ## not Deployment -> metadata -> labels.
18 | ## In other words, both Service and Deployment ultimately target Pod resources.
19 | app: kibana
20 | type: NodePort
21 |
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: nginx
27 | labels:
28 | app: nginx
29 | spec:
30 | ports:
31 | - port: 8080
32 | name: nginx
33 | targetPort: 8080
34 | nodePort: 31080
35 | selector:
15 | ## Note: the service selector must point at
16 | ## Deployment -> spec -> template -> labels,
17 | ## not Deployment -> metadata -> labels.
18 | ## In other words, both Service and Deployment ultimately target Pod resources.
40 | app: nginx
41 | type: NodePort
42 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/01.svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: kibana
6 | labels:
7 | app: kibana
8 | spec:
9 | ports:
10 | - port: 5601
11 | name: webui
12 | targetPort: 5601
13 | nodePort: 30601
14 | selector:
15 | ## Note: the service selector must point at
16 | ## Deployment -> spec -> template -> labels,
17 | ## not Deployment -> metadata -> labels.
18 | ## In other words, both Service and Deployment ultimately target Pod resources.
19 | app: kibana
20 | type: NodePort
21 |
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: nginx
27 | labels:
28 | app: nginx
29 | spec:
30 | ports:
31 | - port: 8080
32 | name: nginx
33 | targetPort: 8080
34 | nodePort: 31080
35 | selector:
15 | ## Note: the service selector must point at
16 | ## Deployment -> spec -> template -> labels,
17 | ## not Deployment -> metadata -> labels.
18 | ## In other words, both Service and Deployment ultimately target Pod resources.
40 | app: nginx
41 | type: NodePort
42 |
--------------------------------------------------------------------------------
/elk [dc]/10.elk-filebeat-grok-pipeline/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | 1. [ELK + Filebeat 搭建日志系统](http://beckjin.com/2017/12/10/elk/)
6 | 2. [Plugins Inputs Beats](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html)
7 |
8 | filebeat only collects, while logstash can also pre-process data besides collecting it, e.g. adding hostname and IP fields for logs from different hosts, adding the collection time, and so on, which makes querying easier once the data is in es.
9 |
10 | Since es 5.x, part of logstash's pre-processing capability is built into es itself (ingest pipelines), so a lightweight collector like filebeat can replace logstash.
11 |
12 | After starting the cluster with `docker-compose up -d`, log in to kibana and issue the following request in `Dev Tools` to create the `pipeline`.
13 |
14 | ```json
15 | PUT _ingest/pipeline/nginx-log
16 | {
17 | "description" : "用 grok 插件处理 nignx 日志",
18 | "processors": [
19 | {
20 | "grok": {
21 | "field": "message",
22 | "patterns": ["%{IP:client} - - \[%{HTTPDATE:timestamp}\] \"%{WORD:method} %{URIPATHPARAM:uri} HTTP/%{NUMBER:httpversion}\" %{NUMBER:status} %{NUMBER:bytes} \"-\" \"%{GREEDYDATA:agent}\""]
23 | }
24 | }
25 | ]
26 | }
27 | ```
28 |
29 | The `pipeline` above matches the `filter.grok` section of the logstash config in example 09.
30 |
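31 | The same request can also be sent without Kibana, e.g. with curl against the es port mapped in this example's `docker-compose.yaml` (note that inside a JSON string the grok backslashes must be doubled):
32 |
33 | ```
34 | curl -X PUT 'http://localhost:9200/_ingest/pipeline/nginx-log' \
35 |      -H 'Content-Type: application/json' \
36 |      -d '{"description":"process nginx logs with the grok plugin","processors":[{"grok":{"field":"message","patterns":["%{IP:client} - - \\[%{HTTPDATE:timestamp}\\] \"%{WORD:method} %{URIPATHPARAM:uri} HTTP/%{NUMBER:httpversion}\" %{NUMBER:status} %{NUMBER:bytes} \"-\" \"%{GREEDYDATA:agent}\""]}}]}'
37 | ## verify the pipeline exists
38 | curl 'http://localhost:9200/_ingest/pipeline/nginx-log'
39 | ```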
--------------------------------------------------------------------------------
/prometheus/config/blackbox-exporter.yaml:
--------------------------------------------------------------------------------
1 | ## modules lists all available probe types.
2 | modules:
3 | http:
4 | prober: http
5 | http:
6 | method: GET
7 | # the two parameters below are required; they enable checking the http version and the http status code
8 | valid_http_versions: ["HTTP/1.1", "HTTP/2"]
9 | valid_status_codes: [200]
10 | http_2xx:
11 | prober: http
12 | http_post_2xx:
13 | prober: http
14 | http:
15 | method: POST
16 | tcp_connect:
17 | prober: tcp
18 | pop3s_banner:
19 | prober: tcp
20 | tcp:
21 | query_response:
22 | - expect: "^+OK"
23 | tls: true
24 | tls_config:
25 | insecure_skip_verify: false
26 | ssh_banner:
27 | prober: tcp
28 | tcp:
29 | query_response:
30 | - expect: "^SSH-2.0-"
31 | irc_banner:
32 | prober: tcp
33 | tcp:
34 | query_response:
35 | - send: "NICK prober"
36 | - send: "USER prober prober prober :prober"
37 | - expect: "PING :([^ ]+)"
38 | send: "PONG ${1}"
39 | - expect: "^:[^ ]+ 001"
40 | icmp:
41 | prober: icmp
42 |
--------------------------------------------------------------------------------
/elk [dc]/05.elk-filebeat-kafka-logpilot/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | 1. [BEATS 轻量型数据采集器](https://www.elastic.co/cn/products/beats)
6 | - the various kinds of beats components
7 | 2. [log-pilot](https://github.com/AliyunContainerService/log-pilot)
8 |
9 | The official site lists 7 kinds of beat components for collecting different kinds of logs. In containerized deployments, however, the container requires the process to run in the foreground, so logs usually go to stdout.
10 |
11 | None of those 7 beat components can collect stdout logs, so for a while we wrote the CMD in the dockerfile as `tail -f /etc/profile` and ran the project process as a daemon.
12 |
13 | That brings another problem: linux has an OOM mechanism that force-kills the process using the most resources. When the process runs in the foreground in docker with pid 1, being killed is equivalent to the container being stopped, so the restart policy kicks in and the service does not stay down. With a `tail -f` CMD, killing the project process does not affect docker itself, so you can end up with the container still running but the project process gone.
14 |
15 | Aliyun's log-pilot tool can handle this situation; the main purpose of this experiment is to verify its features.
16 |
17 | log-pilot's biggest feature is that it can collect stdout logs...???
18 |
19 | Pay close attention to the `aliyun.logs.XXX` label configuration: `XXX` is both the kafka topic name (the same name must be given in logstash's input section) and the es index name, so if you send directly to es remember to match it there as well (see the sketch at the end of this doc).
20 |
21 | nginx-01 and nginx-02 verify log-pilot's collection of log files, while nginx-03 verifies its collection of stdout logs (nginx-03 mounts no shared directory).
22 |
23 | Since docker's stdout logs are in fact also written to files under `/var/lib/docker/containers/xxx` on the host, and log-pilot has access to docker.sock, my guess is that log-pilot merely works out where the stdout log files live, and the rest of the processing is no different from ordinary log files.
24 |
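25 | A sketch of the naming rule above (the `aliyun.logs.<name>=<stdout|path>` label format is an assumption taken from the log-pilot docs; `nginx-log` is just an example name that has to match the logstash input topic / es index):
26 |
27 | ```
28 | ## stdout collection, like nginx-03 in this example
29 | docker run -d --label aliyun.logs.nginx-log=stdout nginx:1.12.0
30 | ## log-file collection, like nginx-01/nginx-02 (the path is inside the container)
31 | docker run -d --label aliyun.logs.nginx-log=/var/log/nginx/access_json.log nginx:1.12.0
32 | ```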
--------------------------------------------------------------------------------
/harbor/kuber/02-db.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: harbor-db-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 5432
11 | targetPort: 5432
12 | selector:
13 | name: harbor-db-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: harbor-db
19 | namespace: harbor
20 | labels:
21 | name: harbor-db
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: harbor-db-apps
28 | spec:
29 | containers:
30 | - name: harbor-db-app
31 | image: goharbor/harbor-db:v1.8.2
32 | imagePullPolicy: IfNotPresent
33 | envFrom:
34 | - configMapRef:
35 | name: harbor-db-env
36 | volumeMounts:
37 | - name: nfs-data-vol
38 | mountPath: /var/lib/postgresql/data
39 | volumes:
40 | - name: nfs-data-vol
41 | nfs:
42 | server: 192.168.7.14
43 | path: /mnt/nfsvol/harbor/database
44 |
--------------------------------------------------------------------------------
/ansible [dc]/readme.md:
--------------------------------------------------------------------------------
1 | On the master you can run `ansible all|web|db -m ping` to see the output for each group.
2 |
3 | ```
4 | [root@1052c369ff71 ssh]# ansible web -m ping
5 | slave-02 | SUCCESS => {
6 | "ansible_facts": {
7 | "discovered_interpreter_python": "/usr/bin/python"
8 | },
9 | "changed": false,
10 | "ping": "pong"
11 | }
12 | slave-01 | SUCCESS => {
13 | "ansible_facts": {
14 | "discovered_interpreter_python": "/usr/bin/python"
15 | },
16 | "changed": false,
17 | "ping": "pong"
18 | }
19 | slave-03 | SUCCESS => {
20 | "ansible_facts": {
21 | "discovered_interpreter_python": "/usr/bin/python"
22 | },
23 | "changed": false,
24 | "ping": "pong"
25 | }
26 | ```
27 |
28 | ```
29 | # ansible db -m ping
30 | slave-05 | SUCCESS => {
31 | "ansible_facts": {
32 | "discovered_interpreter_python": "/usr/bin/python"
33 | },
34 | "changed": false,
35 | "ping": "pong"
36 | }
37 | slave-04 | SUCCESS => {
38 | "ansible_facts": {
39 | "discovered_interpreter_python": "/usr/bin/python"
40 | },
41 | "changed": false,
42 | "ping": "pong"
43 | }
44 | ```
45 |
--------------------------------------------------------------------------------
/harbor/kuber/10-ing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: harbor-ing
5 | namespace: harbor
6 | annotations:
7 | nginx.ingress.kubernetes.io/proxy-body-size: "0"
8 | spec:
9 | tls:
10 | - hosts:
11 | - harbor.generals.space
12 | secretName: https-certs
13 | rules:
14 | - host: harbor.generals.space
15 | http:
16 | paths:
17 | - path: /
18 | backend:
19 | serviceName: portal-svc
20 | servicePort: 80
21 | - path: /c/
22 | backend:
23 | serviceName: core-svc
24 | servicePort: 8080
25 | - path: /api/
26 | backend:
27 | serviceName: core-svc
28 | servicePort: 8080
29 | - path: /chartrepo/
30 | backend:
31 | serviceName: core-svc
32 | servicePort: 8080
33 | - path: /v2/
34 | backend:
35 | serviceName: core-svc
36 | servicePort: 8080
37 | - path: /service/
38 | backend:
39 | serviceName: core-svc
40 | servicePort: 8080
41 |
--------------------------------------------------------------------------------
/prometheus/readme.md:
--------------------------------------------------------------------------------
1 | # prometheus
2 |
3 | ## 1.
4 |
5 | The secret resource has to be created before the deploy, otherwise the deploy stays pending forever.
6 |
7 | With a standalone etcd cluster, the following command can be run directly.
8 |
9 | ```
10 | kubectl -n monitoring create secret generic etcd-certs --from-file=/etc/etcd/ssl/ca.crt --from-file=/etc/etcd/ssl/server.crt --from-file=/etc/etcd/ssl/server.key
11 | ```
12 |
13 | For an etcd built into the cluster, all the key material etcd needs lives under `/etc/kubernetes/pki/etcd`, so the following command can be used.
14 |
15 | ```
16 | kubectl -n monitoring create secret generic etcd-certs --from-file=/etc/kubernetes/pki/etcd
17 | ```
18 |
19 | ## 2.
20 |
21 | Edit `kube-controller-manager.yaml` and `kube-scheduler.yaml` under `/etc/kubernetes/manifests/`, changing `--address=127.0.0.1` to `--address=0.0.0.0` (see the sed sketch at the end of this doc).
22 |
23 | Note: the `controller manager` and `scheduler` configs must be changed on every master node.
24 |
25 | Then service resources still have to be created for them so that prometheus can reach them.
26 |
27 | ## 3.
28 |
29 | Once deployed, access it as `IP:port` and you should get the following pages.
30 |
31 | 
32 |
33 | 
34 |
35 | `grafana` can be logged into with the default `admin/admin` credentials; prometheus requires no login.
36 |
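37 | For step 2, a sketch of the edit (run on every master node; the paths are the kubeadm defaults mentioned above):
38 |
39 | ```
40 | sed -i 's/--address=127.0.0.1/--address=0.0.0.0/' \
41 |     /etc/kubernetes/manifests/kube-controller-manager.yaml \
42 |     /etc/kubernetes/manifests/kube-scheduler.yaml
43 | ## the kubelet notices the manifest change and restarts the static pods automatically
44 | ```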
--------------------------------------------------------------------------------
/elk [dc]/01.elk-single/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | elasticsearch:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
11 | ELASTIC_PASSWORD: 123456
12 | volumes:
13 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
14 | logstash:
15 | image: logstash:7.2.0
16 | ports:
17 | - 5000:5000
18 | - 9600:9600
19 | environment:
20 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
21 | volumes:
22 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
23 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
24 | - ./data/logs/nginx:/var/log/nginx
25 | kibana:
26 | image: kibana:7.2.0
27 | ports:
28 | - 5601:5601
29 | volumes:
30 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
31 | nginx:
32 | image: nginx:1.12.0
33 | ports:
34 | - 9001:8080
35 | volumes:
36 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf:ro
37 | - ./data/logs/nginx:/var/log/nginx
38 |
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/cm/es/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | ## the Pod name generated by the sts
3 | node.name: ${POD_NAME}
4 | network.host: 0.0.0.0
5 | ## network.publish_host: ${POD_NAME}.${SVC_NAME}
6 | ## network.publish_host: ${POD_IP}
7 | ## port that serves client requests
8 | http.port: 9200
9 | ## port used for intra-cluster communication with other nodes
10 | transport.tcp.port: 9300
11 | ## path.data: /home/elasticsearch
12 | bootstrap.memory_lock: false
13 | bootstrap.system_call_filter: false
14 | ## the array members here are the node.name values of each node.
15 | cluster.initial_master_nodes:
16 | - ${CLUSTER_NAME}-0
17 | - ${CLUSTER_NAME}-1
18 | - ${CLUSTER_NAME}-2
19 | ## candidate addresses this node will contact; the port may be omitted and defaults to 9300.
20 | discovery.seed_hosts:
21 | - ${CLUSTER_NAME}-0.${SVC_NAME}
22 | - ${CLUSTER_NAME}-1.${SVC_NAME}
23 | - ${CLUSTER_NAME}-2.${SVC_NAME}
24 |
25 | ## this setting enables the xpack security mechanism
26 | xpack.security.enabled: true
27 | xpack.security.transport.ssl.enabled: true
28 | xpack.security.transport.ssl.verification_mode: certificate
29 | xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
30 | xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
31 |
--------------------------------------------------------------------------------
/rancher/RKE/readme.md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [General Linux Requirements](https://rancher.com/docs/rke/latest/en/os/#general-linux-requirements)
4 |
5 | rke version: 1.0.0
6 |
7 | `cluster.yml` in this directory is the config file needed by `rke up`; once the dependencies are installed on every node, the installation can be started from this file. `cluster_full.yml` was generated interactively with `rke config` (the interaction is recorded in `rke_config.txt`). `cluster_full.yml` only has the required fields filled in and leaves a lot empty, so it is not well suited for backup and reuse.
8 |
9 | Each node can take 3 kinds of roles:
10 |
11 | 1. control plane
12 | 2. etcd
13 | 3. worker
14 |
15 | A single-node setup needs all three.
16 |
17 | rke requires a non-root user for deploying to the nodes, otherwise ssh fails; see reference 1.
18 |
19 | ```
20 | WARN[0000] Failed to set up SSH tunneling for host [192.168.0.211]: Can't retrieve Docker Info: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
21 | ...omitted
22 | FATA[0000] Cluster must have at least one etcd plane host: failed to connect to the following etcd host(s) [192.168.0.211]
23 | ```
24 |
25 | ```
26 | useradd ubuntu
27 | usermod -aG docker ubuntu
28 | ```
29 |
30 | `rke up` generates 2 extra files:
31 |
32 | 1. `kube_config_cluster`: the `kubectl` config file.
33 | 2. `cluster.rkestate`: contains the entire cluster configuration, including certificates and keys for every component, so it is much richer than the former.
34 |
35 | ## Resetting the cluster
36 |
37 | `rke` has a `remove` command; just run `rke remove` in the directory containing `cluster.yml`.
38 |
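39 | The whole lifecycle used in this directory, as a sketch (all three commands are part of the rke CLI mentioned above; how `cluster_full.yml` gets its name is this repo's convention):
40 |
41 | ```
42 | rke config    ## interactively generate a config (saved here as cluster_full.yml)
43 | rke up        ## provision the cluster from cluster.yml in the current directory
44 | rke remove    ## tear the cluster back down
45 | ```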
--------------------------------------------------------------------------------
/harbor/config/jobservice/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #Protocol used to serve
3 | protocol: "http"
4 |
5 | #Config certification if use 'https' protocol
6 | #https_config:
7 | # cert: "server.crt"
8 | # key: "server.key"
9 |
10 | #Server listening port
11 | port: 8080
12 |
13 | #Worker pool
14 | worker_pool:
15 | #Worker concurrency
16 | workers: 10
17 | backend: "redis"
18 | #Additional config if use 'redis' backend
19 | redis_pool:
20 | #redis://[arbitrary_username:password@]ipaddress:port/database_index
21 | redis_url: redis://redis-svc:6379/2
22 | namespace: "harbor_job_service_namespace"
23 | #Loggers for the running job
24 | job_loggers:
25 | - name: "STD_OUTPUT" # logger backend name, only support "FILE" and "STD_OUTPUT"
26 | level: "INFO" # INFO/DEBUG/WARNING/ERROR/FATAL
27 | - name: "FILE"
28 | level: "INFO"
29 | settings: # Customized settings of logger
30 | base_dir: "/var/log/jobs"
31 | sweeper:
32 | duration: 1 #days
33 | settings: # Customized settings of sweeper
34 | work_dir: "/var/log/jobs"
35 |
36 | #Loggers for the job service
37 | loggers:
38 | - name: "STD_OUTPUT" # Same with above
39 | level: "INFO"
--------------------------------------------------------------------------------
/harbor/kuber/04-registry.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: registry-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 5000
11 | targetPort: 5000
12 | selector:
13 | name: registry-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: registry
19 | namespace: harbor
20 | labels:
21 | name: registry
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: registry-apps
28 | spec:
29 | containers:
30 | - name: registry-app
31 | image: goharbor/registry-photon:v2.7.1-patch-2819-v1.8.2
32 | imagePullPolicy: IfNotPresent
33 | volumeMounts:
34 | - name: nfs-registry-vol
35 | mountPath: /storage
36 | - name: registry-cfg-vol
37 | mountPath: /etc/registry
38 | volumes:
39 | - name: registry-cfg-vol
40 | configMap:
41 | name: registry-cfg-map
42 | - name: nfs-registry-vol
43 | nfs:
44 | server: 192.168.7.14
45 | path: /mnt/nfsvol/harbor/registry
46 |
--------------------------------------------------------------------------------
/harbor/kuber/01-cmd.sh:
--------------------------------------------------------------------------------
1 | kubectl create ns harbor
2 |
3 | kubectl create configmap harbor-db-env -n harbor --from-env-file=../config/db/env
4 | kubectl create configmap registryctl-env -n harbor --from-env-file=../config/registryctl/env
5 | kubectl create configmap core-env -n harbor --from-env-file=../config/core/env
6 | kubectl create configmap jobservice-env -n harbor --from-env-file=../config/jobservice/env
7 |
8 | kubectl create configmap registry-cfg-map -n harbor --from-file=../config/registry
9 | kubectl create configmap registryctl-cfg-map -n harbor --from-file=../config/registryctl
10 | kubectl create configmap core-cfg-map -n harbor --from-file=../config/core
11 | kubectl create configmap jobservice-cfg-map -n harbor --from-file=../config/jobservice
12 |
13 | kubectl create configmap proxy-crt-map -n harbor --from-file=../config/nginx
14 | cp ../config/nginx/nginx.conf /mnt/nfsvol/harbor/proxy_nginx/
15 | chown nfsnobody:nfsnobody /mnt/nfsvol/harbor/proxy_nginx/nginx.conf
16 |
17 | ## kubectl create configmap proxy-cfg-map -n harbor --from-file=../config/nginx/nginx.conf
18 |
19 | ## ingress
20 |
21 | kubectl create secret tls https-certs -n harbor --cert=../config/nginx/server.crt --key=../config/nginx/server.key
22 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/readme.md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [6. ELK advanced -- elasticsearch-head-master](https://www.jianshu.com/p/83d12b0ca4c0)
4 | 2. [Installing and using the elasticsearch-head-master plugin](https://blog.csdn.net/tripleDemo/article/details/100998309)
5 | 3. [Installing the Elasticsearch-head plugin](https://www.jianshu.com/p/c2b5d4590c3e)
6 | - `http://172.20.1.187:9100/?auth_user=elastic&auth_password=123456`
7 | 4. [when elasticsearch x-pack is enabled head can't connnect](https://github.com/mobz/elasticsearch-head/issues/304)
8 | - `boroborome`'s answer was a real life-saver...
9 |
10 | Compared with the previous examples, this one adds the [mobz/elasticsearch-head](https://github.com/mobz/elasticsearch-head) project, together with its own Service (of type `NodePort`).
11 |
12 | Also, since we need to enter the es cluster's address in the head webUI to monitor and manage it, we add one more externally reachable NodePort service for the es cluster, named `es-cluster-public` (a `headless service` cannot also be of type `NodePort`, so a separate one has to be created).
13 |
14 | Note that when xpack security is enabled on es, the way head connects to es is rather peculiar...
15 |
16 | 
17 |
18 | When opening head at xxx:9100, append the target es cluster's username and password as URL parameters; then, when filling in the es cluster's address in the input box of the page that opens, no username or password is needed anymore... (see the example at the end of this file).
19 |
20 | The authentication parameters in the address bar are `auth_user` and `auth_password`.
21 |
22 | In fact head is just a stopgap product: from 6.x on, kibana apparently offers more advanced and better-looking monitoring and management, and head has faded away accordingly.
23 |
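24 | For reference, with the NodePort values defined in `01.svc.yaml` of this directory (head on 30910, `es-cluster-public` on 30920), the access pattern looks like this (the node IP is a placeholder, and `elastic/123456` is the credential pair used throughout these examples):
25 |
26 | ```
27 | ## open head in the browser, passing the es credentials as URL parameters
28 | http://<node-ip>:30910/?auth_user=elastic&auth_password=123456
29 | ## the address to fill into head's input box is the es cluster's NodePort service
30 | http://<node-ip>:30920/
31 | ## the same service can be checked directly with curl
32 | curl -u elastic:123456 http://<node-ip>:30920/_cat/nodes
33 | ```
34 |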
--------------------------------------------------------------------------------
/prometheus/03.grafana.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: grafana
6 | namespace: monitoring
7 | labels:
8 | app: grafana
9 | spec:
10 | selector:
11 | name: grafana
12 | ports:
13 | - name: http
14 | protocol: TCP
15 | port: 3000
16 | nodePort: 30300
17 | type: NodePort
18 | ---
19 | apiVersion: apps/v1
20 | kind: Deployment
21 | metadata:
22 | name: grafana
23 | namespace: monitoring
24 | spec:
25 | replicas: 1
26 | selector:
27 | matchLabels:
28 | name: grafana
29 | template:
30 | metadata:
31 | labels:
32 | name: grafana
33 | spec:
34 | containers:
35 | - name: grafana
36 | image: grafana/grafana:7.1.5
37 | env:
38 | - name: GF_AUTH_BASIC_ENABLED
39 | value: "false"
40 | - name: GF_AUTH_ANONYMOUS_ENABLED
41 | value: "true"
42 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE
43 | value: Viewer
44 | ports:
45 | - name: http
46 | containerPort: 3000
47 | resources:
48 | requests:
49 | memory: 100Mi
50 | cpu: 100m
51 | limits:
52 | memory: 200Mi
53 | cpu: 200m
54 |
--------------------------------------------------------------------------------
/harbor/config/nginx/server.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDUzCCAjugAwIBAgIJAItxJfrOWb4rMA0GCSqGSIb3DQEBCwUAMEAxHjAcBgNV
3 | BAMMFWhhcmJvci5nZW5lcmFscy5zcGFjZTEeMBwGA1UECgwVaGFyYm9yLmdlbmVy
4 | YWxzLnNwYWNlMB4XDTE5MDkwNDAyNDIxNVoXDTIwMDkwMzAyNDIxNVowQDEeMBwG
5 | A1UEAwwVaGFyYm9yLmdlbmVyYWxzLnNwYWNlMR4wHAYDVQQKDBVoYXJib3IuZ2Vu
6 | ZXJhbHMuc3BhY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1qPAr
7 | yL7YZ10nbKdZVLCfXyp4XUIGBtT/SGsng6DIFKpGrQt8wEZESEYySImw6SeNsQHX
8 | AY81CTSI09wJLSlprMcI3IQ8Niyif3lbcSVbPmqHCxteeRynx9niDABtwrZypiQp
9 | 0HMs4iI/OD7kf/BxlcHDz1hyF9FiSkiWssz8+a0GAJyhPbkLlDXyIDmpP5fsJb5t
10 | cGa3HRAc7HMpqpLqyniaNR23XqEpuOZ62/0/NciolGU7EyItodLRvFNfEDr7dYmy
11 | QTw+XZrA8QySXKdCIEg1Xpk17qLUwLlAcTsio4ZLvmAmIu2ujd5TNHlwKykNL+2j
12 | GIi20WK17QoEcrl3AgMBAAGjUDBOMB0GA1UdDgQWBBSAn7+ikedyG22H5ImZg/15
13 | vrnZvjAfBgNVHSMEGDAWgBSAn7+ikedyG22H5ImZg/15vrnZvjAMBgNVHRMEBTAD
14 | AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA1oNqlL136evGpEDv+OoZ3JPhIob9h7ctb
15 | GYo5eDckCHx7u/9w8m1TZHQTHZ4g9XcELcD4opvRCz9oHVL/YINyaaZ6fVshHEiK
16 | 8ZE6PAZN9dYsAzqv83+907Kg368B7jbBU+F2/P+M09pfRwyhDr+2twu7kqaa3cOx
17 | KmYsmJLwSopa1SSjtuZOOpIt/t3bYCaLLKyKTheg+owofhskefNxQ0ccQxfSsEyA
18 | n+4V9lTE7ECe/eIPnMqXbXwlEaiVXPUkCd/yGfj319GlDlyr4XzRSciswor5fDSP
19 | NJQhzPelr3tJ1c8gAwoZLmM8vfhFolrs90DlPzPl3duekh7CPpqs
20 | -----END CERTIFICATE-----
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/cm/es/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: elasticsearch
2 | ## the Pod name generated by the sts
3 | node.name: ${POD_NAME}
4 | network.host: 0.0.0.0
5 | ## network.publish_host: ${POD_NAME}.${SVC_NAME}
6 | ## network.publish_host: ${POD_IP}
7 | ## the port serving client traffic
8 | http.port: 9200
9 | ## the port for traffic between nodes inside the cluster
10 | transport.tcp.port: 9300
11 | ## path.data: /home/elasticsearch
12 | bootstrap.memory_lock: false
13 | bootstrap.system_call_filter: false
14 | ## the array members here are the node.name values of the nodes.
15 | cluster.initial_master_nodes:
16 | - ${CLUSTER_NAME}-0
17 | - ${CLUSTER_NAME}-1
18 | - ${CLUSTER_NAME}-2
19 | ## candidate addresses this node will contact for discovery; the port may be omitted and defaults to 9300.
20 | discovery.seed_hosts:
21 | - ${CLUSTER_NAME}-0.${SVC_NAME}
22 | - ${CLUSTER_NAME}-1.${SVC_NAME}
23 | - ${CLUSTER_NAME}-2.${SVC_NAME}
24 |
25 | ## this setting enables the xpack security (authentication) mechanism
26 | xpack.security.enabled: true
27 | xpack.security.transport.ssl.enabled: true
28 | xpack.security.transport.ssl.verification_mode: certificate
29 | xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
30 | xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
31 |
32 | ## the settings below let the head project fetch es info (CORS)
33 | http.cors.enabled: true
34 | http.cors.allow-origin: "*"
35 | http.cors.allow-headers: Authorization
36 |
--------------------------------------------------------------------------------
/harbor/kuber/07-jobservice.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: jobservice-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 8080
11 | targetPort: 8080
12 | selector:
13 | name: jobservice-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: jobservice
19 | namespace: harbor
20 | labels:
21 | name: jobservice
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: jobservice-apps
28 | spec:
29 | containers:
30 | - name: jobservice-app
31 | image: goharbor/harbor-jobservice:v1.8.2
32 | imagePullPolicy: IfNotPresent
33 | envFrom:
34 | - configMapRef:
35 | name: jobservice-env
36 |
37 | volumeMounts:
38 | - name: jobservice-cfg-vol
39 | mountPath: /etc/jobservice
40 | - name: nfs-jobservice-vol
41 | mountPath: /var/log/jobs
42 |
43 | volumes:
44 | - name: jobservice-cfg-vol
45 | configMap:
46 | name: jobservice-cfg-map
47 | - name: nfs-jobservice-vol
48 | nfs:
49 | server: 192.168.7.14
50 | path: /mnt/nfsvol/harbor/job_logs
51 |
--------------------------------------------------------------------------------
/prometheus/kube-state-metrics/02.deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.7
7 | name: kube-state-metrics
8 | namespace: kube-system
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app.kubernetes.io/name: kube-state-metrics
14 | template:
15 | metadata:
16 | labels:
17 | app.kubernetes.io/name: kube-state-metrics
18 | app.kubernetes.io/version: 1.9.7
19 | spec:
20 | containers:
21 | - image: quay.io/coreos/kube-state-metrics:v1.9.7
22 | livenessProbe:
23 | httpGet:
24 | path: /healthz
25 | port: 8080
26 | initialDelaySeconds: 5
27 | timeoutSeconds: 5
28 | name: kube-state-metrics
29 | ports:
30 | - containerPort: 8080
31 | name: http-metrics
32 | - containerPort: 8081
33 | name: telemetry
34 | readinessProbe:
35 | httpGet:
36 | path: /
37 | port: 8081
38 | initialDelaySeconds: 5
39 | timeoutSeconds: 5
40 | securityContext:
41 | runAsUser: 65534
42 | nodeSelector:
43 | kubernetes.io/os: linux
44 | serviceAccountName: kube-state-metrics
45 |
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/readme.md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [Hands-on | A full picture of the free security features in Elasticsearch 7.1](https://blog.csdn.net/laoyang360/article/details/90554761)
4 | - the evolution of es security mechanisms
5 | 2. [<13> ELK study notes -- securing elasticsearch 7.x with xpack](http://www.eryajf.net/3500.html)
6 | - hands-on examples of enabling security in single-node and cluster environments.
7 |
8 | Authentication between cluster nodes is implemented with a certificate; the certificate settings do not conflict with the username/password settings.
9 |
10 | For both commands below, just press Enter through every prompt; there is no need to put an extra password on the key.
11 |
12 | ```
13 | /usr/share/elasticsearch/bin/elasticsearch-certutil ca
14 | /usr/share/elasticsearch/bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
15 | ```
16 |
17 | By default the files `elastic-stack-ca.p12` and `elastic-certificates.p12` are generated under `/usr/share/elasticsearch/`. The `--out` option can be used to choose the output path, e.g.
18 |
19 | ```
20 | /usr/share/elasticsearch/bin/elasticsearch-certutil ca --out /tmp/xxx-ca.p12
21 | ```
22 |
23 | In fact only `elastic-certificates.p12` is used in the configuration later on; `elastic-stack-ca.p12` is not needed, so only step 2 of the commands above is really required and the ca file does not have to be generated. (A sketch of getting the file into the cluster is given at the end of this file.)
24 |
25 | ```yaml
26 | ## this setting enables the xpack security (authentication) mechanism
27 | xpack.security.enabled: true
28 | xpack.security.transport.ssl.enabled: true
29 | xpack.security.transport.ssl.verification_mode: certificate
30 | xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
31 | xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
32 | ```
33 |
34 | The settings above can be configured together with the username/password settings, and they also take effect on a cluster that is already set up.
35 |
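36 | As a rough sketch of how the generated file ends up inside the cluster (the pod name `es-cluster-0` and the ConfigMap name `es-config` are only examples; `00.cmd.sh` in this directory holds the names actually used):
37 |
38 | ```
39 | ## generate the certificate inside a running es container
40 | kubectl exec -it es-cluster-0 -- /usr/share/elasticsearch/bin/elasticsearch-certutil cert --out /tmp/elastic-certificates.p12
41 | ## copy it out, then distribute it to every node through a ConfigMap
42 | kubectl cp es-cluster-0:/tmp/elastic-certificates.p12 cm/es/elastic-certificates.p12
43 | kubectl create configmap es-config --from-file=cm/es/
44 | ```
45 |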
--------------------------------------------------------------------------------
/harbor/config/core/env:
--------------------------------------------------------------------------------
1 | CONFIG_PATH=/etc/core/app.conf
2 | UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
3 | _REDIS_URL=redis-svc:6379,100,
4 | SYNC_REGISTRY=false
5 | CHART_CACHE_DRIVER=redis
6 | _REDIS_URL_REG=redis://redis-svc:6379/1
7 |
8 | PORT=8080
9 | LOG_LEVEL=info
10 | EXT_ENDPOINT=https://harbor.generals.space
11 | DATABASE_TYPE=postgresql
12 | POSTGRESQL_HOST=harbor-db-svc
13 | POSTGRESQL_PORT=5432
14 | POSTGRESQL_USERNAME=postgres
15 | POSTGRESQL_PASSWORD=123456
16 | POSTGRESQL_DATABASE=registry
17 | POSTGRESQL_SSLMODE=disable
18 | REGISTRY_URL=http://registry-svc:5000
19 | TOKEN_SERVICE_URL=http://core-svc:8080/service/token
20 | HARBOR_ADMIN_PASSWORD=123456
21 | MAX_JOB_WORKERS=10
22 | CORE_SECRET=ByKTSuHfWbQNX6rL
23 | JOBSERVICE_SECRET=avxVu42h9Ny5OhvY
24 | ADMIRAL_URL=
25 | WITH_NOTARY=False
26 | WITH_CLAIR=False
27 | CLAIR_DB_PASSWORD=123456
28 | CLAIR_DB_HOST=postgresql
29 | CLAIR_DB_PORT=5432
30 | CLAIR_DB_USERNAME=postgres
31 | CLAIR_DB=clair
32 | CLAIR_DB_SSLMODE=disable
33 | CORE_URL=http://core-svc:8080
34 | JOBSERVICE_URL=http://jobservice-svc:8080
35 | CLAIR_URL=http://clair:6060
36 | NOTARY_URL=http://notary-server:4443
37 | REGISTRY_STORAGE_PROVIDER_NAME=filesystem
38 | READ_ONLY=false
39 | RELOAD_KEY=
40 | CHART_REPOSITORY_URL=http://chartmuseum:9999
41 | REGISTRY_CONTROLLER_URL=http://registryctl-svc:8080
42 | WITH_CHARTMUSEUM=False
--------------------------------------------------------------------------------
/harbor/kuber/09-proxy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: proxy-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: http-port
10 | port: 80
11 | targetPort: 80
12 | - name: https-port
13 | port: 443
14 | targetPort: 443
15 | selector:
16 | name: proxy-apps
17 | ---
18 | apiVersion: extensions/v1beta1
19 | kind: Deployment
20 | metadata:
21 | name: proxy
22 | namespace: harbor
23 | labels:
24 | name: proxy
25 | spec:
26 | replicas: 1
27 | template:
28 | metadata:
29 | labels:
30 | name: proxy-apps
31 | spec:
32 | containers:
33 | - name: proxy-app
34 | image: goharbor/nginx-photon:v1.8.2
35 | imagePullPolicy: IfNotPresent
36 | volumeMounts:
37 | - name: proxy-crt-vol
38 | mountPath: /etc/cert
39 | ## this is actually the config-file directory, but the nginx process creates several temp dirs,
40 | ## and a volume mounted from a configmap is not writable, so an nfs mount is used here instead;
41 | ## note that the nginx.conf file must already exist in it.
42 | - name: nfs-nginx-vol
43 | mountPath: /etc/nginx
44 |
45 | volumes:
46 | - name: proxy-crt-vol
47 | configMap:
48 | name: proxy-crt-map
49 | - name: nfs-nginx-vol
50 | nfs:
51 | server: 192.168.7.14
52 | path: /mnt/nfsvol/harbor/proxy_nginx
53 |
--------------------------------------------------------------------------------
/prometheus/exporters/02-blackbox-exporter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: blackbox-exporter
6 | name: blackbox-exporter
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: blackbox
11 | port: 9115
12 | protocol: TCP
13 | nodePort: 30915
14 | selector:
15 | app: blackbox-exporter
16 | ## type: ClusterIP
17 | type: NodePort
18 | ---
19 | apiVersion: apps/v1
20 | kind: Deployment
21 | metadata:
22 | labels:
23 | app: blackbox-exporter
24 | name: blackbox-exporter
25 | namespace: monitoring
26 | spec:
27 | replicas: 1
28 | selector:
29 | matchLabels:
30 | app: blackbox-exporter
31 | template:
32 | metadata:
33 | labels:
34 | app: blackbox-exporter
35 | spec:
36 | volumes:
37 | - name: vol-config
38 | configMap:
39 | name: blackbox-exporter-config
40 | containers:
41 | - image: prom/blackbox-exporter:v0.17.0
42 | imagePullPolicy: IfNotPresent
43 | name: blackbox-exporter
44 | ## enable debugging
45 | ## command:
46 | ## - /bin/blackbox_exporter
47 | ## - --config.file=/etc/blackbox_exporter/config.yml
48 | ## - --log.level=debug
49 | volumeMounts:
50 | - name: vol-config
51 | mountPath: /etc/blackbox_exporter
52 |
--------------------------------------------------------------------------------
/redis-cluster[dc]/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | ## redis-cli -a 12345678 --cluster create --cluster-yes --cluster-replicas 1 \
4 | ## $(dig +short +search mcp-middleware-mcp-redis-6):6379 \
5 | ## $(dig +short +search mcp-middleware-mcp-redis-1):6379 \
6 | ## $(dig +short +search mcp-middleware-mcp-redis-2):6379 \
7 | ## $(dig +short +search mcp-middleware-mcp-redis-3):6379 \
8 | ## $(dig +short +search mcp-middleware-mcp-redis-4):6379 \
9 | ## $(dig +short +search mcp-middleware-mcp-redis-5):6379
10 |
11 | mcp-redis:
12 | image: registry.cn-hangzhou.aliyuncs.com/generals-space/redis:5.0.8.1
13 | restart: always
14 | deploy:
15 | mode: replicated
16 | replicas: 6
17 | networks:
18 | - mcp-bridge
19 | ## with multiple replicas the same host port cannot be mapped
20 | ## ports:
21 | ## - 6379:6379
22 | environment:
23 | - TZ=Asia/Shanghai
24 | - LANG=C.UTF-8
25 | command:
26 | - bash
27 | - -c
28 | - redis-server --requirepass 12345678 --cluster-enabled yes
29 | ## redis does not need a data volume; mounting one may cause problems when the cluster restarts.
30 | ## volumes:
31 | ## - /opt/data/redis:/data
32 |
33 | mcp-lb-nginx:
34 | image: nginx:1.12.0
35 | volumes:
36 | - ./config/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
37 | depends_on:
38 | - mcp-redis
39 | networks:
40 | - mcp-bridge
41 | ports:
42 | - 6379:6379
43 |
44 | networks:
45 | mcp-bridge:
46 | driver: bridge
47 |
--------------------------------------------------------------------------------
/elk [k8s kube]/logstash-配置[映射 环境变量].md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [Getting started with Logstash quickly](https://cloud.tencent.com/developer/article/1353068)
4 | 2. [Collecting TCP port logs with logstash](https://www.cnblogs.com/Dev0ps/p/9314551.html)
5 | 3. [Using Environment Variables in the Configuration](https://www.elastic.co/guide/en/logstash/current/environment-variables.html)
6 | - using environment variables in the config file, and how default values work
7 |
8 | ## Overview
9 |
10 | In an ELK stack, E and K are already quite fixed, with little room for customization. The log-collection layer, however, keeps getting tweaked and optimized: putting filebeat+kafka in front of logstash, or shipping logs straight into elasticsearch. So the ELK examples here mainly demonstrate different ways of collecting logs, plus kibana's query and aggregation syntax.
11 |
12 | ## logstash configuration explained
13 |
14 | There are several pipeline files under the `./logstash/pipeline` directory.
15 |
16 | ### `stdio.conf`
17 |
18 | Reads messages from TCP port 5000 as log events, presumably via the tcp input plugin. This is also why docker-compose maps port 5000 for the logstash service: it makes testing from the host convenient. Messages sent to this port are printed on the logstash container's stdout. (An alternative to telnet is noted at the end of this file.)
19 |
20 | ```
21 | $ telnet logstash 5000
22 | Trying 172.24.0.5...
23 | Connected to logstash.
24 | Escape character is '^]'.
25 | hello world
26 | good for u
27 | ^]
28 | telnet> quit
29 | Connection closed.
30 | ```
31 |
32 | ```
33 | {
34 | "@timestamp" => 2019-07-24T03:31:32.339Z,
35 | "message" => "hello world\r",
36 | "@version" => "1",
37 | "host" => "elk_nginx_1.elk_default",
38 | "port" => 35246
39 | }
40 | {
41 | "@timestamp" => 2019-07-24T03:31:35.493Z,
42 | "message" => "good for u\r",
43 | "@version" => "1",
44 | "host" => "elk_nginx_1.elk_default",
45 | "port" => 35246
46 | }
47 | ```
48 |
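49 | If telnet is not available, the same test can be done with `nc` (just a quick alternative; nothing in the example above depends on it):
50 |
51 | ```
52 | echo 'hello world' | nc logstash 5000
53 | ```
54 |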
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/03.deploy-kibana.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | ## the pods generated by this deploy are also named kibana-xxx
6 | name: kibana
7 | labels:
8 | app: kibana
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | ## this label matches template -> metadata -> labels below,
14 | ## expressing the ownership relation
15 | app: kibana
16 | template:
17 | metadata:
18 | labels:
19 | app: kibana
20 | spec:
21 | containers:
22 | - name: kibana
23 | image: kibana:7.2.0
24 | imagePullPolicy: IfNotPresent
25 | env:
26 | ## the es here refers to the Service name
27 | - name: ELASTICSEARCH_HOSTS
28 | value: http://es-cluster:9200
29 | ## username and password can be defined either in the config file or as environment variables,
30 | ## but with environment variables the password apparently cannot be 123456, even when double-quoted,
31 | ## so these two keys are commented out here and written in the config file instead...
32 | ## - name: ELASTICSEARCH_USERNAME
33 | ## value: "elastic"
34 | ## - name: ELASTICSEARCH_PASSWORD
35 | ## value: "123456"
36 | volumeMounts:
37 | - name: kibana-config-vol
38 | mountPath: /usr/share/kibana/config/kibana.yml
39 | subPath: kibana.yml
40 | volumes:
41 | - name: kibana-config-vol
42 | configMap:
43 | name: kibana-config
44 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/03.deploy-kibana.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | ## the pods generated by this deploy are also named kibana-xxx
6 | name: kibana
7 | labels:
8 | app: kibana
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | ## this label matches template -> metadata -> labels below,
14 | ## expressing the ownership relation
15 | app: kibana
16 | template:
17 | metadata:
18 | labels:
19 | app: kibana
20 | spec:
21 | containers:
22 | - name: kibana
23 | image: kibana:7.2.0
24 | imagePullPolicy: IfNotPresent
25 | env:
26 | ## the es here refers to the Service name
27 | - name: ELASTICSEARCH_HOSTS
28 | value: http://es-cluster:9200
29 | ## username and password can be defined either in the config file or as environment variables,
30 | ## but with environment variables the password apparently cannot be 123456, even when double-quoted,
31 | ## so these two keys are commented out here and written in the config file instead...
32 | ## - name: ELASTICSEARCH_USERNAME
33 | ## value: "elastic"
34 | ## - name: ELASTICSEARCH_PASSWORD
35 | ## value: "123456"
36 | volumeMounts:
37 | - name: kibana-config-vol
38 | mountPath: /usr/share/kibana/config/kibana.yml
39 | subPath: kibana.yml
40 | volumes:
41 | - name: kibana-config-vol
42 | configMap:
43 | name: kibana-config
44 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/03.deploy-kibana.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | ## the pods generated by this deploy are also named kibana-xxx
6 | name: kibana
7 | labels:
8 | app: kibana
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | ## this label matches template -> metadata -> labels below,
14 | ## expressing the ownership relation
15 | app: kibana
16 | template:
17 | metadata:
18 | labels:
19 | app: kibana
20 | spec:
21 | containers:
22 | - name: kibana
23 | image: kibana:7.2.0
24 | imagePullPolicy: IfNotPresent
25 | env:
26 | ## the es here refers to the Service name
27 | - name: ELASTICSEARCH_HOSTS
28 | value: http://es-cluster:9200
29 | ## username and password can be defined either in the config file or as environment variables,
30 | ## but with environment variables the password apparently cannot be 123456, even when double-quoted,
31 | ## so these two keys are commented out here and written in the config file instead...
32 | ## - name: ELASTICSEARCH_USERNAME
33 | ## value: "elastic"
34 | ## - name: ELASTICSEARCH_PASSWORD
35 | ## value: "123456"
36 | volumeMounts:
37 | - name: kibana-config-vol
38 | mountPath: /usr/share/kibana/config/kibana.yml
39 | subPath: kibana.yml
40 | volumes:
41 | - name: kibana-config-vol
42 | configMap:
43 | name: kibana-config
44 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/readme.md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [Hands-on | A full picture of the free security features in Elasticsearch 7.1](https://blog.csdn.net/laoyang360/article/details/90554761)
4 | - the evolution of es security mechanisms
5 | 2. [<13> ELK study notes -- securing elasticsearch 7.x with xpack](http://www.eryajf.net/3500.html)
6 | - hands-on examples of enabling security in single-node and cluster environments.
7 |
8 | The es setups created earlier have no password at all (one was written in the yaml, but it never took effect): neither `curl es:9200` nor opening the kibana webUI requires one, which is absolutely unacceptable in production.
9 |
10 | So here we try to add password authentication. The steps differ between single-node and cluster environments, so the two have to be treated separately.
11 |
12 | In fact, adding the following single line to the `es` config file `elasticsearch.yml` is enough.
13 |
14 | ```yaml
15 | ## this setting enables the xpack security (authentication) mechanism
16 | xpack.security.enabled: true
17 | ```
18 |
19 | Reference 2 says that `xpack.security.transport.ssl.enabled: true` also has to be added or es will not start, but in my tests it was not needed.
20 |
21 | After that, curl requests must carry a username and password.
22 |
23 | ```console
24 | $ curl es:9200/_cat/health
25 | {"error":{"root_cause":[{"type":"security_exception","reason":"missing authentication credentials for REST request [/_cat/health]","header":{"WWW-Authenticate":"Basic realm=\"security\" charset=\"UTF-8\""}}],"type":"security_exception","reason":"missing authentication credentials for REST request [/_cat/health]","header":{"WWW-Authenticate":"Basic realm=\"security\" charset=\"UTF-8\""}},"status":401}
26 |
27 | $ curl -u elastic:123456 es:9200/_cat/health
28 | 1592817746 09:22:26 elasticsearch green 1 1 2 2 0 0 0 0 - 100.0%
29 | ```
30 |
31 | Likewise, kibana now also requires a password.
32 |
33 | 
34 |
35 | > Note: at this point the passwords in the kibana and logstash config files become mandatory; without them those services fail to start.
36 |
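37 | For reference, once xpack security is on, the password of the built-in `elastic` user can also be changed through the security API (a hedged example; `<new-password>` is a placeholder):
38 |
39 | ```console
40 | $ curl -u elastic:123456 -X POST -H 'Content-Type: application/json' \
41 |     es:9200/_security/user/elastic/_password -d '{"password": "<new-password>"}'
42 | ```
43 |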
--------------------------------------------------------------------------------
/harbor/kuber/05-registryctl.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: registryctl-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 8080
11 | targetPort: 8080
12 | selector:
13 | name: registryctl-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: registryctl
19 | namespace: harbor
20 | labels:
21 | name: registryctl
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: registryctl-apps
28 | spec:
29 | containers:
30 | - name: registryctl-app
31 | image: goharbor/harbor-registryctl:v1.8.2
32 | imagePullPolicy: IfNotPresent
33 | envFrom:
34 | - configMapRef:
35 | name: registryctl-env
36 | volumeMounts:
37 | - name: nfs-registryctl-vol
38 | mountPath: /storage
39 | - name: registry-cfg-vol
40 | mountPath: /etc/registry
41 | - name: registryctl-cfg-vol
42 | mountPath: /etc/registryctl
43 | volumes:
44 | - name: registry-cfg-vol
45 | configMap:
46 | name: registry-cfg-map
47 | - name: registryctl-cfg-vol
48 | configMap:
49 | name: registryctl-cfg-map
50 | - name: nfs-registryctl-vol
51 | nfs:
52 | server: 192.168.7.14
53 | path: /mnt/nfsvol/harbor/registry
54 |
--------------------------------------------------------------------------------
/prometheus/exporters/01-node-exporter.yaml:
--------------------------------------------------------------------------------
1 | ## the node exporter uses hostNetwork, so no NodePort Service is needed
2 | ---
3 | apiVersion: apps/v1
4 | kind: DaemonSet
5 | metadata:
6 | name: node-exporter
7 | namespace: monitoring
8 | spec:
9 | selector:
10 | matchLabels:
11 | name: node-exporter
12 | template:
13 | metadata:
14 | labels:
15 | name: node-exporter
16 | spec:
17 | securityContext:
18 | runAsNonRoot: true
19 | runAsUser: 65534
20 | hostNetwork: true
21 | hostPID: true
22 | containers:
23 | - name: node-exporter
24 | image: prom/node-exporter:v0.17.0
25 | args:
26 | - "--path.procfs=/host/proc"
27 | - "--path.sysfs=/host/sys"
28 | ports:
29 | - name: metrics
30 | containerPort: 9100
31 | hostPort: 9100
32 | resources:
33 | requests:
34 | memory: 1024Mi
35 | cpu: 1024m
36 | limits:
37 | memory: 2048Mi
38 | cpu: 2048m
39 | volumeMounts:
40 | - name: proc
41 | mountPath: /host/proc
42 | readOnly: true
43 | - name: sys
44 | mountPath: /host/sys
45 | readOnly: true
46 | tolerations:
47 | - effect: NoSchedule
48 | operator: Exists
49 | volumes:
50 | - name: proc
51 | hostPath:
52 | path: /proc
53 | - name: sys
54 | hostPath:
55 | path: /sys
56 |
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/01.svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: es
6 | labels:
7 | app: es
8 | spec:
9 | ports:
10 | ## the port serving client traffic
11 | - port: 9200
12 | name: client
13 | targetPort: 9200
14 | ## the port for traffic between nodes inside the cluster
15 | - port: 9300
16 | name: node
17 | targetPort: 9300
18 | selector:
19 | ## note: what the service selector must match is
20 | ## Deployment -> spec -> template -> labels,
21 | ## not Deployment -> metadata -> labels.
22 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
23 | app: es
24 |
25 | ---
26 | apiVersion: v1
27 | kind: Service
28 | metadata:
29 | name: kibana
30 | labels:
31 | app: kibana
32 | spec:
33 | ports:
34 | - port: 5601
35 | name: webui
36 | targetPort: 5601
37 | nodePort: 30601
38 | selector:
39 | ## note: what the service selector must match is
40 | ## Deployment -> spec -> template -> labels,
41 | ## not Deployment -> metadata -> labels.
42 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
43 | app: kibana
44 | type: NodePort
45 |
46 | ---
47 | apiVersion: v1
48 | kind: Service
49 | metadata:
50 | name: nginx
51 | labels:
52 | app: nginx
53 | spec:
54 | ports:
55 | - port: 8080
56 | name: nginx
57 | targetPort: 8080
58 | nodePort: 31080
59 | selector:
60 | ## note: what the service selector must match is
61 | ## Deployment -> spec -> template -> labels,
62 | ## not Deployment -> metadata -> labels.
63 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
64 | app: nginx
65 | type: NodePort
66 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/01.svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: es
6 | labels:
7 | app: es
8 | spec:
9 | ports:
10 | ## the port serving client traffic
11 | - port: 9200
12 | name: client
13 | targetPort: 9200
14 | ## the port for traffic between nodes inside the cluster
15 | - port: 9300
16 | name: node
17 | targetPort: 9300
18 | selector:
19 | ## note: what the service selector must match is
20 | ## Deployment -> spec -> template -> labels,
21 | ## not Deployment -> metadata -> labels.
22 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
23 | app: es
24 |
25 | ---
26 | apiVersion: v1
27 | kind: Service
28 | metadata:
29 | name: kibana
30 | labels:
31 | app: kibana
32 | spec:
33 | ports:
34 | - port: 5601
35 | name: webui
36 | targetPort: 5601
37 | nodePort: 30601
38 | selector:
39 | ## note: what the service selector must match is
40 | ## Deployment -> spec -> template -> labels,
41 | ## not Deployment -> metadata -> labels.
42 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
43 | app: kibana
44 | type: NodePort
45 |
46 | ---
47 | apiVersion: v1
48 | kind: Service
49 | metadata:
50 | name: nginx
51 | labels:
52 | app: nginx
53 | spec:
54 | ports:
55 | - port: 8080
56 | name: nginx
57 | targetPort: 8080
58 | nodePort: 31080
59 | selector:
60 | ## note: what the service selector must match is
61 | ## Deployment -> spec -> template -> labels,
62 | ## not Deployment -> metadata -> labels.
63 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
64 | app: nginx
65 | type: NodePort
66 |
--------------------------------------------------------------------------------
/harbor/readme.md:
--------------------------------------------------------------------------------
1 | # About Harbor
2 |
3 | ## Preface
4 |
5 | The official documentation is really poor, and outdated on top of that: harbor itself is at 1.8 while the kuber-related deployment configs are still for 1.2.
6 |
7 | Below is a walkthrough of harbor's deployment flow.
8 |
9 | Going by the main official deployment doc, it is really just a matter of downloading a release tarball, unpacking it and running the bundled `install.sh`. Based on the settings in the unpacked `harbor.yml`, this script generates the config files each component needs, creates the key pair automatically, pulls the required images from docker hub, and finally starts everything with docker-compose.
10 |
11 | Note that the config-generation step does not happen in the `install.sh` script itself; it is done through a `goharbor/prepare` image.
12 |
13 | Quite a sneaky move -- I had actually planned to read that script...
14 |
15 | In the end I did not manage to work out the kuber config files from the official docs or by reading the script...
16 |
17 | Instead I ran `install.sh` once to bring all the harbor components up, and only after becoming reasonably familiar with the config fields did I port them over.
18 |
19 | > The `docker-compose.yml` in this directory is the one generated by `install.sh`; I borrowed its volume settings to write the corresponding kuber deployment configs.
20 |
21 | I use NFS for storage; the NFS Server setup can be found in the `exports` file.
22 |
23 | ## Understanding `harbor.yml`
24 |
25 | ------
26 |
27 | First, the `hostname` field must be defined, and the `https` section must be uncommented as well: even though plain http is fine for the web UI, `docker login` requires the service to be an https endpoint (a self-signed certificate is acceptable).
28 |
29 | The install script will not generate the certificate and key for you, though; they have to be created beforehand, and the `https.certificate` and `https.private_key` paths adjusted accordingly. (A sample openssl command is given at the end of this file.)
30 |
31 | ------
32 |
33 | The `database.password` field defines the database password (for the postgres superuser); the install script automatically pulls and starts the postgres and redis images.
34 |
35 | If you want the components to use external database and redis services instead, edit the `external_database` and `external_redis` sections.
36 |
37 | ...I think I just uncommented both `external` sections; I never tried the default config, and in the end the database and redis still point at the two instances started by the install script.
38 |
39 | Since the whole service is ultimately started with docker-compose, the database/redis hosts must be written as the service names from the compose config.
40 |
41 | The install script pulls harbor's own packaged database image, which creates 3 databases by default, for the harbor, clair and notary services. I did not enable the latter two, so they are ignored for now.
42 |
43 | ------
44 |
45 | Next to the generated `docker-compose.yml` there is also a `common` directory holding the config files used by the components, mounted into the containers via volumes.
46 |
47 | ------
48 |
49 | The clair, jobservice and log sections can be left untouched.
50 |
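51 | ------
52 |
53 | For the certificate mentioned above, a self-signed pair is enough for `docker login`; one possible way to produce it (the CN below matches the `EXT_ENDPOINT` used elsewhere in this repo, adjust it to your own hostname):
54 |
55 | ```
56 | openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
57 |     -keyout server.key -out server.crt \
58 |     -subj "/CN=harbor.generals.space/O=harbor.generals.space"
59 | ```
60 |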
--------------------------------------------------------------------------------
/elk [dc]/02.elk-logstash-lb/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | elasticsearch:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
11 | ELASTIC_PASSWORD: 123456
12 | volumes:
13 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
14 | logstash-01:
15 | image: logstash:7.2.0
16 | environment:
17 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
18 | volumes:
19 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
20 | - ./logstash/pipeline01:/usr/share/logstash/pipeline:ro
21 | - ./data/logs/nginx01:/var/log/nginx
22 | logstash-02:
23 | image: logstash:7.2.0
24 | environment:
25 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
26 | volumes:
27 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
28 | - ./logstash/pipeline02:/usr/share/logstash/pipeline:ro
29 | - ./data/logs/nginx02:/var/log/nginx
30 | kibana:
31 | image: kibana:7.2.0
32 | ports:
33 | - 5601:5601
34 | volumes:
35 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
36 | nginx-01:
37 | image: nginx:1.12.0
38 | ports:
39 | - 9001:8080
40 | volumes:
41 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
42 | - ./data/logs/nginx01:/var/log/nginx
43 | nginx-02:
44 | image: nginx:1.12.0
45 | ports:
46 | - 9002:8080
47 | volumes:
48 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
49 | - ./data/logs/nginx02:/var/log/nginx
50 |
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/readme.md:
--------------------------------------------------------------------------------
1 | To contrast with the role sts plays for distributed clusters and stateful services, this example deploys es with a plain deployment resource.
2 |
3 | kibana is a deployment as well.
4 |
5 | logstash collects the logs on each host, and the collection target is nginx, so these two are deployed as daemonsets.
6 |
7 | With ELK deployed this way, even though the yaml states the username/password as `elastic/123456`, accessing the kibana webUI needs no password, and neither does the es http endpoint.
8 |
9 | ```console
10 | $ curl es:9200/_cat/health
11 | 1592740248 11:50:48 elasticsearch green 1 1 2 2 0 0 0 0 - 100.0%
12 | ```
13 |
14 | ## The config file extension must be .yml
15 |
16 | If it is named .yaml, the container will not start (it ends up in `CrashLoopBackOff`), and the logs show the following error.
17 |
18 | ```console
19 | $ k logs -f fb17c160244c
20 | Exception in thread "main" SettingsException[elasticsearch.yaml was deprecated in 5.5.0 and must be renamed to elasticsearch.yml]
21 | at org.elasticsearch.node.InternalSettingsPreparer.prepareEnvironment(InternalSettingsPreparer.java:72)
22 | at org.elasticsearch.cli.EnvironmentAwareCommand.createEnv(EnvironmentAwareCommand.java:95)
23 | at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:86)
24 | at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:124)
25 | at org.elasticsearch.cli.MultiCommand.execute(MultiCommand.java:77)
26 | at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:124)
27 | at org.elasticsearch.cli.Command.main(Command.java:90)
28 | at org.elasticsearch.common.settings.KeyStoreCli.main(KeyStoreCli.java:41)
29 | ```
30 |
31 | ## Password 123456
32 |
33 | es itself accepts 123456 as a password (just wrap it in double quotes in the yaml), but kibana does not: even with double quotes it fails to start, with the following log.
34 |
35 | ```
36 | FATAL Error: [elasticsearch.password]: expected value of type [string] but got [number]
37 | ```
38 |
39 | ...yet writing 123456 in the config file seems fine... maybe it just cannot be set through an environment variable?
40 |
--------------------------------------------------------------------------------
/harbor/kuber/06-core.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: core-svc
6 | namespace: harbor
7 | spec:
8 | ports:
9 | - name: stardard-port
10 | port: 8080
11 | targetPort: 8080
12 | selector:
13 | name: core-apps
14 | ---
15 | apiVersion: extensions/v1beta1
16 | kind: Deployment
17 | metadata:
18 | name: core
19 | namespace: harbor
20 | labels:
21 | name: core
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: core-apps
28 | spec:
29 | containers:
30 | - name: core-app
31 | image: goharbor/harbor-core:v1.8.2
32 | imagePullPolicy: IfNotPresent
33 |
34 | envFrom:
35 | - configMapRef:
36 | name: core-env
37 |
38 | volumeMounts:
39 | - name: core-cfg-vol
40 | mountPath: /etc/core
41 | - name: nfs-core-data-vol
42 | mountPath: /data
43 | - name: nfs-core-ca-vol
44 | mountPath: /etc/core/ca
45 | - name: nfs-core-psc-vol
46 | mountPath: /etc/core/token
47 | volumes:
48 | - name: core-cfg-vol
49 | configMap:
50 | name: core-cfg-map
51 | ## nfs shared directories
52 | - name: nfs-core-data-vol
53 | nfs:
54 | server: 192.168.7.14
55 | path: /mnt/nfsvol/harbor
56 | - name: nfs-core-ca-vol
57 | nfs:
58 | server: 192.168.7.14
59 | path: /mnt/nfsvol/harbor/ca_download
60 | - name: nfs-core-psc-vol
61 | nfs:
62 | server: 192.168.7.14
63 | path: /mnt/nfsvol/harbor/psc
64 |
--------------------------------------------------------------------------------
/elk [dc]/03.elk-filebeat/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | elasticsearch:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
11 | ELASTIC_PASSWORD: 123456
12 | volumes:
13 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
14 | logstash:
15 | image: logstash:7.2.0
16 | environment:
17 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
18 | volumes:
19 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
20 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
21 | kibana:
22 | image: kibana:7.2.0
23 | ports:
24 | - 5601:5601
25 | volumes:
26 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
27 | nginx-01:
28 | image: nginx:1.12.0
29 | ports:
30 | - 9001:8080
31 | volumes:
32 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
33 | - ./data/logs/nginx01:/var/log/nginx
34 | filebeat-01:
35 | hostname: node-01
36 | image: elastic/filebeat:7.2.0
37 | volumes:
38 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
39 | - ./data/logs/nginx01:/var/log/nginx
40 | nginx-02:
41 | image: nginx:1.12.0
42 | ports:
43 | - 9002:8080
44 | volumes:
45 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
46 | - ./data/logs/nginx02:/var/log/nginx
47 | filebeat-02:
48 | hostname: node-02
49 | image: elastic/filebeat:7.2.0
50 | volumes:
51 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
52 | - ./data/logs/nginx02:/var/log/nginx
53 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/data/logs/nginx/access_json.log:
--------------------------------------------------------------------------------
1 | {"@timestamp":"2020-09-16T03:59:12+00:00","@version":"1","client":"172.25.0.1","url":"/index.html","status":"200","domain":"localhost","host":"172.25.0.8","size":612,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
2 | {"@timestamp":"2020-09-16T03:59:12+00:00","@version":"1","client":"172.25.0.1","url":"/404.html","status":"404","domain":"localhost","host":"172.25.0.8","size":571,"responsetime":0.000,"referer": "http://localhost:9001/","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
3 | {"@timestamp":"2020-09-16T06:32:46+00:00","@version":"1","client":"172.26.0.1","url":"/index.html","status":"304","domain":"localhost","host":"172.26.0.3","size":0,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
4 | {"@timestamp":"2020-09-16T06:59:16+00:00","@version":"1","client":"172.26.0.1","url":"/index.html","status":"304","domain":"localhost","host":"172.26.0.3","size":0,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
5 | {"@timestamp":"2020-09-16T07:03:37+00:00","@version":"1","client":"172.26.0.1","url":"/index.html","status":"304","domain":"localhost","host":"172.26.0.3","size":0,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
6 |
--------------------------------------------------------------------------------
/elk [dc]/08.elk-cluster-5.5.0-xpack/data/logs/nginx/access_json.log:
--------------------------------------------------------------------------------
1 | {"@timestamp":"2020-09-16T03:59:12+00:00","@version":"1","client":"172.25.0.1","url":"/index.html","status":"200","domain":"localhost","host":"172.25.0.8","size":612,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
2 | {"@timestamp":"2020-09-16T03:59:12+00:00","@version":"1","client":"172.25.0.1","url":"/404.html","status":"404","domain":"localhost","host":"172.25.0.8","size":571,"responsetime":0.000,"referer": "http://localhost:9001/","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
3 | {"@timestamp":"2020-09-16T06:32:46+00:00","@version":"1","client":"172.26.0.1","url":"/index.html","status":"304","domain":"localhost","host":"172.26.0.3","size":0,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
4 | {"@timestamp":"2020-09-16T06:59:16+00:00","@version":"1","client":"172.26.0.1","url":"/index.html","status":"304","domain":"localhost","host":"172.26.0.3","size":0,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
5 | {"@timestamp":"2020-09-16T07:03:37+00:00","@version":"1","client":"172.26.0.1","url":"/index.html","status":"304","domain":"localhost","host":"172.26.0.3","size":0,"responsetime":0.000,"referer": "-","ua": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"}
6 |
--------------------------------------------------------------------------------
/harbor/config/nginx/server.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC1qPAryL7YZ10n
3 | bKdZVLCfXyp4XUIGBtT/SGsng6DIFKpGrQt8wEZESEYySImw6SeNsQHXAY81CTSI
4 | 09wJLSlprMcI3IQ8Niyif3lbcSVbPmqHCxteeRynx9niDABtwrZypiQp0HMs4iI/
5 | OD7kf/BxlcHDz1hyF9FiSkiWssz8+a0GAJyhPbkLlDXyIDmpP5fsJb5tcGa3HRAc
6 | 7HMpqpLqyniaNR23XqEpuOZ62/0/NciolGU7EyItodLRvFNfEDr7dYmyQTw+XZrA
7 | 8QySXKdCIEg1Xpk17qLUwLlAcTsio4ZLvmAmIu2ujd5TNHlwKykNL+2jGIi20WK1
8 | 7QoEcrl3AgMBAAECggEADfNoZmRnS6U8gS+V9oVLfyHG2DGJRX820lgUapdwPI1h
9 | ukXF0RHPE6VdRrVNZbmyIO6MDp//CHqpfyyOBGJkgMoVCvgsa5dZfC0/+6bH0KcN
10 | dUuEEZBJhBUUBYkYa1V9v3ZE5rUsczD2olZGdjPhGkf+9nOw/ioYGW1M/83Bclf1
11 | LWWSZugKuDnS8wzsennI9rTFiozM1PMOizi2hacPaJwc35dTTDReuDBGL2TqwL1Q
12 | qRObHMbTgPFRrfuaZxSDKtIvcG50MMpupDJMPPuIl61Z+uYRxyoGIKF945XvBJ1E
13 | YLQpDSF5gy/gf8Hhrk6Lorn+/IPAXWN0uxmQTlVLoQKBgQDf1jMMZWC4ntEGPWE7
14 | aCWH9ynN7WFax9GJExil/+A8gStBSZvgeBqOdIwjXek5xbRleTetRaToffCzROnu
15 | mklo6lxGym4VL95Zi5V9Mfrn6u4Jbf6J+3PQkFuHZKapd4nWjW2DKjiKy0Yn+6Lo
16 | yMcwC9wjGu2STJdqe2PYRpvdZwKBgQDPw0V19qpPGSoteXHbIzsnE0MJAB4iRuOW
17 | LonOkaAq/cNbI2vFsGeulBABPDmCIajJdAPzZk0zPQB9ojipgJ9wY/BNZzHHhWLZ
18 | t+3s5tPolLGtf2ZVDaCmSYBHa8wQcfcZY9oOgtTwj+TMFGiLz+r4LXIE5eyO5iQX
19 | iGToTHupcQKBgEZpk1Vq7qolC4vy8wtFD8ldrwDgk6eDBB8iqrZNb7SYzYqUJTzP
20 | i0sUzqCxghRyZPUm1KTlK6MXOwipvQ0Z683dK3n5Y/e2PzPb/QKNHmimNosw1smB
21 | GEWbo69LskHNIMclZjXEqlS+7SrWtcCUqFQs5usEJYfIIDmW5ym5zH4xAoGAa6O9
22 | vwoW3ngpM/oxlNlB8lK2bllTQ2r6yno+B3nHQ77JkdReJ8PnU7l+OUWcfeE2c3jZ
23 | +ybLZHp8YJrFRHxRHoeInnGShs6Ckk4KxKwqqdsv2rWgrPwseTqnbVlaFkMVRGBJ
24 | Py9lZ2UFov4H1Dfags34vrToxHhTxqVxsjC9+OECgYEApRCfl9d9R8zY+WzRTVEd
25 | f720QzHEJ/qR9/rq7sz7KHHO2LvztuNuXu8/oxBo6RVYjiiH4Q0gbr8OZZcKsGqu
26 | botYRLdnwKrgVxOd3KVEmBx+PwRvsPJaTtc5HFXlqSWzmqaT99n1tJ8RFyr6C03F
27 | O6oONEjZg9gu7H7eVqI1Wpw=
28 | -----END PRIVATE KEY-----
--------------------------------------------------------------------------------
/harbor/config/registry/root.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIE0zCCArugAwIBAgIJAIN/E6+lqrO/MA0GCSqGSIb3DQEBCwUAMAAwHhcNMTkw
3 | OTAzMDIwMDI1WhcNMjkwODMxMDIwMDI1WjAAMIICIjANBgkqhkiG9w0BAQEFAAOC
4 | Ag8AMIICCgKCAgEAwjsPxrqTMbJOG0SbGZj8Bt/xyiwMkYJnJTFyvnlj8oig8pjC
5 | Kehbvt1oXTS+BF7fWg3IAujiArq2FH2CW3erAxH0GPFRifPr5noTeyO/CVuSblnq
6 | ZxJhFuvZ9m2hqqhU46ZmX77Wjgv8Nk2o8bON0dRpsREqgMSPcSPHTUPMkeENYe5z
7 | VXBkQPjQQR2TQElRfn0quZL9PPvd5kF/wY3JdQJ1Enfe9/Qn3/kgI4vepCu2uBMv
8 | xpN/w+8XdpW0/44pRJ+24iC+2w4uS+DxT8dQ0lG2iKHyysDPeTqlvFAHjpCnafJv
9 | Q0ciNBgJqmD8M1xFmZjCk/B8ubzOa+13F9+Azzrf2ahq2zzc7sdtQP34oF5aRm6W
10 | Lq4SNeSB1ulFefvjPYzwtmgnDwIBS/BbXEmIf+hUwKdfS1i6SMXX467rMZL4qBvY
11 | 9/l1NdZ5DBBWjm4504E6jEk/s6QbU/4/P4nFx+lqHpktEhMTcqtTkZ2sBjhCAh3x
12 | 2M/F0Omj6rAsrHLRRhsYOPLDeOe1qk9yLdCn/LahmvqbDhDKE2zSNCo5G1GAhNXv
13 | FneSGgvwcqFkbYs2ipn+F50LXiy/lnvWqtdWeM8GdqL9s/XuVqr2oPc1GzQoRUi7
14 | +BvR0y9dVjqsCIXyut1gQZMKhAq64pnO44gIpnB8odC6UtCCN956jqN1pisCAwEA
15 | AaNQME4wHQYDVR0OBBYEFDllskNAU56iPgYJ6FVqDUWuFSbdMB8GA1UdIwQYMBaA
16 | FDllskNAU56iPgYJ6FVqDUWuFSbdMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL
17 | BQADggIBAG71MM2nbysQLyfOFoT6Q+rissPYOzvJwZeNJuxjxgjKVNUnLfC31OUc
18 | 6qUD7hFaH+KbnCYjzjVoQm1zlFsxKWPiQq5swS8SbYOarOlKa9J8/NfSKlRhxL4r
19 | MP/9oUiA6yse9irIoR2JQksGFbwQ88GlMH7Jkf574APBhthZCm2iEwwTsRbvVWDI
20 | tvteB21H36BsnBLeIBOAtPL/2MRrABFyLJqQIUBNF/1fNYyvJnk+cfQ+MOd1b5c3
21 | bfg7Jc8efJgCEtyknxjCTSUX6MNVzwB67QN5WXf79cNV4E59MYJLFJlFb6+y74A0
22 | Kp2LwJMldaNWURW2FIrPt1kpDDJshx4Pslc6MC2/RqfisJIbpjwnAw6lxvGjzUTN
23 | +avdPeChagwpe04OPIPdDKPT65XTWdcImS0D9plcwivCLhz6QyVFt2DIuz5K4q6g
24 | l2QSOByds2tuVxOlrblQsrLLQd2Zo8xRgA90geqwYA837XHfJ5LZ5R+9yHuNdFkS
25 | OUpvFM7p4KARmwlzItNnkfnKGXV7/pB0hFTwmZ1yZW188NNeK3rbF7avqc2JKuLX
26 | hHfzXLIu2evD6+Cx5UR9wMQhJQiV9+kKqzlyXb4QWC8ZhmX61MJQMxfM/YMviqxP
27 | yE+OMRuHBq2ey9na3cSANnAWY4j2dDN8o+kHiFNEPeeMzwsjz4rN
28 | -----END CERTIFICATE-----
--------------------------------------------------------------------------------
/elk [k8s kube]/06.es-cluster [sts statefulset 5.x 5.5.0]/03.kibana.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: kibana-svc
6 | labels:
7 | app: kibana-svc
8 | spec:
9 | ports:
10 | - port: 5601
11 | name: webui
12 | targetPort: 5601
13 | nodePort: 30601
14 | selector:
15 | ## note: what the service selector must match is
16 | ## Deployment -> spec -> template -> labels,
17 | ## not Deployment -> metadata -> labels.
18 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
19 | app: kibana
20 | type: NodePort
21 |
22 | ---
23 | apiVersion: apps/v1
24 | kind: StatefulSet
25 | metadata:
26 | ## the pods generated here are also named kibana-xxx
27 | name: kibana
28 | labels:
29 | app: kibana
30 | spec:
31 | ## name of the headless service
32 | serviceName: kibana-svc
33 | replicas: 1
34 | selector:
35 | matchLabels:
36 | ## this label matches template -> metadata -> labels below,
37 | ## expressing the ownership relation
38 | app: kibana
39 | template:
40 | metadata:
41 | labels:
42 | app: kibana
43 | spec:
44 | containers:
45 | - name: kibana
46 | image: kibana:5.5.0
47 | imagePullPolicy: IfNotPresent
48 | env:
49 | ## the es here refers to the Service name
50 | - name: ELASTICSEARCH_HOSTS
51 | value: http://es-master-svc:9200
52 | ## username and password can be defined either in the config file or as environment variables,
53 | ## but with environment variables the password apparently cannot be 123456, even when double-quoted,
54 | ## so these two keys are commented out here and written in the config file instead...
55 | ## - name: ELASTICSEARCH_USERNAME
56 | ## value: "elastic"
57 | ## - name: ELASTICSEARCH_PASSWORD
58 | ## value: "123456"
59 | volumeMounts:
60 | - name: kibana-config-vol
61 | mountPath: /usr/share/kibana/config/kibana.yml
62 | subPath: kibana.yml
63 | volumes:
64 | - name: kibana-config-vol
65 | configMap:
66 | name: kibana-config
67 |
--------------------------------------------------------------------------------
/rancher/import-to-rancher/02-rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: proxy-clusterrole-kubeapiserver
6 | rules:
7 | - apiGroups: [""]
8 | resources:
9 | - nodes/metrics
10 | - nodes/proxy
11 | - nodes/stats
12 | - nodes/log
13 | - nodes/spec
14 | verbs: ["get", "list", "watch", "create"]
15 | ---
16 | apiVersion: rbac.authorization.k8s.io/v1
17 | kind: ClusterRoleBinding
18 | metadata:
19 | name: proxy-role-binding-kubernetes-master
20 | roleRef:
21 | apiGroup: rbac.authorization.k8s.io
22 | kind: ClusterRole
23 | name: proxy-clusterrole-kubeapiserver
24 | subjects:
25 | - apiGroup: rbac.authorization.k8s.io
26 | kind: User
27 | name: kube-apiserver
28 |
29 | ---
30 |
31 | apiVersion: v1
32 | kind: ServiceAccount
33 | metadata:
34 | name: cattle
35 | namespace: cattle-system
36 |
37 | ---
38 |
39 | apiVersion: rbac.authorization.k8s.io/v1beta1
40 | kind: ClusterRoleBinding
41 | metadata:
42 | name: cattle-admin-binding
43 | namespace: cattle-system
44 | labels:
45 | cattle.io/creator: "norman"
46 | subjects:
47 | - kind: ServiceAccount
48 | name: cattle
49 | namespace: cattle-system
50 | roleRef:
51 | kind: ClusterRole
52 | name: cattle-admin
53 | apiGroup: rbac.authorization.k8s.io
54 |
55 | ---
56 |
57 | apiVersion: v1
58 | kind: Secret
59 | metadata:
60 | name: cattle-credentials-8c84cbd
61 | namespace: cattle-system
62 | type: Opaque
63 | data:
64 | url: "aHR0cHM6Ly8xOTIuMTY4LjAuMTAx"
65 | token: "ajY0N2w5bXRwZmg3NjVwa2JmaHN0YzlqZmo2Nm5ncGY1OTRqeDc1d3cyZG42YnJyZDJ2ZjRu"
66 |
67 | ---
68 |
69 | apiVersion: rbac.authorization.k8s.io/v1
70 | kind: ClusterRole
71 | metadata:
72 | name: cattle-admin
73 | labels:
74 | cattle.io/creator: "norman"
75 | rules:
76 | - apiGroups:
77 | - '*'
78 | resources:
79 | - '*'
80 | verbs:
81 | - '*'
82 | - nonResourceURLs:
83 | - '*'
84 | verbs:
85 | - '*'
86 |
--------------------------------------------------------------------------------
/elk [dc]/04.elk-filebeat-kafka/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | elasticsearch:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
11 | ELASTIC_PASSWORD: 123456
12 | volumes:
13 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
14 | zookeeper:
15 | image: zookeeper
16 | ports:
17 | - "2181:2181"
18 | kafka:
19 | image: wurstmeister/kafka # the unofficial image with the most stars and pulls on docker hub (there is no official image)
20 | ports:
21 | - 9092:9092
22 | environment:
23 | KAFKA_ADVERTISED_HOST_NAME: kafka
24 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
25 | volumes:
26 | - ./data/logs/kafka:/kafka
27 | logstash:
28 | image: logstash:7.2.0
29 | environment:
30 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
31 | volumes:
32 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
33 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
34 | kibana:
35 | image: kibana:7.2.0
36 | ports:
37 | - 5601:5601
38 | volumes:
39 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
40 | nginx-01:
41 | image: nginx:1.12.0
42 | ports:
43 | - 9001:8080
44 | volumes:
45 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
46 | - ./data/logs/nginx01:/var/log/nginx
47 | filebeat-01:
48 | hostname: node-01
49 | image: elastic/filebeat:7.2.0
50 | volumes:
51 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
52 | - ./data/logs/nginx01:/var/log/nginx
53 | nginx-02:
54 | image: nginx:1.12.0
55 | ports:
56 | - 9002:8080
57 | volumes:
58 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
59 | - ./data/logs/nginx02:/var/log/nginx
60 | filebeat-02:
61 | hostname: node-02
62 | image: elastic/filebeat:7.2.0
63 | volumes:
64 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
65 | - ./data/logs/nginx02:/var/log/nginx
66 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/01.svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: es-cluster-public
6 | labels:
7 | app: es-cluster
8 | spec:
9 | ## unlike the headless service of es-cluster,
10 | ## this service is the one the head service accesses
11 | ports:
12 | - port: 9200
13 | name: http
14 | targetPort: 9200
15 | nodePort: 30920
16 | selector:
17 | ## note: what the service selector must match is
18 | ## Deployment -> spec -> template -> labels,
19 | ## not Deployment -> metadata -> labels.
20 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
21 | app: es-cluster
22 | type: NodePort
23 |
24 | ---
25 | apiVersion: v1
26 | kind: Service
27 | metadata:
28 | name: kibana
29 | labels:
30 | app: kibana
31 | spec:
32 | ports:
33 | - port: 5601
34 | name: webui
35 | targetPort: 5601
36 | nodePort: 30601
37 | selector:
38 | ## note: what the service selector must match is
39 | ## Deployment -> spec -> template -> labels,
40 | ## not Deployment -> metadata -> labels.
41 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
42 | app: kibana
43 | type: NodePort
44 |
45 | ---
46 | apiVersion: v1
47 | kind: Service
48 | metadata:
49 | name: nginx
50 | labels:
51 | app: nginx
52 | spec:
53 | ports:
54 | - port: 8080
55 | name: nginx
56 | targetPort: 8080
57 | nodePort: 31080
58 | selector:
59 | ## note: what the service selector must match is
60 | ## Deployment -> spec -> template -> labels,
61 | ## not Deployment -> metadata -> labels.
62 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
63 | app: nginx
64 | type: NodePort
65 |
66 | ---
67 | apiVersion: v1
68 | kind: Service
69 | metadata:
70 | name: es-head
71 | labels:
72 | app: es-head
73 | spec:
74 | ports:
75 | - port: 9100
76 | name: webui
77 | targetPort: 9100
78 | nodePort: 30910
79 | selector:
80 | ## note: what the service selector must match is
81 | ## Deployment -> spec -> template -> labels,
82 | ## not Deployment -> metadata -> labels.
83 | ## in other words, both the Service and the Deployment ultimately point at Pod resources.
84 | app: es-head
85 | type: NodePort
86 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/logstash 5.5.0无法启动的问题.md:
--------------------------------------------------------------------------------
1 | # logstash 5.5.0 failing to start
2 |
3 | References
4 |
5 | 1. [logstash fails to start with config error: Expected one of #, input, filter, output at line 1, column 1 (byte 1) after](https://blog.csdn.net/Crazy_T_B/article/details/79422602)
6 | 2. [Expected one of #, input, filter, output at line 2, column 1 (byte 2): Logstash](https://blog.csdn.net/wyqlxy/article/details/52583639)
7 |
8 | I deployed the 5.5.0 ELK the same way as the earlier 7.2.0 version, but `logstash` simply would not start, and the logs contained nothing useful.
9 |
10 | I then changed the `logstash` service to the following and went inside the container to have a look.
11 |
12 | ```yaml
13 | logstash:
14 | image: logstash:5.5.0
15 | environment:
16 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
17 | volumes:
18 | - ./logstash/config/logstash.conf:/usr/share/logstash/config/logstash.conf:ro
19 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
20 | command: ["tail", "-f", "/docker-entrypoint.sh"]
21 | ```
22 |
23 | The service was started with the following command.
24 |
25 | ```
26 | logstash --log.level=debug --path.config=/usr/share/logstash/config/logstash.yml
27 | ```
28 |
29 | It printed the following error and exited immediately.
30 |
31 | ```
32 | 06:19:29.041 [LogStash::Runner] ERROR logstash.agent - Cannot create pipeline {:reason=>"Expected one of #, input, filter, output at line 1, column 1 (byte 1) after ", :backtrace=>["/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:59:in `initialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:156:in `initialize'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:286:in `create_pipeline'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:95:in `register_pipeline'", "/usr/share/logstash/logstash-core/lib/logstash/runner.rb:314:in `execute'", "/usr/share/logstash/vendor/bundle/jruby/1.9/gems/clamp-0.6.5/lib/clamp/command.rb:67:in `run'", "/usr/share/logstash/logstash-core/lib/logstash/runner.rb:209:in `run'", "/usr/share/logstash/vendor/bundle/jruby/1.9/gems/clamp-0.6.5/lib/clamp/command.rb:132:in `run'", "/usr/share/logstash/lib/bootstrap/environment.rb:71:in `(root)'"]}
33 | ```
34 |
35 | At first I thought my pipeline syntax was wrong; reference 1 also says it could be a character-set issue, but I double-checked and neither was the case.
36 |
37 | Later I found reference 2, where the pipeline content is written into a conf file. So I tried moving the `pipeline` content into the `conf` file and removing the original `yml` config file, and logstash started successfully.
38 |
39 | In `logstash` 5.5.0 the settings are passed on the command line, so a separate settings file is not needed (see the working compose snippet below).
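
For reference, the working compose service then looks like the sketch below (it mirrors `07.elk-cluster-5.5.0/docker-compose.yaml` elsewhere in this repo): only the conf file is mounted, and it is passed with `-f` on the command line.

```yaml
logstash:
  image: logstash:5.5.0
  environment:
    LS_JAVA_OPTS: "-Xmx256m -Xms256m"
  volumes:
    ## input/filter/output all live in this single conf file
    - ./logstash/config/logstash.conf:/usr/share/logstash/config/logstash.conf:ro
    - ./data/logs/nginx:/var/log/nginx
  ## pass the pipeline conf on the command line instead of relying on logstash.yml
  command: ["logstash", "-f", "/usr/share/logstash/config/logstash.conf"]
```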
40 |
--------------------------------------------------------------------------------
/rancher/import-to-rancher/03-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: cattle-cluster-agent
7 | namespace: cattle-system
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: cattle-cluster-agent
12 | template:
13 | metadata:
14 | labels:
15 | app: cattle-cluster-agent
16 | spec:
17 | affinity:
18 | nodeAffinity:
19 | requiredDuringSchedulingIgnoredDuringExecution:
20 | nodeSelectorTerms:
21 | - matchExpressions:
22 | - key: beta.kubernetes.io/os
23 | operator: NotIn
24 | values:
25 | - windows
26 | preferredDuringSchedulingIgnoredDuringExecution:
27 | - weight: 100
28 | preference:
29 | matchExpressions:
30 | - key: node-role.kubernetes.io/controlplane
31 | operator: In
32 | values:
33 | - "true"
34 | - weight: 1
35 | preference:
36 | matchExpressions:
37 | - key: node-role.kubernetes.io/etcd
38 | operator: In
39 | values:
40 | - "true"
41 | serviceAccountName: cattle
42 | tolerations:
43 | - operator: Exists
44 | containers:
45 | - name: cluster-register
46 | imagePullPolicy: IfNotPresent
47 | env:
48 | - name: CATTLE_SERVER
49 | value: "https://192.168.0.101"
50 | - name: CATTLE_CA_CHECKSUM
51 | value: "64947eb2035c958586a3151725a5485f3ecbc5e98a9657c59c9ca13bf14d3c8b"
52 | - name: CATTLE_CLUSTER
53 | value: "true"
54 | - name: CATTLE_K8S_MANAGED
55 | value: "true"
56 | image: rancher/rancher-agent:v2.3.3
57 | volumeMounts:
58 | - name: cattle-credentials
59 | mountPath: /cattle-credentials
60 | readOnly: true
61 | volumes:
62 | - name: cattle-credentials
63 | secret:
64 | secretName: cattle-credentials-8c84cbd
65 | defaultMode: 320
66 |
--------------------------------------------------------------------------------
/prometheus/02.deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: prometheus
6 | namespace: monitoring
7 | spec:
8 | ports:
9 | - name: web
10 | port: 9090
11 | protocol: TCP
12 | targetPort: 9090
13 | nodePort: 30090
14 | selector:
15 | name: prometheus
16 | type: NodePort
17 | ---
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | name: prometheus
22 | namespace: monitoring
23 | spec:
24 | replicas: 1
25 | selector:
26 | matchLabels:
27 | name: prometheus
28 | strategy:
29 | rollingUpdate:
30 | maxUnavailable: 1
31 | template:
32 | metadata:
33 | labels:
34 | name: prometheus
35 | spec:
36 | containers:
37 | - name: prometheus
38 | image: prom/prometheus:v2.20.1
39 | args:
40 | - "--config.file=/etc/prometheus/prometheus.yaml"
41 |             ## Storage path for the TSDB data
42 | ## - "--storage.tsdb.path=/prometheus"
43 |             ## How long to retain data
44 | - "--storage.tsdb.retention=168h"
45 |             ## Very important: enables hot reloading. With this flag, whenever the prometheus.yml config file is updated,
46 |             ## an HTTP POST to http://localhost:9090/-/reload applies the change immediately, so always add it.
47 | - "--web.enable-lifecycle"
48 |             ## Enables the admin API, which allows database-level operations (e.g. deleting series)
49 | - "--web.enable-admin-api"
50 | ports:
51 | - name: web
52 | containerPort: 9090
53 | volumeMounts:
54 | - name: config
55 | mountPath: /etc/prometheus
56 | ## - name: rules
57 | ## mountPath: /etc/prometheus/rules
58 | - mountPath: /var/run/secrets/etcd-sercret
59 | name: etcd-secret
60 | - mountPath: /etc/localtime
61 | name: time
62 | readOnly: true
63 | volumes:
64 | - name: config
65 | configMap:
66 | name: prometheus-config
67 | - name: rules
68 | configMap:
69 | name: prometheus-rules
70 | - name: etcd-secret
71 | secret:
72 | defaultMode: 511
73 | secretName: etcd-certs
74 | - name: time
75 | hostPath:
76 | path: /etc/localtime
77 | type: ""
78 |
--------------------------------------------------------------------------------
/prometheus/docs/grafana ingress配置.md:
--------------------------------------------------------------------------------
1 | References
2 |
3 | 1. [nginx代理grafana](https://www.cnblogs.com/wurijie/p/11109673.html)
4 | 2. [Prometheus + Grafana(三)nginx 设置反向代理](https://www.cnblogs.com/caoweixiong/p/12155712.html)
5 |
6 | The original plan was to put prometheus and grafana behind the ingress on the same domain and port and route purely by URI path, but in the end it turned out not to work...
7 |
8 | nginx rules are not a silver bullet: in many vue/react style projects the static-asset path is fixed (it can only be changed through the app's own configuration).
9 |
10 | ```yaml
11 | apiVersion: extensions/v1beta1
12 | kind: Ingress
13 | metadata:
14 | name: monitoring
15 | annotations:
16 | nginx.ingress.kubernetes.io/rewrite-target: /$2
17 | spec:
18 | rules:
19 | - host: dev.kube.com
20 | http:
21 | paths:
22 | - path: /grafana(/|$)(.*)
23 | backend:
24 | serviceName: grafana
25 | servicePort: 3000
26 |
27 | ```
28 |
29 | When we visit `http://IP:port/grafana`, the `rewrite-target` annotation (essentially an nginx `rewrite` rule) forwards the index-page request to the grafana backend, but the static resources requested right after it still use `/` as their root path.
30 |
31 | The browser then requests the static resources as `http://IP:port/public/xxx`.
32 |
46 |
47 | There is no way, when the index page is requested, to also turn `public/xxx` into `/grafana/public/xxx`.
48 |
49 | 
50 |
51 | Adding one more path rule for `/public` (sketched below) would satisfy the current requirement, but as soon as another project under the `dev.kube.com` domain also serves static resources under a `/public` path, those requests would be misrouted to the grafana service.
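
A rough sketch of that extra rule, written as a second Ingress without the rewrite annotation so `/public/...` reaches grafana unchanged (resource names reuse the `grafana` Service from the Ingress above; untested here):

```yaml
## Sketch only: forwards dev.kube.com/public/... to grafana as-is,
## which is exactly why it would hijack /public for the whole host.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: grafana-static
spec:
  rules:
  - host: dev.kube.com
    http:
      paths:
      - path: /public
        backend:
          serviceName: grafana
          servicePort: 3000
```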
52 |
53 | So either expose each service directly via NodePort, or distinguish them by domain name.
54 |
55 | ------
56 |
57 | References 1 and 2 describe manually changing the proxy prefix of grafana/prometheus, i.e. configuring the app itself so that `public/xxx` becomes a chosen prefix such as `grafana/public/xxx`, which the path rule can then match.
58 |
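In Kubernetes terms, that amounts to telling grafana itself that it lives under a sub-path, so it emits `/grafana/public/xxx` asset links. A hedged sketch using grafana's standard `GF_<SECTION>_<KEY>` environment overrides (values assume the `dev.kube.com/grafana` setup above and a grafana version that supports `serve_from_sub_path`; not verified here):

```yaml
## Sketch: container env for the grafana Deployment
env:
- name: GF_SERVER_ROOT_URL
  value: "http://dev.kube.com/grafana"
- name: GF_SERVER_SERVE_FROM_SUB_PATH
  value: "true"
```
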
59 | Back when I did ops work, it was usually one port or one domain per service, rather than distinguishing several services by path on the same port (and even where path-based forwarding was used, it forwarded to several API-style backends; I had never done it for static resources like this).
60 |
--------------------------------------------------------------------------------
/rancher/RKE/rke_config.txt:
--------------------------------------------------------------------------------
1 | rke_linux-amd64 config
2 | [+] Cluster Level SSH Private Key Path [~/.ssh/id_rsa]:
3 | [+] Number of Hosts [1]: 3
4 | [+] SSH Address of host (1) [none]: 192.168.0.211
5 | [+] SSH Port of host (1) [22]:
6 | [+] SSH Private Key Path of host (192.168.0.211) [none]: ~/.ssh/id_rsa
7 | [+] SSH User of host (192.168.0.211) [ubuntu]: root
8 | [+] Is host (192.168.0.211) a Control Plane host (y/n)? [y]: y
9 | [+] Is host (192.168.0.211) a Worker host (y/n)? [n]: n
10 | [+] Is host (192.168.0.211) an etcd host (y/n)? [n]: y
11 | [+] Override Hostname of host (192.168.0.211) [none]: rke-master-01
12 | [+] Internal IP of host (192.168.0.211) [none]: 192.168.0.211
13 | [+] Docker socket path on host (192.168.0.211) [/var/run/docker.sock]:
14 | [+] SSH Address of host (2) [none]: 192.168.0.214
15 | [+] SSH Port of host (2) [22]:
16 | [+] SSH Private Key Path of host (192.168.0.214) [none]: ~/.ssh/id_rsa
17 | [+] SSH User of host (192.168.0.214) [ubuntu]: root
18 | [+] Is host (192.168.0.214) a Control Plane host (y/n)? [y]: n
19 | [+] Is host (192.168.0.214) a Worker host (y/n)? [n]: y
20 | [+] Is host (192.168.0.214) an etcd host (y/n)? [n]: n
21 | [+] Override Hostname of host (192.168.0.214) [none]: rke-worker-01
22 | [+] Internal IP of host (192.168.0.214) [none]: 192.168.0.214
23 | [+] Docker socket path on host (192.168.0.214) [/var/run/docker.sock]:
24 | [+] SSH Address of host (3) [none]: 192.168.0.215
25 | [+] SSH Port of host (3) [22]:
26 | [+] SSH Private Key Path of host (192.168.0.215) [none]: ~/.ssh/id_rsa
27 | [+] SSH User of host (192.168.0.215) [ubuntu]: root
28 | [+] Is host (192.168.0.215) a Control Plane host (y/n)? [y]: n
29 | [+] Is host (192.168.0.215) a Worker host (y/n)? [n]: y
30 | [+] Is host (192.168.0.215) an etcd host (y/n)? [n]: n
31 | [+] Override Hostname of host (192.168.0.215) [none]: rke-worker-02
32 | [+] Internal IP of host (192.168.0.215) [none]: 192.168.0.215
33 | [+] Docker socket path on host (192.168.0.215) [/var/run/docker.sock]:
34 | [+] Network Plugin Type (flannel, calico, weave, canal) [canal]: calico
35 | [+] Authentication Strategy [x509]:
36 | [+] Authorization Mode (rbac, none) [rbac]:
37 | [+] Kubernetes Docker image [rancher/hyperkube:v1.16.3-rancher1]:
38 | [+] Cluster domain [cluster.local]: k8s-server-lb
39 | [+] Service Cluster IP Range [10.43.0.0/16]:
40 | [+] Enable PodSecurityPolicy [n]:
41 | [+] Cluster Network CIDR [10.42.0.0/16]:
42 | [+] Cluster DNS Service IP [10.43.0.10]:
43 | [+] Add addon manifest URLs or YAML files [no]:
44 |
--------------------------------------------------------------------------------
/elk [k8s kube]/01.single/03.ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: DaemonSet
4 | metadata:
5 |   ## The Pods created by this DaemonSet are also named logstash-xxx
6 | name: logstash
7 | labels:
8 | app: logstash
9 | spec:
10 | selector:
11 | matchLabels:
12 |       ## This label matches template -> metadata -> labels below,
13 |       ## expressing the ownership relationship.
14 | app: logstash
15 | template:
16 | metadata:
17 | labels:
18 | app: logstash
19 | spec:
20 | containers:
21 | - name: logstash
22 | image: logstash:7.2.0
23 | imagePullPolicy: IfNotPresent
24 | env:
25 | - name: LS_JAVA_OPTS
26 | value: "-Xmx256m -Xms256m"
27 | volumeMounts:
28 | - name: logstash-config-vol
29 | mountPath: /usr/share/logstash/config/logstash.yml
30 | subPath: logstash.yml
31 | - name: logstash-pipeline-config-vol
32 | mountPath: /usr/share/logstash/pipeline
33 | - name: nginx-log-vol
34 | mountPath: /var/log/nginx
35 | volumes:
36 | - name: logstash-config-vol
37 | configMap:
38 | name: logstash-config
39 | - name: logstash-pipeline-config-vol
40 | configMap:
41 | name: logstash-pipeline-config
42 | - name: nginx-log-vol
43 | hostPath:
44 | path: /var/log/kuber-nginx
45 |
46 | ---
47 | apiVersion: apps/v1
48 | kind: DaemonSet
49 | metadata:
50 |   ## The Pods created by this DaemonSet are also named nginx-xxx
51 | name: nginx
52 | labels:
53 | app: nginx
54 | spec:
55 | selector:
56 | matchLabels:
57 |       ## This label matches template -> metadata -> labels below,
58 |       ## expressing the ownership relationship.
59 | app: nginx
60 | template:
61 | metadata:
62 | labels:
63 | app: nginx
64 | spec:
65 | containers:
66 |       ## The official image writes its logs straight to stdout.
67 |       ## We want the logs written to files instead, so logstash can collect them.
68 | - name: nginx
69 | image: registry.cn-hangzhou.aliyuncs.com/generals-space/centos7
70 | command: ["tail", "-f", "/etc/os-release"]
71 | imagePullPolicy: IfNotPresent
72 | volumeMounts:
73 | - name: nginx-config-vol
74 | mountPath: /etc/nginx/conf.d
75 | - name: nginx-log-vol
76 | mountPath: /var/log/nginx
77 | volumes:
78 | - name: nginx-config-vol
79 | configMap:
80 | name: nginx-config
81 | - name: nginx-log-vol
82 | hostPath:
83 | path: /var/log/kuber-nginx
84 |
--------------------------------------------------------------------------------
/elk [k8s kube]/02.single-xpack/03.ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: DaemonSet
4 | metadata:
5 |   ## The Pods created by this DaemonSet are also named logstash-xxx
6 | name: logstash
7 | labels:
8 | app: logstash
9 | spec:
10 | selector:
11 | matchLabels:
12 |       ## This label matches template -> metadata -> labels below,
13 |       ## expressing the ownership relationship.
14 | app: logstash
15 | template:
16 | metadata:
17 | labels:
18 | app: logstash
19 | spec:
20 | containers:
21 | - name: logstash
22 | image: logstash:7.2.0
23 | imagePullPolicy: IfNotPresent
24 | env:
25 | - name: LS_JAVA_OPTS
26 | value: "-Xmx256m -Xms256m"
27 | volumeMounts:
28 | - name: logstash-config-vol
29 | mountPath: /usr/share/logstash/config/logstash.yml
30 | subPath: logstash.yml
31 | - name: logstash-pipeline-config-vol
32 | mountPath: /usr/share/logstash/pipeline
33 | - name: nginx-log-vol
34 | mountPath: /var/log/nginx
35 | volumes:
36 | - name: logstash-config-vol
37 | configMap:
38 | name: logstash-config
39 | - name: logstash-pipeline-config-vol
40 | configMap:
41 | name: logstash-pipeline-config
42 | - name: nginx-log-vol
43 | hostPath:
44 | path: /var/log/kuber-nginx
45 |
46 | ---
47 | apiVersion: apps/v1
48 | kind: DaemonSet
49 | metadata:
50 |   ## The Pods created by this DaemonSet are also named nginx-xxx
51 | name: nginx
52 | labels:
53 | app: nginx
54 | spec:
55 | selector:
56 | matchLabels:
57 |       ## This label matches template -> metadata -> labels below,
58 |       ## expressing the ownership relationship.
59 | app: nginx
60 | template:
61 | metadata:
62 | labels:
63 | app: nginx
64 | spec:
65 | containers:
66 |       ## The official image writes its logs straight to stdout.
67 |       ## We want the logs written to files instead, so logstash can collect them.
68 | - name: nginx
69 | image: registry.cn-hangzhou.aliyuncs.com/generals-space/centos7
70 | command: ["tail", "-f", "/etc/os-release"]
71 | imagePullPolicy: IfNotPresent
72 | volumeMounts:
73 | - name: nginx-config-vol
74 | mountPath: /etc/nginx/conf.d
75 | - name: nginx-log-vol
76 | mountPath: /var/log/nginx
77 | volumes:
78 | - name: nginx-config-vol
79 | configMap:
80 | name: nginx-config
81 | - name: nginx-log-vol
82 | hostPath:
83 | path: /var/log/kuber-nginx
84 |
--------------------------------------------------------------------------------
/elk [k8s kube]/04.es-cluster-xpack [sts statefulset]/04.ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: DaemonSet
4 | metadata:
5 |   ## The Pods created by this DaemonSet are also named logstash-xxx
6 | name: logstash
7 | labels:
8 | app: logstash
9 | spec:
10 | selector:
11 | matchLabels:
12 |       ## This label matches template -> metadata -> labels below,
13 |       ## expressing the ownership relationship.
14 | app: logstash
15 | template:
16 | metadata:
17 | labels:
18 | app: logstash
19 | spec:
20 | containers:
21 | - name: logstash
22 | image: logstash:7.2.0
23 | imagePullPolicy: IfNotPresent
24 | env:
25 | - name: LS_JAVA_OPTS
26 | value: "-Xmx256m -Xms256m"
27 | volumeMounts:
28 | - name: logstash-config-vol
29 | mountPath: /usr/share/logstash/config/logstash.yml
30 | subPath: logstash.yml
31 | - name: logstash-pipeline-config-vol
32 | mountPath: /usr/share/logstash/pipeline
33 | - name: nginx-log-vol
34 | mountPath: /var/log/nginx
35 | volumes:
36 | - name: logstash-config-vol
37 | configMap:
38 | name: logstash-config
39 | - name: logstash-pipeline-config-vol
40 | configMap:
41 | name: logstash-pipeline-config
42 | - name: nginx-log-vol
43 | hostPath:
44 | path: /var/log/kuber-nginx
45 |
46 | ---
47 | apiVersion: apps/v1
48 | kind: DaemonSet
49 | metadata:
50 |   ## The Pods created by this DaemonSet are also named nginx-xxx
51 | name: nginx
52 | labels:
53 | app: nginx
54 | spec:
55 | selector:
56 | matchLabels:
57 |       ## This label matches template -> metadata -> labels below,
58 |       ## expressing the ownership relationship.
59 | app: nginx
60 | template:
61 | metadata:
62 | labels:
63 | app: nginx
64 | spec:
65 | containers:
66 |       ## The official image writes its logs straight to stdout.
67 |       ## We want the logs written to files instead, so logstash can collect them.
68 | - name: nginx
69 | image: registry.cn-hangzhou.aliyuncs.com/generals-space/centos7
70 | command: ["tail", "-f", "/etc/os-release"]
71 | imagePullPolicy: IfNotPresent
72 | volumeMounts:
73 | - name: nginx-config-vol
74 | mountPath: /etc/nginx/conf.d
75 | - name: nginx-log-vol
76 | mountPath: /var/log/nginx
77 | volumes:
78 | - name: nginx-config-vol
79 | configMap:
80 | name: nginx-config
81 | - name: nginx-log-vol
82 | hostPath:
83 | path: /var/log/kuber-nginx
84 |
--------------------------------------------------------------------------------
/elk [k8s kube]/03.es-cluster [sts statefulset 7.x 7.2.0]/04.ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: DaemonSet
4 | metadata:
5 |   ## The Pods created by this DaemonSet are also named logstash-xxx
6 | name: logstash
7 | labels:
8 | app: logstash
9 | spec:
10 | selector:
11 | matchLabels:
12 |       ## This label matches template -> metadata -> labels below,
13 |       ## expressing the ownership relationship.
14 | app: logstash
15 | template:
16 | metadata:
17 | labels:
18 | app: logstash
19 | spec:
20 | containers:
21 | - name: logstash
22 | image: logstash:7.2.0
23 | imagePullPolicy: IfNotPresent
24 | env:
25 | - name: LS_JAVA_OPTS
26 | value: "-Xmx256m -Xms256m"
27 | volumeMounts:
28 | - name: logstash-config-vol
29 | mountPath: /usr/share/logstash/config/logstash.yml
30 | subPath: logstash.yml
31 | - name: logstash-pipeline-config-vol
32 | mountPath: /usr/share/logstash/pipeline
33 | - name: nginx-log-vol
34 | mountPath: /var/log/nginx
35 | volumes:
36 | - name: logstash-config-vol
37 | configMap:
38 | name: logstash-config
39 | - name: logstash-pipeline-config-vol
40 | configMap:
41 | name: logstash-pipeline-config
42 | - name: nginx-log-vol
43 | hostPath:
44 | path: /var/log/kuber-nginx
45 |
46 | ---
47 | apiVersion: apps/v1
48 | kind: DaemonSet
49 | metadata:
50 |   ## The Pods created by this DaemonSet are also named nginx-xxx
51 | name: nginx
52 | labels:
53 | app: nginx
54 | spec:
55 | selector:
56 | matchLabels:
57 |       ## This label matches template -> metadata -> labels below,
58 |       ## expressing the ownership relationship.
59 | app: nginx
60 | template:
61 | metadata:
62 | labels:
63 | app: nginx
64 | spec:
65 | containers:
66 |       ## The official image writes its logs straight to stdout.
67 |       ## We want the logs written to files instead, so logstash can collect them.
68 | - name: nginx
69 | image: registry.cn-hangzhou.aliyuncs.com/generals-space/centos7
70 | command: ["tail", "-f", "/etc/os-release"]
71 | imagePullPolicy: IfNotPresent
72 | volumeMounts:
73 | - name: nginx-config-vol
74 | mountPath: /etc/nginx/conf.d
75 | - name: nginx-log-vol
76 | mountPath: /var/log/nginx
77 | volumes:
78 | - name: nginx-config-vol
79 | configMap:
80 | name: nginx-config
81 | - name: nginx-log-vol
82 | hostPath:
83 | path: /var/log/kuber-nginx
84 |
--------------------------------------------------------------------------------
/elk [k8s kube]/05.es-cluster-xpack-head [sts statefulset]/04.ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: DaemonSet
4 | metadata:
5 |   ## The Pods created by this DaemonSet are also named logstash-xxx
6 | name: logstash
7 | labels:
8 | app: logstash
9 | spec:
10 | selector:
11 | matchLabels:
12 |       ## This label matches template -> metadata -> labels below,
13 |       ## expressing the ownership relationship.
14 | app: logstash
15 | template:
16 | metadata:
17 | labels:
18 | app: logstash
19 | spec:
20 | containers:
21 | - name: logstash
22 | image: logstash:7.2.0
23 | imagePullPolicy: IfNotPresent
24 | env:
25 | - name: LS_JAVA_OPTS
26 | value: "-Xmx256m -Xms256m"
27 | volumeMounts:
28 | - name: logstash-config-vol
29 | mountPath: /usr/share/logstash/config/logstash.yml
30 | subPath: logstash.yml
31 | - name: logstash-pipeline-config-vol
32 | mountPath: /usr/share/logstash/pipeline
33 | - name: nginx-log-vol
34 | mountPath: /var/log/nginx
35 | volumes:
36 | - name: logstash-config-vol
37 | configMap:
38 | name: logstash-config
39 | - name: logstash-pipeline-config-vol
40 | configMap:
41 | name: logstash-pipeline-config
42 | - name: nginx-log-vol
43 | hostPath:
44 | path: /var/log/kuber-nginx
45 |
46 | ---
47 | apiVersion: apps/v1
48 | kind: DaemonSet
49 | metadata:
50 |   ## The Pods created by this DaemonSet are also named nginx-xxx
51 | name: nginx
52 | labels:
53 | app: nginx
54 | spec:
55 | selector:
56 | matchLabels:
57 |       ## This label matches template -> metadata -> labels below,
58 |       ## expressing the ownership relationship.
59 | app: nginx
60 | template:
61 | metadata:
62 | labels:
63 | app: nginx
64 | spec:
65 | containers:
66 |       ## The official image writes its logs straight to stdout.
67 |       ## We want the logs written to files instead, so logstash can collect them.
68 | - name: nginx
69 | image: registry.cn-hangzhou.aliyuncs.com/generals-space/centos7
70 | command: ["tail", "-f", "/etc/os-release"]
71 | imagePullPolicy: IfNotPresent
72 | volumeMounts:
73 | - name: nginx-config-vol
74 | mountPath: /etc/nginx/conf.d
75 | - name: nginx-log-vol
76 | mountPath: /var/log/nginx
77 | volumes:
78 | - name: nginx-config-vol
79 | configMap:
80 | name: nginx-config
81 | - name: nginx-log-vol
82 | hostPath:
83 | path: /var/log/kuber-nginx
84 |
--------------------------------------------------------------------------------
/prometheus/config/rules/deploy.yaml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: deploy-pod.rules
3 | rules:
4 | - alert: kube_statefulset_replicas_unavailable
5 | expr: kube_statefulset_status_replicas < kube_statefulset_replicas
6 | for: 5m
7 | labels:
8 | severity: Warning
9 | annotations:
10 | description: 'statefulset {{$labels.statefulset}} has {{$value}} replicas, which is less than desired'
11 |       summary: '{{$labels.statefulset}}: has insufficient replicas.'
12 | - alert: daemonsets_misscheduled
13 | expr: kube_daemonset_status_number_misscheduled > 0
14 | for: 3m
15 | labels:
16 | severity: Warning
17 | annotations:
18 | description: 'Daemonset {{$labels.daemonset}} is running where it is not supposed to run'
19 | summary: 'Daemonsets not scheduled correctly'
20 | - alert: daemonsets_not_scheduled
21 | expr: kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0
22 | for: 3m
23 | labels:
24 | severity: Warning
25 | annotations:
26 | description: '{{ $value }} of Daemonset {{$labels.daemonset}} scheduled which is less than desired number'
27 | summary: 'Less than desired number of daemonsets scheduled'
28 | - alert: deployment_replicas_unavailable
29 | expr: kube_deployment_status_replicas_unavailable{deployment!~"default-http-backend-tbj-.*|nginx-ingress-controller-tbj-.*"} > 0
30 | for: 3m
31 | labels:
32 | severity: Warning
33 | annotations:
34 | description: 'deployment {{$labels.deployment}} has {{$value}} replicas unavailable'
35 |       summary: '{{$labels.deployment}}: has insufficient replicas.'
36 | - alert: deployment_generation_mismatch
37 | expr: kube_deployment_status_observed_generation{job="kube-state-metrics"} != kube_deployment_metadata_generation{job="kube-state-metrics"}
38 | for: 5m
39 | labels:
40 | severity: High
41 | annotations:
42 | message: "Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment}} does not match, this indicates that the Deployment has failed but has not been rolled back."
43 | - alert: KubeDaemonSetNotScheduled
44 | expr: kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"}
45 | - kube_daemonset_status_current_number_scheduled{job="kube-state-metrics"}
46 | > 0
47 | for: 5m
48 | labels:
49 | severity: Warning
50 | annotations:
51 | message: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.'
52 |
--------------------------------------------------------------------------------
/elk [dc]/06.elk-cluster-filebeat/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | esc-master-0:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | node.name: esc-master-0
11 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
12 | volumes:
13 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
14 | esc-master-1:
15 | image: elasticsearch:7.2.0
16 | environment:
17 | node.name: esc-master-1
18 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
19 | volumes:
20 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
21 | esc-master-2:
22 | image: elasticsearch:7.2.0
23 | environment:
24 | node.name: esc-master-2
25 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
26 | volumes:
27 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
28 | esc-data-0:
29 | image: elasticsearch:7.2.0
30 | environment:
31 | node.name: esc-data-0
32 | node.master: "false"
33 | node.data: "true"
34 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
35 | volumes:
36 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
37 | logstash:
38 | image: logstash:7.2.0
39 | environment:
40 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
41 | volumes:
42 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
43 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
44 | kibana:
45 | image: kibana:7.2.0
46 | ports:
47 | - 5601:5601
48 | volumes:
49 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
50 | nginx-01:
51 | image: nginx:1.12.0
52 | ports:
53 | - 9001:8080
54 | volumes:
55 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
56 | - ./data/logs/nginx01:/var/log/nginx
57 | filebeat-01:
58 | hostname: node-01
59 | image: elastic/filebeat:7.2.0
60 | volumes:
61 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
62 | - ./data/logs/nginx01:/var/log/nginx
63 | nginx-02:
64 | image: nginx:1.12.0
65 | ports:
66 | - 9002:8080
67 | volumes:
68 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
69 | - ./data/logs/nginx02:/var/log/nginx
70 | filebeat-02:
71 | hostname: node-02
72 | image: elastic/filebeat:7.2.0
73 | volumes:
74 | - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
75 | - ./data/logs/nginx02:/var/log/nginx
76 |
--------------------------------------------------------------------------------
/elk [dc]/07.elk-cluster-5.5.0/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | esc-master-0:
5 | image: elasticsearch:5.5.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 |       ## Version 5.5.0 does not support keys like node.name directly as environment variables
11 | ES_NODE_NAME: esc-master-0
12 | ES_NODE_MASTER: "true"
13 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
14 | volumes:
15 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
16 | - ./data/esc-master-0/data:/usr/share/elasticsearch/data
17 | esc-master-1:
18 | image: elasticsearch:5.5.0
19 | environment:
20 |       ## Version 5.5.0 does not support keys like node.name directly as environment variables
21 | ES_NODE_NAME: esc-master-1
22 | ES_NODE_MASTER: "true"
23 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
24 | volumes:
25 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
26 | - ./data/esc-master-1/data:/usr/share/elasticsearch/data
27 | esc-master-2:
28 | image: elasticsearch:5.5.0
29 | environment:
30 |       ## Version 5.5.0 does not support keys like node.name directly as environment variables
31 | ES_NODE_NAME: esc-master-2
32 | ES_NODE_MASTER: "true"
33 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
34 | volumes:
35 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
36 | - ./data/esc-master-2/data:/usr/share/elasticsearch/data
37 | esc-data-0:
38 | image: elasticsearch:5.5.0
39 | environment:
40 |       ## Version 5.5.0 does not support keys like node.name directly as environment variables
41 | ES_NODE_NAME: esc-data-0
42 | ES_NODE_DATA: "true"
43 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
44 | volumes:
45 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
46 | - ./data/esc-data-0/data:/usr/share/elasticsearch/data
47 | logstash:
48 | image: logstash:5.5.0
49 | environment:
50 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
51 | volumes:
52 | - ./logstash/config/logstash.conf:/usr/share/logstash/config/logstash.conf:ro
53 | - ./data/logs/nginx:/var/log/nginx
54 |       ## logstash 5.5.0 does not seem to have a separate pipeline directory
55 |       ## - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
56 | command: ["logstash", "-f", "/usr/share/logstash/config/logstash.conf"]
57 | kibana:
58 | image: kibana:5.5.0
59 | ports:
60 | - 5601:5601
61 | volumes:
62 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
63 | nginx:
64 | image: nginx:1.12.0
65 | ports:
66 | - 9001:8080
67 | volumes:
68 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf:ro
69 | - ./data/logs/nginx:/var/log/nginx
70 |
--------------------------------------------------------------------------------
/rancher/import-to-rancher/04-ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: apps/v1
4 | kind: DaemonSet
5 | metadata:
6 | name: cattle-node-agent
7 | namespace: cattle-system
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: cattle-agent
12 | template:
13 | metadata:
14 | labels:
15 | app: cattle-agent
16 | spec:
17 | affinity:
18 | nodeAffinity:
19 | requiredDuringSchedulingIgnoredDuringExecution:
20 | nodeSelectorTerms:
21 | - matchExpressions:
22 | - key: beta.kubernetes.io/os
23 | operator: NotIn
24 | values:
25 | - windows
26 | hostNetwork: true
27 | serviceAccountName: cattle
28 | tolerations:
29 | - operator: Exists
30 | containers:
31 | - name: agent
32 | image: rancher/rancher-agent:v2.3.3
33 | imagePullPolicy: IfNotPresent
34 | env:
35 | - name: CATTLE_NODE_NAME
36 | valueFrom:
37 | fieldRef:
38 | fieldPath: spec.nodeName
39 | - name: CATTLE_SERVER
40 | value: "https://192.168.0.101"
41 | - name: CATTLE_CA_CHECKSUM
42 | value: "64947eb2035c958586a3151725a5485f3ecbc5e98a9657c59c9ca13bf14d3c8b"
43 | - name: CATTLE_CLUSTER
44 | value: "false"
45 | - name: CATTLE_K8S_MANAGED
46 | value: "true"
47 | - name: CATTLE_AGENT_CONNECT
48 | value: "true"
49 | volumeMounts:
50 | - name: cattle-credentials
51 | mountPath: /cattle-credentials
52 | - name: k8s-ssl
53 | mountPath: /etc/kubernetes
54 | - name: var-run
55 | mountPath: /var/run
56 | - name: run
57 | mountPath: /run
58 | - name: docker-certs
59 | mountPath: /etc/docker/certs.d
60 | securityContext:
61 | privileged: true
62 | volumes:
63 | - name: k8s-ssl
64 | hostPath:
65 | path: /etc/kubernetes
66 | type: DirectoryOrCreate
67 | - name: var-run
68 | hostPath:
69 | path: /var/run
70 | type: DirectoryOrCreate
71 | - name: run
72 | hostPath:
73 | path: /run
74 | type: DirectoryOrCreate
75 | - name: cattle-credentials
76 | secret:
77 | secretName: cattle-credentials-8c84cbd
78 | defaultMode: 320
79 | - hostPath:
80 | path: /etc/docker/certs.d
81 | type: DirectoryOrCreate
82 | name: docker-certs
83 | updateStrategy:
84 | type: RollingUpdate
85 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | esc-master-0:
5 | image: elasticsearch:7.2.0
6 | ports:
7 | - 9200:9200
8 | - 9300:9300
9 | environment:
10 | node.name: esc-master-0
11 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
12 | volumes:
13 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
14 | esc-master-1:
15 | image: elasticsearch:7.2.0
16 | environment:
17 | node.name: esc-master-1
18 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
19 | volumes:
20 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
21 | esc-master-2:
22 | image: elasticsearch:7.2.0
23 | environment:
24 | node.name: esc-master-2
25 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
26 | volumes:
27 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
28 | esc-data-0:
29 | image: elasticsearch:7.2.0
30 | environment:
31 | node.name: esc-data-0
32 | node.master: "false"
33 | node.data: "true"
34 | ES_JAVA_OPTS: "-Xmx256m -Xms256m"
35 | volumes:
36 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
37 | kibana:
38 | image: kibana:7.2.0
39 | ports:
40 | - 5601:5601
41 | volumes:
42 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
43 | nginx-01:
44 | image: nginx:1.12.0
45 | ports:
46 | - 9001:8080
47 | volumes:
48 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
49 | - ./data/logs/nginx01:/var/log/nginx
50 | logstash-01:
51 | image: logstash:7.2.0
52 | environment:
53 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
54 | volumes:
55 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
56 | - ./logstash/pipeline01:/usr/share/logstash/pipeline:ro
57 | ## - ./logstash/grok/patterns:/usr/share/logstash/grok/patterns:ro
58 | - ./data/logs/nginx01:/var/log/nginx
59 | nginx-02:
60 | image: nginx:1.12.0
61 | ports:
62 | - 9002:8080
63 | volumes:
64 | - ./nginx/config/nginx.conf:/etc/nginx/conf.d/nginx.conf
65 | - ./data/logs/nginx02:/var/log/nginx
66 | logstash-02:
67 | image: logstash:7.2.0
68 | environment:
69 | LS_JAVA_OPTS: "-Xmx256m -Xms256m"
70 | volumes:
71 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
72 | - ./logstash/pipeline02:/usr/share/logstash/pipeline:ro
73 | ## - ./logstash/grok/patterns:/usr/share/logstash/grok/patterns:ro
74 | - ./data/logs/nginx02:/var/log/nginx
75 |
--------------------------------------------------------------------------------
/prometheus/config/node-exporter.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | annotations:
6 | prometheus.io/port: "9100"
7 | prometheus.io/scrape: "true"
8 | name: node-exporter
9 | namespace: monitoring
10 | spec:
11 | ports:
12 | - name: metrics
13 | port: 9100
14 | protocol: TCP
15 | targetPort: 9100
16 | selector:
17 | k8s-app: node-exporter
18 | type: ClusterIP
19 | ---
20 | apiVersion: apps/v1
21 | kind: DaemonSet
22 | metadata:
23 | labels:
24 | k8s-app: node-exporter
25 | version: v0.17.0
26 | name: node-exporter
27 | namespace: monitoring
28 | spec:
29 | selector:
30 | matchLabels:
31 | k8s-app: node-exporter
32 | version: v0.17.0
33 | template:
34 | metadata:
35 | annotations:
36 | scheduler.alpha.kubernetes.io/critical-pod: ""
37 | labels:
38 | k8s-app: node-exporter
39 | version: v0.17.0
40 | spec:
41 | containers:
42 | - args:
43 | - --path.procfs=/host/proc
44 | - --path.sysfs=/host/sys
45 | - --path.rootfs=/mnt
46 | - --collector.processes
47 | - --collector.ntp
48 | image: prom/node-exporter:v0.17.0
49 | imagePullPolicy: IfNotPresent
50 | name: prometheus-node-exporter
51 | ports:
52 | - containerPort: 9100
53 | hostPort: 9100
54 | name: metrics
55 | protocol: TCP
56 | resources:
57 | limits:
58 | cpu: 100m
59 | memory: 500Mi
60 | requests:
61 | cpu: 100m
62 | memory: 500Mi
63 | securityContext:
64 | privileged: true
65 | procMount: Default
66 | volumeMounts:
67 | - mountPath: /host/proc
68 | name: proc
69 | readOnly: true
70 | - mountPath: /host/sys
71 | name: sys
72 | readOnly: true
73 | - mountPath: /mnt
74 | name: root
75 | readOnly: true
76 | - mountPath: /etc/localtime
77 | name: host-time
78 | readOnly: true
79 | dnsPolicy: ClusterFirst
80 | hostNetwork: true
81 | hostPID: true
82 | restartPolicy: Always
83 | schedulerName: default-scheduler
84 | volumes:
85 | - hostPath:
86 | path: /proc
87 | type: ""
88 | name: proc
89 | - hostPath:
90 | path: /sys
91 | type: ""
92 | name: sys
93 | - hostPath:
94 | path: /
95 | type: ""
96 | name: root
97 | - hostPath:
98 | path: /etc/localtime
99 | type: ""
100 | name: host-time
101 |
--------------------------------------------------------------------------------
/redis-cluster/02.sts.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: redis-app
5 | spec:
6 |   ## Name of the headless Service
7 | serviceName: "redis-service"
8 | selector:
9 | matchLabels:
10 | app: redis
11 | appCluster: redis-cluster
12 | replicas: 6
13 | template:
14 | metadata:
15 | labels:
16 | app: redis
17 | appCluster: redis-cluster
18 | spec:
19 | terminationGracePeriodSeconds: 20
20 |       ## ## Pod anti-affinity: tells the scheduler to avoid nodes that already run a Pod labeled app=redis.
21 |       ## ## That is, try not to place another redis Pod on a node that already has one.
22 | ## affinity:
23 | ## podAntiAffinity:
24 |       ##     ## The hardest part here is the field name itself:
25 |       ##     ## the rule only constrains scheduling; once the Pod is running it is ignored.
26 | ## preferredDuringSchedulingIgnoredDuringExecution:
27 | ## - weight: 100
28 | ## podAffinityTerm:
29 | ## labelSelector:
30 | ## matchExpressions:
31 | ## - key: app
32 | ## operator: In
33 | ## values:
34 | ## - redis
35 | ## topologyKey: kubernetes.io/hostname
36 | containers:
37 | - name: redis
38 | image: redis:5.0
39 | # command: redis-server /etc/redis/redis.conf --protected-mode no
40 | command:
41 | - redis-server
42 | args:
43 | - /etc/redis/redis.conf
44 |         # allow external access (disable protected mode)
45 | - --protected-mode
46 |         ## This value must be quoted; otherwise kuber parses it as a boolean and redis errors out at startup.
47 | - "no"
48 | resources:
49 | requests:
50 |             # m means millicores; 100m is 0.1 CPU
51 | cpu: 100m
52 |             # 100Mi of memory
53 | memory: 100Mi
54 | ports:
55 | - name: redis
56 | containerPort: 6379
57 | protocol: TCP
58 | - name: cluster
59 | containerPort: 16379
60 | protocol: TCP
61 | volumeMounts:
62 |           # mount the file generated from the configMap
63 | - name: redis-conf
64 | mountPath: /etc/redis
65 | - name: redis-data
66 | mountPath: /var/lib/redis
67 | volumes:
68 |       # reference the configMap
69 | - name: redis-conf
70 | configMap:
71 | name: redis-conf
72 | items:
73 | - key: redis.conf
74 | path: redis.conf
75 |   ## PVC template. Only a StatefulSet declares such a template; DaemonSets and Deployments do not.
76 |   ## The generated PVCs get the same 0, 1, 2 ... suffixes as the Pods.
77 |   ## The content is essentially the same as a standalone PVC manifest.
78 | volumeClaimTemplates:
79 | - metadata:
80 |     ## Each generated Pod is bound to its own PVC
81 | name: redis-data
82 | spec:
83 | accessModes:
84 | - ReadWriteMany
85 | resources:
86 | requests:
87 | storage: 200M
88 |       ## Specify the storage class.
89 |       ## rancher's `local-path` does not support the `ReadWriteMany` access mode,
90 |       ## so we use `nfs-provisioner` storage here.
91 | ## storageClassName: local-path
92 | storageClassName: nfs-storage
93 |
--------------------------------------------------------------------------------
/prometheus/config/rules/prometheus.yaml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: prometheus.rules
3 | rules:
4 | - alert: PrometheusConfigReloadFailed
5 | expr: prometheus_config_last_reload_successful == 0
6 | for: 3m
7 | labels:
8 | severity: Warning
9 | annotations:
10 | description: Reloading Prometheus' configuration has failed for {{$labels.namespace}}/{{$labels.pod}}
11 | - alert: PrometheusNotificationQueueRunningFull
12 | expr: predict_linear(prometheus_notifications_queue_length[5m], 60 * 30) > prometheus_notifications_queue_capacity
13 | for: 3m
14 | labels:
15 | severity: Warning
16 | annotations:
17 | description: Prometheus' alert notification queue is running full for {{$labels.namespace}}/{{
18 | $labels.pod}}
19 | - alert: PrometheusErrorSendingAlerts
20 | expr: rate(prometheus_notifications_errors_total[5m]) / rate(prometheus_notifications_sent_total[5m])
21 | > 0.03
22 | for: 3m
23 | labels:
24 | severity: High
25 | annotations:
26 | description: Errors while sending alerts from Prometheus {{$labels.namespace}}/{{
27 | $labels.pod}} to Alertmanager {{$labels.Alertmanager}}
28 | - alert: PrometheusNotConnectedToAlertmanagers
29 | expr: prometheus_notifications_alertmanagers_discovered < 1
30 | for: 3m
31 | labels:
32 | severity: Warning
33 | annotations:
34 | description: Prometheus {{ $labels.namespace }}/{{ $labels.pod}} is not connected
35 | to any Alertmanagers
36 | - alert: PrometheusTSDBReloadsFailing
37 | expr: increase(prometheus_tsdb_reloads_failures_total[2h]) > 0
38 | for: 12h
39 | labels:
40 | severity: Warning
41 | annotations:
42 | description: '{{$labels.job}} at {{$labels.instance}} had {{$value | humanize}}
43 |         reload failures over the last two hours.'
44 | summary: Prometheus has issues reloading data blocks from disk
45 | - alert: PrometheusTSDBCompactionsFailing
46 | expr: increase(prometheus_tsdb_compactions_failed_total[2h]) > 0
47 | for: 12h
48 | labels:
49 | severity: Warning
50 | annotations:
51 | description: '{{$labels.job}} at {{$labels.instance}} had {{$value | humanize}}
52 |         compaction failures over the last two hours.'
53 | summary: Prometheus has issues compacting sample blocks
54 | - alert: PrometheusTSDBWALCorruptions
55 | expr: tsdb_wal_corruptions_total > 0
56 | for: 4h
57 | labels:
58 | severity: Warning
59 | annotations:
60 | description: '{{$labels.job}} at {{$labels.instance}} has a corrupted write-ahead
61 | log (WAL).'
62 |       summary: Prometheus write-ahead log is corrupted
63 |
--------------------------------------------------------------------------------
/elk [dc]/09.elk-cluster-logstash-grok/readme.md:
--------------------------------------------------------------------------------
1 | # ELK
2 |
3 | References
4 |
5 | 1. [logstash grok插件语法介绍](https://blog.csdn.net/qq_34021712/article/details/79746413)
6 | 2. [logstash之grok过滤](https://blog.csdn.net/yanggd1987/article/details/50486779)
7 |     - In production the log format is usually a plain one, so logstash's filter/grok has to be used for parsing
8 |     - Maps nginx `log_format` built-in variables to grok patterns; worth bookmarking.
9 | 3. [logstash-plugins/logstash-patterns-core](https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns)
10 |     - Extra grok patterns that ship with logstash (none for nginx, though)
11 |
12 | This example is based on [02.elk-logstash-lb](): it still uses two logstash instances to collect the logs of two nginx instances and send them straight to es, but the `filter` section of the pipeline config is changed to use `grok`.
13 |
14 | > Creating the index pattern through kibana works exactly as before.
15 |
16 | Incidentally, I had thought about collecting nginx logs before. Back then the options I considered were either changing the nginx config so the log format follows a fixed scheme, or doing regex matching on the processing side and extracting each field via capture groups. I never implemented it, and now with elk I have run into both approaches...
17 |
18 | ------
19 |
20 | In [02.elk-logstash-lb]() the nginx log format was configured as json, so the pipeline's `input` block could use `codec => json` to parse it and obtain the value of every member field.
21 |
22 | Now remove the json log format from the nginx config and restart nginx, and remove `codec => json` from the logstash config at the same time; otherwise logstash reports the following error when handling regular nginx logs.
23 |
24 | ```
25 | [2020-10-13T10:49:50,060][ERROR][logstash.codecs.json ] JSON parse error, original data now in message field {:error=>#, :data=>"172.19.0.1 - - [13/Oct/2020:10:49:49 +0000] \"GET / HTTP/1.1\" 200 612 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44\""}
27 | ```
28 |
29 | ------
30 |
31 | Then modify the `filter` section. The original `filter` looked like this:
32 |
33 | ```conf
34 | filter {
35 | mutate {
36 |     ## use the filter to add the node name, so logs can be grouped by node
37 | add_field => {"_nodename" => "node-01"}
38 | }
39 | }
40 | ```
41 |
42 | As a result, nginx logs coming from host node-01 automatically get an extra `_nodename` field, as shown below.
43 |
44 | 
45 |
46 | Now we replace it with `grok`.
47 |
48 | Since nginx's native log format looks like this:
49 |
50 | ```
51 | 172.19.0.1 - - [13/Oct/2020:10:55:05 +0000] "GET / HTTP/1.1" 200 612 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"
52 | ```
53 |
54 | we add the following filter configuration to `logstash`:
55 |
56 | ```conf
57 | filter {
58 | grok {
59 | match => {
60 | "message" => "%{IP:client} - - \[%{HTTPDATE:timestamp}\] \"%{WORD:method} %{URIPATHPARAM:uri} HTTP/%{NUMBER:httpversion}\" %{NUMBER:status} %{NUMBER:bytes} \"-\" \"%{GREEDYDATA:agent}\""
61 | }
62 | }
63 | }
64 | ```
65 |
66 | Restart the logstash containers and access nginx again; the logs kibana receives now look like this:
67 |
68 | 
69 |
70 | As you can see, the result contains a `message` field whose value is the entire log line, while the parsed values are assigned to the individual fields such as `client` and `timestamp`.
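
As an aside: if the built-in patterns are not enough (reference 3), the `docker-compose.yaml` in this directory already carries commented-out mounts for a custom patterns directory. A sketch of enabling one of them is below; the grok filter would then also need to point at that directory through its `patterns_dir` option.

```yaml
## Sketch: extra volume entry for a logstash service
## (present, commented out, in docker-compose.yaml)
volumes:
  - ./logstash/grok/patterns:/usr/share/logstash/grok/patterns:ro
```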
71 |
72 |
--------------------------------------------------------------------------------
/prometheus/kube-state-metrics/01.rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.7
7 | name: kube-state-metrics
8 | namespace: kube-system
9 | ---
10 | apiVersion: rbac.authorization.k8s.io/v1
11 | kind: ClusterRole
12 | metadata:
13 | labels:
14 | app.kubernetes.io/name: kube-state-metrics
15 | app.kubernetes.io/version: 1.9.7
16 | name: kube-state-metrics
17 | rules:
18 | - apiGroups:
19 | - ""
20 | resources:
21 | - configmaps
22 | - secrets
23 | - nodes
24 | - pods
25 | - services
26 | - resourcequotas
27 | - replicationcontrollers
28 | - limitranges
29 | - persistentvolumeclaims
30 | - persistentvolumes
31 | - namespaces
32 | - endpoints
33 | verbs:
34 | - list
35 | - watch
36 | - apiGroups:
37 | - extensions
38 | resources:
39 | - daemonsets
40 | - deployments
41 | - replicasets
42 | - ingresses
43 | verbs:
44 | - list
45 | - watch
46 | - apiGroups:
47 | - apps
48 | resources:
49 | - statefulsets
50 | - daemonsets
51 | - deployments
52 | - replicasets
53 | verbs:
54 | - list
55 | - watch
56 | - apiGroups:
57 | - batch
58 | resources:
59 | - cronjobs
60 | - jobs
61 | verbs:
62 | - list
63 | - watch
64 | - apiGroups:
65 | - autoscaling
66 | resources:
67 | - horizontalpodautoscalers
68 | verbs:
69 | - list
70 | - watch
71 | - apiGroups:
72 | - authentication.k8s.io
73 | resources:
74 | - tokenreviews
75 | verbs:
76 | - create
77 | - apiGroups:
78 | - authorization.k8s.io
79 | resources:
80 | - subjectaccessreviews
81 | verbs:
82 | - create
83 | - apiGroups:
84 | - policy
85 | resources:
86 | - poddisruptionbudgets
87 | verbs:
88 | - list
89 | - watch
90 | - apiGroups:
91 | - certificates.k8s.io
92 | resources:
93 | - certificatesigningrequests
94 | verbs:
95 | - list
96 | - watch
97 | - apiGroups:
98 | - storage.k8s.io
99 | resources:
100 | - storageclasses
101 | - volumeattachments
102 | verbs:
103 | - list
104 | - watch
105 | - apiGroups:
106 | - admissionregistration.k8s.io
107 | resources:
108 | - mutatingwebhookconfigurations
109 | - validatingwebhookconfigurations
110 | verbs:
111 | - list
112 | - watch
113 | - apiGroups:
114 | - networking.k8s.io
115 | resources:
116 | - networkpolicies
117 | verbs:
118 | - list
119 | - watch
120 | - apiGroups:
121 | - coordination.k8s.io
122 | resources:
123 | - leases
124 | verbs:
125 | - list
126 | - watch
127 | ---
128 | apiVersion: rbac.authorization.k8s.io/v1
129 | kind: ClusterRoleBinding
130 | metadata:
131 | labels:
132 | app.kubernetes.io/name: kube-state-metrics
133 | app.kubernetes.io/version: 1.9.7
134 | name: kube-state-metrics
135 | roleRef:
136 | apiGroup: rbac.authorization.k8s.io
137 | kind: ClusterRole
138 | name: kube-state-metrics
139 | subjects:
140 | - kind: ServiceAccount
141 | name: kube-state-metrics
142 | namespace: kube-system
143 |
--------------------------------------------------------------------------------
/prometheus/config/rules/nodes.yaml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: nodes.rules
3 | rules:
4 | - record: instance:node_cpu:rate:sum
5 | expr: sum(rate(node_cpu{mode!="idle",mode!="iowait",mode!~"^(?:guest.*)$"}[3m])) BY (instance)
6 | - record: instance:node_filesystem_usage:sum
7 | expr: sum((node_filesystem_size{mountpoint="/"} - node_filesystem_free{mountpoint="/"})) BY (instance)
8 | - record: instance:node_network_receive_bytes:rate:sum
9 | expr: sum(rate(node_network_receive_bytes[3m])) BY (instance)
10 | - record: instance:node_network_transmit_bytes:rate:sum
11 | expr: sum(rate(node_network_transmit_bytes[3m])) BY (instance)
12 | - record: instance:node_cpu:ratio
13 | expr: sum(rate(node_cpu{mode!="idle"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu) BY (instance, cpu)) BY (instance)
14 | - record: cluster:node_cpu:sum_rate5m
15 | expr: sum(rate(node_cpu{mode!="idle"}[5m]))
16 | - record: cluster:node_cpu:ratio
17 |     expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu) BY (instance, cpu))
18 | - alert: NodeDiskRunningFull
19 | expr: predict_linear(node_filesystem_free[6h], 3600 * 24) < 0
20 | for: 30m
21 | labels:
22 | severity: Warning
23 | annotations:
24 | description: device {{$labels.device}} on node {{$labels.instance}} is running full within the next 24 hours (mounted at {{$labels.mountpoint}})
25 | - alert: KubeNodeNotReady
26 | annotations:
27 | description: '{{ $labels.node }} has been unready for more than 5 minutes.'
28 | expr: |
29 | kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0
30 | for: 5m
31 | labels:
32 | severity: Warning
33 | - alert: FdExhaustionClose
34 | expr: predict_linear(fd_utilization[3m], 3600) > 1
35 | for: 3m
36 | labels:
37 | severity: High
38 | annotations:
39 |       description: '{{ $labels.job }}: {{ $labels.namespace }}/{{ $labels.pod }} instance will exhaust its file/socket descriptors within the next hour'
40 | summary: file descriptors soon exhausted
41 | - alert: NodeMemoryUsage
42 | expr: (node_memory_MemTotal - (node_memory_MemFree+node_memory_Buffers+node_memory_Cached )) / node_memory_MemTotal * 100 > 80
43 | for: 1m
44 | labels:
45 | severity: Warning
46 | annotations:
47 | summary: "{{$labels.instance}}: High Memory usage detected"
48 | description: "{{$labels.instance}}: Memory usage is above 80% (current value is:{{ $value }})"
49 | - alert: NodeCPUUsage
50 | expr: (100 - (avg by (instance) (irate(node_cpu{mode="idle"}[5m])) * 100)) > 60
51 | for: 1m
52 | labels:
53 | severity: High
54 | annotations:
55 | summary: "{{$labels.instance}}: High CPU usage detected."
56 | description: "{{$labels.instance}}: CPU usage is above 60% (current value is:{{ $value }})."
57 |
--------------------------------------------------------------------------------
/elk [k8s kube]/问题处理/kibana启动报错-Request Timeout after 30000ms.md:
--------------------------------------------------------------------------------
1 | # kibana startup error - Request Timeout after 30000ms
2 |
3 | References
4 |
5 | 1. [kibana报错Request Timeout after 30000ms故障解决](https://blog.csdn.net/qq_40907977/article/details/104499178)
6 |     - kibana request timeout caused by es resources being set too small
7 | 2. [elasticsearch 7.2.0安装](https://blog.csdn.net/u011311291/article/details/100041912)
8 | - Elasticsearch cluster did not respond with license information.
9 | 3. [解决es集群启动完成后报master_not_discovered_exception](https://blog.csdn.net/qq_20042935/article/details/105274464)
10 |     - Only one es node was started, so by rights it should be the master node
11 |
12 | After the cluster setup finished, kibana kept falling into `CrashLoopBackOff` and restarting; its logs showed the following.
13 |
14 | ```console
15 | $ k logs -f kibana-XXXXXXXXX
16 | {"type":"log","@timestamp":"2020-06-21T11:08:02Z","tags":["fatal","root"],"pid":1,"message":"{ Error: Request Timeout after 30000ms\n at /usr/share/kibana/node_modules/elasticsearch/src/lib/transport.js:362:15\n at Timeout. (/usr/share/kibana/node_modul
17 | es/elasticsearch/src/lib/transport.js:391:7)\n at ontimeout (timers.js:436:11)\n at tryOnTimeout (timers.js:300:5)\n at listOnTimeout (timers.js:263:5)\n at Timer.processTimers (timers.js:223:10)\n status: undefined,\n displayName: 'RequestTimeout',\n mes
18 | sage: 'Request Timeout after 30000ms',\n body: undefined,\n isBoom: true,\n isServer: true,\n data: null,\n output:\n { statusCode: 503,\n payload:\n { statusCode: 503,\n error: 'Service Unavailable',\n message: 'Request Timeout after 30000
19 | ms' },\n headers: {} },\n reformat: [Function],\n [Symbol(SavedObjectsClientErrorCode)]: 'SavedObjectsClient/esUnavailable' }"}
20 |
21 | FATAL Error: Request Timeout after 30000ms
22 |
23 | ```
24 |
25 | This log is rather generic and does not pinpoint the problem. Reference 1, for example, says the es resources may be set too small, making queries slow, but changing that did not help.
26 |
27 | Visiting kibana through the web UI showed a blank page with the following message.
28 |
29 | ```
30 | Kibana server is not ready yet
31 | ```
32 |
33 | Going back through the kibana logs again, I found many repeated messages like the following.
34 |
35 | ```
36 | {"type":"log","@timestamp":"2020-06-21T11:39:25Z","tags":["status","plugin:rollup@7.2.0","error"],"pid":1,"state":"red","message":"Status changed from yellow to red - [data] Elasticsearch cluster did not respond with license information.","prevState":"yellow","prevMsg":
37 | "Waiting for Elasticsearch"}
38 | {"type":"log","@timestamp":"2020-06-21T11:39:25Z","tags":["status","plugin:remote_clusters@7.2.0","error"],"pid":1,"state":"red","message":"Status changed from yellow to red - [data] Elasticsearch cluster did not respond with license information.","prevState":"yellow","
39 | prevMsg":"Waiting for Elasticsearch"}
40 | ```
41 |
42 | Reference 2 says to uncomment `node.name` (so this has nothing to do with licenses at all), which made me think about the relationship between `node.name` and `cluster.initial_master_nodes`. I had assumed the members of the latter (an array) should be the nodes' communication addresses, so I had put the es Service there.
43 |
44 | ```yaml
45 | node.name: es-01
46 | ## the array members here are Service names
47 | cluster.initial_master_nodes: ["es"]
48 | ```
49 |
50 | So I found a test container and sent a request with curl, and it really did fail.
51 |
52 | ```console
53 | ## es is the Service name
54 | $ curl es:9200/_cat/health
55 | {"error":{"root_cause":[{"type":"master_not_discovered_exception","reason":null}],"type":"master_not_discovered_exception","reason":null},"status":503}
56 | ```
57 |
58 | This error message is much more precise.
59 |
60 | So I changed the members of `cluster.initial_master_nodes` to the value of `node.name` (see the corrected snippet below), restarted es, and the API became reachable; kibana came back to normal as well.
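
For the record, the corrected single-node config; the key point is that `cluster.initial_master_nodes` lists node names, not Service names.

```yaml
node.name: es-01
## members must be node.name values, not Service names
cluster.initial_master_nodes: ["es-01"]
```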
61 |
62 | ```console
63 | $ curl es:9200/_cat/health
64 | 1592740248 11:50:48 elasticsearch green 1 1 2 2 0 0 0 0 - 100.0%
65 | ```
66 |
--------------------------------------------------------------------------------
/elk [k8s kube]/06.es-cluster [sts statefulset 5.x 5.5.0]/02.es-data.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: es-data-svc
6 | labels:
7 | app: es-data-svc
8 | spec:
9 | ports:
10 |     ## Port that serves client requests
11 | - port: 9200
12 | name: client
13 | targetPort: 9200
14 |     ## Port for communication with other nodes in the cluster
15 | - port: 9300
16 | name: node
17 | targetPort: 9300
18 | selector:
19 |     ## Note: the Service selector must point at
20 |     ## Deployment -> spec -> template -> labels,
21 |     ## not Deployment -> metadata -> labels.
22 |     ## In other words, both the Service and the Deployment ultimately target Pod resources.
23 | app: es-data
24 |   ## This is the key to a headless Service
25 | clusterIP: None
26 |
27 | ---
28 | apiVersion: apps/v1
29 | kind: StatefulSet
30 | metadata:
31 |   ## The Pods created by this StatefulSet are also named es-data-xxx
32 | name: es-data
33 | labels:
34 | app: es-data
35 | spec:
36 |   ## Name of the headless Service
37 | serviceName: es-data-svc
38 |   ## The replica count here is fixed, matching what the config file expects.
39 | replicas: 2
40 | selector:
41 | matchLabels:
42 |       ## This label matches template -> metadata -> labels below,
43 |       ## expressing the ownership relationship.
44 | app: es-data
45 | template:
46 | metadata:
47 | labels:
48 | app: es-data
49 | spec:
50 | initContainers:
51 |       ## elasticsearch writes to its config directory, while a configmap-mounted directory is read-only,
52 |       ## which can make es exit abnormally; the init container here, though, is only used to tune a kernel parameter (see below).
53 | - name: config
54 | image: busybox
55 |         ## Note: this modifies a sysctl kernel parameter inside the container with privileged rights, which also affects the host itself.
56 | command:
57 | - sysctl
58 | - -w
59 | - vm.max_map_count=655300
60 | securityContext:
61 | privileged: true
62 | containers:
63 | - name: es-data
64 | image: elasticsearch:5.5.0
65 | imagePullPolicy: IfNotPresent
66 | env:
67 |         ## CLUSTER_NAME: use the name of the sts
68 | - name: CLUSTER_NAME
69 | value: es
70 | - name: MASTER_NAME
71 | value: es-master
72 |         ## MASTER_SVC_NAME must match the headless Service name.
73 | - name: MASTER_SVC_NAME
74 | value: es-master-svc
75 |         ## POD_NAME is used as the es node's node.name,
76 | - name: POD_NAME
77 | valueFrom:
78 | fieldRef:
79 | fieldPath: metadata.name
80 |         ## JVM options live in a separate config file, jvm.options.
81 | - name: ES_JAVA_OPTS
82 | value: "-Xmx512m -Xms512m"
83 | - name: IS_MASTER
84 | value: "false"
85 | - name: IS_DATA
86 | value: "true"
87 |         ## 5.x does not ship the xpack plugin by default, so no password can be set.
88 |         ##
89 |         ## Every key in the es config file should be settable directly via env, e.g.
90 |         ## name: xpack.security.enabled
91 |         ## value: 'true'
92 |         ## However, I could not find the config-file key names for the username/password pair anywhere;
93 |         ## every article sets them through environment variables.
94 |         ## It seems these environment variables (including the java opts above) are substituted inside es
95 |         ## via ${ES_XXX}, i.e. they take rather high priority.
96 | ## - name: ELASTIC_USERNAME
97 | ## value: "elastic"
98 | ## - name: ELASTIC_PASSWORD
99 | ## value: "123456"
100 | volumeMounts:
101 | - name: es-config-vol
102 | mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
103 |           ## subPath means the configMap does not shadow the whole directory, only the single file elasticsearch.yml.
104 | subPath: elasticsearch.yml
105 | volumes:
106 | - name: es-config-vol
107 | configMap:
108 | name: es-config
109 |
--------------------------------------------------------------------------------