├── 09-parse-file
├── README.md
├── 02-ini
│ ├── 01-ini.sh
│ └── README.md
└── 01-yaml
│ ├── README.md
│ └── 01-yaml.sh
├── 01-installation-scripts
├── 36-flexgw
│ └── README.md
├── 03-Jumpserver
│ └── README.md
├── 02-Zabbix
│ └── 01-by-ansible
│ │ ├── roles
│ │ └── zabbix_agent
│ │ │ ├── vars
│ │ │ └── main.yml
│ │ │ ├── handlers
│ │ │ └── main.yml
│ │ │ └── tasks
│ │ │ └── main.yml
│ │ ├── zabbix_agent.yml
│ │ ├── README.md
│ │ └── hosts
├── 37-wireguard
│ └── README.md
├── 25-RocketMQ
│ ├── data
│ │ └── users.properties
│ ├── docker-compose_rocketmq-dashbord.yaml
│ └── readme.md
├── 35-consul
│ ├── README.md
│ └── k8s-yaml
│ │ ├── README.md
│ │ ├── storageclass-nfs-dev-01.yaml
│ │ ├── consul-client.yaml
│ │ └── provisioner-nfs-dev-01.yaml
├── 06-Gitlab
│ ├── README.md
│ ├── docker-compose.yaml
│ └── 01-install-gitlab-bydocker.sh
├── 19-rsyncd
│ └── README.md
├── 09-Redis
│ └── docker-compose.yaml
├── 16-kafka
│ ├── Eagle for Apache Kafka
│ │ ├── docker-compose.yaml
│ │ └── 01-install-Kafka-Eagle.sh
│ ├── 03-kafka可视化工具.md
│ ├── kafka-manager
│ │ ├── start-kafka-manager.sh
│ │ ├── docker-compose.yml
│ │ ├── Dockerfile
│ │ └── README.md
│ ├── 02-kafka集群配置.md
│ ├── KnowStreaming
│ │ └── docker-compose.yaml
│ └── kaf
│ │ └── install-kaf.sh
├── 29-code-push-server
│ ├── code-push-server
│ │ ├── Dockerfile
│ │ └── process.json
│ ├── docker-compose.yml
│ ├── config.js
│ └── README.md
├── 01-MySQL
│ ├── 03-备份、恢复 参考命令.md
│ ├── docker-compose.yaml
│ └── 02-mysql GTID主从复制.md
├── 28-Node.js
│ ├── README.md
│ ├── 02-install-nvm.sh
│ └── 01-install-Node.js.sh
├── 26-Nexus
│ └── docker-compose.yaml
├── 12-MongoDB
│ └── docker-compose.yaml
├── 43-goenv
│ ├── README.md
│ └── 01-install-goenv.sh
├── 24-PostgreSQL
│ ├── 03-backup-postgresql.sh
│ ├── docker-compose.yaml
│ ├── 04-restore-postgresql.sh
│ └── 02-pg流复制配置.md
├── 05-Jenkins
│ ├── docker-compose.yaml
│ └── 01-install-jenkins-bydocker.sh
├── 33-flink
│ └── docker-compose.yaml
├── 32-nacos
│ └── docker-compose.yaml
├── 20-nfs
│ ├── README.md
│ └── 01-start-nfs-service.sh
├── 27-yapi
│ └── docker-compose.yaml
├── 14-zookeeper
│ ├── 02-zookeeper集群配置.md
│ ├── 03-install-zkui.sh
│ └── k8s-yaml
│ │ └── zookeeper.yaml
├── 42-sonarqube
│ ├── docker-compose.yaml
│ └── README.md
├── 17-rabbitmq
│ └── 02-rabbitmq集群配置.md
├── 04-Docker
│ └── 02-show-container-ip.sh
├── 07-Nginx-Tengine
│ ├── README.md
│ └── 02-nginx 国内外流量分流.conf.sample
├── 18-sftpgo
│ └── README.md
├── 31-clickhouse
│ ├── 02-clickhouse集群配置.md
│ └── 01-install-clickhouse.sh
├── 40-ffmpeg
│ └── 01-install-ffmpeg.sh
├── 41-pyenv
│ └── 01-install-pyenv.sh
├── 38-sqlite3
│ └── 01-update-sqlite3.sh
├── 10-GoAccess
│ └── 01-install-goaccess.sh
└── 44-shc
│ └── 01-install-shc.sh
├── 08-ssl-tools
├── 01-ssl-gen
│ ├── .gitignore
│ ├── docs
│ │ ├── output.png
│ │ ├── chrome-certs-ok.png
│ │ ├── chrome-certs-details.png
│ │ ├── chrome-settings-certs.png
│ │ ├── chrome-settings-certs-imported.png
│ │ └── chrome-trust.md
│ ├── flush.sh
│ ├── gen.root.sh
│ ├── README.md
│ ├── gen.cert.sh
│ └── ca.cnf
└── 02-ssl-check
│ └── README.md
├── 03-Dockerfile
├── 01-nacos
│ ├── cluster.conf
│ ├── README.md
│ ├── Dockerfile
│ └── entrypoint.sh
├── 02-feely-sys
│ ├── setting.conf
│ ├── Dockerfile
│ └── entrypoint.sh
├── 05-java
│ └── Dockerfile
├── 03-centos
│ └── Dockerfile
└── 04-rocksdb
│ ├── verify.sh
│ └── Dockerfile
├── 07-java-tools
└── README.md
├── 10-pve-vmware-tools
└── 01-pve-to-vmware
│ ├── README.md
│ └── pve_to_vmware.sh
├── 04-disk-tools
├── 03-delete-empty-dir.sh
├── 01-Create-Swap
│ └── 01-create-swap.sh
├── 02-Create-LVM
│ ├── 01-create-lvm-by-fdisk.sh
│ ├── 03-create-lvm-by-gdisk.sh
│ └── 02-create-lvm-by-parted.sh
└── 04-wipe-data-disk.sh
├── 05-system-tools
├── 01-check-package-manager.sh
├── 07-show-file-create-time.sh
├── 04-tcp-connection-state-counter.sh
└── 06-update-kernel.sh
├── 06-Antivirus-tools
└── 01-kill-miner-proc.sh
├── 02-elasticsearch-tools
├── 01-clean-single-es-index-by-date.sh
└── 02-clean-date-format-es-index-by-date.sh
├── README.md
└── 11-k8s-tools
└── 01-create-k8s-admin-user.sh
/09-parse-file/README.md:
--------------------------------------------------------------------------------
1 | ### 脚本作用
2 | 用纯 `shell` 的方式读取配置文件,例如 `.yml`、`.ini` 等
--------------------------------------------------------------------------------
/01-installation-scripts/36-flexgw/README.md:
--------------------------------------------------------------------------------
1 | 建了个项目,请参考 [本项目](https://github.com/zhegeshijiehuiyouai/FlexGW)
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea/
2 | /out/
3 | /deploy-passport.sh
4 | /cert.all.sh
5 | /*.crt
6 |
--------------------------------------------------------------------------------
/01-installation-scripts/03-Jumpserver/README.md:
--------------------------------------------------------------------------------
1 | ## 2022.09.22更新
2 | 官方部署方案:https://github.com/jumpserver/installer
--------------------------------------------------------------------------------
/01-installation-scripts/02-Zabbix/01-by-ansible/roles/zabbix_agent/vars/main.yml:
--------------------------------------------------------------------------------
1 | #zabbix-server服务器ip
2 | Myserver: 47.103.100.200
3 |
--------------------------------------------------------------------------------
/01-installation-scripts/02-Zabbix/01-by-ansible/zabbix_agent.yml:
--------------------------------------------------------------------------------
1 | #-hosts: all
2 | - hosts: 192.168.1.81
3 | roles:
4 | - zabbix_agent
5 |
--------------------------------------------------------------------------------
/03-Dockerfile/01-nacos/cluster.conf:
--------------------------------------------------------------------------------
1 | #The format is like this ==> ip:port
2 | 192.168.1.1:8848
3 | 192.168.1.2:8848
4 | 192.168.1.3:8848
5 |
--------------------------------------------------------------------------------
/07-java-tools/README.md:
--------------------------------------------------------------------------------
1 | ***2021年5月14日,新增[https://github.com/oldratlee/useful-scripts](https://github.com/oldratlee/useful-scripts)项目中的部分脚本***
--------------------------------------------------------------------------------
/01-installation-scripts/37-wireguard/README.md:
--------------------------------------------------------------------------------
1 | Fork自[https://github.com/teddysun/across](https://github.com/teddysun/across),修改了回显样式,汉化了回显内容。
2 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/docs/output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhegeshijiehuiyouai/RoadToDevOps/HEAD/08-ssl-tools/01-ssl-gen/docs/output.png
--------------------------------------------------------------------------------
/01-installation-scripts/02-Zabbix/01-by-ansible/roles/zabbix_agent/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart zabbix-agent
2 | service: name=zabbix-agent state=restarted
3 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/docs/chrome-certs-ok.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhegeshijiehuiyouai/RoadToDevOps/HEAD/08-ssl-tools/01-ssl-gen/docs/chrome-certs-ok.png
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/docs/chrome-certs-details.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhegeshijiehuiyouai/RoadToDevOps/HEAD/08-ssl-tools/01-ssl-gen/docs/chrome-certs-details.png
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/docs/chrome-settings-certs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhegeshijiehuiyouai/RoadToDevOps/HEAD/08-ssl-tools/01-ssl-gen/docs/chrome-settings-certs.png
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/docs/chrome-settings-certs-imported.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhegeshijiehuiyouai/RoadToDevOps/HEAD/08-ssl-tools/01-ssl-gen/docs/chrome-settings-certs-imported.png
--------------------------------------------------------------------------------
/01-installation-scripts/25-RocketMQ/data/users.properties:
--------------------------------------------------------------------------------
1 | # 该文件支持热修改,即添加和修改用户时,不需要重新启动dashboard
2 | # 格式, 每行定义一个用户, username=password[,N] #N是可选项,可以为0 (普通用户); 1 (管理员)
3 | admin=dashboard,1
4 | user1=user111,0
--------------------------------------------------------------------------------
/03-Dockerfile/02-feely-sys/setting.conf:
--------------------------------------------------------------------------------
1 | #连接配置
2 | dubbo=10.0.17.108:2181
3 | nacos=192.168.1.2:8848
4 | namespace=4b71d881-f7jd-4e88-acc9-f1cbe257ddb1
5 |
6 | #JAVA参数
7 | Xmx=1024m
8 | Xms=1024m
9 | Xmn=512m
10 |
--------------------------------------------------------------------------------
/08-ssl-tools/02-ssl-check/README.md:
--------------------------------------------------------------------------------
1 | ## 来源
2 | 源码来自 [https://github.com/ydzydzydz/ssl-check](https://github.com/ydzydzydz/ssl-check)
3 | ## 修改
4 | - 将对域名的检测放到后台执行,提高并发
5 | - 添加证书过期判断
6 | - 添加详细输出/精简输出切换
7 | - 可自定义域名端口
--------------------------------------------------------------------------------
/01-installation-scripts/35-consul/README.md:
--------------------------------------------------------------------------------
1 | ## 说明
2 | 本脚本支持部署`server`端和`client`端的`consul`
3 |
4 | 在部署`server`端时,请将第一个`server`节点设置为`leader`(根据脚本提示),其余设为`follower`
5 | 在部署`client`端时,需要指定一个`server`的`ip`地址,可以是`server`集群中的任意一个节点`ip`
--------------------------------------------------------------------------------
/01-installation-scripts/06-Gitlab/README.md:
--------------------------------------------------------------------------------
1 | ## 安装
2 | `gitlab-ce` 有官方的安装脚本:[https://packages.gitlab.com/gitlab/gitlab-ce?page=1](https://packages.gitlab.com/gitlab/gitlab-ce?page=1)
3 | 都是通过仓库安装的,上面的脚本配置好仓库,直接用对应系统的包管理器安装就行了。也可以在上面的页面直接下载包来安装。
--------------------------------------------------------------------------------
/01-installation-scripts/19-rsyncd/README.md:
--------------------------------------------------------------------------------
1 | ### 数据同步的方向
2 | 源服务器 --> 目标服务器
3 | - `01-start-rsyncd-service.sh` 在`目标服务器`上执行,创建rsyncd服务端。
4 | - `02-start-inotifywait.sh` 在`源服务器`上执行,监控文件状态实时同步。
5 |
6 | ---
7 | **rsync命令只会同步一次(即便是在有rsyncd的情况下)**,需要实时同步的话必须配置inotify。
--------------------------------------------------------------------------------
/03-Dockerfile/01-nacos/README.md:
--------------------------------------------------------------------------------
1 | ### 注:tgz包需自己下载
2 |
3 | 默认是单机模式,如果要启用集群模式,使用-e传递环境变量,如下
4 | ```shell
5 | docker run -d --name nacos-server --hostname nacos-server -p 8082:8848 -e MODE=cluster -v /etc/nacos/conf/cluster.conf:/usr/local/nacos/conf/cluster.conf nacos-server:1.1.3
6 | ```
--------------------------------------------------------------------------------
/03-Dockerfile/01-nacos/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zhegeshijiehuiyouai/java:1.8
2 |
3 | ENV MODE="standalone"
4 |
5 | ADD nacos-server-1.1.3.tar.gz /usr/local/
6 | ADD cluster.conf /usr/local/nacos/conf/
7 | ADD entrypoint.sh /
8 |
9 | EXPOSE 8848
10 |
11 | ENTRYPOINT ["/entrypoint.sh"]
12 |
--------------------------------------------------------------------------------
/09-parse-file/02-ini/01-ini.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # readINI [配置文件路径+名称] [节点名] [键值]
4 | function readINI() {
5 | INIFILE=$1
6 | SECTION=$2
7 | ITEM=$3
8 |
9 | _readIni=$(awk -F '=' '/\['$SECTION'\]/{a=1}a==1&&$1~/'$ITEM'/{print $2;exit}' $INIFILE)
10 | echo ${_readIni}
11 | }
--------------------------------------------------------------------------------
/10-pve-vmware-tools/01-pve-to-vmware/README.md:
--------------------------------------------------------------------------------
1 | 本脚本为 pve 迁移 vmware 的辅助脚本,完整迁移过程请查看博客 [https://blog.csdn.net/CHEndorid/article/details/124192788](https://blog.csdn.net/CHEndorid/article/details/124192788)
2 |
3 | 使用此脚本,需要先完成博客中的第2步,脚本实现了第1、3、4步,这些步骤较为繁琐且重复度高。
4 |
5 | 后面的步骤需要在控制台上操作,或者结合控制台操作,所以还是手动执行的好。
--------------------------------------------------------------------------------
/01-installation-scripts/09-Redis/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | redis:
4 | image: redis:6.2.6-alpine3.15
5 | container_name: redis
6 | ports:
7 | - 6379:6379
8 | command: redis-server --requirepass yourpassword
9 | volumes:
10 | - ./redis_data:/data
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/Eagle for Apache Kafka/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | # 默认账号/密码:admin/123456
2 | version: '3.1'
3 | services:
4 | eagle:
5 | container_name: eagle
6 | image: nickzurich/efak:3.0.1
7 | environment:
8 | EFAK_CLUSTER_ZK_LIST: 10.20.2.71:2181/kafka
9 | ports:
10 | - 8048:8048
--------------------------------------------------------------------------------
/03-Dockerfile/01-nacos/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [[ "${MODE}" == "standalone" ]]; then
3 | /usr/bin/sh /usr/local/nacos/bin/startup.sh -m standalone
4 | elif [[ "${MODE}" == "cluster" ]];then
5 | /usr/bin/sh /usr/local/nacos/bin/startup.sh
6 | else
7 | echo "wrong MODE"
8 | fi
9 | tail -f /etc/hosts
10 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/docs/chrome-trust.md:
--------------------------------------------------------------------------------
1 | # 如何让 Chrome 信任证书
2 |
3 | 1. 打开设置页面 `chrome://settings/certificates`,切换到 `授权中心`
4 | 
5 | 1. 然后导入 `out/root.crt`
6 | 
7 | 1. 完成
8 | 
9 | 
10 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/flush.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | cd "$(dirname "${BASH_SOURCE[0]}")"
3 | echo 'Removing dir out'
4 | rm -rf out
5 | echo 'Creating output structure'
6 | mkdir out
7 | cd out
8 | mkdir newcerts
9 | touch index.txt
10 | echo "unique_subject = no" > index.txt.attr
11 | echo 1000 > serial
12 | echo 'Done'
13 |
--------------------------------------------------------------------------------
/01-installation-scripts/29-code-push-server/code-push-server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:8.11.4-alpine
2 |
3 | RUN npm config set registry https://registry.npmmirror.com/ \
4 | && npm i -g code-push-server@0.5.2 pm2@latest --no-optional
5 |
6 | COPY ./process.json /process.json
7 |
8 | EXPOSE 3000
9 |
10 | CMD ["pm2-docker", "start", "/process.json"]
11 |
--------------------------------------------------------------------------------
/01-installation-scripts/01-MySQL/03-备份、恢复 参考命令.md:
--------------------------------------------------------------------------------
1 | ## 单库全量备份
2 | ```bash
3 | mysqldump -h主机地址 -P端口 -u用户名 -p密码 \
4 | --set-gtid-purged=OFF \
5 | --triggers \
6 | --routines \
7 | --events \
8 | --single-transaction \
9 | 库名 > 库名.sql
10 | ```
11 |
12 | ## 恢复
13 | ```bash
14 | # 需要先创建数据库【库名】
15 | mysql -h主机地址 -P端口 -u用户名 -p密码 库名 < 库名.sql
16 | ```
--------------------------------------------------------------------------------
/09-parse-file/02-ini/README.md:
--------------------------------------------------------------------------------
1 | ### 用法
2 | ```
3 | source ini.sh
4 | readINI [配置文件路径+名称] [节点名] [键值]
5 | ```
6 |
7 | ### 示例
8 | 待读取配置
9 | ```
10 | cat file.ini
11 | [IP]
12 | ip = jb51.net
13 | [MAILLIST]
14 | mail = admin@jb51.net
15 | ```
16 |
17 | 读取示例
18 | ```
19 | source ini.sh
20 | readINI file.ini IP ip
21 | # 输出
22 | jb51.net
23 | ```
--------------------------------------------------------------------------------
/01-installation-scripts/28-Node.js/README.md:
--------------------------------------------------------------------------------
1 | ## 20240705更新:
2 | `01-install-Node.js.sh`脚本停更,**推荐**用`02-install-nvm.sh`
3 | `nvm`安装、管理多版本`nodejs`,nvm github地址 [https://github.com/nvm-sh/nvm](https://github.com/nvm-sh/nvm)
4 |
5 | ---
6 | ## 单版本部署:
7 | 脚本使用`tar.xz`包部署,在较老的操作系统上安装`新版nodejs`的时候,会出现依赖没有的情况,可以使用下面链接中的脚本,通过`rpm`方式安装。
8 | [https://rpm.nodesource.com/](https://rpm.nodesource.com/)
--------------------------------------------------------------------------------
/01-installation-scripts/35-consul/k8s-yaml/README.md:
--------------------------------------------------------------------------------
1 | ## 说明
2 | consul server 集群3节点,类型为`statefulset`
3 | consul client 类型为`daemonset`
4 |
5 | **目录中的文件需要根据自身情况调整部分值**
6 | ### 执行顺序
7 | 1. 创建 `storageclass` (不使用的话跳过)
8 | storageclass-nfs-dev-01.yaml
9 | provisioner-nfs-dev-01.yaml
10 | 2. 创建 `consul server`
11 | consul-server.yaml
12 | 3. 创建 `consul client`
13 | consul-client.yaml
--------------------------------------------------------------------------------
/09-parse-file/01-yaml/README.md:
--------------------------------------------------------------------------------
1 | 脚本拷贝自 [https://github.com/jasperes/bash-yaml](https://github.com/jasperes/bash-yaml)
2 |
3 | ### 用法
4 | 拷贝 `yaml.sh` 并导入你的脚本: `source yaml.sh`
5 |
6 | 脚本提供了两个方法:
7 |
8 | - **parse_yaml**: 读取yaml文件并直接输出结果。
9 | - **create_variables**: 读取yaml文件,基于yaml文件的内容创建变量。
10 |
11 | ### 已知问题
12 | `Null` 必须用 `"attr: "` 来表示。
13 | 键不能有 `.`,例如:`com.baidu: true`
--------------------------------------------------------------------------------
/01-installation-scripts/29-code-push-server/code-push-server/process.json:
--------------------------------------------------------------------------------
1 | {
2 | "apps" : [
3 | {
4 | "name" : "code-push-server",
5 | "max_memory_restart" : "500M",
6 | "script" : "code-push-server",
7 | "instances" : "max", //开启实例数量,max为cpu核数
8 | "exec_mode" : "cluster", //集群模式,最大提升网站并发
9 | }
10 | ]
11 | }
--------------------------------------------------------------------------------
/01-installation-scripts/26-Nexus/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | # 初始密码:cat /data/nexus/data/admin.password
2 | version: '3.1'
3 | services:
4 | nexus:
5 | image: sonatype/nexus3:3.30.1
6 | container_name: nexus
7 | restart: always
8 | environment:
9 | - TZ=Asia/Shanghai
10 | ports:
11 | - 59081:8081
12 | volumes:
13 | - ./data:/nexus-data # 宿主机上需要对目录授权 chown -R 200 /data/nexus/data
--------------------------------------------------------------------------------
/01-installation-scripts/12-MongoDB/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | mongodb:
4 | container_name: mongodb
5 | image: mongo:4.2.6
6 | restart: always
7 | volumes:
8 | - ./data/db:/data/db
9 | - ./data/log:/var/log/mongodb
10 | ports:
11 | - 27017:27017
12 | environment:
13 | MONGO_INITDB_ROOT_USERNAME: admin
14 | MONGO_INITDB_ROOT_PASSWORD: yourpassword
--------------------------------------------------------------------------------
/01-installation-scripts/35-consul/k8s-yaml/storageclass-nfs-dev-01.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | annotations:
5 | k8s.kuboard.cn/storageType: 第一个storageclass,以dev-01的/data/nfsdata目录为存储点
6 | name: dev-01
7 | mountOptions: []
8 | parameters:
9 | archiveOnDelete: 'false'
10 | provisioner: nfs-dev-01
11 | reclaimPolicy: Retain
12 | volumeBindingMode: WaitForFirstConsumer
13 |
14 |
--------------------------------------------------------------------------------
/03-Dockerfile/05-java/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM anapsix/alpine-java:8u202b08_jdk
2 |
3 | RUN echo 'http://mirrors.ustc.edu.cn/alpine/v3.5/main' > /etc/apk/repositories \
4 | && echo 'http://mirrors.ustc.edu.cn/alpine/v3.5/community' >>/etc/apk/repositories \
5 | && apk update && apk add tzdata \
6 | && ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
7 | && echo "Asia/Shanghai" > /etc/timezone
8 |
9 | CMD ["/bin/bash"]
10 |
11 |
--------------------------------------------------------------------------------
/01-installation-scripts/43-goenv/README.md:
--------------------------------------------------------------------------------
1 | 安装 `goenv` 后,如果由于网络原因无法通过命令 `goenv install 1.22.10` 下载 `go` 二进制包的话,可以离线安装。具体步骤为:
2 | 1. 到镜像站点 [https://golang.google.cn/dl/](https://golang.google.cn/dl/) 下载 `linux-amd64.tar.gz` 后缀的压缩包。
3 | 2. 上传文件到 `${GOENV_ROOT}/versions/` 目录
4 | 3. 执行命令
5 | ```bash
6 | cd ${GOENV_ROOT}/versions
7 | tar xf go1.22.10.linux-amd64.tar.gz
8 | mv go 1.22.10
9 | goenv versions # 此时就可以看到1.22.10这个版本了
10 | goenv global 1.22.10 # 设置全局版本
11 | ```
--------------------------------------------------------------------------------
/04-disk-tools/03-delete-empty-dir.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # des: delete empty directories recursive
3 | deleteempty() {
4 | find ${1:-.} -mindepth 1 -maxdepth 1 -type d | while read -r dir
5 | do
6 | if [[ -z "$(find "$dir" -mindepth 1 -type f)" ]] >/dev/null
7 | then
8 | echo "$dir"
9 | rm -rf ${dir} 2>&- && echo "Empty, Deleted!" || echo "Delete error"
10 | fi
11 | if [ -d ${dir} ]
12 | then
13 | deleteempty "$dir"
14 | fi
15 | done
16 | }
17 | deleteempty
--------------------------------------------------------------------------------
/01-installation-scripts/24-PostgreSQL/03-backup-postgresql.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source /etc/profile
3 |
4 | PG_HOST=172.30.1.2
5 | PG_PORT=5432
6 | PG_USER=replica
7 | PG_PASSWORD=noPassw0rd
8 | BACK_DIR=/data/backup/postgresql
9 | SUFFIX=$(date +%F)
10 |
11 | [ -d ${BACK_DIR}/${SUFFIX} ] || mkdir -p ${BACK_DIR}/${SUFFIX}
12 | cd ${BACK_DIR}
13 |
14 | PGPASSWORD="${PG_PASSWORD}" pg_basebackup -h ${PG_HOST} -U ${PG_USER} -D ${BACK_DIR}/${SUFFIX} -Ft -z -Z5 -R -P -p ${PG_PORT}
15 |
16 | find ./ -mtime +3 | xargs rm -rf
--------------------------------------------------------------------------------
/05-system-tools/01-check-package-manager.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #判断包管理工具,可作为其他脚本的内部函数
3 |
4 | systemPackage=""
5 | if cat /etc/issue | grep -q -E -i "ubuntu|debian";then
6 | systemPackage='apt'
7 | elif cat /etc/issue | grep -q -E -i "centos|red hat|redhat";then
8 | systemPackage='yum'
9 | elif cat /proc/version | grep -q -E -i "ubuntu|debian";then
10 | systemPackage='apt'
11 | elif cat /proc/version | grep -q -E -i "centos|red hat|redhat";then
12 | systemPackage='yum'
13 | else
14 | echo "unknown"
15 | fi
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/03-kafka可视化工具.md:
--------------------------------------------------------------------------------
1 | - 服务端工具
2 | - kafka-console-ui,推荐,GitHub地址:[https://github.com/xxd763795151/kafka-console-ui](https://github.com/xxd763795151/kafka-console-ui)
3 | - kafka egale,较重,且感觉界面较乱(虽然示例中截图漂亮,但部署出来看着乱)
4 | - CMAK,对`jdk`,`zookeeper`版本有要求
5 | - kafka-manager,CMAK前身
6 | - KnowStreaming,较重,需要`kafka`开启`JMX_PORT`,报错时负载较高
7 | - 客户端工具
8 | - 名称:Kafka Tool
9 | - 官网:https://www.kafkatool.com/download.html
10 |
11 | - 命令行工具
12 | - kaf,github地址:https://github.com/birdayz/kaf
--------------------------------------------------------------------------------
/01-installation-scripts/01-MySQL/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | mysql:
4 | container_name: mysql
5 | # image: mysql:5.7.44 # 切换版本,修改image即可
6 | image: mysql:8.4.5
7 | restart: always
8 | command:
9 | --lower_case_table_names=1
10 | --character-set-server=utf8mb4
11 | --collation-server=utf8mb4_general_ci
12 | environment:
13 | TZ: Asia/Shanghai
14 | MYSQL_ROOT_PASSWORD: yourpassword
15 | ports:
16 | - "3306:3306"
17 | volumes:
18 | - ./data:/var/lib/mysql
--------------------------------------------------------------------------------
/01-installation-scripts/05-Jenkins/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3.1'
2 | services:
3 | jenkins:
4 | image: jenkinszh/jenkins-zh:2.267
5 | container_name: jenkins
6 | restart: always
7 | environment:
8 | - TZ=Asia/Shanghai
9 | ports:
10 | - 59080:8080
11 | volumes:
12 | - ./scripts:/data/script
13 | - ./data:/var/jenkins_home # 宿主机上需要对目录授权 chown -R 1000 ./data
14 | networks:
15 | - gitlab_net
16 |
17 | # 要和gitlab配置CICD,所以用gitlab的网络
18 | networks:
19 | gitlab_net:
20 | name: gitlab_net
--------------------------------------------------------------------------------
/01-installation-scripts/33-flink/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | jobmanager:
4 | image: flink:1.12.4-scala_2.12-java8
5 | container_name: flink-jobmanager
6 | ports:
7 | - "8081:8081"
8 | command: jobmanager
9 | environment:
10 | - JOB_MANAGER_RPC_ADDRESS=jobmanager
11 | taskmanager:
12 | image: flink:1.12.4-scala_2.12-java8
13 | container_name: flink-taskmanager
14 | depends_on:
15 | - jobmanager
16 | command: taskmanager
17 | environment:
18 | - JOB_MANAGER_RPC_ADDRESS=jobmanager
--------------------------------------------------------------------------------
/01-installation-scripts/25-RocketMQ/docker-compose_rocketmq-dashbord.yaml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | rocketmq-dashboard:
5 | image: apacherocketmq/rocketmq-dashboard:latest
6 | container_name: rocketmq-dashboard
7 | environment:
8 | - JAVA_OPTS=-Drocketmq.namesrv.addr=172.16.20.8:9876
9 | - ROCKETMQ_CONFIG_LOGIN_REQUIRED=true # 开启登录认证
10 | # - ROCKETMQ_PROXY_ADDR=172.16.20.8:9080 # 添加 proxy 地址
11 | ports:
12 | - "8228:8080"
13 | volumes:
14 | - ./data:/tmp/rocketmq-console/data # 挂载账号密码目录
15 | restart: always
--------------------------------------------------------------------------------
/01-installation-scripts/25-RocketMQ/readme.md:
--------------------------------------------------------------------------------
1 | # 1、rocketmq broker
2 | 脚本默认采用`2m-2s-async`模式部署,并仅修改该模式下的配置文件。如需采用其他模式,可在`2m-2s-async`模式部署好后,手动修改`/etc/systemd/system/`目录下对应的`.service`文件,并拷贝生成的`2m-2s-async`配置至对应配置文件。
3 |
4 | # 2、rocketmq-dashboard
5 | 源码编译条件苛刻,多个步骤需要科学上网,使用`02-install-rocketmq-dashboard.sh`编译,不一定成功,可选择使用`docker-compose`启动。
6 | **如果一定要使用脚本部署,需要先配置好`HTTP_PROXY`变量和`HTTPS_PROXY`变量。**
7 | ## 2.1、docker-compose启动说明
8 | 默认开启登录认证,需要将`data`目录拷贝到`docker-compose.yaml`同级目录下才能生效。账号密码信息在`data/users.properties`文件中。
9 | 如果希望关闭登录认证,那么删除`- ROCKETMQ_CONFIG_LOGIN_REQUIRED=true`这行,删除`volumes`配置。
--------------------------------------------------------------------------------
/01-installation-scripts/02-Zabbix/01-by-ansible/README.md:
--------------------------------------------------------------------------------
1 | ### ***配置文件中所有的ip、密码均为演示或测试用的假ip、假密码,实际使用中请自行修改ip、密码***
2 |
3 |
4 | 通过ansible批量部署zabbix客户端
5 | zabbix服务端是yum部署的,详见 [官网](https://www.zabbix.com/cn/download)
6 |
7 | ## 部署命令
8 | 该命令具有幂等性,可重复执行
9 | ```shell
10 | ansible-playbook -i hosts zabbix_agent.yml
11 | ```
12 |
13 | ## 一些说明
14 | 当前目录的 `ansible.cfg`,`hosts` 文件可以保留,这样就可以使用当前目录的配置了。
15 |
16 | - ansible 会优先使用执行命令当前目录的 `ansible.cfg`
17 | - `-i` 选项可以指定 `hosts` 文件
18 |
19 | 实测中,删除了 `/etc/ansible/{ansible.cfg,hosts}` 后,即使不使用 `-i` 指定 `host` ,ansible 也能正确的使用当前目录的 `hosts` 文件。
--------------------------------------------------------------------------------
/01-installation-scripts/24-PostgreSQL/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | postgresql:
4 | container_name: postgresql
5 | image: postgres:11.11-alpine
6 | restart: always
7 | shm_size: 1gb
8 | environment:
9 | PGDATA: /var/lib/postgresql/data
10 | POSTGRES_USER: yourname
11 | POSTGRES_PASSWORD: yourpassword
12 | POSTGRES_DB: yourdb
13 | TZ: Asia/Shanghai
14 | volumes:
15 | - ./data:/var/lib/postgresql/data
16 | ports:
17 | - "5432:5432"
18 | networks:
19 | - app_net
20 |
21 | networks:
22 | app_net:
23 | name: app_net
24 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/gen.root.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | cd "$(dirname "${BASH_SOURCE[0]}")"
3 |
4 | if [ -f "out/root.crt" ]; then
5 | echo Root certificate already exists.
6 | exit 1
7 | fi
8 |
9 | if [ ! -d "out" ]; then
10 | bash flush.sh
11 | fi
12 |
13 | # Generate root cert along with root key
14 | openssl req -config ca.cnf \
15 | -newkey rsa:2048 -nodes -keyout out/root.key.pem \
16 | -new -x509 -days 7300 -out out/root.crt \
17 | -subj "/C=CN/ST=Guangdong/L=Guangzhou/O=Fishdrowned/CN=Fishdrowned ROOT CA"
18 |
19 | # Generate cert key
20 | openssl genrsa -out "out/cert.key.pem" 2048
21 |
--------------------------------------------------------------------------------
/03-Dockerfile/03-centos/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:centos7.9.2009
2 |
3 | ENV LC_ALL="en_US.utf8"
4 |
5 | RUN yum install -y wget && \
6 | rm -rf /etc/yum.repos.d/* && \
7 | wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo && \
8 | wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo && \
9 | yum update -y && \
10 | yum install -y telnet net-tools vim && \
11 | yum clean all && \
12 | rm -rf /tmp/* /var/cache/yum/* && \
13 | ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
14 | echo "Asia/Shanghai" > /etc/timezone
15 |
16 | CMD ["/bin/bash"]
--------------------------------------------------------------------------------
/03-Dockerfile/02-feely-sys/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zhegeshijiehuiyouai/java:1.8
2 |
3 | #other是所有没有等号的参数,other2是为了以后扩展用的
4 | ENV NewRatio=4 NativeMemoryTracking=detail MaxDirectMemorySize=256m SurvivorRatio=8 \
5 | MetaspaceSize=256m MaxMetaspaceSize=512m MaxTenuringThreshold=15 ParallelGCThreads=8 ConcGCThreads=8 \
6 | HeapDumpPath="dump/error.dump" jar="/mnt/work/feely-sys/sys-uc.jar" log="--logging.file=/mnt/work/feely-sys/log/feely-sys.log" \
7 | other="-XX:+UseG1GC -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError" other2=""
8 |
9 | ADD sys-uc.jar /mnt/work/feely-sys/
10 | ADD setting.conf /mnt/work/feely-sys/
11 | ADD entrypoint.sh /
12 |
13 | EXPOSE 8801
14 |
15 | ENTRYPOINT ["/entrypoint.sh"]
16 |
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/kafka-manager/start-kafka-manager.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | if [[ $KM_USERNAME != '' && $KM_PASSWORD != '' ]]; then
4 | sed -i.bak '/^basicAuthentication/d' /kafka-manager-${KM_VERSION}/conf/application.conf
5 | echo 'basicAuthentication.enabled=true' >> /kafka-manager-${KM_VERSION}/conf/application.conf
6 | echo "basicAuthentication.username=${KM_USERNAME}" >> /kafka-manager-${KM_VERSION}/conf/application.conf
7 | echo "basicAuthentication.password=${KM_PASSWORD}" >> /kafka-manager-${KM_VERSION}/conf/application.conf
8 | echo 'basicAuthentication.realm="Kafka-Manager"' >> /kafka-manager-${KM_VERSION}/conf/application.conf
9 | fi
10 |
11 | exec ./bin/kafka-manager -Dconfig.file=${KM_CONFIGFILE} ${KM_ARGS} "${@}"
--------------------------------------------------------------------------------
/01-installation-scripts/32-nacos/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | nacos-server:
4 | image: nacos/nacos-server:1.4.0
5 | container_name: nacos-server
6 | environment:
7 | MODE: cluster
8 | NACOS_SERVER_IP: "172.16.20.8"
9 | NACOS_SERVERS: "172.16.21.83:8848 172.16.21.84:8848 172.16.22.83:8848 172.16.20.8:8848"
10 | SPRING_DATASOURCE_PLATFORM: mysql
11 | MYSQL_SERVICE_HOST: 172.16.21.112
12 | MYSQL_SERVICE_DB_NAME: nacos
13 | MYSQL_SERVICE_USER: fan_hairong
14 | MYSQL_SERVICE_PASSWORD: fan_hairong_passwd
15 | MYSQL_SERVICE_DB_PARAM: "characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"
16 | network_mode: host
--------------------------------------------------------------------------------
/01-installation-scripts/02-Zabbix/01-by-ansible/roles/zabbix_agent/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ##检查zabbix-release是否安装,没有的话就下载zabbix repo
3 | - name: check zabbix repo
4 | shell: rpm -qa | grep zabbix-release
5 | register: has_zabbix
6 | ignore_errors: True
7 |
8 | - name: get zabbix repo
9 | shell: rpm -Uvh https://repo.zabbix.com/zabbix/5.0/rhel/7/x86_64/zabbix-release-5.0-1.el7.noarch.rpm
10 | when: has_zabbix.rc != 0
11 |
12 | - name: install zabbix-agent
13 | yum: name=zabbix-agent
14 |
15 | - name: copy zabbix-agent conf
16 | template: src=zabbix_agentd.conf.j2 dest=/etc/zabbix/zabbix_agentd.conf
17 | tags:
18 | - conf
19 | notify:
20 | - restart zabbix-agent
21 | - name: start zabbix-agent
22 | service: name=zabbix-agent state=started enabled=true
23 |
--------------------------------------------------------------------------------
/01-installation-scripts/20-nfs/README.md:
--------------------------------------------------------------------------------
1 | # 1. nfs客户端调优命令
2 | 提高客户端并发,详细原理查看 [https://developer.aliyun.com/article/501417](https://developer.aliyun.com/article/501417)
3 | ```shell
4 | #!/bin/bash
5 | echo "options sunrpc tcp_slot_table_entries=128" >> /etc/modprobe.d/sunrpc.conf
6 | echo "options sunrpc tcp_max_slot_table_entries=128" >> /etc/modprobe.d/sunrpc.conf
7 | modprobe sunrpc
8 | sysctl -w sunrpc.tcp_slot_table_entries=128
9 | ```
10 |
11 | # 2. nfs客户端执行`df`命令无响应处理
12 | ## 原因
13 | NFS服务器故障或者nfs目录有变更等
14 | ## 解决方法
15 | ```shell
16 | # 查看挂载目录
17 | nfsstat -m
18 | /root/install from 10.10.8.111:/root/install
19 | Flags: rw,vers=3,rsize=32768,wsize=32768,hard,proto=tcp,timeo=600,retrans=2,sec=sys,addr=10.10.8.111
20 | # 卸载挂载目录
21 | umount -lf 10.10.8.111:/root/install
22 | ```
23 | 卸载后,`df`命令即可正常执行了
--------------------------------------------------------------------------------
/05-system-tools/07-show-file-create-time.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # 脚本来自:https://cloud.tencent.com/developer/article/1726834
3 |
4 |
5 | [ $# -ne 1 ] && echo "用法: $0 {FILENAME}" && exit 1
6 |
7 | INODE=`ls -i $1 |awk '{print $1}'`
8 | FILENAME=$1
9 |
10 | # 如果传入参数带/,则获取这个传入参数的目录路径并进入目录
11 | `echo $FILENAME | grep / 1> /dev/null` && { FPWD=${FILENAME%/*};FPWD=${FPWD:=/};cd ${FPWD};FPWD=`pwd`; } || FPWD=`pwd`
12 |
13 | array=(`echo ${FPWD} | sed 's@/@ @g'`)
14 | array_length=${#array[@]}
15 |
16 | for ((i=${array_length};i>=0;i--)); do
17 | unset array[$i]
18 | SUBPWD=`echo " "${array[@]} | sed 's@ @/@g'`
19 | DISK=`df -h |grep ${SUBPWD}$ |awk '{print $1}'`
20 | [[ -n $DISK ]] && break
21 | done
22 |
23 | # 文件系统非ext4则退出
24 | [[ "`df -T | grep ${DISK} |awk '{print $2}'`" != "ext4" ]] && { echo ${DISK} 不是ext4格式,脚本只支持ext4格式的文件系统;exit 2; }
25 |
26 | debugfs -R "stat <${INODE}>" ${DISK} | grep crtime
--------------------------------------------------------------------------------
/01-installation-scripts/27-yapi/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | # 账号:admin@docker.yapi
2 | # 密码:adm1n
3 | version: '3'
4 |
5 | services:
6 | yapi-web:
7 | image: jayfong/yapi:1.9.2
8 | container_name: yapi-web
9 | ports:
10 | - 40001:3000
11 | environment:
12 | - YAPI_ADMIN_ACCOUNT=admin@docker.yapi
13 | - YAPI_ADMIN_PASSWORD=adm1n
14 | - YAPI_CLOSE_REGISTER=true
15 | - YAPI_DB_SERVERNAME=yapi-mongo
16 | - YAPI_DB_PORT=27017
17 | - YAPI_DB_DATABASE=yapi
18 | - YAPI_MAIL_ENABLE=false
19 | - YAPI_LDAP_LOGIN_ENABLE=false
20 |       - YAPI_PLUGINS=[{"name":"interface-oauth2-token"},{"name":"add-user"}]
21 | depends_on:
22 | - yapi-mongo
23 | links:
24 | - yapi-mongo
25 | restart: unless-stopped
26 | yapi-mongo:
27 | image: mongo:4.4.4
28 | container_name: yapi-mongo
29 | volumes:
30 | - ./data/db:/data/db
31 | expose:
32 | - 27017
33 | restart: unless-stopped
--------------------------------------------------------------------------------
/03-Dockerfile/02-feely-sys/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #获取setting.conf参数
4 | cd /mnt/work/feely-sys
5 | dubbo_addr=`grep dubbo setting.conf | cut -d= -f2`
6 | nacos_addr=`grep nacos setting.conf | cut -d= -f2`
7 | nacos_namespace=`grep namespace setting.conf | cut -d= -f2`
8 | Xmx=`grep Xmx setting.conf | cut -d= -f2`
9 | Xms=`grep Xms setting.conf | cut -d= -f2`
10 | Xmn=`grep Xmn setting.conf | cut -d= -f2`
11 |
12 |
13 | java -Xmx${Xmx} -Xms${Xms} -Xmn${Xmn} -XX:NewRatio=${NewRatio} -XX:NativeMemoryTracking=${NativeMemoryTracking} -XX:MaxDirectMemorySize=${MaxDirectMemorySize} -XX:SurvivorRatio=${SurvivorRatio} -XX:MetaspaceSize=${MetaspaceSize} -XX:MaxMetaspaceSize=${MaxMetaspaceSize} -XX:MaxTenuringThreshold=${MaxTenuringThreshold} -XX:ParallelGCThreads=${ParallelGCThreads} -XX:ConcGCThreads=${ConcGCThreads} ${other} -XX:HeapDumpPath=${HeapDumpPath} -jar ${jar} ${log} ${other2} --dubbo.registry.address=${dubbo_addr} --spring.cloud.nacos.config.server-addr=${nacos_addr} --spring.cloud.nacos.config.namespace=${nacos_namespace}
14 |
15 |
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/kafka-manager/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # Docker compose to have Zookeeper, Kafka, and Kafka Manager running for development.
2 | # Provided by @chris-zen
3 |
4 | zookeeper:
5 | image: confluent/zookeeper
6 | ports:
7 | - "2181:2181"
8 |
9 | kafka:
10 | image: wurstmeister/kafka:0.9.0.0-1
11 | ports:
12 | - "9092:9092"
13 | links:
14 | - zookeeper:zk
15 | environment:
16 | - KAFKA_ADVERTISED_HOST_NAME
17 | - KAFKA_ADVERTISED_PORT=9092
18 | - KAFKA_DELETE_TOPIC_ENABLE=true
19 | - KAFKA_LOG_RETENTION_HOURS=1
20 | - KAFKA_MESSAGE_MAX_BYTES=10000000
21 | - KAFKA_REPLICA_FETCH_MAX_BYTES=10000000
22 | - KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS=60000
23 | - KAFKA_NUM_PARTITIONS=2
24 | - KAFKA_DELETE_RETENTION_MS=1000
25 |
26 |
27 | kafka-manager:
28 | image: sheepkiller/kafka-manager:latest
29 | ports:
30 | - "9000:9000"
31 | links:
32 | - zookeeper
33 | - kafka
34 | environment:
35 | ZK_HOSTS: zookeeper:2181
36 | APPLICATION_SECRET: letmein
37 | KM_ARGS: -Djava.net.preferIPv4Stack=true
38 |
--------------------------------------------------------------------------------
/01-installation-scripts/14-zookeeper/02-zookeeper集群配置.md:
--------------------------------------------------------------------------------
1 |
2 | # zookeeper集群配置操作
3 | ## 1、配置节点标识
4 | 以三节点集群为例,在各服务器上`zookeeper`目录依次执行以下命令:
5 | - [ zk-1 执行 ]
6 | ```shell
7 | echo "1" > data/myid
8 | ```
9 |
10 | - [ zk-2 执行 ]
11 | ```shell
12 | echo "2" > data/myid
13 | ```
14 | - [ zk-3 执行 ]
15 | ```shell
16 | echo "3" > data/myid
17 | ```
18 |
19 |
20 | ## 2、在各服务器zookeeper目录均执行以下命令
21 | ```shell
22 | #!/bin/bash
23 | # 下面的ip改成实际的ip
24 | ip[0]=192.168.1.1
25 | ip[1]=192.168.1.2
26 | ip[2]=192.168.1.3
27 |
28 | id=1
29 | # 2888:节点间数据同步端口;3888:选举端口。他们没有单独的配置项指定,只能通过server.X这里指定,要修改端口的话,这里修改
30 | for i in ${ip[@]};do
31 | echo "server.${id}=${i}:2888:3888" >> conf/zoo.cfg
32 | let id++
33 | done
34 | ```
35 | ### 注意事项
36 | 上述命令生成类似下面的配置
37 | ```shell
38 | server.1=192.168.1.1:2888:3888
39 | server.2=192.168.1.2:2888:3888
40 | server.3=192.168.1.3:2888:3888
41 | ```
42 | - 如果zk集群是部署在同一台服务器的`伪集群`,那么ip需要一样,后面的端口需要换成6个不同的端口。
43 | - 手动填写配置的话,注意每行后面不要有空格,否则会报错
44 |
45 |
46 | ## 3、依次启动 zk-1、zk-2、zk-3
47 | ## 4、验证
48 | 执行下面命令,查看`Mode`的值 **不再是** `standalone`
49 | ```shell
50 | bin/zkServer.sh status
51 | ```
--------------------------------------------------------------------------------
/03-Dockerfile/04-rocksdb/verify.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | rocksdb_datadir=/data/rocksdbTemp
3 |
4 | mkdir -p ${rocksdb_datadir}
5 | cd /data
6 | cat > rocksdbtest.cpp << EOF
7 | #include <cstdio>
8 | #include <cassert>
9 |
10 | #include "rocksdb/db.h"
11 | #include "rocksdb/slice.h"
12 | #include "rocksdb/options.h"
13 |
14 | using namespace std;
15 | using namespace rocksdb;
16 |
17 | const std::string PATH = "${rocksdb_datadir}"; //rocksDB的数据存储目录绝对路径
18 |
19 | int main(){
20 | DB* db;
21 | Options options;
22 | options.create_if_missing = true;
23 | Status status = DB::Open(options, PATH, &db);
24 | assert(status.ok());
25 | Slice key("test01");
26 | Slice value("success");
27 |
28 | std::string get_value;
29 | status = db->Put(WriteOptions(), key, value);
30 | if(status.ok()){
31 | status = db->Get(ReadOptions(), key, &get_value);
32 | if(status.ok()){
33 | printf("value is %s\n", get_value.c_str());
34 | }else{
35 | printf("get failed\n");
36 | }
37 | }else{
38 | printf("put failed\n");
39 | }
40 |
41 | delete db;
42 | }
43 | EOF
44 | g++ -std=c++11 -o rocksdbtest2 rocksdbtest.cpp -I /data/rocksdb-6.4.6/include -L/data/rocksdb-6.4.6 -lrocksdb -ldl
45 | ./rocksdbtest2
--------------------------------------------------------------------------------
/01-installation-scripts/42-sonarqube/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | sonarqube:
3 | hostname: sonarqube
4 | container_name: sonarqube
5 | #image: sonarqube:10.6.0-community
6 | # LTS版本
7 | image: sonarqube:9.9.6-community
8 | # 下面的内核参数请在宿主机上执行
9 | # sysctls:
10 | # # TCP可以排队的最大连接请求数
11 | # net.core.somaxconn: 4096
12 | # # 单个进程可以拥有的虚拟内存区域的数量
13 | # vm.max_map_count: 262184
14 | privileged: true
15 | restart: always
16 | depends_on:
17 | - db
18 | environment:
19 | SONAR_JDBC_URL: jdbc:postgresql://db:5432/sonar
20 | SONAR_JDBC_USERNAME: sonar
21 | SONAR_JDBC_PASSWORD: sonar
22 | volumes:
23 | - ./sonarqube_data:/opt/sonarqube/data
24 | - ./sonarqube_pdf:/opt/sonarqube/pdf-files
25 | - ./sonarqube_extensions:/opt/sonarqube/extensions
26 | - ./sonarqube_logs:/opt/sonarqube/logs
27 | ports:
28 | - "9000:9000"
29 | db:
30 | hostname: postgresql
31 | container_name: postgresql
32 | privileged: true
33 | restart: always
34 | image: postgres:12
35 | environment:
36 | POSTGRES_USER: sonar
37 | POSTGRES_PASSWORD: sonar
38 | volumes:
39 | - ./postgresql:/var/lib/postgresql
40 | - ./postgresql_data:/var/lib/postgresql/data
--------------------------------------------------------------------------------
/01-installation-scripts/17-rabbitmq/02-rabbitmq集群配置.md:
--------------------------------------------------------------------------------
1 | # RabbitMQ集群配置步骤
2 | ***以下操作,在各节点`rabbitmq`都启动的情况下进行***
3 |
4 | ## 1、在主节点上获取主节点的`.erlang.cookie`值
5 | 选一台服务器作为主节点,查看其值
6 | ```shell
7 | # 如果用本项目脚本部署的,那么 .erlang.cookie 在脚本配置的 rabbitmq_home 变量指定的目录下面。
8 | # 如果通过其他方式部署的,文件应该是
9 | # /var/lib/rabbitmq/.erlang.cookie 或者 ~/.erlang.cookie
10 |
11 | cat /data/rabbitmq/.erlang.cookie
12 | ```
13 |
14 |
15 | ## 2、在所有节点上配置`/etc/hosts`
16 | ```shell
17 | # rabbitmq节点间基于主机名进行通信,故各节点名应唯一,而后将集群节点都加入进集群节点 hosts 文件
18 | 192.168.1.57 node-1
19 | 192.168.1.81 node-2
20 | 192.168.1.60 node-3
21 | ```
22 | ## 3、在各子节点上操作,加入集群
23 | **1)将主节点的 `.erlang.cookie` 的值写入到子节点的 `.erlang.cookie` 中**
24 | ```shell
25 | vim /data/rabbitmq/.erlang.cookie
26 | # 修改值,保存时可能会提示只读,使用 :wq! 保存即可。
27 | # 如果上述操作不可能,那么修改该文件的权限,
28 | # chmod 777 /data/rabbitmq/.erlang.cookie
29 | # 修改完后,再降低文件权限,否则启动不了
30 | # chmod 400 /data/rabbitmq/.erlang.cookie
31 | ```
32 |
33 | **2)加入集群**
34 | ```shell
35 | # 下面的命令可以指定节点名
36 | # rabbitmqctl -n rabbit2 stop_app
37 |
38 | rabbitmqctl stop_app
39 | rabbitmqctl reset
40 | rabbitmqctl join_cluster rabbit@node-1
41 | rabbitmqctl start_app
42 | ```
43 | ## 4、在任一节点查看集群状态
44 | **1)命令查看**
45 | ```shell
46 | rabbitmqctl cluster_status
47 | ```
48 | **2)管理界面查看**
49 | 访问 `http://ip:port` 查看 `Overview` 中的 `node`
50 |
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/kafka-manager/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:7
2 |
3 | MAINTAINER Clement Laforet
4 |
5 | RUN yum update -y && \
6 | yum install -y java-1.8.0-openjdk-headless && \
7 | yum clean all
8 |
9 | ENV JAVA_HOME=/usr/java/default/ \
10 | ZK_HOSTS=localhost:2181 \
11 | KM_VERSION=1.3.1.8 \
12 | KM_REVISION=97329cc8bf462723232ee73dc6702c064b5908eb \
13 | KM_CONFIGFILE="conf/application.conf"
14 |
15 | ADD start-kafka-manager.sh /kafka-manager-${KM_VERSION}/start-kafka-manager.sh
16 |
17 | RUN yum install -y java-1.8.0-openjdk-devel git wget unzip which && \
18 | mkdir -p /tmp && \
19 | cd /tmp && \
20 | git clone https://github.com/yahoo/kafka-manager && \
21 | cd /tmp/kafka-manager && \
22 | git checkout ${KM_REVISION} && \
23 | echo 'scalacOptions ++= Seq("-Xmax-classfile-name", "200")' >> build.sbt && \
24 | ./sbt clean dist && \
25 | unzip -d / ./target/universal/kafka-manager-${KM_VERSION}.zip && \
26 | rm -fr /tmp/* /root/.sbt /root/.ivy2 && \
27 | chmod +x /kafka-manager-${KM_VERSION}/start-kafka-manager.sh && \
28 | yum autoremove -y java-1.8.0-openjdk-devel git wget unzip which && \
29 | yum clean all
30 |
31 | WORKDIR /kafka-manager-${KM_VERSION}
32 |
33 | EXPOSE 9000
34 | ENTRYPOINT ["./start-kafka-manager.sh"]
35 |
--------------------------------------------------------------------------------
/01-installation-scripts/01-MySQL/02-mysql GTID主从复制.md:
--------------------------------------------------------------------------------
1 | 由于配置简单,但如果要由脚本实现,代码量太大,且配置项较多,故以文本内容进行说明
2 | # mysql GTID主从配置操作
3 | ## 1、mysql主服务器/etc/my.cnf修改,新增内容
4 | ```shell
5 | cat >> /etc/my.cnf <> /etc/my.cnf < /dev/null
40 | if [ $? -ne 0 ];then
41 | echo_error "未找到容器:$1"
42 | exit 2
43 | fi
44 |
45 | docker ps -a | grep $1 | grep "Exited" &> /dev/null
46 | if [ $? -eq 0 ];then
47 | echo_error "容器 $1 已退出"
48 | exit 2
49 | fi
50 |
51 | # 检查nsenter命令
52 | command -v nsenter &> /dev/null
53 | if [ $? -ne 0 ];then
54 | echo_info 安装nsenter
55 | yum install -y util-linux
56 | fi
57 |
58 | container_pid=$(docker inspect -f {{.State.Pid}} $1)
59 | nsenter -n -t $container_pid ip -4 addr show | grep inet | grep -v 127.0.0.1 | awk '{print $2}' | cut -d/ -f1
60 |
--------------------------------------------------------------------------------
/01-installation-scripts/05-Jenkins/01-install-jenkins-bydocker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | jk_home=$(pwd)/jenkins
4 |
5 | # 带格式的echo函数
6 | function echo_info() {
7 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
8 | }
9 | function echo_warning() {
10 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
11 | }
12 | function echo_error() {
13 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
14 | }
15 |
16 |
17 | echo_info 设置timezone
18 | echo "Asia/Shanghai" > /etc/timezone
19 | script_dir=${jk_home}/scripts
20 | jenkins_out_home=${jk_home}/data
21 | [ -d $script_dir ] || mkdir -p $script_dir
22 | [ -d $jenkins_out_home ] || mkdir -p $jenkins_out_home
23 |
24 | echo_info 通过docker启动jenkins中文社区版
25 | # 这个是中文社区的镜像,官方镜是 jenkins/jenkins
26 | docker run -u root --name=jenkins --restart=always -d --network=host \
27 | -v ${jenkins_out_home}:/var/jenkins_home \
28 | -v ${script_dir}:/data/script \
29 | -v /var/run/docker.sock:/var/run/docker.sock \
30 | -v /etc/localtime:/etc/localtime \
31 | -v /etc/timezone:/etc/timezone \
32 | jenkinszh/jenkins-zh:2.267
33 |
34 | if [ $? -ne 0 ];then
35 | exit 1
36 | fi
37 | echo_info jenkins已启动成功,以下是相关信息:
38 | echo -e "\033[37m 端口:8080\033[0m"
39 | echo -e "\033[37m 自定义脚本目录:${script_dir} ,该目录需要到jenkins上配置,这个目录做成了数据卷挂载,在宿主机和容器中均可访问\033[0m"
40 | echo -e "\033[37m jenkins数据目录:${jenkins_out_home} ,不要删除该目录,这样重新运行该脚本,新生成的jenkins容器就可以获取之前的配置和数据\033[0m"
41 |
--------------------------------------------------------------------------------
/06-Antivirus-tools/01-kill-miner-proc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # 杀死各已知挖矿进程,单一般挖矿进程都有守护进程、母体这些,
3 | # 需要手动查找清理,本脚本或许可以提供一些思路
4 | ps auxf | grep -v grep | grep kinsing | awk '{print $2}' | xargs kill -9
5 | ps auxf | grep -v grep | grep kdevtmpfsi | awk '{print $2}' | xargs kill -9
6 | ps auxf | grep -v grep | grep "mine.moneropool.com" | awk '{print $2}' | xargs kill -9
7 | ps auxf | grep -v grep | grep "pool.t00ls.ru" | awk '{print $2}' | xargs kill -9
8 | ps auxf | grep -v grep | grep "xmr.crypto-pool.fr" | awk '{print $2}' | xargs kill -9
9 | ps auxf | grep -v grep | grep "zhuabcn@yahoo.com" | awk '{print $2}' | xargs kill -9
10 | ps auxf | grep -v grep | grep "monerohash.com" | awk '{print $2}' | xargs kill -9
11 | ps auxf | grep -v grep | grep "/tmp/a7b104c270" | awk '{print $2}' | xargs kill -9
12 | ps auxf | grep -v grep | grep "stratum.f2pool.com" | awk '{print $2}' | xargs kill -9
13 | ps auxf | grep -v grep | grep "xmrpool.eu" | awk '{print $2}' | xargs kill -9
14 | ps auxf | grep -v grep | grep "minexmr.com" | awk '{print $2}' | xargs kill -9
15 | ps auxf | grep -v grep | grep "xiaoyao" | awk '{print $2}' | xargs kill -9
16 | ps auxf | grep -v grep | grep "xiaoxue" | awk '{print $2}' | xargs kill -9
17 | ps auxf | grep var | grep lib | grep jenkins | grep -v httpPort | grep -v headless | grep "\-c" | awk '{print $2}' | xargs kill -9
18 | ps auxf | grep -v grep | grep "redis2" | awk '{print $2}' | xargs kill -9
19 | pkill -f biosetjenkins
20 | pkill -f Loopback
21 | pkill -f apaceha
22 | pkill -f cryptonight
23 | pkill -f stratum
24 | pkill -f performedl
25 | pkill -f JnKihGjn
26 | pkill -f irqba2anc1
27 | pkill -f irqba5xnc1
--------------------------------------------------------------------------------
/04-disk-tools/01-Create-Swap/01-create-swap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 单位只能为G
4 | SWAP_SIZE=2G
5 | SWAP_FILE_NAME=swap
6 | SWAP_DIR=$(pwd)/${SWAP_FILE_NAME}
7 | # 修复在根目录下创建swap文件时,有两个/的问题
8 | if [[ $(pwd) == "/" ]];then
9 | SWAP_DIR=/${SWAP_FILE_NAME}
10 | fi
11 |
12 | # 带格式的echo函数
13 | function echo_info() {
14 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
15 | }
16 | function echo_warning() {
17 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
18 | }
19 | function echo_error() {
20 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
21 | }
22 |
23 | if [[ ! ${SWAP_SIZE} =~ G$ ]];then
24 | echo_error swap单位设置错误
25 | exit 1
26 | fi
27 |
28 | echo_info 请确认信息,如需调整请修改脚本
29 | echo "swap大小:${SWAP_SIZE}"
30 | echo "swap文件路径:${SWAP_DIR}"
31 | echo_info "确认(y|N):"
32 | read USER_INPUT
33 | case ${USER_INPUT} in
34 | y|Y|yes)
35 | echo
36 | ;;
37 | *)
38 | exit 2
39 | ;;
40 | esac
41 |
42 | echo_info 设置swap亲和度为30
43 | sysctl -w vm.swappiness=30
44 | sed -i 's/vm.swappiness = 0/vm.swappiness = 30/g' /etc/sysctl.conf
45 |
46 | echo_info "swap文件${SWAP_DIR}(${SWAP_SIZE})创建中,请耐心等待..."
47 |
48 | SWAP_SIZE=$(echo ${SWAP_SIZE} | cut -d"G" -f 1)
49 | DD_BS=$((512*${SWAP_SIZE}))
50 | dd if=/dev/zero of=${SWAP_DIR} bs=${DD_BS} count=2097152
51 |
52 | mkswap ${SWAP_DIR}
53 | chmod 0600 ${SWAP_DIR}
54 |
55 | echo_info 挂载swap
56 | swapon ${SWAP_DIR}
57 | echo -e "${SWAP_DIR}\t\tswap\t\t\tswap\tdefaults\t0 0" >> /etc/fstab
58 |
59 | echo_info swap已创建成功
60 | free -h
--------------------------------------------------------------------------------
/01-installation-scripts/06-Gitlab/01-install-gitlab-bydocker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 带格式的echo函数
4 | function echo_info() {
5 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
6 | }
7 | function echo_warning() {
8 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
9 | }
10 | function echo_error() {
11 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
12 | }
13 |
14 | export GITLAB_HOME=/data/gitlab-ee-data
15 | [ -d $GITLAB_HOME/data ] || mkdir -p $GITLAB_HOME/data
16 | [ -d $GITLAB_HOME/logs ] || mkdir -p $GITLAB_HOME/logs
17 | [ -d $GITLAB_HOME/config ] || mkdir -p $GITLAB_HOME/config
18 |
19 | echo_info 设置timezone
20 | echo "Asia/Shanghai" > /etc/timezone
21 | git_ip=$(ip a|grep inet|grep -v 127.0.0.1|grep -v inet6 | awk '{print $2}' | tr -d "addr:" | sed -n '1p' | awk -F "/" '{print$1}')
22 |
23 | echo_info 通过docker启动gitlab
24 | docker run --detach \
25 | --hostname $git_ip \
26 | -p 443:443 -p 80:80\
27 | --name gitlab-ee \
28 | --restart always \
29 | -v $GITLAB_HOME/config:/etc/gitlab \
30 | -v $GITLAB_HOME/logs:/var/log/gitlab \
31 | -v $GITLAB_HOME/data:/var/opt/gitlab \
32 | -v /etc/localtime:/etc/localtime \
33 | -v /etc/timezone:/etc/timezone \
34 | gitlab/gitlab-ee:latest
35 |
36 | if [ $? -ne 0 ];then
37 | exit 1
38 | fi
39 |
40 | echo_info gitlab已启动成功,以下是相关信息:
41 | echo -e "\033[37m gitlab访问地址:http://${git_ip}/\033[0m"
42 | echo -e "\033[37m gitlab数据目录:$(dirname ${GITLAB_HOME})/ ,不要删除该目录,这样重新运行该脚本,新生成的gitlab容器就可以获取之前的配置和数据\033[0m"
--------------------------------------------------------------------------------
/03-Dockerfile/04-rocksdb/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:centos7
2 |
3 | USER root
4 |
5 | ADD go1.17.linux-amd64.tar.gz /usr/local/
6 | ADD cmake-3.6.0-Linux-x86_64.tar.gz /data
7 | ADD gflags-2.2.2.tar.gz /data
8 | ADD rocksdb-6.4.6.tar.gz /data
9 | ADD zstd-1.1.3.tar.gz /data
10 |
11 | RUN yum install -y gcc gcc-c++ lrzsz git lz4-devel snappy snappy-devel zlib zlib-devel bzip2 bzip2-devel lz4 lz4-devel && \
12 | cd /data && \
13 | yum remove cmake && \
14 | echo "export PATH=\$PATH:/data/cmake-3.6.0-Linux-x86_64/bin" >> /etc/profile && \
15 | source /etc/profile && \
16 | echo "export GOROOT=/usr/local/go" >> /etc/profile && \
17 | echo "export PATH=\$PATH:/usr/local/go/bin" >> /etc/profile && \
18 | source /etc/profile && \
19 | cd /data/gflags-2.2.2 && mkdir build && cd build && \
20 | cmake -DBUILD_SHARED_LIBS=ON -DBUILD_STATIC_LIBS=ON -DINSTALL_HEADERS=ON -DINSTALL_SHARED_LIBS=ON -DINSTALL_STATIC_LIBS=ON .. && \
21 | make -j 5 && make install && \
22 | echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/lib" >> /etc/profile && \
23 | cd /data/zstd-1.1.3 && \
24 | make -j 5 && make install && \
25 | cd /data/rocksdb-6.4.6 && \
26 | make -j 5 static_lib && \
27 | make -j 5 shared_lib && \
28 | cp librocksdb.so.6.4.6 /usr/local/lib && \
29 | ln -s librocksdb.so.6.4.6 /usr/local/lib/librocksdb.so.6.4 && \
30 | ln -s librocksdb.so.6.4.6 /usr/local/lib/librocksdb.so.6 && \
31 | ln -s librocksdb.so.6.4.6 /usr/local/lib/librocksdb.so && \
32 | echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/lib" >> /etc/profile && \
33 | source /etc/profile
34 |
35 | CMD tail -f /etc/hosts
--------------------------------------------------------------------------------
/01-installation-scripts/07-Nginx-Tengine/README.md:
--------------------------------------------------------------------------------
1 | # 一、文件说明
2 | ## 01-install-nginx-series.sh
3 | `nginx`、`tengine` 的安装脚本
4 | ## 02-geoip2-helper.sh
5 | Geoip2相关工具部署和下载。主要包含:
6 | - libmaxminddb安装
7 | - GeoLite.mmdb下载
8 | - ngx_http_geoip2_module模块下载
9 |
10 |
11 |
12 |
13 | # 二、Nginx热升级、手动日志切割
14 |
15 | ## 1、热升级
16 | 首先运行老的 `nginx`,`ps` 命令查看得到 `master` 进程的 `pid`,假设 `pid` 是 `999`
17 | ### 1.1、备份老的 `nginx` 二进制文件(在 `nginx` 运行时可以执行)
18 | ``` shell
19 | cp nginx nginx.bak
20 | ```
21 | ### 1.2、将新版本的 `nginx` 二进制文件拷贝至 `sbin` 目录
22 | ```shell
23 | cp -f ../../nginx ./
24 | ```
25 | ### 1.3、向老 `nginx` 的 `master` 进程发送 `USR2` 信号,表示要进行热部署了;`USR2` 是用户自定义信号
26 | ```shell
27 | kill -USR2 999
28 | ```
29 | `nginx master` 进程会使用新复制过来的 `nginx` 二进制文件启一个新的 `nginx master` 进程。
30 | 此时,新老 `master` 进程、`worker` 进程都在运行。不过,老的 `worker` 进程已经不再监听端口了,新的请求全部进入新的 `worker` 进程。
31 | 新 `master` 进程的父进程是老 `master` 进程。
32 | ### 1.4、 向老的 `master` 进程发送 `WINCH` 命令,告诉 `master` 优雅的关闭它的 `worker` 进程
33 | ```shell
34 | kill -WINCH 999
35 | ```
36 | 此时老的 `worker` 进程会全部退出,而老的 `master` 进程不会自动退出,为了防止版本回退。
37 | #### 1.5.1、回滚旧版本(如果需要),向旧 `Nginx` 主进程发送 `HUP` 信号,它会重新启动 `worker` 进程。同时对新 `master` 进程发送 `QUIT` 信号
38 | ```shell
39 | kill -HUP 999
40 | # 发送SIGHUP信号等效于 nginx -s reload (PID 999的那个nginx)
41 | kill -QUIT newPID
42 | ```
43 | #### 1.5.2、确定新版本没有问题,直接杀死老 `master` 进程即可,新 `master` 进程的父进程会变成 `PID` 1
44 | ```shell
45 | kill -9 999
46 | ```
47 | # 2、手动日志切割
48 | 清空日志重新写入。
49 | ## 2.1、先备份之前的日志
50 | ```shell
51 | cp access.log access.log.bak
52 | ```
53 | ## 2.2、向nginx发送reopen命令
54 | ```shell
55 | nginx -s reopen
56 | ```
57 | `access.log` 就会重新清空并开始写入日志。
58 | 使用 `USR1` 命令也是同样的功能。
59 | ```shell
60 | kill -USR1 999
61 | ```
--------------------------------------------------------------------------------
/04-disk-tools/02-Create-LVM/01-create-lvm-by-fdisk.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # only use in centos7
3 | partition=/data # 定义最终挂载的名称
4 | vgname=vgdata # 定义逻辑卷组的名称
5 | lvmname=lvmdata # 定义逻辑卷的名称
6 | code='vdb' # 根据分区的实际情况修改
7 |
8 | # 带格式的echo函数
9 | function echo_info() {
10 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
11 | }
12 | function echo_warning() {
13 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
14 | }
15 | function echo_error() {
16 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
17 | }
18 |
19 | echo_info 检测lvm2
20 | yum install -y lvm2
21 |
22 | if [ -d $partition ];then
23 | echo_error ${partition}目录已存在,退出
24 | exit 1
25 | fi
26 |
27 | disk=
28 | for i in $code
29 | do
30 | lsblk | grep $i | grep disk &> /dev/null
31 | if [ $? -ne 0 ];then
32 | echo_error 未发现硬盘/dev/$i,退出
33 | exit 2
34 | fi
35 | # 这里自动化完成了所有分区fdisk苦逼的交互步骤
36 | fdisk /dev/$i << EOF
37 | n
38 | p
39 | 1
40 |
41 |
42 | t
43 | 8e
44 | w
45 | EOF
46 | disk="$disk /dev/${i}1" # 将所有分区拼起来
47 | done
48 |
49 | echo_info 即将使用这些磁盘创建lvm:$disk
50 |
51 | echo_info 创建pv
52 | pvcreate $disk
53 | echo_info 创建vg
54 | vgcreate $vgname $disk
55 | echo_info 创建lv
56 | lvcreate -l 100%VG -n $lvmname $vgname
57 |
58 | echo_info 创建xfs文件系统
59 | mkfs.xfs /dev/$vgname/$lvmname
60 | if [[ $? == 0 ]]
61 | then
62 | mkdir -p $partition
63 | echo_info 更新/etc/fstab
64 | echo "/dev/$vgname/$lvmname $partition xfs defaults 0 0" >> /etc/fstab
65 | mount -a
66 | echo_info lvm创建成功
67 | echo_info lvm文件系统磁盘空间使用情况:
68 | df -h
69 | else
70 | echo_error lvm创建失败!
71 | exit 1
72 | fi
73 |
--------------------------------------------------------------------------------
/01-installation-scripts/29-code-push-server/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # code-push-server默认账号密码admin/123456
2 | version: "3.7"
3 | services:
4 | server:
5 | image: tablee/code-push-server:v0.5.2
6 | volumes:
7 | - ./data-storage:/data/storage
8 | - ./data-tmp:/data/tmp
9 | - ./config.js:/config.js
10 | environment:
11 | DOWNLOAD_URL: "http://YOU_MACHINE_IP:3000/download"
12 | MYSQL_HOST: "YOU_MACHINE_IP"
13 | MYSQL_PORT: "3308"
14 | MYSQL_USERNAME: "codepush"
15 | MYSQL_PASSWORD: "123456"
16 | MYSQL_DATABASE: "codepush"
17 | STORAGE_DIR: "/data/storage"
18 | DATA_DIR: "/data/tmp"
19 | NODE_ENV: "production"
20 | CONFIG_FILE: "/config.js"
21 | REDIS_HOST: "YOU_MACHINE_IP"
22 | REDIS_PORT: "6388"
23 | deploy:
24 | resources:
25 | limits:
26 | cpus: "2"
27 | memory: 1000M
28 | restart_policy:
29 | condition: on-failure
30 | ports:
31 | - "3000:3000"
32 | networks:
33 | - servernet
34 | depends_on:
35 | - db
36 | - redis
37 | db:
38 | image: mysql:5.7.23
39 | volumes:
40 | - data-mysql:/var/lib/mysql
41 | - ./sql/codepush-all.sql:/docker-entrypoint-initdb.d/codepush-all.sql
42 | ports:
43 | - "3308:3306"
44 | environment:
45 | MYSQL_ALLOW_EMPTY_PASSWORD: "On"
46 | networks:
47 | - dbnet
48 | redis:
49 | image: redis:4.0.11-alpine
50 | volumes:
51 | - data-redis:/data
52 | ports:
53 | - "6388:6379"
54 | networks:
55 | - redisnet
56 | networks:
57 | servernet:
58 | dbnet:
59 | redisnet:
60 | volumes:
61 | data-storage:
62 | data-tmp:
63 | data-mysql:
64 | data-redis:
65 |
--------------------------------------------------------------------------------
/01-installation-scripts/02-Zabbix/01-by-ansible/hosts:
--------------------------------------------------------------------------------
1 | # This is the default ansible 'hosts' file.
2 | #
3 | # It should live in /etc/ansible/hosts
4 | #
5 | # - Comments begin with the '#' character
6 | # - Blank lines are ignored
7 | # - Groups of hosts are delimited by [header] elements
8 | # - You can enter hostnames or ip addresses
9 | # - A hostname/ip can be a member of multiple groups
10 |
11 | # Ex 1: Ungrouped hosts, specify before any group headers.
12 |
13 | #----------------------------------------
14 | #| The server names are PINYIN of ECSs |
15 | #----------------------------------------
16 | ## green.example.com
17 | ## blue.example.com
18 | ## 192.168.100.1
19 | ## 192.168.100.10
20 |
21 | # Ex 2: A collection of hosts belonging to the 'webservers' group
22 |
23 | ## [webservers]
24 | ## alpha.example.org
25 | ## beta.example.org
26 | ## 192.168.1.100
27 | ## 192.168.1.110
28 |
29 | # If you have multiple hosts following a pattern you can specify
30 | # them like this:
31 |
32 | ## www[001:006].example.com
33 |
34 | # Ex 3: A collection of database servers in the 'dbservers' group
35 |
36 | ## [dbservers]
37 | ##
38 | ## db01.intranet.mydomain.net
39 | ## db02.intranet.mydomain.net
40 | ## 10.25.1.56
41 | ## 10.25.1.57
42 |
43 | # Here's another example of host ranges, this time there are no
44 | # leading 0s:
45 |
46 | ## db-[99:101]-node.example.com
47 |
48 |
49 | #本地连接需要专门添加
50 | [local]
51 | 127.0.0.1 ansible_connection=local
52 | #示例而已,ip、账号、密码都是假的
53 | [shouhuoxianshangfuwuqi]
54 | 47.100.49.111 ansible_ssh_user=root ansible_ssh_pass=OneD@y#001
55 | #做了免密登录可以不用添加账号密码
56 | [qianduankaifahuanjing]
57 | 47.100.49.112
58 |
59 | #设置组
60 | [backend]
61 | 47.100.49.111
62 | 47.100.49.112
63 |
64 | [test]
65 | 192.168.1.81
66 |
--------------------------------------------------------------------------------
/04-disk-tools/02-Create-LVM/03-create-lvm-by-gdisk.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # only use in centos7
3 | partition=/data # 定义最终挂载的名称
4 | vgname=vgdata # 定义逻辑卷组的名称
5 | lvmname=lvmdata # 定义逻辑卷的名称
6 | code='vdb' # 根据分区的实际情况修改
7 |
# Formatted logging helpers: [HH:MM:SS] [LEVEL] message, with ANSI colors
echo_info()    { echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$*\033[0m"; }
echo_warning() { echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$*\033[0m"; }
echo_error()   { echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$*\033[0m"; }
18 |
# Install required tooling: lvm2 for the LVM commands, gdisk for GPT partitioning
echo_info 检测lvm2
yum install -y lvm2

echo_info 检测gdisk
yum install -y gdisk

# Refuse to run if the mount point already exists, to avoid clobbering data
if [ -d $partition ];then
    echo_error ${partition}目录已存在,退出
    exit 1
fi

disk=
for i in $code
do
    # Make sure the target block device actually exists
    lsblk | grep $i | grep disk &> /dev/null
    if [ $? -ne 0 ];then
        echo_error 未发现硬盘/dev/$i,退出
        exit 2
    fi
    # Feed gdisk its interactive answers: "n" (new partition), blank lines
    # accept the defaults (partition number / first sector / last sector /
    # type code), "w" writes the table, "Y" confirms.
    gdisk /dev/$i <<"_EOF_"
n





w
Y
_EOF_

    disk="$disk /dev/${i}1" # collect every newly created partition
done

echo_info 即将使用这些磁盘创建lvm:$disk

echo_info 创建pv
pvcreate $disk
echo_info 创建vg
vgcreate $vgname $disk
# FIX: the "创建lv" message is now printed before lvcreate runs
# (it used to appear after), matching the parted variant of this script
echo_info 创建lv
lvcreate -l 100%VG -n $lvmname $vgname

echo_info 创建xfs文件系统
mkfs.xfs /dev/$vgname/$lvmname
if [[ $? == 0 ]]
then
    mkdir -p $partition
    echo_info 更新/etc/fstab
    echo "/dev/$vgname/$lvmname $partition xfs defaults 0 0" >> /etc/fstab
    mount -a
    # FIX: removed a stray trailing backslash after "lvm创建成功" that
    # acted as a line continuation and swallowed the next echo_info line
    echo_info lvm创建成功
    echo_info lvm文件系统磁盘空间使用情况:
    df -h
else
    echo_error lvm创建失败!
    exit 1
fi
77 |
--------------------------------------------------------------------------------
/04-disk-tools/02-Create-LVM/02-create-lvm-by-parted.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# only use in centos7
partition=/data                                 # mount point to create for the new LVM volume
vgname=vgdata                                   # volume group name
lvmname=lvmdata                                 # logical volume name
code='vdb'                                      # target disk device name(s); adjust to your system
7 |
# Formatted logging helpers: [HH:MM:SS] [LEVEL] message, with ANSI colors
echo_info()    { echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$*\033[0m"; }
echo_warning() { echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$*\033[0m"; }
echo_error()   { echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$*\033[0m"; }
18 |
# Install lvm2 (provides pvcreate/vgcreate/lvcreate)
echo_info 检测lvm2
yum install -y lvm2

# Refuse to run if the mount point already exists, to avoid clobbering data
if [ -d $partition ];then
    echo_error ${partition}目录已存在,退出
    exit 1
fi

disk=
for i in $code
do
    # Make sure the target block device actually exists
    lsblk | grep $i | grep disk &> /dev/null
    if [ $? -ne 0 ];then
        echo_error 未发现硬盘/dev/$i,退出
        exit 2
    fi
    # Automate parted's interactive prompts: print table, create one
    # partition named ${code}1 of type xfs spanning 0%-100%, set the lvm
    # flag on it, print again, quit.
    # NOTE(review): the partition name always uses ${code}, not ${i}; with
    # several disks in $code every partition would share one name -- harmless
    # but worth confirming if $code ever holds more than one device.
    parted -s /dev/$i mklabel gpt
    parted /dev/$i << EOF
p
mkpart
${code}1
xfs
0%
100%
toggle 1 lvm
p
q
EOF
    disk="$disk /dev/${i}1" # collect every newly created partition
done

echo_info 即将使用这些磁盘创建lvm:$disk

echo_info 创建pv
pvcreate $disk &> /dev/null
echo_info 创建vg
vgcreate $vgname $disk &> /dev/null
echo_info 创建lv
lvcreate -l 100%VG -n $lvmname $vgname &> /dev/null

# Format, mount and persist in fstab; only mkfs's exit status is checked
echo_info 创建xfs文件系统
mkfs.xfs /dev/$vgname/$lvmname &> /dev/null
if [[ $? == 0 ]]
then
    mkdir -p $partition
    echo_info 更新/etc/fstab
    echo "/dev/$vgname/$lvmname $partition xfs defaults 0 0" >> /etc/fstab
    mount -a
    echo_info lvm创建成功
    echo_info lvm文件系统磁盘空间使用情况:
    df -h
else
    echo_error lvm创建失败!
    exit 1
fi
75 |
--------------------------------------------------------------------------------
/02-elasticsearch-tools/01-clean-single-es-index-by-date.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# 针对es索引是单个的情况(相较于xx.log-2019.09.28这种格式来说),根据日期删除es索引
# Author:zhegeshijiehuoyouai
# Date:2019.9.2
#

# Indices to clean, separated by spaces
wanna_del_indices="tms-devprod-new-log wms-devprod-new-log tb-devprod-new-log"
# Elasticsearch endpoint (host:port)
http_es=10.0.17.109:9200
# How many days of documents to keep
keep_days=3
# All index names known to ES (3rd column of _cat/indices).
# FIX: use "sort -u" -- the old "uniq | sort" ran uniq before sorting, and
# uniq only collapses adjacent duplicates, so nothing was deduplicated.
all_indices=$(curl -s -XGET "http://${http_es}/_cat/indices" |awk '{print $3}' | sort -u)
# ANSI color code used to highlight keywords
color=32

echo -e "\033[33m以下是您希望删除的索引:\033[0m"
echo ${wanna_del_indices}
echo -e "\033[33m在Elasticsearch中搜索到以下索引:\033[0m"
echo ${all_indices}
echo

# Delete documents older than ${keep_days} days from indices matching "$1*"
# via the _delete_by_query API. Calls curl directly instead of generating a
# temporary shell script (the old approach left a stray file behind if the
# script was interrupted between creation and removal).
clean_index(){
    curl -s -H'Content-Type:application/json' -d'{
    "query": {
        "range": {
            "@timestamp": {
                "lt": "now-'"${keep_days}"'d",
                "format": "epoch_millis"
            }
        }
    }
}
' -XPOST "http://${http_es}/$1*/_delete_by_query?pretty"
}

# Deletion loop
count=0
for index in ${wanna_del_indices}; do
    # Skip indices that do not exist in ES
    echo ${all_indices} | grep -w "${index}" &> /dev/null
    if [ $? -ne 0 ]; then
        echo -e "没有索引:\033[${color}m${index}\033[0m"
        continue
    fi

    # Purge old documents from this index
    echo -e "清理 \033[${color}m${index}\033[0m 中..."
    clean_index ${index} &> /dev/null # hide the raw API response
    count=$((count+1))               # POSIX arithmetic instead of deprecated $[...]
    echo "第 ${count} 条多余索引清理完毕"
done

# Print a summary when anything was cleaned
if [ ${count} -gt 0 ]; then
    echo ""
    echo -e "共清理 \033[${color}m${count}\033[0m 条索引\n所有多余索引均清理完毕,均保留了近 \033[${color}m${keep_days}\033[0m 天的索引"
fi
66 |
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/kafka-manager/README.md:
--------------------------------------------------------------------------------
1 | # 说明
2 | 克隆自 [https://github.com/sheepkiller/kafka-manager-docker](https://github.com/sheepkiller/kafka-manager-docker)
3 |
4 | # kafka manager Dockerfile
5 | [kafka manager](https://github.com/yahoo/kafka-manager) is a tool from Yahoo Inc. for managing [Apache Kafka](http://kafka.apache.org).
6 | ## Base Docker Image ##
7 | * [centos:7](https://hub.docker.com/_/centos/)
8 |
9 | ## RoadMap
10 | - ~~1.3.1.6 + 1 : defaulting to openjdk~~
11 | - 1.3.1.6 + 2 : switch to non root user
12 | - 1.3.1.6 + 3 : switch to alpine linux
13 |
14 | ## Howto
15 | ### Quick Start
16 | ```
17 | docker run -it --rm -p 9000:9000 -e ZK_HOSTS="your-zk.domain:2181" -e APPLICATION_SECRET=letmein sheepkiller/kafka-manager
18 | ```
19 | (if you don't define ZK_HOSTS, default value has been set to "localhost:2181")
20 |
21 |
22 | ### Use your own configuration file
23 | Until 1.3.0.4, you could override the default configuration file via a docker volume:
24 | ```
25 | docker run [...] -v /path/to/confdir:/kafka-manager-${KM_VERSION}/conf [...]
26 | ```
27 | From > 1.3.0.4, you can specify a configuration file via an environment variable.
28 | ```
29 | docker run [...] -v /path/to/confdir:/opt -e KM_CONFIG=/opt/my_shiny.conf sheepkiller/kafka-manager
30 | ```
31 |
32 | ### Pass arguments to kafka-manager
33 | For release <= 1.3.0.4, you can pass options via command/args.
34 | ```
35 | docker run -it --rm -p 9000:9000 -e ZK_HOSTS="your-zk.domain:2181" -e APPLICATION_SECRET=letmein sheepkiller/kafka-manager -Djava.net.preferIPv4Stack=true
36 | ```
37 | For release > 1.3.0.4, you can use env variable `KM_ARGS`.
38 | ```
39 | docker run -it --rm -p 9000:9000 -e ZK_HOSTS="your-zk.domain:2181" -e APPLICATION_SECRET=letmein -e KM_ARGS=-Djava.net.preferIPv4Stack=true sheepkiller/kafka-manager
40 | ```
41 |
42 | ### Specify a revision
43 | If you want to upgrade/downgrade this Dockerfile, edit it and set `KM_VERSION` and `KM_REVISION` to fetch the release from github.
44 |
45 | ## Known issues
46 | - release before 1.3.1.6 may have APPLICATION_SECRET incorrectly set (default value) and kafka-manager will fail to start. Remove the /kafka-manager znode from zookeeper and reconfigure kafka-manager.
47 |
--------------------------------------------------------------------------------
/01-installation-scripts/07-Nginx-Tengine/02-nginx 国内外流量分流.conf.sample:
--------------------------------------------------------------------------------
# ----------------------------------------------------
# 1. HTTP context
# ----------------------------------------------------
http {
    # 1.1 Load the GeoIP2 country database.
    #     auto_reload 5m          -> re-check the mmdb file every 5 minutes
    #     source=$remote_addr     -> switch to $realip_remote_addr when the
    #                                real_ip module / a CDN layer sits in front
    geoip2 /etc/nginx/geoip2/GeoLite2-Country.mmdb {
        auto_reload 5m;
        # FIX: the variable definition needs a lookup path ("country iso_code",
        # the ISO Alpha-2 code) -- without it the geoip2 directive has no
        # database field to read and nginx rejects the config.
        $geoip2_country_code default=ZZ source=$remote_addr country iso_code;
        # Add more variables (city / continent / region fields) here if needed.
    }

    # 1.2 Map the country code onto one of two upstream names.
    #     HK/MO/TW can be grouped with "domestic" (or a third class) if needed.
    map $geoip2_country_code $target_upstream {
        default overseas;   # everything outside mainland China
        CN      domestic;   # mainland China
        # HK MO TW domestic;
    }

    # 1.3 Backend clusters
    upstream domestic {
        server 10.10.10.1:80 max_fails=3 fail_timeout=15s;
        server 10.10.10.2:80 max_fails=3 fail_timeout=15s;
    }

    upstream overseas {
        server 172.16.1.1:80 max_fails=3 fail_timeout=15s;
        server 172.16.1.2:80 max_fails=3 fail_timeout=15s;
    }

    # ------------------------------------------------
    # 2. server block
    # ------------------------------------------------
    server {
        listen 80;
        server_name example.com;

        # (optional) When using the real_ip module, configure it so the
        # geoip2 lookup sees the real client address:
        # set_real_ip_from 0.0.0.0/0;
        # real_ip_header X-Forwarded-For;
        # real_ip_recursive on;

        location / {
            proxy_pass http://$target_upstream;   # dynamic upstream selection
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }

        # 2.1 (optional) debug endpoint: shows the detected country/upstream
        location = /__geo_test__ {
            default_type text/plain;
            return 200 "Your country: $geoip2_country_code\nUpstream: $target_upstream\n";
        }
    }
}
--------------------------------------------------------------------------------
/01-installation-scripts/35-consul/k8s-yaml/consul-client.yaml:
--------------------------------------------------------------------------------
---
# Consul client agent, one per node (DaemonSet), joining the three
# consul-server pods via their headless-service DNS names.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: consul-client
  namespace: yournamespace
spec:
  selector:
    matchLabels:
      app: consul
      component: client
  template:
    metadata:
      labels:
        app: consul
        component: client
    spec:
      imagePullSecrets:
        - name: yoursecret # comment this out if no pull secret is needed
      containers:
        - name: consul-client
          image: yourharbor/consul:1.9.16
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8500
              name: http
            - containerPort: 8600
              name: dns-tcp
              protocol: TCP
            - containerPort: 8600
              name: dns-udp
              protocol: UDP
            - containerPort: 8301
              name: serflan
            - containerPort: 8302
              name: serfwan
            - containerPort: 8300
              name: server
          env:
            # Pod IP is advertised to the cluster; namespace is used to build
            # the retry-join DNS names below
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          args:
            - "agent"
            - "-advertise=$(POD_IP)"
            - "-bind=0.0.0.0"
            - "-client=0.0.0.0"
            - "-ui"
            - "-datacenter=yourdc"
            - "-retry-join=consul-server-0.consul-server-headless.$(NAMESPACE).svc.cluster.local"
            - "-retry-join=consul-server-1.consul-server-headless.$(NAMESPACE).svc.cluster.local"
            - "-retry-join=consul-server-2.consul-server-headless.$(NAMESPACE).svc.cluster.local"
          resources:
            limits:
              cpu: "60m"
              memory: "100Mi"
            requests:
              cpu: "60m"
              memory: "100Mi"
          lifecycle:
            # Leave the cluster gracefully on pod shutdown
            preStop:
              exec:
                command:
                  - /bin/sh
                  - -c
                  - consul leave
72 |
--------------------------------------------------------------------------------
/02-elasticsearch-tools/02-clean-date-format-es-index-by-date.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # 针对按天拆分的索引(如test-log-2021-11-28),根据日期删除es索引
4 | #
5 |
# Formatted logging helpers: [HH:MM:SS] [LEVEL] message, with ANSI colors
echo_info()    { echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$*\033[0m"; }
echo_warning() { echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$*\033[0m"; }
echo_error()   { echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$*\033[0m"; }
16 |
# Elasticsearch endpoint (host:port)
HTTP_ES=10.0.17.98:9200
# Number of recent days of indices to keep
KEEP_DAYS=3
# Collect all index names (3rd column of _cat/indices), dropping system
# indices that start with "."
curl -w "\n" -s -XGET "http://${HTTP_ES}/_cat/indices" |awk '{print $3}' | grep -Ev "^\." > _all_indices
ES_ALL_NUM_FLAG=$(cat _all_indices | grep -v "^$" | wc -l)
if [ ${ES_ALL_NUM_FLAG} -eq 0 ];then
    echo_error ES中没有索引
    rm -f _all_indices
    exit 1
fi

# Drop the most recent KEEP_DAYS days from the deletion candidates
# (indices are assumed to embed a YYYY-MM-DD date in their name)
for i in $(seq 1 ${KEEP_DAYS});do
    dateformat=$(date -d "${i} day ago" +%Y-%m-%d)
    sed -i /${dateformat}/d _all_indices
done
# Also exclude today's indices and blank lines
WANNA_DEL_INDICES=$(cat _all_indices | grep -Ev "^[[:space:]]*$" | grep -v $(date +%Y-%m-%d))
rm -f _all_indices
# echo collapses the list to one line, so this only distinguishes empty
# (0) from non-empty (1) -- which is all the check below needs
ES_WANNA_DEL_NUM_FLAG=$(echo ${WANNA_DEL_INDICES} | grep -v "^$" | wc -l)
if [ ${ES_WANNA_DEL_NUM_FLAG} -eq 0 ];then
    echo_error 没有匹配的ES索引
    exit 2
fi

echo_info 在ES中搜索到以下索引:
for i in ${WANNA_DEL_INDICES};do
    echo $i
done
echo

# Ask for confirmation; anything other than Y/y/yes aborts
echo_warning 是否要删除上面的索引[Y/n]:
read USER_INPUT
case ${USER_INPUT} in
    Y|y|yes)
        true
    ;;
    *)
        exit
    ;;
esac
58 |
# Delete index $1 outright via the ES DELETE API.
clean_index(){
    # FIX: add -f so curl exits non-zero on HTTP errors (e.g. 404/403);
    # without it curl returns 0 even when ES rejects the delete, and the
    # failure branch below could never fire.
    curl -sf -XDELETE "http://${HTTP_ES}/$1"
    if [ $? -ne 0 ];then
        echo_error 清理索引 $1 失败!
        exit 1
    fi
}
67 |
# Delete every candidate index and count how many were removed
COUNT=0
for index in ${WANNA_DEL_INDICES}; do
    echo_info 清理 ${index} 中...
    clean_index ${index} &>/dev/null # hide the raw API response
    COUNT=$((COUNT+1))              # POSIX arithmetic instead of deprecated $[...]
done

# Print a summary when anything was cleaned
if [ ${COUNT} -gt 0 ]; then
    echo ""
    echo "-------------SUMMARY-------------"
    echo_info 共清理 ${COUNT} 条多余索引
    echo_info 所有多余索引均清理完毕,保留了近 ${KEEP_DAYS} 天的索引
fi
84 |
--------------------------------------------------------------------------------
/05-system-tools/04-tcp-connection-state-counter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # 统计各个TCP连接状态的个数。用于方便排查系统连接负荷问题。
3 |
4 | # @Function
5 | # show count of tcp connection stat.
6 | #
7 | # @Usage
8 | # $ ./tcp-connection-state-counter
9 | #
10 | # @online-doc https://github.com/oldratlee/useful-scripts/blob/dev-2.x/docs/shell.md#-tcp-connection-state-counter
11 | # @author Jerry Lee (oldratlee at gmail dot com)
12 | # @author @sunuslee (sunuslee at gmail dot com)
13 | set -eEuo pipefail
14 |
15 | # NOTE: DO NOT declare var PROG as readonly, because its value is supplied by subshell.
16 | PROG="$(basename "$0")"
17 | readonly PROG_VERSION='2.4.0-dev'
18 |
19 | ################################################################################
20 | # util functions
21 | ################################################################################
22 |
23 | usage() {
24 | cat < 2 {
63 | ++s[$NF]
64 | }
65 |
66 | END {
67 | # get max length of stat and count
68 | for(v in s) {
69 | stat_len = length(v)
70 | if(stat_len > max_stat_len) max_stat_len = stat_len
71 |
72 | count_len = length(s[v])
73 | if (count_len > max_count_len) max_count_len = count_len
74 | }
75 |
76 | for(v in s) {
77 | printf "%-" max_stat_len "s %" max_count_len "s\n", v, s[v]
78 | }
79 | }' | sort -nr -k2,2
80 |
--------------------------------------------------------------------------------
/04-disk-tools/04-wipe-data-disk.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Purpose: wipe all data disks -- remove every partition and the matching
# entries in /etc/fstab. DESTRUCTIVE: asks for confirmation first.

# Collect every physical disk except the system disk (sda/vda/...) and nvme0
disks=$(lsblk | grep -Ev ".da|nvme0" | grep disk | awk '{print $1}')

echo "检测到以下磁盘将被清空:"
echo "$disks"
echo "警告:此操作将删除这些磁盘上的所有数据!"
echo "请确认是否继续?(y/n)"
read confirm

if [ "$confirm" != "y" ]; then
    echo "操作已取消"
    exit 1
fi

# Unmount every mount point of every target disk
for disk in $disks; do
    mount_points=$(lsblk -n -o MOUNTPOINT /dev/${disk} | grep -v '^$')
    if [ ! -z "$mount_points" ]; then
        echo "正在卸载 /dev/${disk} 的所有挂载点"
        echo "$mount_points" | while read mount_point; do
            umount "$mount_point" && echo "已卸载 $mount_point" || echo "卸载 $mount_point 失败"
        done
    fi
done

# Back up fstab, then strip entries referring to the wiped disks
echo "正在备份 /etc/fstab 到 /etc/fstab.backup"
cp /etc/fstab /etc/fstab.backup

for disk in $disks; do
    echo "正在从 /etc/fstab 中移除 /dev/${disk} 的相关条目"
    sed -i "\|^/dev/${disk}|d" /etc/fstab
    # Also drop UUID-based fstab entries belonging to this disk's partitions
    disk_uuids=$(blkid | grep "/dev/${disk}" | awk -F '"' '{print $2}')
    # BUG FIX: the loop previously iterated over the literal word
    # "disk_uuids" (missing $), so UUID entries were never removed
    for disk_uuid in $disk_uuids;do
        if [ ! -z "$disk_uuid" ]; then
            echo "检测到UUID: $disk_uuid, 正在从 /etc/fstab 中移除相关条目"
            sed -i "\|UUID=${disk_uuid}|d" /etc/fstab # \| delimiter avoids clashes with / in the pattern
        fi
    done
done

# Wipe data and partition tables
for disk in $disks; do
    echo "正在清除磁盘 /dev/${disk} 的所有数据"

    # Make sure every partition of the disk is unmounted
    partitions=$(lsblk -n -o NAME /dev/${disk} | grep -v "^${disk}$")
    for part in $partitions; do
        umount "/dev/${part}" 2>/dev/null
    done

    # Remove all filesystem signatures
    echo "正在清除文件系统标识..."
    wipefs -a "/dev/${disk}" && echo "文件系统标识已清除" || echo "清除文件系统标识失败"

    # Zero the first MiB (partition table / boot record area)
    echo "正在清除磁盘开头数据..."
    dd if=/dev/zero of="/dev/${disk}" bs=1M count=1 status=none && echo "磁盘开头数据已清除" || echo "清除磁盘开头数据失败"

    # Destroy GPT structures (primary and backup), if present
    echo "正在清除GPT分区表..."
    sgdisk -Z "/dev/${disk}" 2>/dev/null && echo "GPT分区表已清除" || echo "清除GPT分区表失败或不存在GPT分区表"

    # Delete the MBR partition entry (note: "d\nw" removes only the first
    # partition; largely redundant after wipefs/dd/sgdisk above)
    echo "正在清除MBR分区表..."
    echo -e "d\nw" | fdisk "/dev/${disk}" 2>/dev/null && echo "MBR分区表已清除" || echo "清除MBR分区表失败或不存在MBR分区表"

    echo "磁盘 /dev/${disk} 已完成清除"
    echo "----------------------------------------"
done

echo "所有磁盘清除完成"
echo "fstab备份文件已保存为 /etc/fstab.backup"
echo "请检查 /etc/fstab 确保配置正确"
80 |
--------------------------------------------------------------------------------
/09-parse-file/01-yaml/01-yaml.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # shellcheck disable=SC1003
3 |
4 | # Based on https://gist.github.com/pkuczynski/8665367
5 | # 脚本来自:https://github.com/jasperes/bash-yaml/blob/master/script/yaml.sh
6 |
7 | parse_yaml() {
8 | local yaml_file=$1
9 | local prefix=$2
10 | local s
11 | local w
12 | local fs
13 |
14 | s='[[:space:]]*'
15 | w='[a-zA-Z0-9_.-]*'
16 | fs="$(echo @ | tr @ '\034')"
17 |
18 | (
19 | sed -e '/- [^\“]'"[^\']"'.*: /s|\([ ]*\)- \([[:space:]]*\)|\1-\'$'\n'' \1\2|g' |
20 | sed -ne '/^--/s|--||g; s|\"|\\\"|g; s/[[:space:]]*$//g;' \
21 | -e 's/\$/\\\$/g' \
22 | -e "/#.*[\"\']/!s| #.*||g; /^#/s|#.*||g;" \
23 | -e "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
24 | -e "s|^\($s\)\($w\)${s}[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" |
25 | awk -F"$fs" '{
26 | indent = length($1)/2;
27 | if (length($2) == 0) { conj[indent]="+";} else {conj[indent]="";}
28 | vname[indent] = $2;
29 | for (i in vname) {if (i > indent) {delete vname[i]}}
30 | if (length($3) > 0) {
31 | vn=""; for (i=0; i
3 |
4 | # 自签泛域名证书
5 | 此工具用于颁发泛域名证书,方便开发环境调试。
6 |
7 | 请勿用于生产环境,生产环境还是购买正式的证书。
8 | 或者到 [Let's Encrypt](https://letsencrypt.org/) 可以申请到免费证书
9 | (支持多域名和泛域名)。
10 |
11 | ## 优点
12 | 1. 你可以创建任意网站证书,只需导入一次根证书,无需多次导入;
13 | 1. 减少重复又无谓的组织信息输入,创建证书时只需要输入域名;
14 | 1. 泛域名证书可以减少 `nginx` 配置,例如你要模拟 CDN:
15 | 假设你的项目网站是 `example.dev`,CDN 网站设置为 `cdn.example.dev`,
16 | 你只需在 `nginx` 里面配置一个网站,`server_name` 同时填写 `example.dev`
17 | 和 `cdn.example.dev`,它们可以使用同一个 `*.example.dev` 的证书。
18 | 1. 现在你只需要一个证书,就可以搞定所有项目网站!
19 |
20 | 使用 `SAN` 来支持多域名和泛域名:
21 | ```ini
22 | subjectAltName=DNS:*.one.dev,DNS:one.dev,DNS:*.two.dev,DNS:two.dev,DNS:*.three.dev,DNS:three.dev
23 | ```
24 |
25 | ## 系统要求
26 | 1. Linux,openssl
27 | 1. 事先用 `hosts` 或者 `dnsmasq` 解析你本地开发的域名,
28 | 例如把 `example.dev` 指向 `127.0.0.1`
29 |
30 | ## 使用
31 | ```bash
32 | ./gen.cert.sh <domain> [<domain2>] [<domain3>] [<domain4>] ...
33 | ```
34 | 把 `<domain>` 替换成你的域名,例如 `example.dev`
35 |
36 | 运行的输出像这样:
37 |
38 | 
39 |
40 | 如果有多个项目网站,可以把所有网站都加上去,用空格隔开。
41 |
42 | 生成的证书位于:
43 | ```text
44 | out/<domain>/<domain>.crt
45 | out/<domain>/<domain>.bundle.crt
46 | ```
47 |
48 | 证书有效期是 20 年,你可以修改 `ca.cnf` 来修改这个年限。
49 |
50 | 根证书位于:
51 | `out/root.crt`
52 | 成功之后,把根证书导入到操作系统里面,信任这个证书。
53 |
54 | 根证书的有效期是 20 年,你可以修改 `gen.root.sh` 来修改这个年限。
55 |
56 | 证书私钥位于:
57 | `out/cert.key.pem`
58 |
59 | 其中 `.bundle.crt` 是已经拼接好 CA 的证书,可以添加到 `nginx` 配置里面。
60 | 然后你就可以愉快地用 `https` 来访问你本地的开发网站了。
61 |
62 | ## 清空
63 | 你可以运行 `flush.sh` 来清空所有历史,包括根证书和网站证书。
64 |
65 | ## 配置
66 | 你可以修改 `ca.cnf` 来修改你的证书年限。
67 | ```ini
68 | default_days = 730
69 | ```
70 |
71 | 可以修改 `gen.root.sh` 来自定义你的根证书名称和组织。
72 |
73 | 也可以修改 `gen.cert.sh` 来自定义你的网站证书组织。
74 |
75 | ## 参考 / 致谢
76 | [Vault and self signed SSL certificates](http://dunne.io/vault-and-self-signed-ssl-certificates)
77 |
78 | [利用OpenSSL创建自签名的SSL证书备忘](http://wangye.org/blog/archives/732/)
79 |
80 | [Provide subjectAltName to openssl directly on command line](http://security.stackexchange.com/questions/74345/provide-subjectaltname-to-openssl-directly-on-command-line)
81 |
82 | ## 关于 Let's Encrypt 客户端
83 | 官方客户端 `certbot` [太复杂了](https://github.com/Neilpang/acme.sh/issues/386),推荐使用 [acme.sh](https://github.com/Neilpang/acme.sh/wiki/%E8%AF%B4%E6%98%8E)。
84 |
85 | ## 关于 .dev 域名
86 | [Chrome to force .dev domains to HTTPS via preloaded HSTS](https://ma.ttias.be/chrome-force-dev-domains-https-via-preloaded-hsts/) ([2017-9-16](https://chromium-review.googlesource.com/c/chromium/src/+/669923))
87 |
88 | ## 关于 Chrome 信任证书问题
89 | 看到有人反映 Chrome 下无法信任证书,可参考 [这个文档](docs/chrome-trust.md)
90 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/gen.cert.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
# No domain given: print usage and bail out.
# FIX: restore the <domain> placeholders that were stripped from the
# original usage strings (they rendered as empty brackets).
if [ -z "$1" ]
then
    echo
    echo 'Issue a wildcard SSL certificate with Fishdrowned ROOT CA'
    echo
    echo 'Usage: ./gen.cert.sh <domain> [<domain2>] [<domain3>] [<domain4>] ...'
    echo '    <domain>          The domain name of your site, like "example.dev",'
    echo '                      you will get a certificate for *.example.dev'
    echo '    Multiple domains are acceptable'
    exit;
fi
14 |
# Build the SAN list: for every domain given, add both the wildcard and the
# bare form, e.g. DNS:*.example.dev,DNS:example.dev
SAN=""
for var in "$@"; do
    SAN+="DNS:*.${var},DNS:${var},"
done
# Drop the trailing comma
SAN="${SAN%,}"

# Work from the script's own directory
cd "$(dirname "${BASH_SOURCE[0]}")"

# Create the root CA first if it does not exist yet
if [ ! -f "out/root.crt" ]; then
    bash gen.root.sh
fi

# Per-domain, per-timestamp output directory
BASE_DIR="out/$1"
TIME=$(date +%Y%m%d-%H%M)
DIR="${BASE_DIR}/${TIME}"
mkdir -p ${DIR}
35 |
# Replace the subjectAltName IP in ca.cnf with this machine's IP
#-------------------------------------------------
# Read an IP address from stdin into machine_ip, with a basic format check.
function input_machine_ip_fun() {
    read input_machine_ip
    machine_ip=${input_machine_ip}
    if [[ ! $machine_ip =~ ^([0,1]?[0-9]{1,2}|2([0-4][0-9]|5[0-5]))(\.([0,1]?[0-9]{1,2}|2([0-4][0-9]|5[0-5]))){3} ]];then
        # FIX: this script has no echo_error helper (it was "command not
        # found" at runtime); use plain echo with the same message
        echo 错误的ip格式,退出
        exit 7
    fi
}
# Detect the primary outbound IP of this machine.
function get_machine_ip() {
    machine_ip=$(ip route get 1.1.1.1 | awk '{for(i=1;i<=NF;i++) if($i=="src") print $(i+1)}')
}
#-------------------------------------------------

# BUG FIX: machine_ip was never populated before the sed below (neither
# helper was ever called), which blanked the IP in ca.cnf. Detect it first.
get_machine_ip

sed -i "s/subjectAltName = IP:.*/subjectAltName = IP:${machine_ip}/g" ca.cnf
52 |
# Create CSR for *.<domain>, injecting the SAN list via an inline [SAN]
# section appended to ca.cnf
openssl req -new -out "${DIR}/$1.csr.pem" \
    -key out/cert.key.pem \
    -reqexts SAN \
    -config <(cat ca.cnf \
        <(printf "[SAN]\nsubjectAltName=${SAN}")) \
    -subj "/C=CN/ST=Guangdong/L=Guangzhou/O=Fishdrowned/OU=$1/CN=*.$1"

# Issue the certificate, signed by the local root CA
# openssl ca -batch -config ./ca.cnf -notext -in "${DIR}/$1.csr.pem" -out "${DIR}/$1.cert.pem"
openssl ca -config ./ca.cnf -batch -notext \
    -in "${DIR}/$1.csr.pem" \
    -out "${DIR}/$1.crt" \
    -cert ./out/root.crt \
    -keyfile ./out/root.key.pem

# Chain certificate with CA, and refresh the "latest" symlinks in BASE_DIR
cat "${DIR}/$1.crt" ./out/root.crt > "${DIR}/$1.bundle.crt"
ln -snf "./${TIME}/$1.bundle.crt" "${BASE_DIR}/$1.bundle.crt"
ln -snf "./${TIME}/$1.crt" "${BASE_DIR}/$1.crt"
ln -snf "../cert.key.pem" "${BASE_DIR}/$1.key.pem"
ln -snf "../root.crt" "${BASE_DIR}/root.crt"

# Output certificates
echo
echo "Certificates are located in:"

# Use colored ls when supported (GNU), plain -G otherwise (BSD)
LS=$([[ `ls --help | grep '\-\-color'` ]] && echo "ls --color" || echo "ls -G")

${LS} -la `pwd`/${BASE_DIR}/*.*
83 |
--------------------------------------------------------------------------------
/01-installation-scripts/43-goenv/01-install-goenv.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
# Formatted logging helpers: [HH:MM:SS] [LEVEL] message, with ANSI colors
echo_info()    { echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$*\033[0m"; }
echo_warning() { echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$*\033[0m"; }
echo_error()   { echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$*\033[0m"; }
13 |
# Download / install directories
src_dir=$(pwd)/00src00
goenv_root=$(pwd)/goenv
# GitHub mirror prefix (proxy for environments that cannot reach github.com)
GITHUB="https://cors.isteed.cc/https://github.com"

echo_info 前置条件检测

# Must run as root (writes /etc/profile.d and root's ~/.bashrc)
if [[ $(whoami) != 'root' ]];then
    echo_error 请使用root用户执行
    exit 99
fi

# git is required for cloning the goenv repository
if ! command -v git 1>/dev/null 2>&1; then
    echo_error 未检测到 git 命令,请先安装 git
    exit 1
fi

# Refuse to re-install over an existing goenv directory
if [ -d $goenv_root ];then
    echo_error ${goenv_root} 目录已存在,请检查是否重复安装
    exit 1
fi

# Detect the operating system
# NOTE(review): os / os_version are set but not used later in this script --
# presumably kept for future use; confirm before removing
if grep -qs "ubuntu" /etc/os-release; then
    os="ubuntu"
    os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
    # Suppress Ubuntu's interactive config-update prompt
    export UCF_FORCE_CONFFOLD=1
    # Suppress Ubuntu's service-restart prompt
    export NEEDRESTART_SUSPEND=1
elif [[ -e /etc/centos-release ]]; then
    os="centos"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
elif [[ -e /etc/rocky-release ]]; then
    os="rocky"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
elif [[ -e /etc/almalinux-release ]]; then
    os="alma"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
else
    echo_error 不支持的操作系统
    exit 99
fi
59 |
# Helper functions for the clone step

# Report a failed git clone and abort the installation.
failed_checkout() {
    echo_error "克隆失败:$1"
    exit 2
}

# checkout <repo-url> <target-dir> <branch>: shallow-clone the given branch,
# skipping the clone when the target directory already exists.
checkout() {
    if [ ! -d "$2" ]; then
        git -c advice.detachedHead=0 clone --branch "$3" --depth 1 "$1" "$2" &> /dev/null || failed_checkout "$1"
    fi
    echo_info "$1 完成"
}
70 |
echo_info 克隆goenv项目
checkout "${GITHUB}/go-nv/goenv.git" "${goenv_root}" "master"
#mkdir -p ${goenv_root}/{cache,shims,versions}
#chmod o+w ${goenv_root}/{shims,versions}

# Point Go modules at a domestic proxy, system-wide
echo_info 配置GOPROXY
echo "export GO111MODULE=on" > /etc/profile.d/go.sh
echo "export GOPROXY=https://goproxy.cn" >> /etc/profile.d/go.sh


# Append goenv initialisation to root's ~/.bashrc
# ($ is backslash-escaped so GOENV_ROOT/PATH expand at shell startup,
# not while writing the file)
echo_info 添加环境变量到~/.bashrc
cat >> ~/.bashrc << _EOF_
# goenv
export GOENV_ROOT="$goenv_root"
export PATH="\$GOENV_ROOT/bin:\$PATH"
eval "\$(goenv init -)"
_EOF_

echo
echo_info $(${goenv_root}/bin/goenv --version) 已安装完毕,请重新加载终端以激活goenv命令。
echo_warning 本脚本仅为root用户添加了goenv,若需为其他用户添加,请在该用户\~/.bashrc中添加以下内容
echo
cat << _EOF_
# goenv
export GOENV_ROOT="$goenv_root"
export PATH="\$GOENV_ROOT/bin:\$PATH"
eval "\$(goenv init -)"
_EOF_
echo
--------------------------------------------------------------------------------
/01-installation-scripts/18-sftpgo/README.md:
--------------------------------------------------------------------------------
1 | ## 与LDAP集成
2 | 参考 [https://github.com/sftpgo/sftpgo-plugin-auth](https://github.com/sftpgo/sftpgo-plugin-auth)
3 | ## 数据库替换为mysql的方法
4 | ```
5 | vim /etc/sftpgo/sftpgo.json
6 |
7 | "data_provider": {
8 | "driver": "mysql", ##数据库类型mysql
9 | "name": "SFTP", ##schema名
10 | "host": "10.0.10.201", ##数据库IP地址
11 | "port": 8306, ##数据库端口号
12 | "username": "admin", ##用户名
13 | "password": "Qwer123$", ##密码
14 | "sslmode": 0,
15 | "root_cert": "",
16 | "client_cert": "",
17 | ```
18 |
19 | ## ssh加密套件兼容性
20 | sftpgo默认配置禁用了一些低安全性的加密套件,部分极低版本客户端连接sftpgo出现失败
21 | ```
22 | debug1: kex: server->client aes128-ctr hmac-sha2-256 none
23 | debug1: kex: client->server aes128-ctr hmac-sha2-256 none
24 | Unable to negotiate a key exchange method
25 | ```
26 |
27 | 排查过程中启用verbose日志,发现sftp服务端不支持客户端的加密套件。这种问题多出现在使用`centos/rhel6.x`版本的操作系统,由于很难升级操作系统,所以只能在sftpgo服务端做兼容性改造。
28 | ```
29 | {"level":"debug","time":"2022-11-01T15:35:45.590","sender":"sftpd","message":"failed to accept an incoming connection: ssh: no common algorithm for key exchange; client offered: [diffie-hellman-group-exchange-sha256 diffie-hellman-group-exchange-sha1 diffie-hellman-group14-sha1 diffie-hellman-group1-sha1], server offered: [curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256 ecdh-sha2-nistp384 ecdh-sha2-nistp521 diffie-hellman-group14-sha256 ext-info-s]"}
30 | {"level":"debug","time":"2022-11-01T15:35:45.590","sender":"connection_failed","client_ip":"171.223.103.180","username":"","login_type":"no_auth_tryed","protocol":"SSH","error":"ssh: no common algorithm for key exchange; client offered: [diffie-hellman-group-exchange-sha256 diffie-hellman-group-exchange-sha1 diffie-hellman-group14-sha1 diffie-hellman-group1-sha1], server offered: [curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256 ecdh-sha2-nistp384 ecdh-sha2-nistp521 diffie-hellman-group14-sha256 ext-info-s]"}
31 | ```
32 |
33 | 修改配置文件,重启sftpgo服务
34 | ```
35 | vim /etc/sftpgo/sftpgo.json
36 |
37 | "host_key_algorithms": ["rsa-sha2-512-cert-v01@openssh.com", "rsa-sha2-256-cert-v01@openssh.com", "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", "ecdsa-sha2-nistp256-cert-v01@openssh.com", "ecdsa-sha2-nistp384-cert-v01@openssh.com", "ecdsa-sha2-nistp521-cert-v01@openssh.com", "ssh-ed25519-cert-v01@openssh.com", "ecdsa-sha2-nistp256", "ecdsa-sha2-nistp384", "ecdsa-sha2-nistp521", "rsa-sha2-512", "rsa-sha2-256", "ssh-rsa", "ssh-dss", "ssh-ed25519"],
38 | "kex_algorithms": ["curve25519-sha256", "curve25519-sha256@libssh.org", "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521", "diffie-hellman-group14-sha256", "diffie-hellman-group16-sha512", "diffie-hellman-group18-sha512", "diffie-hellman-group14-sha1", "diffie-hellman-group1-sha1"],
39 | "ciphers": [],
40 | "macs": ["hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96"],
41 |
42 | systemctl restart sftpgo
43 | ```
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/01-installation-scripts/24-PostgreSQL/04-restore-postgresql.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # 将此脚本和所有增量备份文件(备份文件不要再放在目录里)放在同一目录下执行
3 |
4 | PG_HOME=/data/postgresql-11
5 | BASE_BACK_FILE=base.tar.gz
6 | INCRE_BACK_FILE=pg_wal.tar.gz
7 | SYSTEM_UNIT_FILE=postgresql-11.service
8 | NOW_TIME=$(date +%F_%H_%M_%S)
9 |
# Formatted logging helpers: [HH:MM:SS] [LEVEL] message, with ANSI colors
echo_info()    { echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$*\033[0m"; }
echo_warning() { echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$*\033[0m"; }
echo_error()   { echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$*\033[0m"; }
20 |
# If postgresql-11 is running, ask whether to stop it and continue the
# restore. FIX: previously only n/N aborted -- any other input (typo, empty
# line) fell through the case and the restore continued over a RUNNING
# postgres; now anything other than y/Y aborts.
function check_pg_running() {
    ps -ef | grep postgresql-11 | grep -v grep &> /dev/null
    if [ $? -eq 0 ];then
        echo_warning 检测到postgresql运行中,是否停止postgresql,并继续恢复 [ y/n ]
        read C_OR_NOT
        case ${C_OR_NOT} in
        y|Y)
            echo_info 停止postgresql
            systemctl stop ${SYSTEM_UNIT_FILE}
            ;;
        *)
            echo_info 用户中止操作
            exit 0
            ;;
        esac
    fi
}
37 |
# Unpack the base backup into ./data, then the WAL archive, and copy the
# WAL segments into data/pg_wal.
function decompression_pg_backup() {
    PG_BASE_BACK_SIZE=$(du -h ${BASE_BACK_FILE} | awk '{print $1}')
    echo_info 解压基础备份包(${PG_BASE_BACK_SIZE}),请耐心等待
    if [ ! -d data ]; then
        mkdir data
    fi
    tar xf ${BASE_BACK_FILE} -C data
    echo_info 解压增量备份包
    tar xf ${INCRE_BACK_FILE}
    echo_info 移动增量备份文件至pg_wal目录
    \cp -rf 0000* data/pg_wal/
}
48 |
# Prompt for the recovery target time (global RESTORE_TIME) and validate the
# "YYYY-MM-DD HH:MM:SS" format, re-prompting recursively on bad input.
# Fix: the original regex had no trailing $ anchor, so input with trailing
# garbage (e.g. "2021-06-29 10:29:06xyz") was accepted and written verbatim
# into recovery.conf.
function check_time_format() {
    # Quoted so the parentheses are not interpreted by the shell
    echo_info "请输入要恢复的时间点(${BACK_TIME_MIN}至${BACK_TIME_MAX}之间),输入格式示例 2021-06-29 10:29:06"
    read RESTORE_TIME
    if [[ ! ${RESTORE_TIME} =~ ^2[0-9]{3}-[0-9]{2}-[0-9]{2}\ [0-9]{2}:[0-9]{2}:[0-9]{2}$ ]];then
        echo_error 输入格式错误,请重新输入
        check_time_format
    fi
}
57 |
# Derive the restorable time window from the WAL files' modification times,
# prompt the user for a target time, then write recovery.conf for PITR.
function get_restore_time_range() {
    cd ${PG_HOME}/data
    # Oldest WAL file's mtime = earliest restorable point. `ls -t` sorts
    # newest-first, so `tail -1` is the oldest; the awk -F '.' pass strips
    # the fractional seconds from --full-time output.
    BACK_TIME_MIN=$(ls -l --full-time -t pg_wal/0* | tail -1 | awk '{print $6" "$7}' | awk -F '.' '{print $1}')
    # Newest WAL file's mtime = latest restorable point (-r reverses the sort).
    BACK_TIME_MAX=$(ls -l --full-time -t -r pg_wal/0* | tail -1 | awk '{print $6" "$7}' | awk -F '.' '{print $1}')
    check_time_format
    echo_info 生成recovery.conf
    # Empty restore_command: all needed WAL segments are already in pg_wal/.
    cat > ${PG_HOME}/data/recovery.conf << EOF
restore_command = ''
recovery_target_time = '${RESTORE_TIME}'
EOF
}
69 |
# Move the unpacked data directory into place (preserving the old one under
# a timestamped name), fix ownership/permissions, generate recovery.conf,
# and start the server to begin point-in-time recovery.
function restore_pg() {
    if [ -d ${PG_HOME}/data ];then
        echo_info 重命名原data目录:${PG_HOME}/data --\> ${PG_HOME}/data_${NOW_TIME} # \> escapes the redirection operator
        mv ${PG_HOME}/data ${PG_HOME}/data_${NOW_TIME}
    fi
    echo_info 移动解压data目录至${PG_HOME}/data
    mv data ${PG_HOME}/data
    chown -R postgres:postgres ${PG_HOME}/data
    # postgres refuses to start if the data directory is group/world accessible
    chmod 700 ${PG_HOME}/data
    get_restore_time_range
    # Comment out any synchronous standby setting, otherwise the primary
    # would block writes waiting for a replica that is not there.
    sed -i "s/^synchronous_standby_names/# &/g" ${PG_HOME}/data/postgresql.conf

    echo_info 启动postgresql
    systemctl start ${SYSTEM_UNIT_FILE}
}
86 |
# Entry point: confirm PG is stopped, unpack the backups, then restore.
function main() {
    check_pg_running
    decompression_pg_backup
    restore_pg
}

main
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/KnowStreaming/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | # 默认账号密码为 admin / admin
2 | version: "2"
3 | services:
4 | # *不要调整knowstreaming-manager服务名称,ui中会用到
5 | knowstreaming-manager:
6 | image: knowstreaming/knowstreaming-manager:latest
7 | container_name: knowstreaming-manager
8 | privileged: true
9 | restart: always
10 | depends_on:
11 | - elasticsearch-single
12 | - knowstreaming-mysql
13 | expose:
14 | - 80
15 | command:
16 | - /bin/sh
17 | - /ks-start.sh
18 | environment:
19 | TZ: Asia/Shanghai
20 | # mysql服务地址
21 | SERVER_MYSQL_ADDRESS: knowstreaming-mysql:3306
22 | # mysql数据库名
23 | SERVER_MYSQL_DB: know_streaming
24 | # mysql用户名
25 | SERVER_MYSQL_USER: root
26 | # mysql用户密码
27 | SERVER_MYSQL_PASSWORD: admin2022_
28 | # es服务地址
29 | SERVER_ES_ADDRESS: elasticsearch-single:9200
30 | # 服务JVM参数
31 | JAVA_OPTS: -Xmx1g -Xms1g
32 | # 对于kafka中ADVERTISED_LISTENERS填写的hostname可以通过该方式完成
33 | # extra_hosts:
34 | # - "hostname:x.x.x.x"
35 | # 服务日志路径
36 | # volumes:
37 | # - /ks/manage/log:/logs
38 | knowstreaming-ui:
39 | image: knowstreaming/knowstreaming-ui:latest
40 | container_name: knowstreaming-ui
41 | restart: always
42 | ports:
43 | - '80:80'
44 | environment:
45 | TZ: Asia/Shanghai
46 | depends_on:
47 | - knowstreaming-manager
48 | # extra_hosts:
49 | # - "hostname:x.x.x.x"
50 | elasticsearch-single:
51 | image: docker.io/library/elasticsearch:7.6.2
52 | container_name: elasticsearch-single
53 | restart: always
54 | expose:
55 | - 9200
56 | - 9300
57 | # ports:
58 | # - '9200:9200'
59 | # - '9300:9300'
60 | environment:
61 | TZ: Asia/Shanghai
62 | # es的JVM参数
63 | ES_JAVA_OPTS: -Xms512m -Xmx512m
64 | # 单节点配置,多节点集群参考 https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file
65 | discovery.type: single-node
66 | # 数据持久化路径
67 | # volumes:
68 | # - /ks/es/data:/usr/share/elasticsearch/data
69 |
70 | # es初始化服务,与manager使用同一镜像
71 | # 首次启动es需初始化模版和索引,后续会自动创建
72 | knowstreaming-init:
73 | image: knowstreaming/knowstreaming-manager:latest
74 | container_name: knowstreaming-init
75 | depends_on:
76 | - elasticsearch-single
77 | command:
78 | - /bin/bash
79 | - /es_template_create.sh
80 | environment:
81 | TZ: Asia/Shanghai
82 | # es服务地址
83 | SERVER_ES_ADDRESS: elasticsearch-single:9200
84 |
85 | knowstreaming-mysql:
86 | image: knowstreaming/knowstreaming-mysql:latest
87 | container_name: knowstreaming-mysql
88 | restart: always
89 | environment:
90 | TZ: Asia/Shanghai
91 | # root 用户密码
92 | MYSQL_ROOT_PASSWORD: admin2022_
93 | # 初始化时创建的数据库名称
94 | MYSQL_DATABASE: know_streaming
95 | # 通配所有host,可以访问远程
96 | MYSQL_ROOT_HOST: '%'
97 | expose:
98 | - 3306
99 | # ports:
100 | # - '3306:3306'
101 | # 数据持久化路径
102 | # volumes:
103 | # - /ks/mysql/data:/data/mysql
--------------------------------------------------------------------------------
/01-installation-scripts/42-sonarqube/README.md:
--------------------------------------------------------------------------------
1 | ## 一、docker-compose 启动及 sonarqube 配置说明
2 | ### 1. docker-compose.yaml 语法
3 | docker-compose.yaml 里没有 `version` 字段,是因为我是使用 `docker compose` 命令启动的,不需要该字段
4 | ### 2. 内核参数
5 | 配置 `/etc/sysctl.conf` 建议配置不低于:
6 | ```bash
# TCP可以排队的最大连接请求数
net.core.somaxconn = 4096
# 单个进程可以拥有的虚拟内存区域的数量(Elasticsearch 要求不低于 262144)
vm.max_map_count = 262144
11 | ```
12 | ### 3. 因权限问题启动报错解决方法
13 | 所有的目录,会在首次启动后自动创建,但可能会因为目录权限的问题报错,解决办法:
14 | ```bash
15 | # 在docker-compose.yaml所在的目录下执行
16 | sudo chown -R 1000:1000 ./sonarqube*
17 | ```
18 | ### 4. 汉化
19 | - 下载汉化jar包
20 | **方式1:**
21 | 运行成功后,浏览器访问 `http://your_ip:9000`,账号密码均为 admin,在应用市场里搜索,路径: `administrator` -> `marketplace` -> 搜索 `chinese`。在跳转到的 Github 项目中下载对应版本的汉化 jar 包。
22 | **方式2:**
23 | 直接访问 Github 下载对应版本的 jar 包:[https://github.com/xuhuisheng/sonar-l10n-zh](https://github.com/xuhuisheng/sonar-l10n-zh)
24 | - 将 jar 包上传到 `./sonarqube_extensions/downloads` (对应容器里的 `/opt/sonarqube/extensions/downloads` 目录)
25 | - 授权
26 | ```bash
27 | sudo chown -R 1000:1000 ./sonarqube_extensions/downloads
28 | ```
29 | - 重启 sonarqube,重启之后即为中文界面
30 | ```bash
31 | docker compose restart sonarqube
32 | ```
33 |
34 | ### 5. 安装 sonarqube-community-branch-plugin 插件
35 | 因为我们部署的 sonarqube 是社区版,代码分支扫描只支持 master,要多分支支持,需要下载这个插件。
36 | - 下载 jar 包
37 | 项目地址:[https://github.com/mc1arke/sonarqube-community-branch-plugin](https://github.com/mc1arke/sonarqube-community-branch-plugin)
38 | 根据 README 选择**对应版本**下载
39 | - 上传 jar 包、授权、重启步骤同汉化的对应步骤,请参考上面的步骤
40 | ### 6. 将sonarqube的配置文件挂载到宿主机
41 | 以下命令在docker-compose.yaml文件同级目录下执行:
42 | ```bash
43 | # 拷贝容器中的配置到宿主机上
44 | mkdir -p sonarqube_conf
45 | docker cp sonarqube:/opt/sonarqube/conf/sonar.properties ./sonarqube_conf/sonar.properties
46 | chown -R 1000:1000 sonarqube_conf
47 | chmod u+w sonarqube_conf/sonar.properties
48 | # 关闭容器,修改docker-compose.yaml
49 | docker compose down
50 | vim docker-compose.yaml
51 | # sonarqube的volumes下新增下面这行内容
52 | - ./sonarqube_conf:/opt/sonarqube/conf
53 | # 启动
54 | docker compose up -d
55 | ```
56 | ## 二、使用容器 sonar-scanner 分析本地项目
57 | ### 1. 克隆项目到本地
58 | ```bash
59 | git clone https://your-gitlab.com/group/demo.git
60 | ```
61 | ### 2. 生成令牌及分析命令
62 | 创建项目,创建令牌,选择构建技术和操作系统后,会生成分析命令,如:
63 | ```bash
64 | sonar-scanner \
65 | -Dsonar.projectKey=group_demo_AZDuRhN-4PcPHzf-y35q \
66 | -Dsonar.sources=. \
67 | -Dsonar.host.url=http://172.16.20.66:9000 \
68 | -Dsonar.login=sqp_b4d36186f5013d50e8508f8f342aa3fc8c179b01
69 | ```
70 | ### 3. 使用容器化的 sonar-scanner 进行分析
71 | 在上面生成命令的步骤下面,会有 sonar-scanner 的使用文档,点击可跳转。9.9 长期支持版本的 sonar 对应的 sonar-scanner 文档地址 [https://docs.sonarsource.com/sonarqube/9.9/analyzing-source-code/scanners/sonarscanner/](https://docs.sonarsource.com/sonarqube/9.9/analyzing-source-code/scanners/sonarscanner/) ,根据提示选择正确的 sonar-scanner 版本。
72 | 扫描命令:
73 | ```bash
74 | docker run \
75 | --rm \
76 | -v "/data/demo:/usr/src" \
77 | sonarsource/sonar-scanner-cli:4.8.1 \
78 | sonar-scanner \
79 | -Dsonar.projectKey=group_demo_AZDuRhN-4PcPHzf-y35q \
80 | -Dsonar.sources=. \
81 | -Dsonar.host.url=http://172.16.20.66:9000 \
82 | -Dsonar.login=sqp_b4d36186f5013d50e8508f8f342aa3fc8c179b01
83 | # -v 选项中的/data/demo是本地项目的目录
84 | # sonarsource/sonar-scanner-cli:4.8.1是镜像:tag
85 | # sonar-scanner开头的这部分就是2步骤生成的命令
86 | ```
87 | 执行完成后,sonar 上可以看到报告。
88 |
--------------------------------------------------------------------------------
/01-installation-scripts/14-zookeeper/03-install-zkui.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
# Colorized logging helpers: "[HH:MM:SS] [LEVEL] message"
function echo_info() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
}
function echo_warning() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
}
function echo_error() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
}
13 |
# Abort unless run as root
if [[ $(whoami) != 'root' ]];then
    echo_error 请使用root用户执行
    exit 99
fi

# Detect the operating system family and version
if grep -qs "ubuntu" /etc/os-release; then
    os="ubuntu"
    os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
    # Suppress the "configuration file changed" prompt during apt operations
    export UCF_FORCE_CONFFOLD=1
    # Suppress the needrestart "services need restart" prompt
    export NEEDRESTART_SUSPEND=1
elif [[ -e /etc/centos-release ]]; then
    os="centos"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
elif [[ -e /etc/rocky-release ]]; then
    os="rocky"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
elif [[ -e /etc/almalinux-release ]]; then
    os="alma"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
else
    echo_error 不支持的操作系统
    exit 99
fi
41 |
# Print post-install usage notes: where the zkui config lives, the main
# options, and how to start the service.
function show_summary() {
    echo_info 配置文件 :zkui/config.cfg
    echo -e "\033[37m    主要配置项:\033[0m"
    echo -e "\033[37m        serverPort  --  zkui监听的端口\033[0m"
    echo -e "\033[37m        zkServer    --  管理的zk,zk集群可以用逗号隔开\033[0m"
    echo -e "\033[37m        userSet     --  zkui的用户设置,role可以设置为ADMIN、USER,ADMIN有增删改的权限,USER只可以查看\033[0m"
    echo -e "\033[37m    启动命令 :cd zkui;  ./zkui.sh start&\033[0m"
    echo
}
51 |
# Quoted so "(B/S)" is not parsed by the shell
echo_info "本项目为服务端应用(B/S),如果需要使用客户端工具,可使用"
echo https://github.com/vran-dev/PrettyZoo
echo https://github.com/xin497668869/zookeeper-visualizer
echo

# zkui is a Java project built with maven, so both are hard requirements
java -version &> /dev/null
if [ $? -ne 0 ];then
    echo_error 未检测到jdk,请先部署jdk
    exit 1
fi

mvn -version &> /dev/null
if [ $? -ne 0 ];then
    echo_error 未检测到maven,请先部署maven
    exit 2
fi

# git is only needed to fetch the source; install it if missing
git --version &> /dev/null
if [ $? -ne 0 ];then
    echo_info 安装git中
    if [[ $os == 'centos' ]];then
        yum install -y git
    elif [[ $os == 'ubuntu' ]];then
        apt install -y git
    elif [[ $os == 'rocky' || $os == 'alma' ]];then
        dnf install -y git
    fi
fi

if [ ! -d zkui ];then
    echo_info 从github下载zkui,项目地址:https://github.com/DeemOpen/zkui.git
    # Clone through a proxy for hosts without direct github access
    git clone https://cors.isteed.cc/https://github.com/DeemOpen/zkui.git
    # Abort on clone failure; the original continued and ran maven in the
    # wrong directory when the download failed.
    if [ $? -ne 0 ];then
        echo_error 下载zkui失败,请检查网络后重试
        exit 4
    fi
else
    echo_info 发现zkui目录
fi

# Guard the cd for the same reason: never build in the wrong directory
cd zkui || exit 4

if [ -f target/zkui-2.0-SNAPSHOT-jar-with-dependencies.jar ];then
    echo_info 检测到zkui jar包,可直接启动
    show_summary
else
    echo_info 打包中
    mvn clean install
    if [ $? -eq 0 ];then
        show_summary
    else
        echo_error 打包出错,请检查
        exit 3
    fi
fi
103 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/zhegeshijiehuiyouai/RoadToDevOps/stargazers)
2 | [](https://github.com/zhegeshijiehuiyouai/RoadToDevOps/fork)
3 |
4 | ***本项目最初在 CentOS 7.9 环境下开发***
5 | ***部分脚本适配 ubuntu 20.04 / ubuntu 22.04 / ubuntu 24.04 / AlmaLinux 9.4 / RockyLinux 9.4***
6 |
7 | ## 🔧 脚本用法
8 | - 本项目中的各脚本,请在 `/root/` 目录以外的任意普通目录执行,否则部分脚本无法执行成功,建议 `/data/` 目录(虽然大部分脚本在 `/root/` 目录也能执行成功)。
9 | - 对于需要下载包的脚本,都提供了在线和离线安装的方法。离线安装的话,只需要将脚本和下载包放在同一目录即可。
10 | - 如果在离线环境下部署依赖于 `yum` 等工具,需要在线下载部署的,可使用以下命令将rpm包下载到本地后离线安装。
11 | ```shell
12 | yum install --downloadonly --downloaddir=/data/xxxpackage/ 包名
13 | cd /data/xxxpackage/
14 | rpm -Uvh ./*rpm
15 | ```
16 |
17 |
18 | > 项目致力于实现一键部署各种常见服务,实现常用功能,且具有幂等性(多次执行效果一致)的脚本,如果发现有bug,请提 issues 🙋♂️
19 |
20 | ## 📚 目录结构
21 | ```shell
22 | .
23 | ├── 01-installation-scripts
24 | │ ├── 01-MySQL
25 | │ ├── 02-Zabbix
26 | │ ├── 03-Jumpserver
27 | │ ├── 04-Docker
28 | │ ├── 05-Jenkins
29 | │ ├── 06-Gitlab
30 | │ ├── 07-Nginx-tengine
31 | │ ├── 08-Elasticsearch
32 | │ ├── 09-Redis
33 | │ ├── 10-GoAccess
34 | │ ├── 11-vsftp
35 | │ ├── 12-MongoDB
36 | │ ├── 13-jdk
37 | │ ├── 14-zookeeper
38 | │ ├── 15-maven
39 | │ ├── 16-kafka
40 | │ ├── 17-rabbitmq
41 | │ ├── 18-sftpgo
42 | │ ├── 19-rsyncd
43 | │ ├── 20-nfs
44 | │ ├── 21-tomcat
45 | │ ├── 22-prometheus
46 | │ ├── 23-grafana
47 | │ ├── 24-PostgreSQL
48 | │ ├── 25-RocketMQ
49 | │ ├── 26-Nexus
50 | │ ├── 27-yapi
51 | │ ├── 28-Node.js
52 | │ ├── 29-code-push-server
53 | │ ├── 30-openvpn
54 | │ ├── 31-clickhouse
55 | │ ├── 32-nacos
56 | │ ├── 33-flink
57 | │ ├── 34-apollo
58 | │ ├── 35-consul
59 | │ ├── 36-flexgw
60 | │ ├── 37-wireguard
61 | │ ├── 38-sqlite3
62 | │ ├── 39-git
63 | │ ├── 40-ffmpeg
64 | │ ├── 41-pyenv
65 | │ ├── 42-sonarqube
66 | │ ├── 43-goenv
67 | │ └── 44-shc
68 | ├── 02-elasticsearch-tools
69 | │ ├── 01-clean-single-es-index-by-date.sh
70 | │ └── 02-clean-date-format-es-index-by-date.sh
71 | ├── 03-Dockerfile
72 | │ ├── 01-nacos
73 | │ ├── 02-feely-sys
74 | │ ├── 03-centos
75 | │ ├── 04-rocksdb
76 | │ └── 05-java
77 | ├── 04-disk-tools
78 | │ ├── 01-Create-Swap
79 | │ ├── 02-Create-LVM
80 | │ ├── 03-delete-empty-dir.sh
81 | │ └── 04-wipe-data-disk.sh
82 | ├── 05-system-tools
83 | │ ├── 01-check-package-manager.sh
84 | │ ├── 02-update-openssl-and-openssh.sh
85 | │ ├── 03-init-system.sh
86 | │ ├── 04-tcp-connection-state-counter.sh
87 | │ ├── 05-uq.sh
88 | │ ├── 06-update-kernel.sh
89 | │ ├── 07-show-file-create-time.sh
90 | │ ├── 08-update-gcc.sh
91 | │ ├── 09-update-make.sh
92 | │ └── 10-update-glibc.sh
93 | ├── 06-Antivirus-tools
94 | │ └── 01-kill-miner-proc.sh
95 | ├── 07-java-tools
96 | │ ├── 01-show-busy-java-threads.sh
97 | │ ├── 02-show-duplicate-java-classes.py
98 | │ └── 03-find-in-jars.sh
99 | ├── 08-ssl-tools
100 | │ ├── 01-ssl-gen
101 | │ └── 02-ssl-check
102 | ├── 09-parse-file
103 | │ ├── 01-yaml
104 | │ └── 02-ini
105 | ├── 10-pve-vmware-tools
106 | │ └── 01-pve-to-vmware
107 | ├── 11-k8s-tools
108 | │ └── 01-create-k8s-admin-user.sh
109 | └── README.md
110 |
111 | ```
112 |
--------------------------------------------------------------------------------
/01-installation-scripts/28-Node.js/02-install-nvm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
# Colorized logging helpers: "[HH:MM:SS] [LEVEL] message"
function echo_info() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
}
function echo_warning() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
}
function echo_error() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
}
13 |
# Package download directory (kept for consistency with sibling scripts)
src_dir=$(pwd)/00src00
# nvm will be installed into this directory
nvm_root=$(pwd)/nvm
# github mirror/proxy prefix used for cloning
GITHUB="https://mirror.ghproxy.com/https://github.com"

echo_info 前置条件检测

# Abort unless run as root
if [[ $(whoami) != 'root' ]];then
    echo_error 请使用root用户执行
    exit 99
fi

# git is required to clone the nvm repository
if ! command -v git 1>/dev/null 2>&1; then
    echo_error 未检测到 git 命令,请先安装 git
    exit 1
fi

# Refuse to install twice over an existing directory
if [ -d $nvm_root ];then
    echo_error ${nvm_root} 目录已存在,请检查是否重复安装
    exit 1
fi

# Detect the operating system family and version
if grep -qs "ubuntu" /etc/os-release; then
    os="ubuntu"
    os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
    # Suppress the "configuration file changed" prompt during apt operations
    export UCF_FORCE_CONFFOLD=1
    # Suppress the needrestart "services need restart" prompt
    export NEEDRESTART_SUSPEND=1
elif [[ -e /etc/centos-release ]]; then
    os="centos"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
elif [[ -e /etc/rocky-release ]]; then
    os="rocky"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
elif [[ -e /etc/almalinux-release ]]; then
    os="alma"
    os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
else
    echo_error 不支持的操作系统
    exit 99
fi
59 |
# Helpers for fetching a git repository at a pinned branch.
failed_checkout() {
    echo_error "克隆失败:$1"
    exit 2
}

# checkout <repo-url> <target-dir> <branch>: shallow-clone the branch unless
# the target directory already exists; abort the script on clone failure.
checkout() {
    if [ ! -d "$2" ]; then
        git -c advice.detachedHead=0 clone --branch "$3" --depth 1 "$1" "$2" || failed_checkout "$1"
    fi
    echo_info "$1 完成"
}
70 |
echo_info 克隆nvm项目
checkout "${GITHUB}/nvm-sh/nvm.git" "${nvm_root}" "master"
mkdir -p ${nvm_root}/versions/node

echo_info 添加环境变量到~/.bashrc
cat >> ~/.bashrc << _EOF_
# nvm
export NVM_DIR="${nvm_root}"
[ -s "\$NVM_DIR/nvm.sh" ] && \. "\$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "\$NVM_DIR/bash_completion" ] && \. "\$NVM_DIR/bash_completion"  # This loads nvm bash_completion
_EOF_

# Load nvm into THIS shell directly instead of "source ~/.bashrc": most
# distros' ~/.bashrc returns immediately for non-interactive shells, so
# sourcing it from a script leaves the nvm command undefined and the
# version report below would fail.
export NVM_DIR="${nvm_root}"
[ -s "${NVM_DIR}/nvm.sh" ] && \. "${NVM_DIR}/nvm.sh"
# Message is quoted so the parentheses are not parsed by the shell
echo_info "nvm($(nvm --version))已安装完毕,请重新加载终端以激活nvm命令。"
echo_info nvm升级命令:
echo "cd ${nvm_root} && git pull"
echo
echo_warning 本脚本仅为root用户添加了nvm,若需为其他用户添加,请在该用户\~/.bashrc中添加以下内容
cat << _EOF_
# nvm
export NVM_DIR="${nvm_root}"
[ -s "\$NVM_DIR/nvm.sh" ] && \. "\$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "\$NVM_DIR/bash_completion" ] && \. "\$NVM_DIR/bash_completion"  # This loads nvm bash_completion
_EOF_
echo
echo_info 如果nvm install x.y.z命令卡住,可以手动下载nodejs安装,以16.20.2为例
cat << _EOF_
export v=16.20.2
cd ${nvm_root}/versions/node
wget https://registry.npmmirror.com/-/binary/node/v\${v}/node-v\${v}-linux-x64.tar.xz
tar xf node-v\${v}-linux-x64.tar.xz && mv node-v\${v}-linux-x64 v\${v} && rm -f node-v\${v}-linux-x64.tar.xz
nvm ls
_EOF_
echo
echo_info npm 设置阿里源命令
echo "npm config set registry https://registry.npmmirror.com"
echo
--------------------------------------------------------------------------------
/01-installation-scripts/29-code-push-server/config.js:
--------------------------------------------------------------------------------
// Runtime configuration for code-push-server. Most values come from
// environment variables so the same file works in docker-compose deployments.
var config = {};
config.development = {
    // Config for database, only support mysql.
    db: {
        username: process.env.MYSQL_USERNAME,
        password: process.env.MYSQL_PASSWORD,
        database: process.env.MYSQL_DATABASE,
        host: process.env.MYSQL_HOST,
        port: process.env.MYSQL_PORT || 3306,
        dialect: "mysql",
        logging: false,
        operatorsAliases: false,
    },
    // Config for local storage when storageType value is "local".
    local: {
        // Binary files storage dir. Do not use tmpdir or its public download dir.
        storageDir: process.env.STORAGE_DIR,
        // Download host address that Code Push Server listens on; the files are stored in storageDir.
        downloadUrl: process.env.DOWNLOAD_URL,
        // Public static download namespace.
        public: '/download'
    },
    jwt: {
        // Recommended: 63 random alpha-numeric characters
        // Generate using: https://www.grc.com/passwords.htm
        tokenSecret: 'INSERT_RANDOM_TOKEN_KEY'
    },
    common: {
        /*
         * tryLoginTimes limits login errors to resist brute-force attacks.
         * 0 means no limit (less safe for the account); a positive number is
         * the number of attempts allowed per day, and requires a configured
         * redis server.
         */
        tryLoginTimes: 4,
        // CodePush Web(https://github.com/lisong/code-push-web) login address.
        //codePushWebUrl: "http://127.0.0.1:3001/login",
        // Number of patch updates to create. Default value is 3.
        diffNums: 3,
        // Data dir for calculating diff files; it's an optimization.
        dataDir: process.env.DATA_DIR,
        // storageType is where your binary package files are stored. Options: ("local" | "qiniu" | "s3")
        storageType: "local",
        // When true, updateCheck results are cached in redis.
        updateCheckCache: false,
        // When true, rollout results are cached in redis.
        rolloutClientUniqueIdCache: false,
    },
    // Config for smtp email; the register module uses it to validate user email. See https://github.com/nodemailer/nodemailer
    smtpConfig:{
        host: "smtp.aliyun.com",
        port: 465,
        secure: true,
        auth: {
            user: "",
            pass: ""
        }
    },
    // Config for redis (register module, tryLoginTimes module)
    redis: {
        default: {
            host: process.env.REDIS_HOST,
            port: process.env.REDIS_PORT || 6379,
            // NOTE(review): options.error may be undefined for some retry
            // events — verify before dereferencing .code.
            retry_strategy: function (options) {
                if (options.error.code === 'ECONNREFUSED') {
                    // End reconnecting on a specific error and flush all commands with an individual error
                    return new Error('The server refused the connection');
                }
                if (options.total_retry_time > 1000 * 60 * 60) {
                    // End reconnecting after a specific timeout and flush all commands with an individual error
                    return new Error('Retry time exhausted');
                }
                if (options.times_connected > 10) {
                    // End reconnecting with built-in error
                    return undefined;
                }
                // Back off: retry after attempt*100 ms, capped at 3000 ms
                return Math.max(options.attempt * 100, 3000);
            }
        }
    }
}
82 |
// Log4js config: everything goes to the console; only errors from the
// default category, info-level for startup and http request logging.
config.development.log4js = {
    appenders: {console: { type: 'console'}},
    categories : {
        "default": { appenders: ['console'], level:'error'},
        "startup": { appenders: ['console'], level:'info'},
        "http": { appenders: ['console'], level:'info'}
    }
}

// Production reuses the development settings. NOTE(review): Object.assign is
// a shallow copy — nested objects (db, redis, ...) are shared with
// config.development, so mutating one mutates both.
config.production = Object.assign({}, config.development);
module.exports = config;
94 |
--------------------------------------------------------------------------------
/01-installation-scripts/29-code-push-server/README.md:
--------------------------------------------------------------------------------
1 | ## 修改自 [https://github.com/lisong/code-push-server](https://github.com/lisong/code-push-server)
2 | # docker 部署 code-push-server
3 |
4 | >该文档用于描述docker部署code-push-server,实例包含三个部分
5 |
6 | - code-push-server部分
7 | - 更新包默认采用`local`存储(即存储在本地机器上)。使用docker volume存储方式,容器销毁不会导致数据丢失,除非人为删除volume。
8 | - 内部使用pm2 cluster模式管理进程,默认开启进程数为cpu数,可以根据自己机器配置设置docker-compose.yml文件中deploy参数。
9 | - docker-compose.yml只提供了应用的一部分参数设置,如需要设置其他配置,可以修改文件config.js。
10 | - mysql部分
11 | - 数据使用docker volume存储方式,容器销毁不会导致数据丢失,除非人为删除volume。
12 | - 应用请勿使用root用户,为了安全可以创建权限相对较小的权限供code-push-server使用,只需要给予`select,update,insert`权限即可。初始化数据库需要使用root或有建表权限用户
13 | - redis部分
14 | - `tryLoginTimes` 登录错误次数限制
15 | - `updateCheckCache` 提升应用性能
16 | - `rolloutClientUniqueIdCache` 灰度发布
17 |
18 | ## 安装docker
19 |
20 | 参考docker官方安装教程
21 |
22 | - [>>mac点这里](https://docs.docker.com/docker-for-mac/install/)
23 | - [>>windows点这里](https://docs.docker.com/docker-for-windows/install/)
24 | - [>>linux点这里](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
25 |
26 |
27 | `$ docker info` 能成功输出相关信息,则安装成功,才能继续下面步骤
28 |
29 | ## 启动swarm
30 |
31 | ```shell
32 | $ sudo docker swarm init
33 | ```
34 |
35 |
36 | ## 获取代码
37 |
38 | ```shell
39 | $ git clone https://github.com/lisong/code-push-server.git
40 | $ cd code-push-server/docker
41 | ```
42 |
43 | ## 修改配置文件
44 |
45 | ```shell
46 | $ vim docker-compose.yml
47 | ```
48 |
49 | *将`DOWNLOAD_URL`中`YOU_MACHINE_IP`替换成本机外网ip或者域名*
50 |
51 | *将`MYSQL_HOST`中`YOU_MACHINE_IP`替换成本机内网ip*
52 |
53 | *将`REDIS_HOST`中`YOU_MACHINE_IP`替换成本机内网ip*
54 |
55 | ## jwt.tokenSecret修改
56 |
57 | > code-push-server 验证登录验证方式使用的json web token加密方式,该对称加密算法是公开的,所以修改config.js中tokenSecret值很重要。
58 |
59 | *非常重要!非常重要! 非常重要!*
60 |
61 | > 可以打开连接`https://www.grc.com/passwords.htm`获取 `63 random alpha-numeric characters`类型的随机生成数作为密钥
62 |
63 | ## 部署
64 |
65 | ```shell
66 | $ sudo docker stack deploy -c docker-compose.yml code-push-server
67 | ```
68 |
69 | > 如果网速不佳,需要漫长而耐心的等待。。。去和妹子聊会天吧^_^
70 |
71 |
72 | ## 查看进展
73 |
74 | ```shell
75 | $ sudo docker service ls
76 | $ sudo docker service ps code-push-server_db
77 | $ sudo docker service ps code-push-server_redis
78 | $ sudo docker service ps code-push-server_server
79 | ```
80 |
81 | > 确认`CURRENT STATE` 为 `Running about ...`, 则已经部署完成
82 |
83 | ## 访问接口简单验证
84 |
85 | `$ curl -I http://YOUR_CODE_PUSH_SERVER_IP:3000/`
86 |
87 | 返回`200 OK`
88 |
89 | ```http
90 | HTTP/1.1 200 OK
91 | X-DNS-Prefetch-Control: off
92 | X-Frame-Options: SAMEORIGIN
93 | Strict-Transport-Security: max-age=15552000; includeSubDomains
94 | X-Download-Options: noopen
95 | X-Content-Type-Options: nosniff
96 | X-XSS-Protection: 1; mode=block
97 | Content-Type: text/html; charset=utf-8
98 | Content-Length: 592
99 | ETag: W/"250-IiCMcM1ZUFSswSYCU0KeFYFEMO8"
100 | Date: Sat, 25 Aug 2018 15:45:46 GMT
101 | Connection: keep-alive
102 | ```
103 |
104 | ## 浏览器登录
105 |
106 | > 默认用户名:admin 密码:123456 记得要修改默认密码哦
107 | > 如果登录连续输错密码超过一定次数,会限定无法再登录. 需要清空redis缓存
108 |
109 | ```shell
110 | $ redis-cli -p6388 # 进入redis
111 | > flushall
112 | > quit
113 | ```
114 |
115 |
116 | ## 查看服务日志
117 |
118 | ```shell
119 | $ sudo docker service logs code-push-server_server
120 | $ sudo docker service logs code-push-server_db
121 | $ sudo docker service logs code-push-server_redis
122 | ```
123 |
124 | ## 查看存储 `docker volume ls`
125 |
126 | DRIVER | VOLUME NAME | 描述
127 | ------ | ----- | -------
128 | local | code-push-server_data-mysql | 数据库存储数据目录
129 | local | code-push-server_data-storage | 存储打包文件目录
130 | local | code-push-server_data-tmp | 用于计算更新包差异文件临时目录
131 | local | code-push-server_data-redis | redis落地数据
132 |
133 | ## 销毁退出应用
134 |
135 | ```bash
136 | $ sudo docker stack rm code-push-server
137 | $ sudo docker swarm leave --force
138 | ```
139 |
--------------------------------------------------------------------------------
/08-ssl-tools/01-ssl-gen/ca.cnf:
--------------------------------------------------------------------------------
1 | [ ca ]
2 | default_ca = Fishdrowned_ROOT_CA
3 |
4 | [ Fishdrowned_ROOT_CA ]
5 | new_certs_dir = ./out/newcerts
6 | certificate = ./out/root.crt
7 | database = ./out/index.txt
8 | private_key = ./out/root.key.pem
9 | serial = ./out/serial
10 | unique_subject = no
11 | default_days = 7300
12 | default_md = sha256
13 | policy = policy_loose
14 | x509_extensions = ca_extensions
15 | copy_extensions = copy
16 |
17 | [ policy_loose ]
18 | countryName = optional
19 | stateOrProvinceName = optional
20 | localityName = optional
21 | organizationName = optional
22 | organizationalUnitName = optional
23 | commonName = supplied
24 | emailAddress = optional
25 |
26 | [ ca_extensions ]
27 | basicConstraints = CA:false
28 | nsComment = "OpenSSL Generated Server Certificate"
29 | subjectKeyIdentifier = hash
30 | authorityKeyIdentifier = keyid:always
31 | subjectAltName = IP:127.0.0.1
32 | keyUsage = digitalSignature,keyEncipherment
33 | extendedKeyUsage = serverAuth
34 |
35 | [ req ]
36 | # Options for the `req` tool (`man req`).
37 | default_bits = 2048
38 | distinguished_name = req_distinguished_name
39 | string_mask = utf8only
40 |
41 | # SHA-1 is deprecated, so use SHA-2 instead.
42 | default_md = sha256
43 |
44 | # Extension to add when the -x509 option is used.
45 | x509_extensions = v3_ca
46 |
47 | [ req_distinguished_name ]
# Distinguished Name fields prompted for when generating a request (see "man req").
49 | countryName = Country Name (2 letter code)
50 | stateOrProvinceName = State or Province Name
51 | localityName = Locality Name
52 | 0.organizationName = Organization Name
53 | organizationalUnitName = Organizational Unit Name
54 | commonName = Common Name
55 | emailAddress = Email Address
56 |
57 | # Optionally, specify some defaults.
58 | countryName_default = CN
59 | stateOrProvinceName_default = Guangdong
60 | localityName_default = Guangzhou
61 | 0.organizationName_default = Fishdrowned
62 | organizationalUnitName_default =
63 | emailAddress_default =
64 |
65 | [ v3_ca ]
66 | # Extensions for a typical CA (`man x509v3_config`).
67 | subjectKeyIdentifier = hash
68 | authorityKeyIdentifier = keyid:always,issuer
69 | basicConstraints = critical, CA:true
70 | keyUsage = critical, digitalSignature, cRLSign, keyCertSign
71 |
72 | [ v3_intermediate_ca ]
73 | # Extensions for a typical intermediate CA (`man x509v3_config`).
74 | subjectKeyIdentifier = hash
75 | authorityKeyIdentifier = keyid:always,issuer
76 | basicConstraints = critical, CA:true, pathlen:0
77 | keyUsage = critical, digitalSignature, cRLSign, keyCertSign
78 |
79 | [ usr_cert ]
80 | # Extensions for client certificates (`man x509v3_config`).
81 | basicConstraints = CA:FALSE
82 | nsCertType = client, email
83 | nsComment = "OpenSSL Generated Client Certificate"
84 | subjectKeyIdentifier = hash
85 | authorityKeyIdentifier = keyid,issuer
86 | keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
87 | extendedKeyUsage = clientAuth, emailProtection
88 |
89 | [ server_cert ]
90 | # Extensions for server certificates (`man x509v3_config`).
91 | basicConstraints = CA:FALSE
92 | nsCertType = server
93 | nsComment = "OpenSSL Generated Server Certificate"
94 | subjectKeyIdentifier = hash
95 | authorityKeyIdentifier = keyid,issuer:always
96 | keyUsage = critical, digitalSignature, keyEncipherment
97 | extendedKeyUsage = serverAuth
98 |
99 | [ ocsp ]
100 | # Extension for OCSP signing certificates (`man ocsp`).
101 | basicConstraints = CA:FALSE
102 | subjectKeyIdentifier = hash
103 | authorityKeyIdentifier = keyid,issuer
104 | keyUsage = critical, digitalSignature
105 | extendedKeyUsage = critical, OCSPSigning
106 |
--------------------------------------------------------------------------------
/01-installation-scripts/35-consul/k8s-yaml/provisioner-nfs-dev-01.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolume
4 | metadata:
5 | finalizers:
6 | - kubernetes.io/pv-protection
7 | name: nfs-pv-dev-01
8 | spec:
9 | accessModes:
10 | - ReadWriteMany
11 | capacity:
12 | storage: 20Gi
13 | mountOptions: []
14 | nfs:
15 | path: /data/nfsdata
16 | server: 172.16.20.8
17 | persistentVolumeReclaimPolicy: Retain
18 | storageClassName: nfs-storageclass-provisioner
19 | volumeMode: Filesystem
20 |
21 | ---
22 | apiVersion: v1
23 | kind: PersistentVolumeClaim
24 | metadata:
25 | finalizers:
26 | - kubernetes.io/pvc-protection
27 | name: nfs-pvc-dev-01
28 | namespace: kube-system
29 | spec:
30 | accessModes:
31 | - ReadWriteMany
32 | resources:
33 | requests:
34 | storage: 20Gi
35 | storageClassName: nfs-storageclass-provisioner
36 | volumeMode: Filesystem
37 | volumeName: nfs-pv-dev-01
38 |
39 | ---
40 | apiVersion: v1
41 | kind: ServiceAccount
42 | metadata:
43 | name: eip-nfs-client-provisioner
44 | namespace: kube-system
45 |
46 | ---
47 | apiVersion: rbac.authorization.k8s.io/v1
48 | kind: ClusterRole
49 | metadata:
50 | name: eip-nfs-client-provisioner-runner
51 | rules:
52 | - apiGroups:
53 | - ''
54 | resources:
55 | - nodes
56 | verbs:
57 | - get
58 | - list
59 | - watch
60 | - apiGroups:
61 | - ''
62 | resources:
63 | - persistentvolumes
64 | verbs:
65 | - get
66 | - list
67 | - watch
68 | - create
69 | - delete
70 | - apiGroups:
71 | - ''
72 | resources:
73 | - persistentvolumeclaims
74 | verbs:
75 | - get
76 | - list
77 | - watch
78 | - update
79 | - apiGroups:
80 | - storage.k8s.io
81 | resources:
82 | - storageclasses
83 | verbs:
84 | - get
85 | - list
86 | - watch
87 | - apiGroups:
88 | - ''
89 | resources:
90 | - events
91 | verbs:
92 | - create
93 | - update
94 | - patch
95 |
96 | ---
97 | apiVersion: rbac.authorization.k8s.io/v1
98 | kind: ClusterRoleBinding
99 | metadata:
100 | name: eip-run-nfs-client-provisioner
101 | roleRef:
102 | apiGroup: rbac.authorization.k8s.io
103 | kind: ClusterRole
104 | name: eip-nfs-client-provisioner-runner
105 | subjects:
106 | - kind: ServiceAccount
107 | name: eip-nfs-client-provisioner
108 | namespace: kube-system
109 |
110 | ---
111 | apiVersion: rbac.authorization.k8s.io/v1
112 | kind: Role
113 | metadata:
114 | name: eip-leader-locking-nfs-client-provisioner
115 | namespace: kube-system
116 | rules:
117 | - apiGroups:
118 | - ''
119 | resources:
120 | - endpoints
121 | verbs:
122 | - get
123 | - list
124 | - watch
125 | - create
126 | - update
127 | - patch
128 |
129 | ---
130 | apiVersion: rbac.authorization.k8s.io/v1
131 | kind: RoleBinding
132 | metadata:
133 | name: eip-leader-locking-nfs-client-provisioner
134 | namespace: kube-system
135 | roleRef:
136 | apiGroup: rbac.authorization.k8s.io
137 | kind: Role
138 | name: eip-leader-locking-nfs-client-provisioner
139 | subjects:
140 | - kind: ServiceAccount
141 | name: eip-nfs-client-provisioner
142 | namespace: kube-system
143 |
144 | ---
145 | apiVersion: apps/v1
146 | kind: Deployment
147 | metadata:
148 | labels:
149 | app: eip-nfs-dev-01
150 | name: eip-nfs-dev-01
151 | namespace: kube-system
152 | spec:
153 | replicas: 1
154 | selector:
155 | matchLabels:
156 | app: eip-nfs-dev-01
157 | strategy:
158 | type: Recreate
159 | template:
160 | metadata:
161 | labels:
162 | app: eip-nfs-dev-01
163 | spec:
164 | containers:
165 | - env:
166 | - name: PROVISIONER_NAME
167 | value: nfs-dev-01
168 | - name: NFS_SERVER
169 | value: 172.16.20.8
170 | - name: NFS_PATH
171 | value: /data/nfsdata
172 | image: >-
173 | swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/nfs-subdir-external-provisioner:v4.0.2
174 | name: nfs-client-provisioner
175 | volumeMounts:
176 | - mountPath: /persistentvolumes
177 | name: nfs-client-root
178 | serviceAccountName: eip-nfs-client-provisioner
179 | volumes:
180 | - name: nfs-client-root
181 | persistentVolumeClaim:
182 | claimName: nfs-pvc-dev-01
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/kaf/install-kaf.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | kaf_version=0.2.6
4 | download_url=https://github.com/birdayz/kaf/releases/download/v${kaf_version}/kaf_${kaf_version}_Linux_x86_64.tar.gz
5 | src_dir=$(pwd)/00src00
6 |
7 |
8 | # Formatted echo helpers: timestamp + colored log-level prefix
9 | function echo_info() {
10 |     echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
11 | }
12 | function echo_warning() {
13 |     echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
14 | }
15 | function echo_error() {
16 |     echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
17 | }
18 |
19 |
20 |
21 | # 首先判断当前目录是否有压缩包:
22 | # I. 如果有压缩包,那么就在当前目录解压;
23 | # II.如果没有压缩包,那么就检查有没有 ${src_dir} 表示的目录;
24 | # 1) 如果有目录,那么检查有没有压缩包
25 | # ① 有压缩包就解压
26 | # ② 没有压缩包则下载压缩包
27 | # 2) 如果没有,那么就创建这个目录,然后 cd 到目录中,然后下载压缩包,然
28 | # 后解压
29 | # 解压的步骤都在后面,故此处只做下载
30 |
31 | # 语法: download_tar_gz 保存的目录 下载链接
32 | # 使用示例: download_tar_gz /data/openssh-update https://mirrors.cloud.tencent.com/openssl/source/openssl-1.1.1h.tar.gz
33 | function check_downloadfile() {
34 |     # Verify the file exists on the remote server (HEAD request) before downloading.
35 |     http_code=$(curl -IksS $1 | head -1 | awk '{print $2}')
36 |     # Quote and default the status so the test does not become a shell syntax
37 |     # error ("unary operator expected") when curl fails and http_code is empty.
38 |     if [ "${http_code:-0}" = "404" ];then
39 |         echo_error $1
40 |         echo_error 服务端文件不存在,退出
41 |         exit 98
42 |     fi
43 | }
42 | function download_tar_gz(){
43 |     download_file_name=$(echo $2 | awk -F"/" '{print $NF}')
44 |     back_dir=$(pwd)
45 |     file_in_the_dir='' # parent directory of the later build dir (global, read by the caller)
46 | 
47 |     ls $download_file_name &> /dev/null
48 |     if [ $? -ne 0 ];then
49 |         # No archive in the script's working directory
50 |         ls -d $1 &> /dev/null
51 |         if [ $? -ne 0 ];then
52 |             # ${src_dir} does not exist yet: create it, enter it, download there
53 |             mkdir -p $1 && cd $1
54 |             echo_info 下载 $download_file_name 至 $(pwd)/
55 |             # Install wget if it is missing
56 |             if [ ! -f /usr/bin/wget ];then
57 |                 echo_info 安装wget工具
58 |                 if [[ $os == "centos" ]];then # NOTE(review): $os is never set in this script, so wget is never auto-installed — confirm
59 |                     yum install -y wget
60 |                 elif [[ $os == "ubuntu" ]];then
61 |                     apt install -y wget
62 |                 elif [[ $os == 'rocky' || $os == 'alma' ]];then
63 |                     dnf install -y wget
64 |                 fi
65 |             fi
66 |             check_downloadfile $2
67 |             wget --no-check-certificate $2
68 |             if [ $? -ne 0 ];then
69 |                 echo_error 下载 $2 失败!
70 |                 exit 1
71 |             fi
72 |             file_in_the_dir=$(pwd)
73 |             # Return to the original directory so the function can be called again
74 |             cd ${back_dir}
75 |         else
76 |             # ${src_dir} exists
77 |             cd $1
78 |             ls $download_file_name &> /dev/null
79 |             if [ $? -ne 0 ];then
80 |                 # ${src_dir} does not contain the archive yet: download it here
81 |                 echo_info 下载 $download_file_name 至 $(pwd)/
82 |                 # Install wget if it is missing
83 |                 if [ ! -f /usr/bin/wget ];then
84 |                     echo_info 安装wget工具
85 |                     if [[ $os == "centos" ]];then
86 |                         yum install -y wget
87 |                     elif [[ $os == "ubuntu" ]];then
88 |                         apt install -y wget
89 |                     elif [[ $os == 'rocky' || $os == 'alma' ]];then
90 |                         dnf install -y wget
91 |                     fi
92 |                 fi
93 |                 check_downloadfile $2
94 |                 wget --no-check-certificate $2
95 |                 if [ $? -ne 0 ];then
96 |                     echo_error 下载 $2 失败!
97 |                     exit 1
98 |                 fi
99 |                 file_in_the_dir=$(pwd)
100 |                 cd ${back_dir}
101 |             else
102 |                 # Archive already present inside ${src_dir}
103 |                 echo_info 发现压缩包$(pwd)/$download_file_name
104 |                 file_in_the_dir=$(pwd)
105 |                 cd ${back_dir}
106 |             fi
107 |         fi
108 |     else
109 |         # Archive already present in the script's working directory
110 |         echo_info 发现压缩包$(pwd)/$download_file_name
111 |         file_in_the_dir=$(pwd)
112 |     fi
113 | }
114 |
115 | echo_info 下载kaf工具
116 | download_tar_gz $src_dir $download_url
117 | cd ${file_in_the_dir}
118 | 
119 | # Extract only the kaf binary from the archive
120 | echo_info 解压kaf
121 | tar xf kaf_${kaf_version}_Linux_x86_64.tar.gz kaf
122 | mv kaf /usr/local/bin
123 | echo_info 导入shell自动补全脚本
124 | kaf completion bash > /etc/bash_completion.d/kaf
125 | source /etc/profile
126 | echo_warning 由于bash机制问题,shell自动补全仅在之后的新终端中生效(当前终端不生效)
127 | 
128 | echo_info kaf部署完毕,版本:
129 | kaf --version
--------------------------------------------------------------------------------
/01-installation-scripts/31-clickhouse/02-clickhouse集群配置.md:
--------------------------------------------------------------------------------
1 | # Clickhouse集群配置
2 | ## 1、部署zookeeper集群
3 | 可参考 [这里](https://github.com/zhegeshijiehuiyouai/RoadToDevOps/tree/master/01-installation-scripts/14-zookeeper)
4 | ## 2、在各节点上分别执行以下脚本
5 | ```shell
6 | #!/bin/bash
7 | source /etc/profile
8 |
9 | echo 请确认已修改本脚本中zookeeper的地址 [ y/n ]
10 | read CONFIRM
11 | if [[ ! ${CONFIRM} == "y" ]];then
12 | echo 用户未确认,退出
13 | exit 1
14 | fi
15 |
16 | CONFIG_FILE_PATH=/etc/clickhouse-server/config.xml
17 | CH1=172.16.40.41
18 | CH2=172.16.40.35
19 | CH3=172.16.40.42
20 | CH_TCP_PORT=9000
21 |
22 | ZK1=172.16.40.41
23 | ZK2=172.16.40.35
24 | ZK3=172.16.40.42
25 | ZK_PORT=2181
26 |
27 | CH_CLUSTER_NAME=myclickhouse
28 | ########################################
29 |
30 | function get_machine_ip() {
31 | ip a | grep -E "bond" &> /dev/null
32 | if [ $? -eq 0 ];then
33 | echo_warning 检测到绑定网卡(bond),请手动输入使用的 ip :
34 | input_machine_ip_fun
35 | elif [ $(ip a | grep -E "inet.*e(ns|np|th).*[[:digit:]]+.*" | awk '{print $2}' | cut -d / -f 1 | wc -l) -gt 1 ];then
36 | echo_warning 检测到多个 ip,请手动输入使用的 ip :
37 | input_machine_ip_fun
38 | else
39 | machine_ip=$(ip a | grep -E "inet.*e(ns|np|th).*[[:digit:]]+.*" | awk '{print $2}' | cut -d / -f 1)
40 | fi
41 | }
42 | get_machine_ip
43 |
44 | echo 设置分片信息
45 | sed -i "/<remote_servers>/a\\
46 |     <${CH_CLUSTER_NAME}>\\
47 |     <shard> <\!-- 定义一个分片 -->\\
48 |     <\!-- Optional. Shard weight when writing data. Default: 1. -->\\
49 |     <weight>1</weight>\\
50 |     <\!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->\\
51 |     <internal_replication>false</internal_replication>\\
52 |     <\!-- 这个分片的副本存在在哪些机器上 -->\\
53 |     <replica>\\
54 |         <host>${CH1}</host>\\
55 |         <port>${CH_TCP_PORT}</port>\\
56 |     </replica>\\
57 |     <replica>\\
58 |         <host>${CH2}</host>\\
59 |         <port>${CH_TCP_PORT}</port>\\
60 |     </replica>\\
61 |     <replica>\\
62 |         <host>${CH3}</host>\\
63 |         <port>${CH_TCP_PORT}</port>\\
64 |     </replica>\\
65 |     </shard></${CH_CLUSTER_NAME}>" ${CONFIG_FILE_PATH}
66 |
67 | echo 指定本机地址
68 | sed -i "/<remote_servers>/i\\
69 |     <macros>\\
70 |     <\!-- 配置分片macros变量,在用client创建表的时候回自动带入 -->\\
71 |     <shard>1</shard>\\
72 |     <replica>${machine_ip}</replica> <\!-- 这里指定当前集群节点的名字或者IP -->\\
73 |     </macros>\\
74 | " ${CONFIG_FILE_PATH}
75 |
76 | echo 配置zookeeper地址
77 | sed -i "/ZooKeeper is used to store metadata about replicas/i\\
78 |     <zookeeper>\\
79 |     <node index=\"1\">\\
80 |         <host>${ZK1}</host>\\
81 |         <port>${ZK_PORT}</port>\\
82 |     </node>\\
83 |     <node index=\"2\">\\
84 |         <host>${ZK2}</host>\\
85 |         <port>${ZK_PORT}</port>\\
86 |     </node>\\
87 |     <node index=\"3\">\\
88 |         <host>${ZK3}</host>\\
89 |         <port>${ZK_PORT}</port>\\
90 |     </node>\\
91 |     </zookeeper>\\
92 | " ${CONFIG_FILE_PATH}
93 |
94 | echo 重启clickhouse
95 | systemctl restart clickhouse-server.service
96 |
97 | ```
98 | ## 3、验证
99 | 查询sql
100 | ```SQL
101 | select * from system.clusters;
102 | ```
103 |
104 | 建表测试,在任意一台上
105 | ```shell
106 | clickhouse-client -h 127.0.0.1 -u fuza --password fuzaDeMima --port 9000 -m
107 | ```
108 | ```SQL
109 | CREATE TABLE t1 ON CLUSTER myclickhouse
110 | (
111 | `ts` DateTime,
112 | `uid` String,
113 | `biz` String
114 | )
115 | ENGINE = ReplicatedMergeTree('/ClickHouse/test1/tables/{shard}/t1', '{replica}')
116 | PARTITION BY toYYYYMMDD(ts)
117 | ORDER BY ts
118 | SETTINGS index_granularity = 8192
119 | ######说明 {shard}自动获取对应配置文件的macros分片设置变量 replica一样 ENGINE = ReplicatedMergeTree,不能为之前的MergeTree
120 | ######'/ClickHouse/test1/tables/{shard}/t1' 是写入zk里面的地址,唯一,注意命名规范
121 |
122 | INSERT INTO t1 VALUES ('2019-06-07 20:01:01', 'a', 'show');
123 | INSERT INTO t1 VALUES ('2019-06-07 20:01:02', 'b', 'show');
124 | INSERT INTO t1 VALUES ('2019-06-07 20:01:03', 'a', 'click');
125 | INSERT INTO t1 VALUES ('2019-06-08 20:01:04', 'c', 'show');
126 | INSERT INTO t1 VALUES ('2019-06-08 20:01:05', 'c', 'click');
127 | ```
128 | 然后到集群的另外一台上查询
129 | ```shell
130 | clickhouse-client -h 127.0.0.1 -u fuza --password fuzaDeMima --port 9000 -m
131 | ```
132 | ```SQL
133 | select * from t1;
134 | ```
--------------------------------------------------------------------------------
/11-k8s-tools/01-create-k8s-admin-user.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ################################################################
3 | # 功能:创建k8s集群管理员账号(自动检测版本,兼容旧版kubectl)
4 | ################################################################
5 |
6 | # --- 配置区 ---
7 | USER="k8sadmin" # 要创建的管理员用户名
8 | CONFIG_PATH="${HOME}/.kube/config" # 当前有效的kubeconfig文件路径
9 | NAMESPACE="kube-system"
10 |
11 | # 带格式的echo函数
12 | function echo_info() {
13 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32m信息\033[0m] \033[37m$@\033[0m"
14 | }
15 | function echo_warning() {
16 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33m警告\033[0m] \033[1;37m$@\033[0m"
17 | }
18 | function echo_error() {
19 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41m错误\033[0m] \033[1;31m$@\033[0m"
20 | }
21 |
22 | # 1. 从现有配置中提取集群信息
23 | echo_info "正在从 ${CONFIG_PATH} 读取集群信息..."
24 | CERT_AUTH_DATA=$(cat "$CONFIG_PATH" | grep "certificate-authority-data:" | awk '{print $2}' | head -n 1)
25 | SERVER=$(cat "$CONFIG_PATH" | grep server | awk '{print $2}' | head -n 1)
26 | CLUSTER_NAME=$(KUBECONFIG="$CONFIG_PATH" kubectl config view --minify -o jsonpath='{.clusters[0].name}')
27 |
28 |
29 | # 2. 自动检测 Kubernetes 服务器版本
30 | echo_info "正在自动检测 Kubernetes 服务器版本..."
31 | SERVER_VERSION_LINE=$(kubectl --kubeconfig "$CONFIG_PATH" version | grep "Server Version:")
32 | SERVER_MINOR_VERSION=$(echo "$SERVER_VERSION_LINE" | sed 's/.*v[0-9]*\.\([0-9]*\).*/\1/')
33 |
34 | # 增加健壮性判断,如果获取失败则退出
35 | if ! [[ "$SERVER_MINOR_VERSION" =~ ^[0-9]+$ ]]; then
36 | echo_error "无法获取有效的 Kubernetes 服务器次要版本号。请检查您的网络连接和权限。" >&2
37 | exit 1
38 | fi
39 |
40 | echo_info "检测到服务器次要版本为: ${SERVER_MINOR_VERSION}。"
41 | # 根据版本号动态设置逻辑开关
42 | if [ "$SERVER_MINOR_VERSION" -ge 24 ]; then
43 | CLUSTER_VERSION_EXCEED_V_12_4="yes"
44 | else
45 | CLUSTER_VERSION_EXCEED_V_12_4="no"
46 | fi
47 |
48 | # 3. 根据版本判断结果,创建ServiceAccount和Secret以生成Token
49 | if [ "$CLUSTER_VERSION_EXCEED_V_12_4" != "no" ]; then
50 | # 适用于 Kubernetes v1.24+ 的逻辑
51 | echo_info "版本大于等于 1.24,使用新方法生成 Token。"
52 |
53 | # 创建一个不含 secrets 字段的 ServiceAccount
54 | cat <sa.yaml
55 | apiVersion: v1
56 | kind: ServiceAccount
57 | metadata:
58 | name: ${USER}
59 | namespace: ${NAMESPACE}
60 | EOF
61 | kubectl --kubeconfig $CONFIG_PATH apply -f sa.yaml &> /dev/null
62 |
63 | # 创建一个带注解的 Secret,让系统自动为其生成 Token
64 | cat <secret.yaml
65 | apiVersion: v1
66 | kind: Secret
67 | metadata:
68 | name: ${USER}-token
69 | namespace: ${NAMESPACE}
70 | annotations:
71 | kubernetes.io/service-account.name: ${USER}
72 | type: kubernetes.io/service-account-token
73 | EOF
74 | kubectl --kubeconfig $CONFIG_PATH apply -f secret.yaml &> /dev/null
75 |
76 | SECRET_NAME=${USER}-token
77 | else
78 | # 适用于 Kubernetes v1.24 之前的旧逻辑
79 | echo_info "版本小于 1.24,使用旧方法生成 Token。"
80 | kubectl --kubeconfig $CONFIG_PATH create sa "${USER}" -n ${NAMESPACE} &> /dev/null
81 | # 获取 ServiceAccount 自动创建的 Secret 名称
82 | SECRET_NAME=$(kubectl --kubeconfig $CONFIG_PATH get sa "${USER}" -n ${NAMESPACE} -o go-template --template="{{range.secrets}}{{.name}}{{end}}")
83 | fi
84 |
85 | # 4. 为 ServiceAccount 授予集群管理员权限
86 | echo_info "正在为 ServiceAccount ${USER} 绑定 cluster-admin 权限..."
87 | # 检查绑定是否已存在,避免报错
88 | if ! kubectl --kubeconfig "$CONFIG_PATH" get clusterrolebinding "${USER}-binding" &> /dev/null; then
89 | kubectl --kubeconfig $CONFIG_PATH create clusterrolebinding "${USER}-binding" --clusterrole=cluster-admin --serviceaccount=kube-system:"${USER}" &> /dev/null
90 | fi
91 |
92 | # 5. 提取生成的 Token
93 | echo_info "正在从 Secret ${SECRET_NAME} 中提取 Token..."
94 | TOKEN_ENCODING=$(kubectl --kubeconfig $CONFIG_PATH get secret "${SECRET_NAME}" -n ${NAMESPACE} -o go-template --template="{{.data.token}}")
95 | TOKEN=$(echo "${TOKEN_ENCODING}" | base64 -d)
96 |
97 | # 6. 生成并打印/保存全新的 kubeconfig 文件内容
98 | OUTPUT_KUBECONFIG_PATH="${USER}-kubeconfig.yaml"
99 | echo "======================= 新的 KUBECONFIG 内容 ======================="
100 | (
101 | echo "apiVersion: v1
102 | kind: Config
103 | clusters:
104 | - name: ${CLUSTER_NAME}
105 | cluster:
106 | server: ${SERVER}
107 | certificate-authority-data: ${CERT_AUTH_DATA}
108 | contexts:
109 | - name: ${CLUSTER_NAME}-${USER}-context-default
110 | context:
111 | cluster: ${CLUSTER_NAME}
112 | user: ${USER}
113 | current-context: ${CLUSTER_NAME}-${USER}-context-default
114 | users:
115 | - name: ${USER}
116 | user:
117 | token: ${TOKEN}"
118 | ) | tee "${OUTPUT_KUBECONFIG_PATH}"
119 | echo "=================================================================="
120 |
121 | # 7. 清理临时文件
122 | rm -f sa.yaml secret.yaml
123 | echo_info "清理临时文件完成。"
124 | echo_info "新的 kubeconfig 内容已打印在上方,并同时保存到了文件: ${PWD}/${OUTPUT_KUBECONFIG_PATH}"
--------------------------------------------------------------------------------
/05-system-tools/06-update-kernel.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ##### 配置 #####
4 | script_dir=$(dirname $(realpath $0))
5 | src_dir=${script_dir}/00src00
6 |
7 |
8 |
9 | # 带格式的echo函数
10 | function echo_info() {
11 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
12 | }
13 | function echo_warning() {
14 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
15 | }
16 | function echo_error() {
17 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
18 | }
19 |
20 | # 检测操作系统
21 | if grep -qs "ubuntu" /etc/os-release; then
22 | os="ubuntu"
23 | os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
24 | # 阻止配置更新弹窗
25 | export UCF_FORCE_CONFFOLD=1
26 | # 阻止应用重启弹窗
27 | export NEEDRESTART_SUSPEND=1
28 | elif [[ -e /etc/centos-release ]]; then
29 | os="centos"
30 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
31 | if echo "$os_version" | grep -q '^7'; then
32 | true
33 | else
34 | echo "仅支持centos7"
35 | exit 99
36 | fi
37 | else
38 | echo_error 不支持的操作系统
39 | exit 99
40 | fi
41 |
42 | function install_local_rpm() {
43 |     # Ask for confirmation, then install the given local kernel rpm.
44 |     # $1 - path to the kernel rpm package
45 |     exist_kernel_rpm=$1
46 |     echo_info "发现kernel安装包:${exist_kernel_rpm},是否安装[y/n]"
47 |     read is_install
48 |     case $is_install in
49 |     y|Y)
50 |         echo_info 安装${exist_kernel_rpm}
51 |         yum install -y ${exist_kernel_rpm}
52 |         ;;
53 |     n|N)
54 |         echo_info 用户取消
55 |         exit 0
56 |         ;;
57 |     *)
58 |         # Re-prompt on invalid input. Pass the rpm path again: the original
59 |         # recursive call had no argument, which reset exist_kernel_rpm to ""
60 |         install_local_rpm ${exist_kernel_rpm}
61 |         ;;
62 |     esac
63 | }
60 |
61 | function install_newest_ml_kernel() {
62 | echo_info 添加镜像源
63 | rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
64 | wait
65 | rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-5.el7.elrepo.noarch.rpm
66 | wait
67 | echo_info 安装主线最新版本的内核
68 | yum --enablerepo=elrepo-kernel install kernel-ml -y
69 | wait
70 | }
71 |
72 | function install_internet_rpm() { # read the menu choice printed by the caller; recurse until the input is valid
73 |     read choice
74 |     case $choice in
75 |     1)
76 |         install_newest_ml_kernel
77 |         ;;
78 |     2)
79 |         echo_info "提供两个kernel下载地址:"
80 |         echo "coreix源,包全,速度中等:http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/"
81 |         echo "阿里源,包较少,速度快: http://mirrors.aliyun.com/elrepo/kernel/el7/x86_64/RPMS/ (疑似已失效)"
82 |         exit 0
83 |         ;;
84 |     *)
85 |         install_internet_rpm # invalid input — prompt again
86 |         ;;
87 |     esac
88 | }
89 |
90 |
91 | ############################### 开始 ########################################
92 | echo_info "当前内核版本:$(uname -r)"
93 | # 判断本地是否有安装包
94 | exist_kernel_rpm=$(ls | egrep -o "^kernel-(lt|ml)-[0-9].*rpm$")
95 | if [ $? -eq 0 ];then
96 | install_local_rpm ${exist_kernel_rpm}
97 | elif [ -d ${src_dir} ];then
98 | exist_kernel_rpm=$(ls ${src_dir} | egrep -o "^kernel-(lt|ml)-[0-9].*rpm$")
99 | if [ $? -eq 0 ];then
100 | install_local_rpm ${src_dir}/${exist_kernel_rpm}
101 | else
102 | # 通过互联网安装
103 | echo_info "请输入数字选择升级到的kernel版本:"
104 | echo -e "\033[36m[1]\033[32m 主线最新版本\033[0m"
105 | echo -e "\033[36m[2]\033[32m 自己下载rpm包,然后上传到 $(pwd) ,再重新执行脚本\033[0m"
106 | install_internet_rpm
107 | fi
108 | # 通过互联网安装
109 | else
110 | echo_info "请输入数字选择升级到的kernel版本:"
111 | echo -e "\033[36m[1]\033[32m 主线最新版本\033[0m"
112 | echo -e "\033[36m[2]\033[32m 自己下载rpm包,然后上传到 $(pwd) ,再重新执行脚本\033[0m"
113 | install_internet_rpm
114 | fi
115 |
116 |
117 |
118 | echo_info 设置内核
119 | # 按照版本号对kernel进行排序
120 | newest_kernel=$(egrep ^menuentry /etc/grub2.cfg | cut -f 2 -d \' | awk -F "(" '{print $2}' | awk -F ")" '{print $1}' | sort -V | tail -1)
121 | newest_kernel_id=$(egrep ^menuentry /etc/grub2.cfg | cut -f 2 -d \' | cat -n | grep ${newest_kernel} | awk '{print $1}' | head -1)
122 | newest_kernel_id=$((newest_kernel_id - 1))
123 | grub2-set-default ${newest_kernel_id}
124 |
125 |
126 | echo_info 开启BBR算法
127 | echo 'net.core.default_qdisc=fq' > /etc/sysctl.d/bbr.conf
128 | echo 'net.ipv4.tcp_congestion_control=bbr' >> /etc/sysctl.d/bbr.conf
129 |
130 | echo_info 内核参数优化
131 | cat > /etc/sysctl.d/bbr.conf <
56 | {
57 | "podAntiAffinity": {
58 | "requiredDuringSchedulingRequiredDuringExecution": [{
59 | "labelSelector": {
60 | "matchExpressions": [{
61 | "key": "app",
62 | "operator": "In",
63 | "values": ["zk-headless"]
64 | }]
65 | },
66 | "topologyKey": "kubernetes.io/hostname"
67 | }]
68 | }
69 | }
70 | spec:
71 | containers:
72 | - name: k8szk
73 | imagePullPolicy: Always
74 | image: gcr.io/google_samples/k8szk:v1
75 | resources:
76 | requests:
77 | memory: "4Gi"
78 | cpu: "1"
79 | ports:
80 | - containerPort: 2181
81 | name: client
82 | - containerPort: 2888
83 | name: server
84 | - containerPort: 3888
85 | name: leader-election
86 | env:
87 | - name : ZK_ENSEMBLE
88 | valueFrom:
89 | configMapKeyRef:
90 | name: zk-config
91 | key: ensemble
92 | - name : ZK_HEAP_SIZE
93 | valueFrom:
94 | configMapKeyRef:
95 | name: zk-config
96 | key: jvm.heap
97 | - name : ZK_TICK_TIME
98 | valueFrom:
99 | configMapKeyRef:
100 | name: zk-config
101 | key: tick
102 | - name : ZK_INIT_LIMIT
103 | valueFrom:
104 | configMapKeyRef:
105 | name: zk-config
106 | key: init
107 | - name : ZK_SYNC_LIMIT
108 | valueFrom:
109 | configMapKeyRef:
110 | name: zk-config
111 | key: tick
112 | - name : ZK_MAX_CLIENT_CNXNS
113 | valueFrom:
114 | configMapKeyRef:
115 | name: zk-config
116 | key: client.cnxns
117 | - name: ZK_SNAP_RETAIN_COUNT
118 | valueFrom:
119 | configMapKeyRef:
120 | name: zk-config
121 | key: snap.retain
122 | - name: ZK_PURGE_INTERVAL
123 | valueFrom:
124 | configMapKeyRef:
125 | name: zk-config
126 | key: purge.interval
127 | - name: ZK_CLIENT_PORT
128 | value: "2181"
129 | - name: ZK_SERVER_PORT
130 | value: "2888"
131 | - name: ZK_ELECTION_PORT
132 | value: "3888"
133 | command:
134 | - sh
135 | - -c
136 | - zkGenConfig.sh && zkServer.sh start-foreground
137 | readinessProbe:
138 | exec:
139 | command:
140 | - "zkOk.sh"
141 | initialDelaySeconds: 15
142 | timeoutSeconds: 5
143 | livenessProbe:
144 | exec:
145 | command:
146 | - "zkOk.sh"
147 | initialDelaySeconds: 15
148 | timeoutSeconds: 5
149 | volumeMounts:
150 | - name: datadir
151 | mountPath: /var/lib/zookeeper
152 | securityContext:
153 | runAsUser: 1000
154 | fsGroup: 1000
155 | volumeClaimTemplates:
156 | - metadata:
157 | name: datadir
158 | annotations:
159 | volume.alpha.kubernetes.io/storage-class: anything
160 | spec:
161 | accessModes: ["ReadWriteOnce"]
162 | resources:
163 | requests:
164 | storage: 20Gi
--------------------------------------------------------------------------------
/01-installation-scripts/31-clickhouse/01-install-clickhouse.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CLICKHOUSE_HOME=/data/clickhouse
4 | CLICKHOUSE_TCP_PORT=9000
5 | CLICKHOUSE_HTTP_PORT=8123
6 | CLICKHOUSE_USER=fuza
7 | CLICKHOUSE_PASSWORD=fuzaDeMima
8 |
9 | # 带格式的echo函数
10 | function echo_info() {
11 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
12 | }
13 | function echo_warning() {
14 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
15 | }
16 | function echo_error() {
17 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
18 | }
19 |
20 |
21 | function create_repo() { # write a Tsinghua-mirror yum repo file for ClickHouse stable rpms
22 |     echo_info 创建clickhouse清华源repo仓库
23 |     cat > /etc/yum.repos.d/clickhouse.repo << EOF
24 | [repo.yandex.ru_clickhouse_rpm_stable_x86_64]
25 | name=clickhouse stable
26 | baseurl=https://mirrors.tuna.tsinghua.edu.cn/clickhouse/rpm/stable/x86_64
27 | enabled=1
28 | gpgcheck=0
29 | EOF
30 | }
31 |
32 | function config_clickhouse() {
33 |     # Point ClickHouse at ${CLICKHOUSE_HOME}, open the listen address, set the
34 |     # ports, then add a dedicated user to users.xml.
35 |     # NOTE(review): the XML tags in the sed expressions below were reconstructed —
36 |     # the originals were eaten by HTML escaping; verify against a stock config.xml.
37 |     echo_info 调整clickhouse配置
38 |     grep -E "[[:space:]]+<listen_host>0.0.0.0</listen_host>" ${CONFIG_FILE_PATH}
39 |     if [ $? -ne 0 ];then
40 |         sed -i "/ <\!-- <listen_host>0.0.0.0<\/listen_host> -->/a \    <listen_host>0.0.0.0</listen_host>" ${CONFIG_FILE_PATH}
41 |     fi
42 | 
43 |     sed -i "s@<http_port>.*</http_port>@<http_port>${CLICKHOUSE_HTTP_PORT}</http_port>@g" ${CONFIG_FILE_PATH}
44 |     sed -i "s@<tcp_port>.*</tcp_port>@<tcp_port>${CLICKHOUSE_TCP_PORT}</tcp_port>@g" ${CONFIG_FILE_PATH}
45 |     sed -i "s@/var/log/clickhouse-server/clickhouse-server.log@${CLICKHOUSE_HOME}/logs/clickhouse-server.log@g" ${CONFIG_FILE_PATH}
46 |     # NOTE(review): this generic /var/lib/clickhouse/ substitution already rewrites
47 |     # the more specific paths below; they are kept for parity with the original.
48 |     sed -i "s@/var/lib/clickhouse/@${CLICKHOUSE_HOME}/data/@g" ${CONFIG_FILE_PATH}
49 |     sed -i "s@/var/lib/clickhouse/format_schemas/@${CLICKHOUSE_HOME}/data/format_schemas/@g" ${CONFIG_FILE_PATH}
50 |     sed -i "s@/var/lib/clickhouse/access/@${CLICKHOUSE_HOME}/data/access/@g" ${CONFIG_FILE_PATH}
51 |     sed -i "s@/var/lib/clickhouse/tmp/@${CLICKHOUSE_HOME}/data/tmp/@g" ${CONFIG_FILE_PATH}
52 |     sed -i "s@/var/lib/clickhouse/user_files/@${CLICKHOUSE_HOME}/data/user_files/@g" ${CONFIG_FILE_PATH}
53 |     sed -i "s@/var/log/clickhouse-server/clickhouse-server.err.log@${CLICKHOUSE_HOME}/logs/clickhouse-server.err.log@g" ${CONFIG_FILE_PATH}
54 | 
55 |     echo_info 添加用户
56 |     sed -i "/<users>/a\\
57 |     <${CLICKHOUSE_USER}>\\
58 |         <password>${CLICKHOUSE_PASSWORD}</password>\\
59 |         <access_management>1</access_management>\\
60 |         <networks>\\
61 |             <ip>0.0.0.0/0</ip>\\
62 |         </networks>\\
63 |         <profile>default</profile>\\
64 |         <quota>default</quota>\\
65 |     </${CLICKHOUSE_USER}>" ${USER_FILE_PATH}
66 |     # Make the default user's access_management explicit (disabled).
67 |     # NOTE(review): the original expression was garbled to "s@1@0@g" by the same
68 |     # tag stripping — confirm the intended substitution target.
69 |     sed -i "s@<\!-- <access_management>1</access_management> -->@<access_management>0</access_management>@g" ${USER_FILE_PATH}
70 | }
62 |
63 | function check_domain() {
64 |     # Ensure the local hostname appears in /etc/hosts; map it to loopback if not.
65 |     echo_info 检查主机解析
66 |     CH_HOSTNAME=$(hostname)
67 |     if ! grep -nr "${CH_HOSTNAME}" /etc/hosts &> /dev/null; then
68 |         echo "127.0.0.1 ${CH_HOSTNAME}" >> /etc/hosts
69 |     fi
70 | }
71 |
72 | function echo_summary() { # start the service and print connection details
73 |     echo_info 启动clickhouse
74 |     systemctl start clickhouse-server.service
75 |     echo_info clickhouse已经部署完毕,相关信息如下:
76 |     echo -e "\033[37m 账号:${CLICKHOUSE_USER}\033[0m"
77 |     echo -e "\033[37m 密码:${CLICKHOUSE_PASSWORD}\033[0m"
78 |     echo -e "\033[37m TCP端口:${CLICKHOUSE_TCP_PORT}\033[0m"
79 |     echo -e "\033[37m HTTP端口:${CLICKHOUSE_HTTP_PORT}\033[0m"
80 |     echo -e "\033[37m 命令行连接:clickhouse-client -h 127.0.0.1 -u ${CLICKHOUSE_USER} --password ${CLICKHOUSE_PASSWORD} --port ${CLICKHOUSE_TCP_PORT} -m\033[0m"
81 | }
82 |
83 | function create_dirs() {
84 |     # Create the data/log directories and hand them to the clickhouse user.
85 |     mkdir -p ${CLICKHOUSE_HOME}/data ${CLICKHOUSE_HOME}/logs
86 |     chown -R clickhouse:clickhouse ${CLICKHOUSE_HOME}
87 | }
87 |
88 | function install_by_yum() { # install server+client via yum; sets the CONFIG_FILE_PATH/USER_FILE_PATH globals
89 |     echo_info 使用yum安装clickhouse
90 |     yum install -y clickhouse-server clickhouse-client
91 |     if [ $? -ne 0 ];then
92 |         echo_error 安装clickhouse失败,退出
93 |         exit 1
94 |     fi
95 |     CONFIG_FILE_PATH=/etc/clickhouse-server/config.xml
96 |     USER_FILE_PATH=/etc/clickhouse-server/users.xml
97 | }
98 |
99 | function main() { # repo -> install -> dirs -> config -> hosts entry -> start/summary
100 |     create_repo
101 |     install_by_yum
102 |     create_dirs
103 |     config_clickhouse
104 |     check_domain
105 |     echo_summary
106 | }
--------------------------------------------------------------------------------
/01-installation-scripts/40-ffmpeg/01-install-ffmpeg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ffmpeg_version=4.4.2
3 | download_dir=$(pwd)/00src00
4 |
5 |
6 | # 带格式的echo函数
7 | function echo_info() {
8 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
9 | }
10 | function echo_warning() {
11 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
12 | }
13 | function echo_error() {
14 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
15 | }
16 |
17 | # 首先判断当前目录是否有压缩包:
18 | # I. 如果有压缩包,那么就在当前目录解压;
19 | # II.如果没有压缩包,那么就检查有没有 ${src_dir} 表示的目录;
20 | # 1) 如果有目录,那么检查有没有压缩包
21 | # ① 有压缩包就解压
22 | # ② 没有压缩包则下载压缩包
23 | # 2) 如果没有,那么就创建这个目录,然后 cd 到目录中,然后下载压缩包,然
24 | # 后解压
25 | # 解压的步骤都在后面,故此处只做下载
26 |
27 | # 语法: download_tar_gz 保存的目录 下载链接
28 | # 使用示例: download_tar_gz /data/openssh-update https://mirrors.cloud.tencent.com/openssl/source/openssl-1.1.1h.tar.gz
29 | function check_downloadfile() {
30 |     # Verify the file exists on the remote server (HEAD request) before downloading.
31 |     http_code=$(curl -IksS $1 | head -1 | awk '{print $2}')
32 |     # Quote and default the status so the test does not become a shell syntax
33 |     # error ("unary operator expected") when curl fails and http_code is empty.
34 |     if [ "${http_code:-0}" = "404" ];then
35 |         echo_error $1
36 |         echo_error 服务端文件不存在,退出
37 |         exit 98
38 |     fi
39 | }
38 | function download_tar_gz(){
39 | download_file_name=$(echo $2 | awk -F"/" '{print $NF}')
40 | back_dir=$(pwd)
41 | file_in_the_dir='' # 这个目录是后面编译目录的父目录
42 |
43 | ls $download_file_name &> /dev/null
44 | if [ $? -ne 0 ];then
45 | # 进入此处表示脚本所在目录没有压缩包
46 | ls -d $1 &> /dev/null
47 | if [ $? -ne 0 ];then
48 | # 进入此处表示没有${src_dir}目录
49 | mkdir -p $1 && cd $1
50 | echo_info 下载 $download_file_name 至 $(pwd)/
51 | # 检测是否有wget工具
52 | if [ ! -f /usr/bin/wget ];then
53 | echo_info 安装wget工具
54 | if [[ $os == "centos" ]];then
55 | yum install -y wget
56 | elif [[ $os == "ubuntu" ]];then
57 | apt install -y wget
58 | elif [[ $os == 'rocky' || $os == 'alma' ]];then
59 | dnf install -y wget
60 | fi
61 | fi
62 | check_downloadfile $2
63 | wget --no-check-certificate $2
64 | if [ $? -ne 0 ];then
65 | echo_error 下载 $2 失败!
66 | exit 1
67 | fi
68 | file_in_the_dir=$(pwd)
69 | # 返回脚本所在目录,这样这个函数才可以多次使用
70 | cd ${back_dir}
71 | else
72 | # 进入此处表示有${src_dir}目录
73 | cd $1
74 | ls $download_file_name &> /dev/null
75 | if [ $? -ne 0 ];then
76 | # 进入此处表示${src_dir}目录内没有压缩包
77 | echo_info 下载 $download_file_name 至 $(pwd)/
78 | # 检测是否有wget工具
79 | if [ ! -f /usr/bin/wget ];then
80 | echo_info 安装wget工具
81 | if [[ $os == "centos" ]];then
82 | yum install -y wget
83 | elif [[ $os == "ubuntu" ]];then
84 | apt install -y wget
85 | elif [[ $os == 'rocky' || $os == 'alma' ]];then
86 | dnf install -y wget
87 | fi
88 | fi
89 | check_downloadfile $2
90 | wget --no-check-certificate $2
91 | if [ $? -ne 0 ];then
92 | echo_error 下载 $2 失败!
93 | exit 1
94 | fi
95 | file_in_the_dir=$(pwd)
96 | cd ${back_dir}
97 | else
98 | # 进入此处,表示${src_dir}目录内有压缩包
99 | echo_info 发现压缩包$(pwd)/$download_file_name
100 | file_in_the_dir=$(pwd)
101 | cd ${back_dir}
102 | fi
103 | fi
104 | else
105 | # 进入此处表示脚本所在目录有压缩包
106 | echo_info 发现压缩包$(pwd)/$download_file_name
107 | file_in_the_dir=$(pwd)
108 | fi
109 | }
110 |
111 | # 多核编译
112 | function multi_core_compile(){
113 | echo_info 多核编译
114 | assumeused=$(w | grep 'load average' | awk -F': ' '{print $2}' | awk -F'.' '{print $1}')
115 | cpucores=$(cat /proc/cpuinfo | grep -c processor)
116 | compilecore=$(($cpucores - $assumeused - 1))
117 | if [ $compilecore -ge 1 ];then
118 | make -j $compilecore && make -j $compilecore install
119 | if [ $? -ne 0 ];then
120 | echo_error 编译安装出错,请检查脚本
121 | exit 1
122 | fi
123 | else
124 | make && make install
125 | if [ $? -ne 0 ];then
126 | echo_error 编译安装出错,请检查脚本
127 | exit 1
128 | fi
129 | fi
130 | }
131 |
132 | echo_info 安装依赖
133 | yum install -y gcc yasm
134 |
135 | echo_info 下载ffmpeg源码包
136 | # 2.4.11之前的老版本没有tar.xz的包,但是这里默认不会下载这么老的版本,所以直接写死了
137 | download_tar_gz ${download_dir} http://www.ffmpeg.org/releases/ffmpeg-${ffmpeg_version}.tar.xz
138 |
139 | cd ${file_in_the_dir}
140 | echo_info 解压压缩包
141 | tar xf ffmpeg-${ffmpeg_version}.tar.xz
142 | cd ffmpeg-${ffmpeg_version}
143 | echo_info 编译ffmpeg中,请耐心等待
144 | ./configure --prefix=/usr/local/ffmpeg
145 | multi_core_compile
146 |
147 | # 清理
148 | cd ${file_in_the_dir}
149 | rm -rf ffmpeg-${ffmpeg_version}
150 |
151 | echo_info 配置环境变量
152 | echo "export PATH=$PATH:/usr/local/ffmpeg/bin" > /etc/profile.d/ffmpeg.sh
153 | source /etc/profile
154 | echo_info ffmepg已安装完毕,版本:
155 | ffmpeg -version
156 |
157 | echo_warning 由于bash特性限制,在本终端执行ffmpeg命令需要先手动执行 source /etc/profile 加载环境变量,或者重新打开一个终端
158 |
--------------------------------------------------------------------------------
/10-pve-vmware-tools/01-pve-to-vmware/pve_to_vmware.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # 需要先安装 expect,鉴于pve环境可能处于内网,故脚本不进行expect的安装,请手动安装。
3 | # 脚本用法:传递pve虚拟机的id给脚本,脚本找出对应磁盘后,转格式,并通过scp传到一台esxi对应的
4 | # 存储目录上。
5 | # 之后在控制台上的操作都手动执行。
6 |
7 | # 在该目录下查找硬盘
8 | scan_dir=/dev
9 | esxi_ip=172.16.201.3
10 | esxi_ssh_port=22
11 | esxi_ssh_user=root
12 | esxi_ssh_password=yourpassword
13 | # esxi上,存储的目录
14 | esxi_store_dir=/vmfs/volumes/cd-md3820i-1
15 | # 迁移到vm的哪台新建虚拟机上,这台虚拟机需要提前创建
16 | esxi_vm_name=$2
17 | # 第一个参数是pve虚拟机的id
18 | pve_vm_id=$1
19 | # expect超时时间
20 | timeout=15000
21 |
22 | # 带格式的echo函数
23 | function echo_info() {
24 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
25 | }
26 | function echo_warning() {
27 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
28 | }
29 | function echo_error() {
30 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
31 | }
32 |
33 | function print_logo(){
34 | echo "
35 | _____ _____ _____ _ _____ _____
36 | | _ | | | __| | |_ ___ | | | |_ _ _ ___ ___ ___
37 | | __| | | __| | _| . | | | | | | | | | | .'| _| -_|
38 | |__| \___/|_____| |_| |___| \___/|_|_|_|_____|__,|_| |___|
39 |
40 | "
41 | }
42 |
43 |
44 | ########################### 脚本起始 #############################
45 | print_logo
46 |
47 | function print_usage() {
48 | echo
49 | echo "用法:$0 "
50 | echo
51 | }
52 |
53 | workdir=$(pwd)
54 |
55 | if [ $# -ne 2 ];then
56 | echo_error 参数传递错误
57 | print_usage
58 | exit 1
59 | fi
60 |
61 | echo_info 查找虚拟机硬盘
62 | vmdisks=$(find ${scan_dir} -name vm-${pve_vm_id}-disk* | sort)
63 | if [ $? -ne 0 ];then
64 | echo_error 未找到虚拟机${pve_vm_id}的硬盘,退出
65 | exit 1
66 | fi
67 |
68 | realvmdisks_index=0
69 | for vmdisk in ${vmdisks};do
70 | vmdisk_lnlocation=$(ls -l ${vmdisk} | awk '{print $NF}')
71 | cd $(dirname ${vmdisk})
72 | realvmdisk=$(realpath ${vmdisk_lnlocation})
73 | realvmdisks[${realvmdisk_index}]=${realvmdisk}
74 | # 利用下标,将vm虚拟机的硬盘(软链接)和真是硬盘对应起来
75 | recordvmdisks[${realvmdisk_index}]=$(basename ${vmdisk})
76 | let realvmdisk_index++
77 | echo "$vmdisk (${realvmdisk})"
78 | done
79 |
80 |
81 | echo_info 是否将以上硬盘文件转为vmdk格式(y\|n)?
82 |
83 | function input_and_confirm() {
84 | read user_input
85 | case $user_input in
86 | n|N)
87 | echo_info 用户退出
88 | exit 2
89 | ;;
90 | y|Y)
91 | true
92 | ;;
93 | *)
94 | echo_warning 输入不合法,请重新输入(y\|n)
95 | input_and_confirm
96 | ;;
97 | esac
98 | }
99 |
100 | input_and_confirm
101 | # 到这里的话,说明用户输入的是确认,即需要转换
102 |
103 | function send_vmdk_to_esxi() {
104 | send_file=$1
105 | expect </dev/null 2>&1; then
29 | echo_error 未检测到 git 命令,请先安装 git
30 | exit 1
31 | fi
32 |
33 | if [ -d $pyenv_root ];then
34 | echo_error ${pyenv_root} 目录已存在,请检查是否重复安装
35 | exit 1
36 | fi
37 |
38 | # 检测操作系统
39 | if grep -qs "ubuntu" /etc/os-release; then
40 | os="ubuntu"
41 | os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
42 | # 阻止配置更新弹窗
43 | export UCF_FORCE_CONFFOLD=1
44 | # 阻止应用重启弹窗
45 | export NEEDRESTART_SUSPEND=1
46 | elif [[ -e /etc/centos-release ]]; then
47 | os="centos"
48 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
49 | elif [[ -e /etc/rocky-release ]]; then
50 | os="rocky"
51 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
52 | elif [[ -e /etc/almalinux-release ]]; then
53 | os="alma"
54 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
55 | else
56 | echo_error 不支持的操作系统
57 | exit 99
58 | fi
59 |
60 | # 前置函数
# Abort the whole installation when a git clone fails.
# $1 — the repository URL that failed to clone.
failed_checkout() {
    echo_error "克隆失败:$1"
    exit 2
}
65 |
# Shallow-clone branch $3 of repository $1 into directory $2.
# An already-existing $2 is left untouched; a failed clone aborts via
# failed_checkout (which exits). The completion message prints either way.
checkout() {
    if [ ! -d "$2" ]; then
        git -c advice.detachedHead=0 clone --branch "$3" --depth 1 "$1" "$2" &> /dev/null || failed_checkout "$1"
    fi
    echo_info "$1 完成"
}
70 |
# First-run only: point pip at the Tsinghua mirror. An existing
# ~/.pip/pip.conf is never modified. (mkdir -p is idempotent, so the
# separate directory-existence check the original carried is unnecessary.)
if [ ! -f ~/.pip/pip.conf ]; then
    mkdir -p ~/.pip/
    cat >> ~/.pip/pip.conf << _EOF_
[global]
index-url = https://pypi.tuna.tsinghua.edu.cn/simple
trusted-host = pypi.tuna.tsinghua.edu.cn
_EOF_
fi
81 |
echo_info 安装依赖
# Install the headers/toolchain needed to compile CPython via pyenv,
# per distro family. Package names differ between yum/dnf and apt.
if [[ $os == 'centos' ]];then
    # If pip already exists, skip python2-pip; otherwise pull it in too.
    pip --version &> /dev/null
    if [ $? -eq 0 ];then
        yum install -y gcc gcc-c++ zlib-devel bzip2-devel openssl-devel sqlite-devel readline-devel patch libffi-devel xz-devel
    else
        yum install -y python2-pip gcc gcc-c++ zlib-devel bzip2-devel openssl-devel sqlite-devel readline-devel patch libffi-devel xz-devel
    fi
    pip_version=$(pip --version | awk '{print $2}')
    # Version-sort against the pinned floor 20.2.4 — presumably chosen as the
    # last Python2-compatible pip release; TODO(review) confirm.
    latest_pip_version=$(echo -e "$pip_version\n20.2.4" |sort -V -r | head -1)
    # If the installed pip sorts below 20.2.4, install 20.2.4 from a mirror
    # tarball, then let pip self-upgrade.
    if [[ $latest_pip_version != $pip_version ]];then
        echo_info 升级pip
        wget https://mirrors.aliyun.com/macports/distfiles/py-pip/pip-20.2.4.tar.gz
        tar -zxvf pip-20.2.4.tar.gz
        cd pip-20.2.4/
        python setup.py install
        pip install --upgrade pip
        cd .. && rm -rf pip-20.2.4.tar.gz pip-20.2.4
    fi
elif [[ $os == 'ubuntu' ]];then
    apt update
    apt install -y python3-pip gcc g++ zlib1g-dev libbz2-dev libssl-dev libsqlite3-dev libreadline-dev libffi-dev liblzma-dev
    pip install --upgrade pip
elif [[ $os == 'rocky' || $os == 'alma' ]];then
    pip --version &> /dev/null
    if [ $? -eq 0 ];then
        dnf install -y gcc gcc-c++ zlib-devel bzip2-devel openssl-devel sqlite-devel readline-devel patch libffi-devel xz-devel
    else
        dnf install -y python3-pip gcc gcc-c++ zlib-devel bzip2-devel openssl-devel sqlite-devel readline-devel patch libffi-devel xz-devel
    fi
    pip install --upgrade pip
fi
115 |
116 |
117 | echo_info 克隆pyenv项目
118 | checkout "${GITHUB}/pyenv/pyenv.git" "${pyenv_root}" "master"
119 | checkout "${GITHUB}/pyenv/pyenv-doctor.git" "${pyenv_root}/plugins/pyenv-doctor" "master"
120 | checkout "${GITHUB}/pyenv/pyenv-update.git" "${pyenv_root}/plugins/pyenv-update" "master"
121 | checkout "${GITHUB}/pyenv/pyenv-virtualenv.git" "${pyenv_root}/plugins/pyenv-virtualenv" "master"
122 | mkdir -p ${pyenv_root}/{cache,shims,versions}
123 | #chmod o+w ${pyenv_root}/{shims,versions}
124 |
echo_info 生成更新脚本
# Generate a helper that git-pulls pyenv and each of its plugins in turn.
# Fix: the generated file previously began with "# /bin/bash" — a plain
# comment, not a valid "#!/bin/bash" shebang line.
cat > ${pyenv_root}/update.sh << _EOF_
#!/bin/bash

cd ${pyenv_root} && echo \$(pwd) && git pull
cd ${pyenv_root}/plugins/pyenv-doctor && echo \$(pwd) && git pull
cd ${pyenv_root}/plugins/pyenv-update && echo \$(pwd) && git pull
cd ${pyenv_root}/plugins/pyenv-virtualenv && echo \$(pwd) && git pull
_EOF_
chmod +x ${pyenv_root}/update.sh
135 |
136 | echo_info 添加环境变量到~/.bashrc
137 | cat >> ~/.bashrc << _EOF_
138 | # pyenv
139 | export PYENV_ROOT="$pyenv_root"
140 | export PATH="\$PYENV_ROOT/bin:\$PYENV_ROOT/shims:\$PATH"
141 | eval "\$(pyenv init -)"
142 | eval "\$(pyenv virtualenv-init -)"
143 | _EOF_
144 |
145 | echo
146 | echo_info $(${pyenv_root}/bin/pyenv --version) 已安装完毕,请重新加载终端以激活pyenv命令。
147 | echo_info pyenv升级命令:
148 | echo "bash ${pyenv_root}/update.sh"
149 | echo
150 | echo_warning 本脚本仅为root用户添加了pyenv,若需为其他用户添加,请在该用户\~/.bashrc中添加以下内容
151 | echo
152 | cat << _EOF_
153 | # pyenv
154 | export PYENV_ROOT="$pyenv_root"
155 | export PATH="\$PYENV_ROOT/bin:\$PYENV_ROOT/shims:\$PATH"
156 | eval "\$(pyenv init -)"
157 | eval "\$(pyenv virtualenv-init -)"
158 | _EOF_
159 | echo
160 | echo_info pyenv安装python加速方法,以安装3.9.7为例
161 | cat << _EOF_
162 | export v=3.9.7
163 | cd $pyenv_root/cache
164 | wget https://registry.npmmirror.com/-/binary/python/\$v/Python-\$v.tar.xz
165 | pyenv install \$v
166 | _EOF_
167 | echo
--------------------------------------------------------------------------------
/01-installation-scripts/24-PostgreSQL/02-pg流复制配置.md:
--------------------------------------------------------------------------------
1 | ***注意,本文默认您使用本目录提供的源码编译postgresql部署的pg***
2 | ***脚本中ip、密码均已脱敏***
3 | # 一、主库执行
4 | ## 1、创建同步用户
5 | ```SQL
6 | create role replica login replication encrypted password 'elaZVZzsEye3xjwH';
7 | ```
8 | ## 2、执行以下脚本
9 | ```shell
10 | #!/bin/bash
11 | REPLICA_PASS="elaZVZzsEye3xjwH"
12 | MASTER_IP=172.21.100.13
13 | SLAVE_IP=172.21.100.15
14 |
15 | # 带格式的echo函数
16 | function echo_info() {
17 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
18 | }
19 | function echo_warning() {
20 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
21 | }
22 | function echo_error() {
23 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
24 | }
25 |
26 | echo_info 检查归档目录
27 | [ -d /data/backup/pgarchive ] || mkdir -p /data/backup/pgarchive
28 | chown -R postgres:postgres /data/backup/pgarchive
29 |
30 | ##############################################
31 | echo_info 归档配置
32 | grep -E "^wal_level = " /data/postgresql-11/data/postgresql.conf &> /dev/null
33 | if [ $? -ne 0 ];then
34 | echo_info 配置wal_level
35 | sed -i "s/#wal_level = replica/wal_level = replica/" /data/postgresql-11/data/postgresql.conf
36 | fi
37 | grep -E "^archive_mode = " /data/postgresql-11/data/postgresql.conf &> /dev/null
38 | if [ $? -ne 0 ];then
39 | echo_info 配置archive_mode
40 | sed -i "s/#archive_mode = .*/archive_mode = on/" /data/postgresql-11/data/postgresql.conf
41 | fi
42 | grep -E "^archive_command = " /data/postgresql-11/data/postgresql.conf &> /dev/null
43 | if [ $? -ne 0 ];then
44 | echo_info 配置archive_command
45 | sed -i "s@#archive_command = .*@archive_command = 'cp %p /data/backup/pgarchive/%f'@" /data/postgresql-11/data/postgresql.conf
46 | fi
47 | grep -E "^wal_keep_segments = " /data/postgresql-11/data/postgresql.conf &> /dev/null
48 | if [ $? -ne 0 ];then
49 | echo_info 配置wal_keep_segments
50 | sed -i "s@#wal_keep_segments = .*@wal_keep_segments = 64@" /data/postgresql-11/data/postgresql.conf
51 | fi
52 | grep -E "^hot_standby = " /data/postgresql-11/data/postgresql.conf &> /dev/null
53 | if [ $? -ne 0 ];then
54 | echo_info 配置hot_standby
55 | sed -i "s@#hot_standby = .*@hot_standby = on@" /data/postgresql-11/data/postgresql.conf
56 | fi
57 |
58 | ##############################################
59 |
60 | grep -E "host[[:space:]]+replication[[:space:]]+all[[:space:]]+${SLAVE_IP}/24[[:space:]]+md5" /data/postgresql-11/data/pg_hba.conf &> /dev/null
61 | if [ $? -ne 0 ];then
62 | echo_info 添加replica用户连接配置
63 | echo "host replication all ${SLAVE_IP}/24 md5" >> /data/postgresql-11/data/pg_hba.conf
64 | fi
65 |
66 | echo_info 重启postgresql
67 | systemctl restart postgresql-11
68 | if [ $? -ne 0 ];then
69 | echo_error systemctl restart postgresql-11 命令重启失败,请自行重启,然后注释掉重启的代码重新执行
70 | exit 1
71 | fi
72 |
73 | echo_info 生成recovery.done文件
74 | cat > /data/postgresql-11/data/recovery.done < ~/.pgpass < /home/postgres/.pgpass < ~/.pgpass < /home/postgres/.pgpass < /dev/null
60 | if [ $? -ne 0 ];then
61 | # 进入此处表示脚本所在目录没有压缩包
62 | ls -d $1 &> /dev/null
63 | if [ $? -ne 0 ];then
64 | # 进入此处表示没有${src_dir}目录
65 | mkdir -p $1 && cd $1
66 | echo_info 下载 $download_file_name 至 $(pwd)/
67 | # 检测是否有wget工具
68 | if [ ! -f /usr/bin/wget ];then
69 | echo_info 安装wget工具
70 | if [[ $os == "centos" ]];then
71 | yum install -y wget
72 | elif [[ $os == "ubuntu" ]];then
73 | apt install -y wget
74 | elif [[ $os == 'rocky' || $os == 'alma' ]];then
75 | dnf install -y wget
76 | fi
77 | fi
78 | check_downloadfile $2
79 | wget --no-check-certificate $2
80 | if [ $? -ne 0 ];then
81 | echo_error 下载 $2 失败!
82 | exit 1
83 | fi
84 | file_in_the_dir=$(pwd)
85 | # 返回脚本所在目录,这样这个函数才可以多次使用
86 | cd ${back_dir}
87 | else
88 | # 进入此处表示有${src_dir}目录
89 | cd $1
90 | ls $download_file_name &> /dev/null
91 | if [ $? -ne 0 ];then
92 | # 进入此处表示${src_dir}目录内没有压缩包
93 | echo_info 下载 $download_file_name 至 $(pwd)/
94 | # 检测是否有wget工具
95 | if [ ! -f /usr/bin/wget ];then
96 | echo_info 安装wget工具
97 | if [[ $os == "centos" ]];then
98 | yum install -y wget
99 | elif [[ $os == "ubuntu" ]];then
100 | apt install -y wget
101 | elif [[ $os == 'rocky' || $os == 'alma' ]];then
102 | dnf install -y wget
103 | fi
104 | fi
105 | check_downloadfile $2
106 | wget --no-check-certificate $2
107 | if [ $? -ne 0 ];then
108 | echo_error 下载 $2 失败!
109 | exit 1
110 | fi
111 | file_in_the_dir=$(pwd)
112 | cd ${back_dir}
113 | else
114 | # 进入此处,表示${src_dir}目录内有压缩包
115 | echo_info 发现压缩包$(pwd)/$download_file_name
116 | file_in_the_dir=$(pwd)
117 | cd ${back_dir}
118 | fi
119 | fi
120 | else
121 | # 进入此处表示脚本所在目录有压缩包
122 | echo_info 发现压缩包$(pwd)/$download_file_name
123 | file_in_the_dir=$(pwd)
124 | fi
125 | }
126 |
127 | # 多核编译
128 | function multi_core_compile(){
129 | echo_info 多核编译
130 | assumeused=$(w | grep 'load average' | awk -F': ' '{print $2}' | awk -F'.' '{print $1}')
131 | cpucores=$(cat /proc/cpuinfo | grep -c processor)
132 | compilecore=$(($cpucores - $assumeused - 1))
133 | if [ $compilecore -ge 1 ];then
134 | make -j $compilecore && make -j $compilecore install
135 | if [ $? -ne 0 ];then
136 | echo_error 编译安装出错,请检查脚本
137 | exit 1
138 | fi
139 | else
140 | make && make install
141 | if [ $? -ne 0 ];then
142 | echo_error 编译安装出错,请检查脚本
143 | exit 1
144 | fi
145 | fi
146 | }
147 |
# Version comparison: strip the dots from the deployed version (e.g.
# 3.37.2 -> 3372) and take the first 4 digits of the tarball's version
# field (autoconf tarballs encode it in field 3 of the dash-split name).
# NOTE(review): this digit-concatenation scheme only orders correctly while
# both sides flatten to comparable digit counts — verify before bumping.
OLD_VERSION_SEQ=$(echo ${OLD_VERSION} | tr -d ".")
NEW_VERSION_SEQ_PRE=$(echo ${SQLITE_UNTGZ_DIR} | awk -F "-" '{print $3}')
NEW_VERSION_SEQ=${NEW_VERSION_SEQ_PRE:0:4}

# Refuse to "update" to an equal or older version.
if [ ${OLD_VERSION_SEQ} -ge ${NEW_VERSION_SEQ} ];then
    echo_error "脚本中sqlite3的更新版本号(${NEW_VERSION_SEQ})未高于服务器已部署的版本号(${OLD_VERSION_SEQ}),请查看官网https://www.sqlite.org/download.html,修改脚本中的最新版本号"
    exit 1
fi

echo_info 安装编译工具
yum install -y gcc sqlite-devel

# Fetch, unpack and build the new sqlite into /usr/local.
download_tar_gz ${src_dir} ${SQLITE_DOWNLOAD_URL}
cd ${file_in_the_dir}
untar_tgz ${SQLITE_TGZ_FILE}
cd ${SQLITE_UNTGZ_DIR}
./configure --prefix=/usr/local
multi_core_compile

# Keep the distro binary around, then shadow it with a symlink to the
# freshly built one and register /usr/local/lib with the loader.
echo_info 备份原文件至 /usr/bin/sqlite3_old
mv /usr/bin/sqlite3 /usr/bin/sqlite3_old

ln -s /usr/local/bin/sqlite3 /usr/bin/sqlite3
echo "/usr/local/lib" > /etc/ld.so.conf.d/sqlite3.conf
ldconfig

echo_info 清理临时文件
cd ~
# NOTE(review): assumes the source tree was extracted under ${src_dir};
# when the tarball sat next to the script, file_in_the_dir differs — verify.
rm -rf ${src_dir}/${SQLITE_UNTGZ_DIR}

NEW_VERSION=$(sqlite3 -version | awk '{print $1}')

echo_info "sqlite3已更新:${OLD_VERSION} --> ${NEW_VERSION}"
--------------------------------------------------------------------------------
/01-installation-scripts/28-Node.js/01-install-Node.js.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # nodejs_version=v$(curl -s https://nodejs.org/zh-cn/download/ | grep "长期维护版" | awk -F'' '{print $2}' | awk -F'' '{print $1}')
4 | nodejs_version=v16.15.0
5 | src_dir=$(pwd)/00src00
6 | mydir=$(pwd)
7 |
8 |
# Timestamped, ANSI-colorized log helpers used throughout the script:
# echo_info (green INFO tag), echo_warning (yellow), echo_error (red badge).
function echo_info() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
}
function echo_warning() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
}
function echo_error() {
    echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
}
19 |
20 | # 脚本执行用户检测
21 | if [[ $(whoami) != 'root' ]];then
22 | echo_error 请使用root用户执行
23 | exit 99
24 | fi
25 |
26 | # 检测操作系统
27 | if grep -qs "ubuntu" /etc/os-release; then
28 | os="ubuntu"
29 | os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
30 | # 阻止配置更新弹窗
31 | export UCF_FORCE_CONFFOLD=1
32 | # 阻止应用重启弹窗
33 | export NEEDRESTART_SUSPEND=1
34 | elif [[ -e /etc/centos-release ]]; then
35 | os="centos"
36 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
37 | elif [[ -e /etc/rocky-release ]]; then
38 | os="rocky"
39 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
40 | elif [[ -e /etc/almalinux-release ]]; then
41 | os="alma"
42 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
43 | else
44 | echo_error 不支持的操作系统
45 | exit 99
46 | fi
47 |
48 | # 解压
# Extract archive $1 into the current directory; abort (exit 2) on failure.
# Fix: quote "$1" so archive paths containing whitespace do not word-split.
function untar_tgz(){
    echo_info 解压 $1 中
    if ! tar xf "$1";then
        echo_error 解压出错,请检查!
        exit 2
    fi
}
57 |
58 | # 首先判断当前目录是否有压缩包:
59 | # I. 如果有压缩包,那么就在当前目录解压;
60 | # II.如果没有压缩包,那么就检查有没有 ${src_dir} 表示的目录;
61 | # 1) 如果有目录,那么检查有没有压缩包
62 | # ① 有压缩包就解压
63 | # ② 没有压缩包则下载压缩包
64 | # 2) 如果没有,那么就创建这个目录,然后 cd 到目录中,然后下载压缩包,然
65 | # 后解压
66 | # 解压的步骤都在后面,故此处只做下载
67 |
68 | # 语法: download_tar_gz 保存的目录 下载链接
69 | # 使用示例: download_tar_gz /data/openssh-update https://mirrors.cloud.tencent.com/openssl/source/openssl-1.1.1h.tar.gz
# HEAD-probe URL $1 and abort (exit 98) when the server answers 404.
# Fix: when curl itself fails (DNS error, timeout), http_code is empty and
# the original "[ $http_code -eq 404 ]" raised a test syntax error; default
# the value to 000 so the comparison is always well-formed.
function check_downloadfile() {
    # curl -I: HEAD request; -k: skip TLS verification; -sS: quiet but
    # still print errors. Second field of the status line is the code.
    http_code=$(curl -IksS $1 | head -1 | awk '{print $2}')
    if [ "${http_code:-000}" -eq 404 ];then
        echo_error $1
        echo_error 服务端文件不存在,退出
        exit 98
    fi
}
# Make sure wget is available, installing it via the distro's package
# manager ($os is set by the OS-detection block above).
function _ensure_wget() {
    if [ ! -f /usr/bin/wget ];then
        echo_info 安装wget工具
        if [[ $os == "centos" ]];then
            yum install -y wget
        elif [[ $os == "ubuntu" ]];then
            apt install -y wget
        elif [[ $os == 'rocky' || $os == 'alma' ]];then
            dnf install -y wget
        fi
    fi
}

# Download URL $1 into the current directory (after a 404 probe) and record
# that directory in $file_in_the_dir. Exits 1 when the download fails.
function _fetch_archive() {
    _ensure_wget
    check_downloadfile $1
    wget --no-check-certificate $1
    if [ $? -ne 0 ];then
        echo_error 下载 $1 失败!
        exit 1
    fi
    file_in_the_dir=$(pwd)
}

# download_tar_gz <save_dir> <url>
# Locate or fetch the archive named by the URL's last path segment:
#   1. archive already next to the script        -> use it in place
#   2. <save_dir> does not exist                 -> create it, download there
#   3. <save_dir> exists and holds the archive   -> use it
#   4. <save_dir> exists, archive missing        -> download there
# On return $file_in_the_dir is the directory containing the archive and
# the caller's working directory is restored.
# (Fix: the three duplicated wget-install/download sequences of the original
# are collapsed into the helpers above; resolution order is unchanged.)
function download_tar_gz(){
    download_file_name=$(echo $2 | awk -F"/" '{print $NF}')
    back_dir=$(pwd)
    file_in_the_dir=''

    if [ -e "$download_file_name" ];then
        # Archive sits next to the script.
        echo_info 发现压缩包$(pwd)/$download_file_name
        file_in_the_dir=$(pwd)
    elif [ ! -e "$1" ];then
        # No save dir yet: create it and download into it.
        mkdir -p $1 && cd $1
        echo_info 下载 $download_file_name 至 $(pwd)/
        _fetch_archive $2
        cd ${back_dir}
    else
        # Save dir exists: reuse a cached archive or download one.
        cd $1
        if [ -e "$download_file_name" ];then
            echo_info 发现压缩包$(pwd)/$download_file_name
            file_in_the_dir=$(pwd)
        else
            echo_info 下载 $download_file_name 至 $(pwd)/
            _fetch_archive $2
        fi
        cd ${back_dir}
    fi
}
151 |
# Refuse to run when the target install directory already exists, so a
# previous deployment is never clobbered.
function check_nodejs_dir() {
    if [ ! -d ${mydir}/node-${nodejs_version} ];then
        return
    fi
    echo_error 检测到目录${mydir}/node-${nodejs_version},请检查是否重复安装,退出
    exit 1
}
158 |
# Entry point: fetch the Node.js binary tarball, unpack it into ${mydir},
# wire up /etc/profile.d, point npm at the npmmirror registry and install
# yarn, then print version info.
function main() {
    check_nodejs_dir
    # download_tar_gz ${src_dir} https://nodejs.org/dist/${nodejs_version}/node-${nodejs_version}-linux-x64.tar.xz
    download_tar_gz ${src_dir} https://nodejs.org/download/release/${nodejs_version}/node-${nodejs_version}-linux-x64.tar.xz
    cd ${file_in_the_dir}
    untar_tgz node-${nodejs_version}-linux-x64.tar.xz
    mv node-${nodejs_version}-linux-x64 ${mydir}/node-${nodejs_version}

    echo_info 配置环境变量
    # System-wide PATH/NODE_HOME via /etc/profile.d (escaped \$PATH so the
    # expansion happens at login time, not here).
    echo "export NODE_HOME=${mydir}/node-${nodejs_version}" > /etc/profile.d/nodejs.sh
    echo "export PATH=\$PATH:${mydir}/node-${nodejs_version}/bin" >> /etc/profile.d/nodejs.sh
    # Only affects this script's shell; interactive shells need a re-login.
    source /etc/profile

    echo_info 配置镜像
    npm config set registry=https://registry.npmmirror.com/

    echo_info npm部署yarn
    npm install -g yarn

    echo_warning 由于bash特性限制,在本终端使用 node 等命令,需要先手动执行 source /etc/profile 加载环境变量,或者新开一个终端


    echo_info Node.js已部署完毕,部署目录:${mydir}/node-${nodejs_version}
    echo_info node版本
    node -v
    echo_info npm版本
    npm -v
    echo_info yarn版本
    yarn -v
}
189 |
190 | main
--------------------------------------------------------------------------------
/01-installation-scripts/10-GoAccess/01-install-goaccess.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 定义变量
4 | version=1.6.5
5 | src_dir=00src00
6 |
7 | # GeoIP2需要从 MaxMind 下载 城市/国家 数据库,并通过 --geoip-database 设定。
8 | # 如果使用 GeoIP,则不用下载数据库。
9 | # MaxMind官网:https://dev.maxmind.com/geoip/geoip2/geolite2/
10 | # 必须注册登录下载,注册登录后,进入自己的账号界面,有下载链接
11 | #
12 | # 使用 GeoIP2 的话,需要安装依赖库
13 | # wget --no-check-certificate https://github.com/maxmind/libmaxminddb/releases/download/1.4.3/libmaxminddb-1.4.3.tar.gz
14 | # tar xf libmaxminddb-1.4.3.tar.gz
15 | # cd libmaxminddb-1.4.3
16 | # ./configure
17 | # make
18 | # make install
19 | # sh -c "echo /usr/local/lib >> /etc/ld.so.conf.d/local.conf"
20 | # ldconfig
21 |
22 | # 带格式的echo函数
23 | function echo_info() {
24 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
25 | }
26 | function echo_warning() {
27 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
28 | }
29 | function echo_error() {
30 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
31 | }
32 |
33 | # 解压
34 | function untar_tgz(){
35 | echo_info 解压 $1 中
36 | tar xf $1
37 | if [ $? -ne 0 ];then
38 | echo_error 解压出错,请检查!
39 | exit 2
40 | fi
41 | }
42 |
43 | # 首先判断当前目录是否有压缩包:
44 | # I. 如果有压缩包,那么就在当前目录解压;
45 | # II.如果没有压缩包,那么就检查有没有 ${src_dir} 表示的目录;
46 | # 1) 如果有目录,那么检查有没有压缩包
47 | # ① 有压缩包就解压
48 | # ② 没有压缩包则下载压缩包
49 | # 2) 如果没有,那么就创建这个目录,然后 cd 到目录中,然后下载压缩包,然
50 | # 后解压
51 | # 解压的步骤都在后面,故此处只做下载
52 |
53 | # 语法: download_tar_gz 保存的目录 下载链接
54 | # 使用示例: download_tar_gz /data/openssh-update https://mirrors.cloud.tencent.com/openssl/source/openssl-1.1.1h.tar.gz
55 | function check_downloadfile() {
56 | # 检测下载文件在服务器上是否存在
57 | http_code=$(curl -IksS $1 | head -1 | awk '{print $2}')
58 | if [ $http_code -eq 404 ];then
59 | echo_error $1
60 | echo_error 服务端文件不存在,退出
61 | exit 98
62 | fi
63 | }
64 | function download_tar_gz(){
65 | download_file_name=$(echo $2 | awk -F"/" '{print $NF}')
66 | back_dir=$(pwd)
67 | file_in_the_dir='' # 这个目录是后面编译目录的父目录
68 |
69 | ls $download_file_name &> /dev/null
70 | if [ $? -ne 0 ];then
71 | # 进入此处表示脚本所在目录没有压缩包
72 | ls -d $1 &> /dev/null
73 | if [ $? -ne 0 ];then
74 | # 进入此处表示没有${src_dir}目录
75 | mkdir -p $1 && cd $1
76 | echo_info 下载 $download_file_name 至 $(pwd)/
77 | # 检测是否有wget工具
78 | if [ ! -f /usr/bin/wget ];then
79 | echo_info 安装wget工具
80 | if [[ $os == "centos" ]];then
81 | yum install -y wget
82 | elif [[ $os == "ubuntu" ]];then
83 | apt install -y wget
84 | elif [[ $os == 'rocky' || $os == 'alma' ]];then
85 | dnf install -y wget
86 | fi
87 | fi
88 | check_downloadfile $2
89 | wget --no-check-certificate $2
90 | if [ $? -ne 0 ];then
91 | echo_error 下载 $2 失败!
92 | exit 1
93 | fi
94 | file_in_the_dir=$(pwd)
95 | # 返回脚本所在目录,这样这个函数才可以多次使用
96 | cd ${back_dir}
97 | else
98 | # 进入此处表示有${src_dir}目录
99 | cd $1
100 | ls $download_file_name &> /dev/null
101 | if [ $? -ne 0 ];then
102 | # 进入此处表示${src_dir}目录内没有压缩包
103 | echo_info 下载 $download_file_name 至 $(pwd)/
104 | # 检测是否有wget工具
105 | if [ ! -f /usr/bin/wget ];then
106 | echo_info 安装wget工具
107 | if [[ $os == "centos" ]];then
108 | yum install -y wget
109 | elif [[ $os == "ubuntu" ]];then
110 | apt install -y wget
111 | elif [[ $os == 'rocky' || $os == 'alma' ]];then
112 | dnf install -y wget
113 | fi
114 | fi
115 | check_downloadfile $2
116 | wget --no-check-certificate $2
117 | if [ $? -ne 0 ];then
118 | echo_error 下载 $2 失败!
119 | exit 1
120 | fi
121 | file_in_the_dir=$(pwd)
122 | cd ${back_dir}
123 | else
124 | # 进入此处,表示${src_dir}目录内有压缩包
125 | echo_info 发现压缩包$(pwd)/$download_file_name
126 | file_in_the_dir=$(pwd)
127 | cd ${back_dir}
128 | fi
129 | fi
130 | else
131 | # 进入此处表示脚本所在目录有压缩包
132 | echo_info 发现压缩包$(pwd)/$download_file_name
133 | file_in_the_dir=$(pwd)
134 | fi
135 | }
136 |
137 | # 多核编译
# Build and install with parallel make jobs: spare jobs = total cores minus
# the integer part of the 1-minute load average, minus one. When nothing is
# spare, fall back to a serial make. Exits 1 on any build failure.
function multi_core_compile(){
    # Integer part of the 1-minute load, taken from `w`'s header line.
    assumeused=$(w | grep 'load average' | awk -F': ' '{print $2}' | awk -F'.' '{print $1}')
    cpucores=$(cat /proc/cpuinfo | grep -c processor)
    compilecore=$(($cpucores - $assumeused - 1))
    if [ $compilecore -ge 1 ];then
        make -j $compilecore && make -j $compilecore install
        if [ $? -ne 0 ];then
            echo_error 编译安装出错,请检查脚本
            exit 1
        fi
    else
        make && make install
        if [ $? -ne 0 ];then
            echo_error 编译安装出错,请检查脚本
            exit 1
        fi
    fi
}
156 |
157 | download_tar_gz ${src_dir} https://tar.goaccess.io/goaccess-${version}.tar.gz
158 | cd ${file_in_the_dir}
159 | untar_tgz goaccess-${version}.tar.gz
160 |
161 |
echo_info 安装依赖程序
# Fix: GeoIP-devel lives in the EPEL repository, but epel-release was being
# installed in the same yum transaction — on a fresh system GeoIP-devel can
# then fail to resolve. Install epel-release first, in its own transaction.
yum install -y epel-release
yum install -y openssl-devel GeoIP-devel ncurses-devel gcc
164 |
165 | echo_info 配置编译参数
166 | cd goaccess-${version}
167 | #./configure --enable-utf8 --enable-geoip=mmdb --with-openssl --with-getline --enable-tcb=memhash
168 | ./configure --enable-utf8 --enable-geoip=legacy --with-getline --enable-tcb=memhash
169 |
170 | multi_core_compile
171 |
172 | echo_info 设置配置文件为 nginx 日志分析模式
173 | sed -i 's@^#time-format %H:%M:%S@time-format %H:%M:%S@' /usr/local/etc/goaccess/goaccess.conf
174 | sed -i 's@^#date-format %d/%b/%Y@date-format %d/%b/%Y@' /usr/local/etc/goaccess/goaccess.conf
175 | sed -i 's@#log-format COMBINED@log-format COMBINED@' /usr/local/etc/goaccess/goaccess.conf
176 |
177 | echo_info goaccess 已编译安装成功,详细信息如下:
178 | echo -e "\033[37m 配置文件路径:/usr/local/etc/goaccess/goaccess.conf\033[0m"
179 | echo -e "\033[37m 设置输出html为中文的方法:\033[0m"
180 | echo -e "\033[37m \033[36mexport LANG=zh_CN.UTF-8\033[0m"
181 | echo -e "\033[37m 用法举例:\033[0m"
182 | echo -e "\033[37m \033[36mgoaccess -a -g -f yourlogfile -o output.html\033[0m"
183 | echo -e "\033[37m goaccess版本:\033[0m"
184 | goaccess -V
185 | echo
--------------------------------------------------------------------------------
/01-installation-scripts/20-nfs/01-start-nfs-service.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 带格式的echo函数
4 | function echo_info() {
5 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
6 | }
7 | function echo_warning() {
8 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
9 | }
10 | function echo_error() {
11 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
12 | }
13 |
14 | # 脚本执行用户检测
15 | if [[ $(whoami) != 'root' ]];then
16 | echo_error 请使用root用户执行
17 | exit 99
18 | fi
19 |
20 | # 检测操作系统
21 | if grep -qs "ubuntu" /etc/os-release; then
22 | os="ubuntu"
23 | os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
24 | # 阻止配置更新弹窗
25 | export UCF_FORCE_CONFFOLD=1
26 | # 阻止应用重启弹窗
27 | export NEEDRESTART_SUSPEND=1
28 | elif [[ -e /etc/centos-release ]]; then
29 | os="centos"
30 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
31 | elif [[ -e /etc/rocky-release ]]; then
32 | os="rocky"
33 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
34 | elif [[ -e /etc/almalinux-release ]]; then
35 | os="alma"
36 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
37 | else
38 | echo_error 不支持的操作系统
39 | exit 99
40 | fi
41 |
# Abort when NFS is already running: the kernel's [nfsd] threads show up in
# the process table only while the server is active.
function check_nfs_service() {
    if ps -ef | grep -E "\[nfsd\]" | grep -v grep &> /dev/null; then
        echo_error 检测到 nfs 服务正在运行中,退出
        exit 1
    fi
}
49 |
# Install (if needed) and start the NFS server stack for the detected
# distro. Presence of /usr/sbin/nfsstat is used as the "already installed"
# marker; service names differ per family (nfs vs nfs-server vs
# nfs-kernel-server).
function start_nfs() {
    if [[ ! -f /usr/sbin/nfsstat ]];then
        echo_info 安装 nfs
        if [[ $os == "centos" ]];then
            yum install -y nfs-utils
        elif [[ $os == "ubuntu" ]];then
            apt install -y nfs-common nfs-kernel-server
        elif [[ $os == "rocky" || $os == 'alma' ]];then
            dnf install -y nfs-utils
        fi
    fi

    # rpcbind must be up before the NFS service on the RPM-based distros.
    if [[ $os == "centos" ]];then
        systemctl start rpcbind
        systemctl start nfs
    elif [[ $os == "ubuntu" ]];then
        systemctl start nfs-kernel-server
    elif [[ $os == "rocky" || $os == 'alma' ]];then
        systemctl start rpcbind
        systemctl start nfs-server
    fi
}
72 |
# Set $machine_ip to the primary outbound IPv4 address: ask the kernel which
# source address it would pick to reach 1.1.1.1 (no packet is sent) and pull
# the field following "src" from the route description.
function get_machine_ip() {
    machine_ip=$(ip route get 1.1.1.1 | awk '{for(i=1;i<=NF;i++) if($i=="src") print $(i+1)}')
}
76 |
# Interactively collect the directories to export over NFS into the global
# array $share_dirs. Entry ends after two consecutive blank lines; every
# entered path must be an existing directory or the script exits.
function input_share_dir() {
    # Recursive reader: each non-blank line is appended to share_dirs; a
    # blank line sets a flag, and a second consecutive blank line returns.
    function accept_share_dir() {
        read -e share_dir

        if [ "" != "$share_dir" ];then
            # Got a value — reset the blank-line flag and store it.
            dir_null_flag=0
            share_dirs[$dir_num]=$share_dir
            let dir_num++
            accept_share_dir
        else
            if [ $dir_null_flag -eq 1 ];then
                # Second blank line in a row: input finished.
                return
            else
                # First blank line: remember it and keep reading.
                dir_null_flag=1
                accept_share_dir
            fi
        fi
    }
    # Validate the collected entries: at least one, and each must exist as a
    # directory on disk.
    function check_share_dir_is_legal() {
        if [[ "${share_dirs[0]}" == "" ]];then
            echo_error 没有输入任何内容
            exit 3
        fi
        for i in ${share_dirs[@]};do
            if [ ! -d ${i} ];then
                echo_error 未检测到目录 ${i},请确认输入是否正确
                exit 4
            fi
        done
    }

    # Next free index in the share_dirs array.
    dir_num=0
    # 1 when the previous read was a blank line; two in a row ends input.
    dir_null_flag=0
    echo_info 请输入要共享的目录,如有多个,请回车后继续输入,连输两次空行继续下一步部署操作:
    # read -p $'请输入要共享的目录: \n' -e share_dir
    accept_share_dir
    check_share_dir_is_legal
}
120 |
121 | function generate_nfs_conf() {
122 | echo_info 配置nfs
123 | get_machine_ip
124 | net_ip=$(echo ${machine_ip} | sed 's/[0-9]*$/0/')
125 | net_mask=$(ip a | grep ${machine_ip} | awk '{print $2}' | awk -F "/" '{print $2}')
126 | if [ $? -ne 0 ];then
127 | echo_info 请手动输入子网掩码(24、32这种格式)
128 | read -e INPUT_NET_MASK
129 | if [[ ! ${INPUT_NET_MASK} =~ ^[0-9] ]];then
130 | echo_error 错误的子网掩码格式,退出
131 | exit 2
132 | fi
133 | net_mask=${INPUT_NET_MASK}
134 | fi
135 | input_share_dir
136 | :>/etc/exports
137 | for i in ${share_dirs[@]};do
138 | cat >> /etc/exports <> /etc/modprobe.d/sunrpc.conf\033[0m"
165 | echo -e "\033[45mecho \"options sunrpc tcp_max_slot_table_entries=128\" >> /etc/modprobe.d/sunrpc.conf\033[0m"
166 | echo -e "\033[45mmodprobe sunrpc\033[0m"
167 | echo -e "\033[45msysctl -w sunrpc.tcp_slot_table_entries=128\033[0m"
168 | echo -e "\033[45mmount -t nfs -o soft,intr,timeo=5,retry=5 ${machine_ip}:${share_dirs[0]} MOUNT_POINT\033[0m"
169 | # soft:(默认值)当服务器端失去响应后,访问其上文件的应用程序将收到一个错误信号而不是被挂起。
170 | # timeo:与服务器断开后,尝试连接服务器的间隔时间,默认600(60秒)
171 | # intr:允许通知中断一个NFS调用。当服务器没有应答需要放弃的时候有用处
172 | # retry:失败后重试次数
173 |
174 | echo 服务端取消nfs共享目录命令:
175 | echo -e "\033[45mexportfs -u ${net_ip}/${net_mask}:${share_dirs[0]}\033[0m"
176 | echo 停止nfs命令:
177 | if [[ $os == "centos" ]];then
178 | echo -e "\033[45msystemctl stop nfs\033[0m"
179 | elif [[ $os == "ubuntu" ]];then
180 | echo -e "\033[45msystemctl stop nfs-kernel-server\033[0m"
181 | elif [[ $os == "rocky" || $os == 'alma' ]];then
182 | echo -e "\033[45msystemctl stop nfs-server\033[0m"
183 | fi
184 | echo -e "\033[45msystemctl stop rpcbind\033[0m"
185 | }
186 |
187 | function main() {
188 | check_nfs_service
189 | start_nfs
190 | generate_nfs_conf
191 | echo_summary
192 | }
193 |
194 |
195 | main
--------------------------------------------------------------------------------
/01-installation-scripts/44-shc/01-install-shc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | shc_version=4.0.3
4 | src_dir=$(pwd)/00src00
5 |
6 |
7 | # 带格式的echo函数
8 | function echo_info() {
9 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[32mINFO\033[0m] \033[37m$@\033[0m"
10 | }
11 | function echo_warning() {
12 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m$@\033[0m"
13 | }
14 | function echo_error() {
15 | echo -e "[\033[36m$(date +%T)\033[0m] [\033[41mERROR\033[0m] \033[1;31m$@\033[0m"
16 | }
17 |
18 | # 检测操作系统
19 | if grep -qs "ubuntu" /etc/os-release; then
20 | os="ubuntu"
21 | os_version=$(grep 'VERSION_ID' /etc/os-release | cut -d '"' -f 2)
22 | # 阻止配置更新弹窗
23 | export UCF_FORCE_CONFFOLD=1
24 | # 阻止应用重启弹窗
25 | export NEEDRESTART_SUSPEND=1
26 | elif [[ -e /etc/centos-release ]]; then
27 | os="centos"
28 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/centos-release)
29 | elif [[ -e /etc/rocky-release ]]; then
30 | os="rocky"
31 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/rocky-release)
32 | elif [[ -e /etc/almalinux-release ]]; then
33 | os="alma"
34 | os_version=$(grep -oE '([0-9]+\.[0-9]+(\.[0-9]+)?)' /etc/almalinux-release)
35 | else
36 | true # 这个脚本不用区分发行版
37 | fi
38 |
39 | # 解压
40 | function untar_tgz(){
41 | echo_info 解压 $1 中
42 | tar xf $1
43 | if [ $? -ne 0 ];then
44 | echo_error 解压出错,请检查!
45 | exit 2
46 | fi
47 | }
48 |
49 | # 首先判断当前目录是否有压缩包:
50 | # I. 如果有压缩包,那么就在当前目录解压;
51 | # II.如果没有压缩包,那么就检查有没有 ${src_dir} 表示的目录;
52 | # 1) 如果有目录,那么检查有没有压缩包
53 | # ① 有压缩包就解压
54 | # ② 没有压缩包则下载压缩包
55 | # 2) 如果没有,那么就创建这个目录,然后 cd 到目录中,然后下载压缩包,然
56 | # 后解压
57 | # 解压的步骤都在后面,故此处只做下载
58 |
59 | # 语法: download_tar_gz 保存的目录 下载链接
60 | # 使用示例: download_tar_gz /data/openssh-update https://mirrors.cloud.tencent.com/openssl/source/openssl-1.1.1h.tar.gz
61 | function check_downloadfile() {
62 | # 检测下载文件在服务器上是否存在
63 | http_code=$(curl -IksS $1 | head -1 | awk '{print $2}')
64 | if [ $http_code -eq 404 ];then
65 | echo_error $1
66 | echo_error 服务端文件不存在,退出
67 | exit 98
68 | fi
69 | }
function download_tar_gz(){
    # Download the tarball $2 into directory $1 unless it already exists locally.
    # $1 - cache directory (src_dir); $2 - download URL.
    # Sets globals: download_file_name, back_dir, file_in_the_dir (the parent
    # directory of the build tree used later). Always returns to back_dir.
    download_file_name=$(echo $2 | awk -F"/" '{print $NF}')
    back_dir=$(pwd)
    file_in_the_dir=''

    # Tarball already sitting next to the script?
    if [ -f ${download_file_name} ];then
        echo_info 发现压缩包$(pwd)/$download_file_name
        file_in_the_dir=$(pwd)
        return
    fi

    # mkdir -p covers both "cache dir missing" and "cache dir exists" cases,
    # collapsing the two duplicated branches of the original implementation.
    mkdir -p $1 && cd $1
    if [ -f ${download_file_name} ];then
        echo_info 发现压缩包$(pwd)/$download_file_name
        file_in_the_dir=$(pwd)
        cd ${back_dir}
        return
    fi

    echo_info 下载 $download_file_name 至 $(pwd)/
    _ensure_wget
    check_downloadfile $2
    wget --no-check-certificate $2
    if [ $? -ne 0 ];then
        echo_error 下载 $2 失败!
        exit 1
    fi
    file_in_the_dir=$(pwd)
    # Return to the script directory so this function can be called repeatedly.
    cd ${back_dir}
}

# Install wget with the distro's package manager if it is missing.
function _ensure_wget() {
    if [ ! -f /usr/bin/wget ];then
        echo_info 安装wget工具
        if [[ $os == "centos" ]];then
            yum install -y wget
        elif [[ $os == "ubuntu" ]];then
            apt install -y wget
        elif [[ $os == 'rocky' || $os == 'alma' ]];then
            dnf install -y wget
        fi
    fi
}
142 |
function pre_make() {
    # Install the C compiler toolchain needed to build from source,
    # using the package manager that matches the detected distro ($os).
    case "$os" in
        centos)
            yum install -y gcc
            ;;
        ubuntu)
            apt install -y build-essential
            ;;
        rocky|alma)
            dnf install -y gcc
            ;;
    esac
}
152 |
# Parallel (multi-core) compile
function multi_core_compile(){
    # Ensure make is available; install it via the distro package manager otherwise.
    if ! make --version &> /dev/null;then
        case "$os" in
            centos)
                yum install -y make
                ;;
            ubuntu)
                apt install -y make
                ;;
            rocky|alma)
                dnf install -y make
                ;;
        esac
    fi
    # Estimate spare cores: total cores minus the integer part of the
    # 1-minute load average, minus one kept free for the system.
    assumeused=$(w | grep 'load average' | awk -F': ' '{print $2}' | awk -F'.' '{print $1}')
    cpucores=$(grep -c processor /proc/cpuinfo)
    compilecore=$(($cpucores - $assumeused - 1))
    if [ $compilecore -ge 1 ];then
        # Parallel build when at least one spare core is available.
        if ! { make -j $compilecore && make -j $compilecore install; };then
            echo_error 编译安装出错,请检查脚本
            exit 1
        fi
    else
        # Machine is busy: fall back to a single-threaded build.
        if ! { make && make install; };then
            echo_error 编译安装出错,请检查脚本
            exit 1
        fi
    fi
}
183 |
# ---- main: download, build and install shc ----
download_tar_gz ${src_dir} https://cors.isteed.cc/https://github.com/neurobin/shc/archive/refs/tags/${shc_version}.tar.gz
# Enter the directory holding the tarball (global set by download_tar_gz);
# guard the cd so later rm -rf never runs in the wrong directory.
cd ${file_in_the_dir} || exit 1
# Drop any leftover source tree from a previous run
[ -d shc-${shc_version} ] && rm -rf shc-${shc_version}
untar_tgz ${shc_version}.tar.gz
cd shc-${shc_version} || exit 1
pre_make
# FIX: check ./configure itself. The original tested $? after
# multi_core_compile, which can never fail silently (it exits on error),
# while a configure failure went completely unchecked.
./configure
if [ $? -ne 0 ];then
    echo_error 编译失败!
    exit 1
fi
multi_core_compile
echo_info 清理文件
cd ..
rm -rf shc-${shc_version}

echo_info shc已部署完毕,使用示例:
echo " shc -f yourfile.sh"
echo
--------------------------------------------------------------------------------
/01-installation-scripts/16-kafka/Eagle for Apache Kafka/01-install-Kafka-Eagle.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Kafka Eagle version to install
version=2.0.3
# Upstream release tarball (GitHub codeload endpoint)
download_url=https://codeload.github.com/smartloli/kafka-eagle-bin/tar.gz/${version}
# Local file name for the downloaded tarball
tgzfile=kafka-eagle-bin-${version}.tar.gz
# Cache directory for downloaded source tarballs
src_dir=$(pwd)/00src00
# NOTE(review): appears unused in this script — the UI port actually comes
# from conf/system-config.properties; confirm before removing.
kafka_eagle_port=8084
# Formatted log helpers: timestamped, colourised INFO / WARNING / ERROR lines.
function echo_info() {
    printf "[\033[36m%s\033[0m] [\033[32mINFO\033[0m] \033[37m%s\033[0m\n" "$(date +%T)" "$*"
}
function echo_warning() {
    printf "[\033[36m%s\033[0m] [\033[1;33mWARNING\033[0m] \033[1;37m%s\033[0m\n" "$(date +%T)" "$*"
}
function echo_error() {
    printf "[\033[36m%s\033[0m] [\033[41mERROR\033[0m] \033[1;31m%s\033[0m\n" "$(date +%T)" "$*"
}
18 |
# Extract a tarball; exit 2 on failure.
function untar_tgz(){
    echo_info 解压 $1 中
    if ! tar xf $1;then
        echo_error 解压出错,请检查!
        exit 2
    fi
}
28 |
# This download function is customized for this script; do not copy it elsewhere.
function download_tar_gz(){
    # $1 - cache directory (src_dir); $2 - download URL.
    # Sets globals: download_file_name, back_dir, file_in_the_dir (the parent
    # directory of the extraction tree used later). Always returns to back_dir.

    # Verify the file exists on the server. String comparison instead of -eq:
    # if curl fails, http_code is empty and an arithmetic test would error out.
    http_code=$(curl -IsS $2 | head -1 | awk '{print $2}')
    if [ "$http_code" == "404" ];then
        echo_error $2
        echo_error 服务端文件不存在,退出
        exit 98
    fi

    download_file_name=${tgzfile}
    back_dir=$(pwd)
    file_in_the_dir='' # parent directory of the extraction tree used later

    # Tarball already sitting next to the script?
    if [ -f ${download_file_name} ];then
        echo_info 发现压缩包$(pwd)/$download_file_name
        file_in_the_dir=$(pwd)
        return
    fi

    # mkdir -p covers both "cache dir missing" and "cache dir exists" cases,
    # collapsing the two duplicated branches of the original implementation.
    mkdir -p $1 && cd $1
    if [ -f ${download_file_name} ];then
        echo_info 发现压缩包$(pwd)/$download_file_name
        file_in_the_dir=$(pwd)
        cd ${back_dir}
        return
    fi

    echo_info 下载 $download_file_name 至 $(pwd)/
    # Install wget if missing (this script targets CentOS, hence yum only)
    if [ ! -f /usr/bin/wget ];then
        echo_info 安装wget工具
        yum install -y wget
    fi
    wget $2 -O ${tgzfile}
    if [ $? -ne 0 ];then
        echo_error 下载 $2 失败!
        exit 1
    fi
    file_in_the_dir=$(pwd)
    # Return to the script directory so this function can be called repeatedly.
    cd ${back_dir}
}
96 |
function add_user_and_group(){
    # Create a group and a no-login, home-less system user named $1, skipping
    # whichever already exists.
    # FIX: `id -g NAME` checks for a *user* called NAME (printing its primary
    # group id), not for a group — use getent to test group existence.
    if getent group ${1} >/dev/null 2>&1; then
        echo_warning ${1}组已存在,无需创建
    else
        groupadd ${1}
        echo_info 创建${1}组
    fi
    if id -u ${1} >/dev/null 2>&1; then
        echo_warning ${1}用户已存在,无需创建
    else
        useradd -M -g ${1} -s /sbin/nologin ${1}
        echo_info 创建${1}用户
    fi
}
111 |
function check_dir() {
    # Abort (exit 2) if the target directory already exists, so a previous
    # install is never clobbered.
    # FIX: quote $1 — unquoted, an empty argument collapses the test to the
    # one-argument form `[ -d ]`, which evaluates to true.
    if [ -d "$1" ];then
        echo_error 目录 $1 已存在,退出
        exit 2
    fi
}
118 |
# Require a working JDK on PATH before installing Kafka Eagle.
function check_java(){
    if ! java -version &> /dev/null;then
        echo_error 未检测到jdk,请先部署jdk
        exit 1
    fi
}
126 |
function project_echo() {
    # Print the project banner: upstream repo, official download mirror,
    # and a hint that a pre-downloaded tarball next to the script is used.
    echo_info
    echo "==================================================================================="
    echo "项目 github 地址:https://github.com/smartloli/kafka-eagle "
    echo "官网下载地址:http://download.kafka-eagle.org/ "
    echo "如果下载实在太慢,可将压缩包(.tar.gz)下载到本地,移动至与脚本同级目录后再执行脚本"
    echo " by https://github.com/zhegeshijiehuiyouai/RoadToDevOps"
    echo "==================================================================================="
    echo
}
137 |
function add_kafka_eagle_home_to_profile() {
    # Export KE_HOME and extend PATH via a /etc/profile.d snippet so the
    # ke.sh command is available in new login shells.
    echo_info 配置环境变量
    local profile_snippet=/etc/profile.d/kafka-eagle.sh
    {
        echo "export KE_HOME=${back_dir}/kafka-eagle-web-${version}"
        echo "export PATH=\$PATH:${back_dir}/kafka-eagle-web-${version}/bin"
    } > ${profile_snippet}
    echo_warning 由于bash特性限制,在本终端使用 ke.sh 命令,需要先手动执行 source /etc/profile 加载环境变量,或者新开一个终端执行命令
}
144 |
function config_ke() {
    # Minimal config tweaks so Kafka Eagle starts out of the box.
    echo_info 调整 Kafka Eagle 配置
    cd ${back_dir}/kafka-eagle-web-${version}
    # Point the metadata DB (kafka.eagle.url) at the bundled sqlite file
    # under the install directory.
    sed -i 's#^kafka.eagle.url=.*#kafka.eagle.url=jdbc:sqlite:'${back_dir}'/kafka-eagle-web-'${version}'/db/ke.db#g' conf/system-config.properties
    # Comment out the cluster2 offset-storage line (& = matched line),
    # since only one kafka cluster is configured by default.
    sed -i 's/^cluster2.kafka.eagle.offset.storage=zk/#&/g' conf/system-config.properties
}
151 |
function show_summary() {
    # Print post-install notes: where the config file lives, the key
    # settings to review, and the command that starts the service.
    echo_info 配置文件 :kafka-eagle-web-${version}/conf/system-config.properties
    echo -e "\033[37m 主要配置项:\033[0m"
    echo -e "\033[37m kafka.eagle.zk.cluster.alias -- 管理kafka的zk集群分组别名(kafka eagle支持监控多组kafka)\033[0m"
    echo -e "\033[37m cluster1.zk.list -- zk的地址,格式:ip:port[/path]。如有多个,用逗号隔开。如果没有cluster2,可将cluster2注释掉\033[0m"
    echo -e "\033[37m kafka.eagle.webui.port -- kafka eagle web服务的端口\033[0m"
    echo -e "\033[37m cluster1.kafka.eagle.offset.storage -- 消费者偏移量存储方式,0.9 版本之前的kafka存储在zk,之后的存储在kafka\033[0m"
    echo -e "\033[37m kafka xxxxx jdbc driver address -- 数据库,默认sqlite,可改为mysql\033[0m"
    echo -e "\033[37m 启动命令 :ke.sh start\033[0m"
    echo_warning 首次启动前请自行配置好 zk 集群别名、zk 的地址,即 kafka.eagle.zk.cluster.alias、cluster1.zk.list,如无特殊需求,其余可保持默认
}
163 |
############# main #############
project_echo
check_java
# FIX: was `download_tar_gz $src_dir download_url` — the missing `$` passed
# the literal string "download_url" instead of the actual URL.
download_tar_gz $src_dir $download_url
bare_name=$(echo ${tgzfile} | awk -F".tar.gz" '{print $1}')
check_dir ${back_dir}/kafka-eagle-web-${version}
# Guard the cds so later rm -rf never runs in the wrong directory.
cd ${file_in_the_dir} || exit 1
untar_tgz ${tgzfile}
cd ${bare_name} || exit 1
# Use the error-checked helper for the nested tarball too (was a bare `tar xf`).
untar_tgz kafka-eagle-web-${version}-bin.tar.gz
mv kafka-eagle-web-${version} ${back_dir}/kafka-eagle-web-${version}
cd ${file_in_the_dir} || exit 1
rm -rf ${bare_name}
config_ke
add_kafka_eagle_home_to_profile
show_summary
--------------------------------------------------------------------------------