├── README.md
├── screehot
│   └── image001.jpg
├── 《DevOps和自动化运维实践》第1章
│   └── README.md
├── 《DevOps和自动化运维实践》第2章
│   ├── 2.7.1
│   │   ├── 1.sh
│   │   └── 2.sh
│   ├── 2.7.2
│   │   ├── 1.sh
│   │   ├── 2.sh
│   │   └── 3.sh
│   ├── 2.7.3
│   │   ├── 1.sh
│   │   ├── 2.sh
│   │   └── 3.sh
│   └── 2.7.4
│       ├── 1.sh
│       ├── 2.sh
│       ├── 3.sh
│       ├── 4.sh
│       └── 5.sh
├── 《DevOps和自动化运维实践》第3章
│   ├── 3.7.1
│   │   └── 1.py
│   ├── 3.7.2
│   │   └── 1.py
│   └── 3.8
│       ├── 1.py
│       ├── 2.py
│       ├── 3.py
│       ├── 4.py
│       ├── 5.py
│       └── 6.py
├── 《DevOps和自动化运维实践》第4章
│   └── Vagrantfile
├── 《DevOps和自动化运维实践》第5章
│   ├── 5.10.3
│   │   ├── client.py
│   │   ├── initial_v1.0.py
│   │   ├── mytest.yml
│   │   ├── requirements.txt
│   │   ├── work.py
│   │   └── 设备初始化后端API文档说明.pdf
│   └── 5.11
│       └── 1.py
├── 《DevOps和自动化运维实践》第6章
│   └── 6.3
│       └── srv
│           └── salt
│               ├── base
│               │   ├── bash
│               │   │   ├── bash
│               │   │   └── bash.sls
│               │   ├── saferm
│               │   │   ├── safe-rm
│               │   │   ├── safe-rm.conf
│               │   │   └── saferm.sls
│               │   └── saltcheck
│               │       ├── salt_agent.cron
│               │       ├── saltcheck.sh
│               │       └── saltcheck.sls
│               ├── prod
│               │   ├── host
│               │   │   ├── hosts.allow
│               │   │   └── hosts.sls
│               │   ├── keepalived
│               │   │   ├── keepalived.sh
│               │   │   └── keepalived.sls
│               │   ├── nginx
│               │   │   ├── nginx.sls
│               │   │   └── nginx_install.sh
│               │   ├── rsyslog
│               │   │   ├── rsyslog.conf
│               │   │   └── rsyslog.sls
│               │   └── waf
│               │       ├── config.lua
│               │       └── waf.sls
│               └── top.sls
└── 《DevOps和自动化运维实践》第7章
    ├── 7.7.1
    │   └── Dockerfile
    ├── 7.7.2
    │   └── docker-compose.yml
    └── 7.8
        └── 1.py
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | Script sources for《DevOps和自动化运维实践》
4 | ========================================
5 | Note: The book contains a large amount of code — configuration files for the various Linux services (mostly YAML files) as well as shell and Python scripts — so the sources are organized by section. For example, 1.sh under the 1.5.3 directory is the first shell script of section 1.5.3, and 2.sh is the second. The other chapters follow the same pattern: whichever section a configuration file or script appears in is the directory where you will find it. Likewise, the SaltStack chapter contains only the /srv/salt directory, and each script from the book can be found in the corresponding subdirectory there.
6 | GitHub address of this book: http://github.com/yuhongchun/devops/
7 |
8 | Chapters
9 | ----------------------------------------
10 | Chapter 1  The Significance of DevOps and Automated Operations
11 | Chapter 2  Shell Scripts in DevOps
12 | Chapter 3  Python in DevOps and Automated Operations
13 | Chapter 4  Vagrant in a DevOps Environment
14 | Chapter 5  Ansible, an Automated Deployment and Management Tool
15 | Chapter 6  SaltStack, an Automated Configuration Management Tool
16 | Chapter 7  Docker and Jenkins in DevOps
17 | Chapter 8  Further Thoughts on Automated Operations
18 | Appendix A  GitLab in Practical DevOps Work
19 | Appendix B  Deploying a High-Performance Python WSGI Server with Gunicorn
20 | Appendix C  Supervisor in DevOps Work
21 | Appendix D  A Brief Introduction to Celery, a Distributed Queue Manager
22 |
23 |
24 | Contacting the Author
25 | -----------------------------------------
26 | If you run into any problems while reading, feel free to reach me through any of the channels below. Thank you.
27 | Author's email: yuhongchun027@gmail.com
28 | Author's Weibo: http://weibo.com/yuhongchun027
29 | Author's blog: http://yuhongchun.blog.51cto.com
30 |
--------------------------------------------------------------------------------
/screehot/image001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yuhongchun/devops/b8723334ec5dc1ef9db801f55e2b2c65ac012409/screehot/image001.jpg
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第1章/README.md:
--------------------------------------------------------------------------------
1 | # Chapter 1
2 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.1/1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | SVNDIR=/data/svn
3 | SVNADMIN=/usr/bin/svnadmin
4 | DATE=`date +%Y-%m-%d`
5 | OLDDATE=`date +%Y-%m-%d -d '-30 days'`
6 | BACKDIR=/data/backup/svn-backup
7 |
8 | [ -d ${BACKDIR} ] || mkdir -p ${BACKDIR}
9 | LogFile=${BACKDIR}/svnbak.log
10 | [ -f ${LogFile} ] || touch ${LogFile}
11 | mkdir ${BACKDIR}/${DATE}
12 |
13 |
14 | for PROJECT in myproject official analysis mypharma
15 | do
16 | cd $SVNDIR
17 | $SVNADMIN hotcopy $PROJECT $BACKDIR/$DATE/$PROJECT --clean-logs
18 | cd $BACKDIR/$DATE
19 | tar zcvf ${PROJECT}_svn_${DATE}.tar.gz $PROJECT > /dev/null
20 | rm -rf $PROJECT
21 | sleep 2
22 | done
23 |
24 | HOST=192.168.2.112
25 | FTP_USERNAME=svn
26 | FTP_PASSWORD=svn101
27 |
28 | cd ${BACKDIR}/${DATE}
29 |
30 | ftp -i -n -v << !
31 | open ${HOST}
32 | user ${FTP_USERNAME} ${FTP_PASSWORD}
33 | bin
34 | cd ${OLDDATE}
35 | mdelete *
36 | cd ..
37 | rmdir ${OLDDATE}
38 | mkdir ${DATE}
39 | cd ${DATE}
40 | mput *
41 | bye
42 | !
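43 |
44 | # A hypothetical crontab entry to run this backup nightly (the script path is illustrative, not from the book):
45 | # 55 23 * * * /bin/sh /data/scripts/svn_backup.sh >> /data/backup/svn-backup/svnbak.log 2>&1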
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.1/2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Filename:
4 | # backupdatabase.sh
5 | # Description:
6 | # back up the cms database and remove backups older than 7 days
7 | # crontab
8 | # 55 23 * * * /bin/sh /yundisk/cms/crontab/backupdatabase.sh >> /yundisk/cms/crontab/backupdatabase.log 2>&1
9 |
10 | DATE=`date +%Y-%m-%d`
11 | OLDDATE=`date +%Y-%m-%d -d '-7 days'`
12 |
13 | #MYSQL=/usr/local/mysql/bin/mysql
14 | #MYSQLDUMP=/usr/local/mysql/bin/mysqldump
15 | #MYSQLADMIN=/usr/local/mysql/bin/mysqladmin
16 |
17 | BACKDIR=/yundisk/cms/database
18 | [ -d ${BACKDIR} ] || mkdir -p ${BACKDIR}
19 | [ -d ${BACKDIR}/${DATE} ] || mkdir ${BACKDIR}/${DATE}
20 | [ ! -d ${BACKDIR}/${OLDDATE} ] || rm -rf ${BACKDIR}/${OLDDATE}
21 |
22 | mysqldump --default-character-set=utf8 --no-autocommit --quick --hex-blob --single-transaction -uroot cms_production | gzip > ${BACKDIR}/${DATE}/cms-backup-${DATE}.sql.gz
23 | echo "Database cms_production has been backed up successfully"
24 | /bin/sleep 5
25 |
26 | aws s3 cp ${BACKDIR}/${DATE}/* s3://example-share/cms/databackup/
27 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.2/1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #####get cpu info#####
4 | cpu_num=`cat /proc/cpuinfo| grep "physical id"| sort| uniq| wc -l`
5 | cpu_sum=`cat /proc/cpuinfo |grep processor |wc -l`
6 | cpu_hz=`cat /proc/cpuinfo |grep 'model name' |uniq -c |awk '{print $NF}'`
7 |
8 | #####get mem info#####
9 | mem_m=0
10 | for i in `dmidecode -t memory |grep Size: |grep -v "No Module Installed" |awk '{print $2}'`
11 | do
12 | mem_m=`expr $mem_m + $i`
13 | done
14 | mem_sum=`echo $mem_m / 1024 |bc`
15 |
16 | #####get nic info#####
17 | qian_num=`lspci |grep Ethernet |egrep -v '10-Gigabit|10 Gigabit' |wc -l`
18 | wan_num=`lspci |grep Ethernet |egrep '10-Gigabit|10 Gigabit' |wc -l`
19 |
20 | #####get disk num#####
21 | B=`date +%s`
22 | ssd_num=0
23 | sata_num=0
24 | for i in `lsblk |grep "disk"|awk '{print $1}'|egrep -v "ram"|sort`;
25 | do
26 | code=`cat /sys/block/$i/queue/rotational`
27 | if [ "$code" = "0" ];then
28 | ssd_num=`expr $ssd_num + 1` && echo $i >>/tmp/$B.ssd
29 | else
30 | sata_num=`expr $sata_num + 1` && echo $i >>/tmp/$B.sata
31 | fi
32 | done
33 |
34 | #####get disk sum#####
35 | C=`date +%N`
36 | ssd_sum=0
37 | sata_sum=0
38 | if [ -f /tmp/$B.ssd ];then
39 | for n in `cat /tmp/$B.ssd`;do
40 | fdisk -l /dev/$n >>/tmp/$C.ssd 2>&1
41 | for x in `grep "Disk /dev" /tmp/$C.ssd |awk '{print $3}'`;do
42 | u=`echo $x / 1|bc`
43 | done
44 | ssd_sum=`expr $ssd_sum + $u + 1`
45 | done
46 | fi
47 |
48 | if [ -f /tmp/$B.sata ];then
49 | for m in `cat /tmp/$B.sata`;do
50 | fdisk -l /dev/$m >>/tmp/$C.sata 2>&1
51 | for y in `grep "Disk /dev" /tmp/$C.sata |awk '{print $3}'`;do
52 | v=`echo $y / 1|bc`
53 | done
54 | sata_sum=`expr $sata_sum + $v + 1`
55 | done
56 | fi
57 |
58 | #####show dev info#####
59 | # $ip, $plat, $pop and $prov are assumed to be provided by the calling environment
60 | echo -n "$ip `hostname` $plat $pop $prov "
61 | echo -n "CPU (physical cores, logical cores, frequency): $cpu_num $cpu_sum $cpu_hz "
62 | echo -n "Memory (GB): $mem_sum "
63 | echo -n "NICs (1GbE, 10GbE): $qian_num $wan_num "
64 | echo "SSD count: ${ssd_num} SSD capacity: ${ssd_sum}GB SATA count: ${sata_num} SATA capacity: ${sata_sum}GB "
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.2/2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sync_redis_status=`ps aux | grep sync_redis.py | grep -v grep | wc -l `
3 | if [ ${sync_redis_status} != 1 ]; then
4 | echo "Critical! sync_redis is dead"
5 | exit 2
6 | else
7 | echo "OK! sync_redis is Alive"
8 | exit 0
9 | fi
10 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.2/3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # The $1 and $2 alert thresholds can be adjusted to match your workload,
3 | # e.g. $1 = 15000, $2 = 20000
4 | ip_conns=`netstat -an | grep tcp | grep EST | wc -l`
5 | messages=`netstat -ant | awk '/^tcp/ {++S[$NF]} END {for(a in S) print a, S[a]}'|tr -s '\n' ',' | sed -r 's/(.*),/\1\n/g' `
6 |
7 | if [ $ip_conns -lt $1 ]
8 | then
9 | echo "$messages,OK -connect counts is $ip_conns"
10 | exit 0
11 | fi
12 | if [ $ip_conns -ge $1 -a $ip_conns -lt $2 ]
13 | then
14 | echo "$messages,Warning -connect counts is $ip_conns"
15 | exit 1
16 | fi
17 | if [ $ip_conns -ge $2 ]
18 | then
19 | echo "$messages,Critical -connect counts is $ip_conns"
20 | exit 2
21 | fi
22 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.3/1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | while :
3 | do
4 | nginxpid=`ps -C nginx --no-header | wc -l`
5 | if [ $nginxpid -eq 0 ];then
6 | ulimit -SHn 65535
7 | /usr/local/nginx/sbin/nginx
8 | sleep 5
9 | # re-check instead of testing the stale value read before the restart attempt
10 | nginxpid=`ps -C nginx --no-header | wc -l`
11 | if [ $nginxpid -eq 0 ];then
12 | /etc/init.d/keepalived stop
13 | fi
14 | fi
15 | sleep 5
16 | done
17 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.3/2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for pid in `ps aux |grep nginx |grep -v grep|awk '{print $2}'`
3 | do
4 | cat /proc/${pid}/limits | grep 'Max open files'
5 | done
6 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.3/3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # CPU Utilization Statistics plugin for Nagios
3 | #
4 | # USAGE : ./check_cpu_utili.sh [-w ] [-c ] ( [ -i ] [ -n ])
5 | #
6 | # Exemple: ./check_cpu_utili.sh
7 | # ./check_cpu_utili.sh -w 70,40,30 -c 90,60,40
8 | # ./check_cpu_utili.sh -w 70,40,30 -c 90,60,40 -i 3 -n 5
9 | # Paths to commands used in this script. These may have to be modified to match your system setup.
10 | IOSTAT="/usr/bin/iostat"
11 |
12 | # Nagios return codes
13 | STATE_OK=0
14 | STATE_WARNING=1
15 | STATE_CRITICAL=2
16 | STATE_UNKNOWN=3
17 |
18 | # Plugin parameters value if not define
19 | LIST_WARNING_THRESHOLD="70,40,30"
20 | LIST_CRITICAL_THRESHOLD="90,60,40"
21 | INTERVAL_SEC=1
22 | NUM_REPORT=1
23 | # Plugin variable description
24 | PROGNAME=$(basename $0)
25 |
26 | if [ ! -x $IOSTAT ]; then
27 | echo "UNKNOWN: iostat not found or is not executable by the nagios user."
28 | exit $STATE_UNKNOWN
29 | fi
30 |
31 | print_usage() {
32 | echo ""
33 | echo "$PROGNAME $RELEASE - CPU Utilization check script for Nagios"
34 | echo ""
35 | echo "Usage: check_cpu_utili.sh -w -c (-i -n)"
36 | echo ""
37 | echo " -w Warning threshold in % for warn_user,warn_system,warn_iowait CPU (default : 70,40,30)"
38 | echo " Exit with WARNING status if cpu exceeds warn_n"
39 | echo " -c Critical threshold in % for crit_user,crit_system,crit_iowait CPU (default : 90,60,40)"
40 | echo " Exit with CRITICAL status if cpu exceeds crit_n"
41 | echo " -i Interval in seconds for iostat (default : 1)"
42 | echo " -n Number report for iostat (default : 3)"
43 | echo " -h Show this page"
44 | echo ""
45 | echo "Usage: $PROGNAME"
46 | echo "Usage: $PROGNAME --help"
47 | echo ""
48 | exit 0
49 | }
50 |
51 | print_help() {
52 | print_usage
53 | echo ""
54 | echo "This plugin will check cpu utilization (user,system,CPU_Iowait in %)"
55 | echo ""
56 | exit 0
57 | }
58 |
59 | # Parse parameters
60 | while [ $# -gt 0 ]; do
61 | case "$1" in
62 | -h | --help)
63 | print_help
64 | exit $STATE_OK
65 | ;;
66 | -v | --version)
67 | echo "$PROGNAME $RELEASE"
68 | exit $STATE_OK
69 | ;;
70 | -w | --warning)
71 | shift
72 | LIST_WARNING_THRESHOLD=$1
73 | ;;
74 | -c | --critical)
75 | shift
76 | LIST_CRITICAL_THRESHOLD=$1
77 | ;;
78 | -i | --interval)
79 | shift
80 | INTERVAL_SEC=$1
81 | ;;
82 | -n | --number)
83 | shift
84 | NUM_REPORT=$1
85 | ;;
86 | *) echo "Unknown argument: $1"
87 | print_usage
88 | exit $STATE_UNKNOWN
89 | ;;
90 | esac
91 | shift
92 | done
93 |
94 | # Split the comma-separated warning threshold list into an array
95 | TAB_WARNING_THRESHOLD=(`echo $LIST_WARNING_THRESHOLD | sed 's/,/ /g'`)
96 | if [ "${#TAB_WARNING_THRESHOLD[@]}" -ne "3" ]; then
97 | echo "ERROR : Bad count parameter in Warning Threshold"
98 | exit $STATE_WARNING
99 | else
100 | USER_WARNING_THRESHOLD=`echo ${TAB_WARNING_THRESHOLD[0]}`
101 | SYSTEM_WARNING_THRESHOLD=`echo ${TAB_WARNING_THRESHOLD[1]}`
102 | IOWAIT_WARNING_THRESHOLD=`echo ${TAB_WARNING_THRESHOLD[2]}`
103 | fi
104 |
105 | # List to Table for critical threshold
106 | TAB_CRITICAL_THRESHOLD=(`echo $LIST_CRITICAL_THRESHOLD | sed 's/,/ /g'`)
107 | if [ "${#TAB_CRITICAL_THRESHOLD[@]}" -ne "3" ]; then
108 | echo "ERROR : Bad count parameter in CRITICAL Threshold"
109 | exit $STATE_WARNING
110 | else
111 | USER_CRITICAL_THRESHOLD=`echo ${TAB_CRITICAL_THRESHOLD[0]}`
112 | SYSTEM_CRITICAL_THRESHOLD=`echo ${TAB_CRITICAL_THRESHOLD[1]}`
113 | IOWAIT_CRITICAL_THRESHOLD=`echo ${TAB_CRITICAL_THRESHOLD[2]}`
114 | fi
115 |
116 | if [ ${TAB_WARNING_THRESHOLD[0]} -ge ${TAB_CRITICAL_THRESHOLD[0]} -o ${TAB_WARNING_THRESHOLD[1]} -ge ${TAB_CRITICAL_THRESHOLD[1]} -o ${TAB_WARNING_THRESHOLD[2]} -ge ${TAB_CRITICAL_THRESHOLD[2]} ]; then
117 | echo "ERROR : Warning CPU threshold must be lower than the critical CPU threshold"
118 | exit $STATE_WARNING
119 | fi
120 |
121 | CPU_REPORT=`$IOSTAT -c $INTERVAL_SEC $NUM_REPORT | sed -e 's/,/./g' | tr -s ' ' ';' | sed '/^$/d' | tail -1`
122 | CPU_REPORT_SECTIONS=`echo ${CPU_REPORT} | grep ';' -o | wc -l`
123 | CPU_USER=`echo $CPU_REPORT | cut -d ";" -f 2`
124 | CPU_SYSTEM=`echo $CPU_REPORT | cut -d ";" -f 4`
125 | CPU_IOWAIT=`echo $CPU_REPORT | cut -d ";" -f 5`
126 | CPU_STEAL=`echo $CPU_REPORT | cut -d ";" -f 6`
127 | CPU_IDLE=`echo $CPU_REPORT | cut -d ";" -f 7`
128 | NAGIOS_STATUS="user=${CPU_USER}%,system=${CPU_SYSTEM}%,iowait=${CPU_IOWAIT}%,idle=${CPU_IDLE}%"
129 | NAGIOS_DATA="CpuUser=${CPU_USER};${TAB_WARNING_THRESHOLD[0]};${TAB_CRITICAL_THRESHOLD[0]};0"
130 |
131 | CPU_USER_MAJOR=`echo $CPU_USER| cut -d "." -f 1`
132 | CPU_SYSTEM_MAJOR=`echo $CPU_SYSTEM | cut -d "." -f 1`
133 | CPU_IOWAIT_MAJOR=`echo $CPU_IOWAIT | cut -d "." -f 1`
134 | CPU_IDLE_MAJOR=`echo $CPU_IDLE | cut -d "." -f 1`
135 |
136 |
137 |
138 | # Return
139 | if [ ${CPU_USER_MAJOR} -ge $USER_CRITICAL_THRESHOLD ]; then
140 | echo "CPU STATISTICS CRITICAL:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
141 | exit $STATE_CRITICAL
142 | elif [ ${CPU_SYSTEM_MAJOR} -ge $SYSTEM_CRITICAL_THRESHOLD ]; then
143 | echo "CPU STATISTICS CRITICAL:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
144 | exit $STATE_CRITICAL
145 | elif [ ${CPU_IOWAIT_MAJOR} -ge $IOWAIT_CRITICAL_THRESHOLD ]; then
146 | echo "CPU STATISTICS CRITICAL:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
147 | exit $STATE_CRITICAL
148 | elif [ ${CPU_USER_MAJOR} -ge $USER_WARNING_THRESHOLD ] && [ ${CPU_USER_MAJOR} -lt $USER_CRITICAL_THRESHOLD ]; then
149 | echo "CPU STATISTICS WARNING:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
150 | exit $STATE_WARNING
151 | elif [ ${CPU_SYSTEM_MAJOR} -ge $SYSTEM_WARNING_THRESHOLD ] && [ ${CPU_SYSTEM_MAJOR} -lt $SYSTEM_CRITICAL_THRESHOLD ]; then
152 | echo "CPU STATISTICS WARNING:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
153 | exit $STATE_WARNING
154 | elif [ ${CPU_IOWAIT_MAJOR} -ge $IOWAIT_WARNING_THRESHOLD ] && [ ${CPU_IOWAIT_MAJOR} -lt $IOWAIT_CRITICAL_THRESHOLD ]; then
155 | echo "CPU STATISTICS WARNING:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
156 | exit $STATE_WARNING
157 | else
158 | echo "CPU STATISTICS OK:${NAGIOS_STATUS} | CPU_USER=${CPU_USER}%;70;90;0;100"
159 | exit $STATE_OK
160 | fi
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.4/1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Add the EPEL external yum repository
3 | cd /usr/local/src
4 | wget http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
5 | rpm -ivh epel-release-6-8.noarch.rpm
6 | # Install the gcc base libraries and the sysstat tool
7 | yum -y install gcc gcc-c++ vim-enhanced unzip unrar sysstat
8 | # Configure automatic time synchronization with ntpdate
9 | yum -y install ntp
10 | echo "01 01 * * * /usr/sbin/ntpdate ntp.api.bz >> /dev/null 2>&1" >> /etc/crontab
11 | ntpdate ntp.api.bz
12 | service crond restart
13 | # Configure the system ulimit values
14 | ulimit -SHn 65535
15 | echo "ulimit -SHn 65535" >> /etc/rc.local
16 | cat >> /etc/security/limits.conf << EOF
17 | * soft nofile 65535
18 | * hard nofile 65535
19 | EOF
20 |
21 | # Basic kernel tuning
22 | cat >> /etc/sysctl.conf << EOF
23 | fs.file-max=419430
24 | net.ipv4.tcp_syncookies = 1
25 | net.ipv4.tcp_syn_retries = 1
26 | net.ipv4.tcp_tw_recycle = 1
27 | net.ipv4.tcp_tw_reuse = 1
28 | net.ipv4.tcp_fin_timeout = 1
29 | net.ipv4.tcp_keepalive_time = 1200
30 | net.ipv4.ip_local_port_range = 1024 65535
31 | net.ipv4.tcp_max_syn_backlog = 16384
32 | net.ipv4.tcp_max_tw_buckets = 36000
33 | net.ipv4.route.gc_timeout = 100
34 | net.ipv4.tcp_syn_retries = 1
35 | net.ipv4.tcp_synack_retries = 1
36 | net.core.somaxconn = 16384
37 | net.core.netdev_max_backlog = 16384
38 | net.ipv4.tcp_max_orphans = 16384
39 | EOF
40 | /sbin/sysctl -p
41 |
42 | # Disable the Ctrl-Alt-Delete key combination to prevent accidental reboots
43 | sed -i 's@ca::ctrlaltdel:/sbin/shutdown -t3 -r now@#ca::ctrlaltdel:/sbin/shutdown -t3 -r now@' /etc/inittab
44 | # Disable SELinux
45 | sed -i 's@SELINUX=enforcing@SELINUX=disabled@' /etc/selinux/config
46 | # Stop iptables
47 | service iptables stop
48 | chkconfig iptables off
49 | # Tune the ssh service. Keep at least one user with sudo privileges on the machine, because the settings below disable remote root login.
50 | sed -i 's@#PermitRootLogin yes@PermitRootLogin no@' /etc/ssh/sshd_config # disable remote root login
51 | sed -i 's@#PermitEmptyPasswords no@PermitEmptyPasswords no@' /etc/ssh/sshd_config # disallow empty-password logins
52 | sed -i 's@#UseDNS yes@UseDNS no@' /etc/ssh/sshd_config
53 | service sshd restart
54 | # Disable IPv6
55 | echo "alias net-pf-10 off" >> /etc/modprobe.d/dist.conf
56 | echo "alias ipv6 off" >> /etc/modprobe.d/dist.conf
57 | chkconfig ip6tables off
58 | # Basic vim syntax settings
59 | echo "syntax on" >> /root/.vimrc
60 | echo "set nohlsearch" >> /root/.vimrc
61 | # Trim the services started at boot; a freshly minimized machine initially only needs crond, network, rsyslog and sshd.
62 | for i in `chkconfig --list|grep 3:on|awk '{print $1}'`;do chkconfig --level 3 $i off;done
63 | for CURSRV in crond rsyslog sshd network;do chkconfig --level 3 $CURSRV on;done
64 | # Reboot the server
65 | reboot
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.4/2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Run this script every five minutes (see the hypothetical crontab entry at the end of this file)
3 |
4 | CE_HOME='/data/ContentEngine'
5 | LOG_PATH='/data/logs'
6 |
7 | # Cap the number of spider processes at 8
8 | MAX_SPIDER_COUNT=8
9 |
10 | # current count of spider
11 | count=`ps -ef | grep -v grep | grep run.py | wc -l`
12 | # The loop below keeps the number of run.py processes at 8 to make full use of the machine; the while condition is bounded by try_time so it cannot loop forever
13 | try_time=0
14 | cd $CE_HOME
15 | while [ $count -lt $MAX_SPIDER_COUNT -a $try_time -lt $MAX_SPIDER_COUNT ];do
16 | let try_time+=1
17 | python run.py >> ${LOG_PATH}/spider.log 2>&1 &
18 | count=`ps -ef | grep -v grep | grep run.py | wc -l`
19 | done
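20 |
21 | # A hypothetical crontab entry for the five-minute schedule mentioned above (the script path is illustrative, not from the book):
22 | # */5 * * * * /bin/sh /data/ContentEngine/spider_guard.sh >> /data/logs/spider_guard.log 2>&1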
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.4/3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # If the hosts lock file does not exist, create it with touch; if it already exists, another operator is publishing on this platform, so exit and wait for them to finish. ($hosts is assumed to be set by the calling environment: the path of the temporary lock/inventory file.)
3 | if [ ! -f "$hosts" ]
4 | then
5 | touch "$hosts"
6 | else
7 | echo "Another operator is already publishing on this platform; please wait!"
8 | exit
9 | fi
10 | # If the process is interrupted, catch the signal and clean up the temporary file.
11 | trap "echo 'Interrupted; cleaning up the temporary file'; rm -rf $hosts; exit" 1 2 3
12 | # Enter the public_conf directory and pull the latest configuration files from GitLab
13 | cd /data/conf/public_conf/
14 | git pull origin master:master
15 | # The configuration files are managed in an internal GitLab; the explicit master:master refspec is used because a plain "git pull origin master" or "git pull" can fail when multiple branches exist
16 | if [ $? -eq 0 ];then
17 | echo "The configuration files are up to date; publishing can proceed!"
18 | else
19 | echo "The configuration files are not up to date; please check before publishing"
20 | exit
21 | fi
22 | # Single-platform, multi-IP release logic: $# is the argument count, and three or more arguments means a single-platform, multi-IP release.
23 | if [ $# -ge 3 ];then
24 | shift 1
25 | # shift moves the positional parameters one position to the left, so $@ now holds all the IPs.
26 | echo "The machines to update this time are: $@"
27 | for flat in $@
28 | do
29 | echo "Updating machine IP: $flat"
30 | platform=`awk '/\[/{a=$0}/'"$flat"'/{print a}' $hosts | head -n1`
31 | # This awk pipeline determines which carrier line the machine belongs to (China Mobile, Unicom or Telecom), so the matching configuration can be pushed below.
32 | if [[ $platform =~ "yd" ]];then
33 | /usr/local/bin/ansible -i $hosts $flat -m shell -a "/home/fastcache_conf/publish_fastcache.sh ${public_conf}_yd"
34 | elif [[ $platform =~ "wt" ]];then
35 | /usr/local/bin/ansible -i $hosts $flat -m shell -a "/home/fastcache_conf/publish_fastcache.sh ${public_conf}_wt"
36 | else
37 | /usr/local/bin/ansible -i $hosts $flat -m shell -a "/home/fastcache_conf/publish_fastcache.sh ${public_conf}_dx"
38 | fi
39 | done
40 | fi
41 | # After a normal run, remove the temporary file so the next release can proceed
42 | rm -rf $hosts
43 | trap "rm -rf $hosts" EXIT
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.4/4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | function rg_mkfs_interac(){
3 | read -p "Enter the RAID level to create (0|1|5|10): " raid
4 | read -p "Enter the disks to include in the RAID, separated by spaces, e.g. sdb sdc: " mydev
5 | echo $raid
6 | echo $mydev
7 | # create md0
8 | rg_info "Create RAID..."
9 | mdadm -Ss
10 | yes | mdadm -C /dev/md0 --level=$raid --auto=yes $mydev >/dev/null
11 | mdadm -D /dev/md0 >/dev/null || rg_info 58 "Create RAID /dev/md0 failed."
12 | # public
13 | partprobe /dev/$DISK_SYS 2>/dev/null
14 | sleep 3
15 | # mkfs
16 | for i in {${DISK_SYS}4,md0}; do
17 | echo -n "$MKFS /dev/$i... "
18 | if $MKFS /dev/$i &>/dev/null; then
19 | echo OK
20 | else
21 | echo failed && rg_info 55 "mkfs $i failed"
22 | fi
23 | done
24 | rg_info "Create cache directory..." && mkdir -p /cache/{cache,logs}
25 | echo -e "/dev/${DISK_SYS}4 \t\t/cache/logs \t\t$FS \tdefaults \t0 0" >>/etc/fstab
26 | echo -e "/dev/md0 \t\t/cache/cache \t\t$FS \t$MOUNT_OPTS \t0 0" >>/etc/fstab
27 | echo "--"
28 | #save mdadm.conf
29 | if (mdadm -Ds 2>/dev/null |grep -q .); then
30 | [ -f /etc/mdadm.conf ] && rg_info "Backup old mdadm.conf..." && /bin/cp /etc/mdadm.conf /etc/mdadm.conf.bak
31 | rg_info "Save RAID configuration (mdadm.conf)..."
32 | if [ "$VER6" == 'yes' ]; then
33 | mdadm -Ds |sed 's!/dev/md[^ ]*:\([0-9]\)!/dev/md\1!; s!metadata[^ ]* !!; s/$/ auto=yes/' >/etc/mdadm.conf
34 | else
35 | mdadm -Ds |sed 's/$/ auto=yes/' >/etc/mdadm.conf
36 | fi
37 | fi
38 | #mount all
39 | fgrep -q /cache /etc/fstab || rg_info 48 "Internal error: f_mkfs has BUG!"
40 | rg_info "Mounting all partitions..."
41 | if mount -a; then
42 | rg_info "Creating the mkpart lock..."
43 | echo "$VERSION" >$MKFS_LOCK 2>/dev/null && chattr +i $MKFS_LOCK
44 | ret=0
45 | else
46 | rg_info 49 "mount -a failed"
47 | fi
48 | return $ret
49 | }
50 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第2章/2.7.4/5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | function update_rules() {
3 | # An internal SVN server is used, so the account and password appear in plain text; the security implications were considered acceptable here
4 | svn co svn://192.168.10.68/route_auto /tmp/route_auto --username=testyum --password=oTIil31pw --force --no-auth-cache
5 |
6 | if [ $? -eq 0 ]; then
7 | echo "[INFO]: fetched the latest rules successfully; checking whether the downloaded rules file is empty..."
8 | if !(test -s $LOCAL_TMP_RULES); then
9 | echo "The newly fetched rules file is empty; please check the remote rules file!!"
10 | exit 1
11 | else
12 | # keep a copy for later inspection; $RULES_ENV_FILE is only updated after the diff below, otherwise the comparison would always see identical files
13 | cp -rf $LOCAL_TMP_RULES $TMPFILES
14 | echo "The newly fetched rules file is non-empty; continuing..."
15 | fi
16 |
17 | echo "[INFO]: comparing the latest rules file against the local rules file..."
18 | if ! (diff $RULES_ENV_FILE $LOCAL_TMP_RULES &>/dev/null); then
19 | echo "Rules differ; loading the latest rules configuration..."
20 | . $LOCAL_TMP_RULES
21 | cp -rf $LOCAL_TMP_RULES $RULES_ENV_FILE
22 | else
23 | echo "No differences; loading the local rules configuration..."
24 | . $RULES_ENV_FILE
25 | fi
26 | fi
27 | }
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.7.1/1.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf8 -*-
2 | from flask import Flask, request, jsonify, abort
3 |
4 | app = Flask(__name__)
5 |
6 |
7 | tasks = [
8 | {
9 | 'id': 1,
10 | 'title': u'Buy groceries',
11 | 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
12 | 'done': False
13 | },
14 | {
15 | 'id': 2,
16 | 'title': u'Learn Python',
17 | 'description': u'Need to find a good Python tutorial on the web',
18 | 'done': False
19 | }
20 | ]
21 | @app.route('/hello/tasks', methods=['POST'])
22 | def create_task():
23 | if not request.json or not 'title' in request.json:
24 | abort(400)
25 | task = {
26 | 'id': tasks[-1]['id'] + 1,
27 | 'title': request.json['title'],
28 | 'description': request.json.get('description', ""),
29 | 'done': False
30 | }
31 | tasks.append(task)
32 | return jsonify({'task': task}), 201
33 |
34 | if __name__ == '__main__':
35 | app.run(port=5001,debug=True)
36 |
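37 | # A hypothetical call to the endpoint above (values are illustrative, not from the book):
38 | # curl -X POST -H "Content-Type: application/json" \
39 | #      -d '{"title": "Read a book"}' http://127.0.0.1:5001/hello/tasks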
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.7.2/1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | from flask import Flask, jsonify,request
4 | import sys,os
5 | import urllib, urllib2
6 | import base64
7 | import hmac
8 | import hashlib
9 | from hashlib import sha1
10 | import time
11 | import uuid
12 | app = Flask(__name__)
13 | class pushAliCdn:
14 | def __init__(self):
15 | self.cdn_server_address = 'http://cdn.aliyuncs.com'
16 | self.access_key_id = 'LTAIT4YXXXXXXX'
17 | self.access_key_secret = 'iX8dQ6m3qawXXXXXX'
18 | def percent_encode(self, str):
19 | res = urllib.quote(str.decode(sys.stdin.encoding).encode('utf8'), '')
20 | res = res.replace('+', '%20')
21 | res = res.replace('*', '%2A')
22 | res = res.replace('%7E', '~')
23 | return res
24 | def compute_signature(self, parameters, access_key_secret):
25 | sortedParameters = sorted(parameters.items(), key=lambda parameters: parameters[0])
26 | canonicalizedQueryString = ''
27 | for (k,v) in sortedParameters:
28 | canonicalizedQueryString += '&' + self.percent_encode(k) + '=' + self.percent_encode(v)
29 | stringToSign = 'GET&%2F&' + self.percent_encode(canonicalizedQueryString[1:])
30 | h = hmac.new(access_key_secret + "&", stringToSign, sha1)
31 | signature = base64.encodestring(h.digest()).strip()
32 | return signature
33 | def compose_url(self, user_params):
34 | timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
35 | parameters = { \
36 | 'Format' : 'JSON', \
37 | 'Version' : '2014-11-11', \
38 | 'AccessKeyId' : self.access_key_id, \
39 | 'SignatureVersion' : '1.0', \
40 | 'SignatureMethod' : 'HMAC-SHA1', \
41 | 'SignatureNonce' : str(uuid.uuid1()), \
42 | 'TimeStamp' : timestamp, \
43 | }
44 | for key in user_params.keys():
45 | parameters[key] = user_params[key]
46 | signature = self.compute_signature(parameters, self.access_key_secret)
47 | parameters['Signature'] = signature
48 | url = self.cdn_server_address + "/?" + urllib.urlencode(parameters)
49 | return url
50 | def make_request(self, user_params, quiet=False):
51 | url = self.compose_url(user_params)
52 | #print url
53 | # refresh the URL
54 | try:
55 | req = urllib2.Request(url)
56 | res_data = urllib2.urlopen(req)
57 | res = res_data.read()
58 | return res
59 | except:
60 | return user_params['ObjectPath'] + ' refresh failed!'
61 | @app.route('/api', methods=['POST'])
62 | def get_tasks():
63 | if request.form.get('url'):
64 | url = request.form.get('url')
65 | print url
66 | f = pushAliCdn()
67 | params = {'Action': 'RefreshObjectCaches', 'ObjectPath': url, 'ObjectType': 'File'}
68 | print params
69 | res = f.make_request(params)
70 | return res
71 | #return jsonify({'tasks': res})
72 |
73 | if __name__ == '__main__':
74 | app.run(host='10.0.1.134',port=9321,debug=True)
75 |
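76 | # A hypothetical call to the refresh endpoint above (the object URL is illustrative, not from the book):
77 | # curl -X POST -d "url=http://cdn.example.com/static/logo.jpg" http://10.0.1.134:9321/api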
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.8/1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:UTF-8 -*-
3 | import re
4 | import os
5 | import urllib
6 | import threading
7 | import time
8 | import Queue
9 |
10 |
11 | def getHtml(url):
12 | html_page = urllib.urlopen(url).read()
13 | return html_page
14 |
15 |
16 | # Extract the image URLs from the page
17 | def getUrl(html):
18 | pattern = r'src="(.+?\.jpg)" pic_ext' # regular expression that matches the image URLs
19 | imgre = re.compile(pattern)
20 | imglist = re.findall(imgre, html)
21 | # re.findall(pattern, string) returns all matches found in string as a list
22 | return imglist
23 |
24 |
25 | class getImg(threading.Thread):
26 | def __init__(self, queue):
27 | # the threads communicate through a queue, so each thread is initialized with the same queue
28 | threading.Thread.__init__(self)
29 | self.queue = queue
30 | self.start() # start the thread
31 |
32 | # use the queue for inter-thread communication
33 | def run(self):
34 | global count
35 | while (True):
36 | imgurl = self.queue.get()
37 | print self.getName()
38 | # urllib.urlretrieve(url, filename) downloads url and saves the content to filename
39 | urllib.urlretrieve(imgurl, '/root/python/images/%s.jpg' % count)
40 | print "%s.jpg done" % count
41 | count += 1
42 | if self.queue.empty():
43 | break
44 | self.queue.task_done()
45 | # task_done() marks the fetched item as processed, decreasing the count of unfinished tasks
46 |
47 |
48 | def main():
49 | global count
50 | url = "http://tieba.baidu.com/p/2460150866" # the page the crawler scrapes
51 | html = getHtml(url)
52 | imglist = getUrl(html)
53 | threads = []
54 | count = 0
55 | queue = Queue.Queue()
56 |
57 | # put all tasks into the queue
58 | for i in range(len(imglist)):
59 | queue.put(imglist[i])
60 |
61 | # fetch the images with multiple threads
62 | for i in range(8):
63 | thread = getImg(queue)
64 | threads.append(thread)
65 |
66 | # join the threads: the main thread continues only after the workers finish
67 | for thread in threads:
68 | thread.join()
69 |
70 |
71 | if __name__ == '__main__':
72 | if not os.path.exists("/root/python/images"):
73 | os.makedirs("/root/python/images")
74 | main()
75 | print "Multi-threaded image download finished!"
76 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.8/2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | import smtplib
4 | from email.mime.text import MIMEText
5 | import string
6 | import os
7 |
8 | mail_host = "mail.example.com.cn"
9 | mail_subject = "List of machines with non-standard hostnames"
10 | # mail_reciver = ["yuhc@example.com.cn"]
11 | mail_reciver = ["devops@example.com.cn", "admin@example.com.cn", "sa@example.com.cn"]
12 | # mail_cc=["wangmiao@example.com.cn","nocdev@example.com"]
13 | # mail_reciver is a list; even a single recipient should use the same form, i.e. mail_reciver = ["yuhc@example.com.cn"]
14 | mail_from = "yhc@example.com.cn"
15 | text = open('/data/report/hostname_report.txt', 'r')
16 | # body = string.join((text.read().strip()), "\r\n")
17 | body = "ALL:\r\n" + "    Hello, below is the list of hostnames across the network that do not follow our naming convention. Please correct them and update the CMDB accordingly. Thanks. The list follows:\r\n" + "\r\n" + text.read() + "\r\n" + "-------" + "\r\n" + "DevOps | 余洪春"
18 | text.close()
19 |
20 | # body = str(body)
21 | msg = MIMEText(body, 'plain', 'utf-8')
22 | msg['Subject'] = mail_subject
23 | msg['From'] = mail_from
24 | msg['To'] = ",".join(mail_reciver)
25 | # msg['Cc'] = ",".join(mail_cc)
26 | # The two headers below, together with the 'utf-8' argument to MIMEText above, avoid mojibake in the message body.
27 | msg["Accept-Language"] = "zh-CN"
28 | msg["Accept-Charset"] = "ISO-8859-1,utf-8"
29 |
30 | # Send the mail to the recipients
31 | try:
32 | server = smtplib.SMTP()
33 | server.connect(mail_host, '25')
34 | # note that starttls is used here
35 | server.starttls()
36 | server.login("yhc@example.com.cn", "yhc123456")
37 | server.sendmail(mail_from, mail_reciver, msg.as_string())
38 |
39 | server.quit()
40 | except Exception, e:
41 | print "Failed to send mail: " + str(e)
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.8/3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from threading import Thread
3 | import subprocess
4 | from Queue import Queue
5 |
6 | num_threads = 8
7 | ip_list = []
8 | for host in range(1,254):
9 | ip = "192.168.185." + str(host)
10 | ip_list.append(ip)
11 |
12 | q=Queue()
13 | def pingme(i,queue):
14 | while True:
15 | ip=queue.get()
16 | print 'Thread %s pinging %s' %(i,ip)
17 | ret=subprocess.call('ping -c 1 %s' % ip,shell=True,stdout=open('/dev/null','w'),stderr=subprocess.STDOUT)
18 | if ret==0:
19 | print '%s is alive!' %ip
20 | elif ret==1:
21 | print '%s is down...'%ip
22 | queue.task_done()
23 |
24 | #start num_threads threads
25 | for i in range(num_threads):
26 | t=Thread(target=pingme,args=(i,q))
27 | t.setDaemon(True)
28 | t.start()
29 |
30 | for ip in ip_list:
31 | q.put(ip)
32 | print 'main thread waiting...'
33 | q.join()
34 | print 'Done'
35 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.8/4.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import redis
3 | import sys
4 |
5 | STATUS_OK = 0
6 | STATUS_WARNING = 1
7 | STATUS_CRITICAL = 2
8 |
9 | HOST = sys.argv[1]
10 | PORT = int(sys.argv[2])
11 | WARNING = float(sys.argv[3])
12 | CRITICAL = float(sys.argv[4])
13 |
14 | def connect_redis(host, port):
15 | r = redis.Redis(host, port, socket_timeout = 5, socket_connect_timeout = 5)
16 | return r
17 |
18 | def main():
19 | r = connect_redis(HOST, PORT)
20 | try:
21 | r.ping()
22 | except:
23 | print HOST,PORT,'down'
24 | sys.exit(STATUS_CRITICAL)
25 |
26 | redis_info = r.info()
27 | used_mem = redis_info['used_memory']/1024/1024/1024.0
28 | used_mem_human = redis_info['used_memory_human']
29 |
30 | if WARNING <= used_mem < CRITICAL:
31 | print HOST,PORT,'use memory warning',used_mem_human
32 | sys.exit(STATUS_WARNING)
33 | elif used_mem >= CRITICAL:
34 | print HOST,PORT,'use memory critical',used_mem_human
35 | sys.exit(STATUS_CRITICAL)
36 | else:
37 | print HOST,PORT,'use memory ok',used_mem_human
38 | sys.exit(STATUS_OK)
39 |
40 | if __name__ == '__main__':
41 | main()
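42 |
43 | # Hypothetical usage (host, port, warning threshold in GB, critical threshold in GB):
44 | #     python 4.py 127.0.0.1 6379 8 12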
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.8/5.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #-*- encoding=utf-8 -*-
3 | import urllib
4 | import json
5 | url='http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=dict2.index'
6 |
7 | # build a dict of request parameters
8 | data = {}
9 | data['i'] = '余洪春是帅哥'
10 | data['from'] = 'AUTO'
11 | data['to'] = 'AUTO'
12 | data['smartresult'] = 'dict'
13 | data['client'] = 'fanyideskwe'
14 | data['salt'] = '1506219252440'
15 | data['sign'] = '0b8cd8f9b8b14'
16 | data['doctype'] = 'json'
17 | data['version'] = '2.1'
18 | data['keyfrom'] = 'fanyi.web'
19 | data['action'] = 'FY_BY_CLICK'
20 | data['typoResult'] = 'true'
21 |
22 | # data cannot be passed as-is; it needs to be encoded first
23 | # encode converts a unicode string to utf-8 bytes
24 | # data=urllib.urlencode(data).encode('utf-8')
25 | # alternative form: urlencode converts the dict into URL parameters
26 | data = urllib.urlencode(data)
27 | response=urllib.urlopen(url,data)
28 |
29 | # decode converts other encodings into the unicode type Python uses internally
30 | # html=response.read().decode('utf-8')
31 | # alternative form
32 | html = response.read()
33 | target=json.loads(html)
34 | print(target['translateResult'][0][0]['tgt'])
35 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第3章/3.8/6.py:
--------------------------------------------------------------------------------
1 | import urllib2, base64, sys
2 | import getopt
3 | import re
4 |
5 | def Usage():
6 | print "Usage: getWowzaInfo.py -a [active|accepted|handled|requests|reading|writing|waiting]"
7 | sys.exit(2)
8 |
9 | def main():
10 | if len(sys.argv) < 2:
11 | Usage()
12 | try:
13 | opts, args = getopt.getopt(sys.argv[1:], "a:")
14 | except getopt.GetoptError:
15 | Usage()
16 |
17 | # Assign parameters as variables
18 | for opt, arg in opts :
19 | if opt == "-a" :
20 | getInfo = arg
21 |
22 | url="http://127.0.0.1:80/ngserver_status"
23 | request = urllib2.Request(url)
24 | result = urllib2.urlopen(request)
25 |
26 | buffer = re.findall(r'\d+', result.read())
27 |
28 | if ( getInfo == "active"):
29 | print buffer[0]
30 | elif ( getInfo == "accepted"):
31 | print buffer[1]
32 | elif ( getInfo == "handled"):
33 | print buffer[2]
34 | elif ( getInfo == "requests"):
35 | print buffer[3]
36 | elif ( getInfo == "reading"):
37 | print buffer[4]
38 | elif ( getInfo == "writing"):
39 | print buffer[5]
40 | elif ( getInfo == "waiting"):
41 | print buffer[6]
42 | else:
43 | print "unknown"
44 | sys.exit(1)
45 |
46 | if __name__ == "__main__":
47 | main()
48 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第4章/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.define "server" do |vb|
3 | vb.vm.provider "virtualbox" do |v|
4 | v.memory = 512
5 | v.cpus = 8
6 | end
7 | vb.vm.host_name = "server"
8 | vb.vm.network :public_network, ip: "10.0.0.15"
9 | vb.vm.box = "centos67"
10 | end
11 |
12 | config.vm.define "vagrant1" do |vb|
13 | vb.vm.provider "virtualbox" do |v|
14 | v.memory = 512
15 | v.cpus = 8
16 | end
17 | vb.vm.host_name = "vagrant1"
18 | vb.vm.network :public_network, ip: "10.0.0.16"
19 | vb.vm.box = "centos67"
20 | end
21 |
22 | config.vm.define "vagrant2" do |vb|
23 | vb.vm.provider "virtualbox" do |v|
24 | v.memory = 512
25 | v.cpus = 8
26 | end
27 | vb.vm.host_name = "vagrant2"
28 | vb.vm.network :public_network, ip: "10.0.0.17"
29 | vb.vm.box = "centos67"
30 | end
31 | end
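32 |
33 | # Hypothetical usage: `vagrant up server` brings up a single VM; a bare `vagrant up` brings up all three.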
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.10.3/client.py:
--------------------------------------------------------------------------------
1 | import requests
2 | user_info = {'ips': ['192.168.1.21','192.168.1.22']}
3 | # send the payload as JSON, since the server parses it with json.loads(request.get_data())
4 | r = requests.post("http://192.168.1.118:1234/ansible/playbook/", json=user_info)
5 | print r.text
6 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.10.3/initial_v1.0.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | import time
4 | import os
5 | import random
6 |
7 | import commands
8 | import json
9 | import jinja2
10 | import ansible.runner
11 | import logging
12 | from flask import Flask, request, render_template, session, flash, redirect, url_for, jsonify
13 | from ansible.inventory import Inventory
14 | from ansible.playbook import PlayBook
15 | from ansible import callbacks
16 | from tempfile import NamedTemporaryFile
17 | from rq import Queue
18 | from rq.job import Job
19 | from redis import Redis
20 |
21 | app = Flask(__name__)
22 | conn = Redis()
23 | q = Queue(connection=conn)
24 |
25 |
26 | @app.route('/hello')
27 | def hello_world():
28 | return 'Hello World!'
29 |
30 |
31 | @app.route('/ansible/playbook/', methods=['POST'])
32 | def playbook():
33 | inventory = """
34 | [initial]
35 | {% for i in hosts %}
36 | {{ i }}
37 | {% endfor %}
38 | """
39 |
40 | inventory_template = jinja2.Template(inventory)
41 | data = json.loads(request.get_data())
42 | inst_ip = data["ips"]
43 | rendered_inventory = inventory_template.render({'hosts': inst_ip})
44 | hosts = NamedTemporaryFile(delete=False, suffix='tmp', dir='/tmp/ansible/')
45 | hosts.write(rendered_inventory)
46 | hosts.close()
47 | '''
48 | The JSON data sent by the client is rendered by jinja2 into a format Ansible can read, and written as a temporary file under /tmp/ansible/
49 | The /tmp/ansible/ directory can be created in advance
50 | '''
51 | inventory = Inventory(hosts.name)
52 | vars = {}
53 | stats = callbacks.AggregateStats()
54 | playbook_cb = callbacks.PlaybookCallbacks()
55 | runner_cb = callbacks.PlaybookRunnerCallbacks(stats)
56 | pb = PlayBook(playbook='/root/ansible/mytest.yml', callbacks=playbook_cb, runner_callbacks=runner_cb, stats=stats,
57 | inventory=inventory, extra_vars=vars)
58 | job = q.enqueue_call(pb.run, result_ttl=5000, timeout=2000)
59 | jid = job.get_id()
60 | if jid:
61 | app.logger.info("Job Successfully Queued with JobID: %s" % jid)
62 | else:
63 | app.logger.error("Failed to Queue the Job")
64 | return jid
65 |
66 |
67 | @app.route("/ansible/results/<job_key>", methods=['GET'])
68 | def get_results(job_key):
69 | job = Job.fetch(job_key, connection=conn)
70 | if job.is_finished:
71 | ret = job.return_value
72 | elif job.is_queued:
73 | ret = {'status': 'in-queue'}
74 | elif job.is_started:
75 | ret = {'status': 'waiting'}
76 | elif job.is_failed:
77 | ret = {'status': 'failed'}
78 | else:
79 | ret = {'status': 'unknown'}
80 | return json.dumps(ret), 200
81 |
82 |
83 | if __name__ == "__main__":
84 | app.run(host='0.0.0.0', port=5000)
85 |
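86 | # A hypothetical status query for a queued job, using the route defined above:
87 | # curl http://127.0.0.1:5000/ansible/results/<job_id>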
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.10.3/mytest.yml:
--------------------------------------------------------------------------------
1 | - name: Initial Hosts
2 | remote_user: root
3 | hosts: initial
4 | gather_facts: false
5 | tasks:
6 | - name: Initial Job
7 | script: /work/software/initial.sh --basic
8 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.10.3/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible==1.9.6
2 | rq==1.3.0
3 | redis==3.5.3
4 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.10.3/work.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import redis
4 | from rq import Worker, Queue, Connection
5 | listen = ['default']
6 | redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
7 | conn = redis.from_url(redis_url)
8 |
9 |
10 | if __name__ == '__main__':
11 | with Connection(conn):
12 | worker = Worker(list(map(Queue, listen)))
13 | worker.work()
14 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.10.3/设备初始化后端API文档说明.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yuhongchun/devops/b8723334ec5dc1ef9db801f55e2b2c65ac012409/《DevOps和自动化运维实践》第5章/5.10.3/设备初始化后端API文档说明.pdf
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第5章/5.11/1.py:
--------------------------------------------------------------------------------
1 | # -*- coding=utf-8 -*-
2 | import json, sys, os
3 | from collections import namedtuple
4 | from ansible.parsing.dataloader import DataLoader
5 | from ansible.vars import VariableManager
6 | from ansible.inventory import Inventory, Host, Group
7 | from ansible.playbook.play import Play
8 | from ansible.executor.task_queue_manager import TaskQueueManager
9 | from ansible.plugins.callback import CallbackBase
10 | from ansible.executor.playbook_executor import PlaybookExecutor
11 | from datetime import datetime
12 | import time
13 | import paramiko
14 | import re,MySQLdb
15 | from random import choice
16 | import string,datetime
17 |
18 |
19 | class MyInventory(Inventory):
20 | def __init__(self, resource, loader, variable_manager):
21 | self.resource = resource
22 | self.inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=[])
23 | self.dynamic_inventory()
24 |
25 | def add_dynamic_group(self, hosts, groupname, groupvars=None):
26 | my_group = Group(name=groupname)
27 | if groupvars:
28 | for key, value in groupvars.iteritems():
29 | my_group.set_variable(key, value)
30 | for host in hosts:
31 | # set connection variables
32 | hostname = host.get("hostname")
33 | hostip = host.get('ip', hostname)
34 | hostport = host.get("port")
35 | username = host.get("username")
36 | password = host.get("password")
37 | ssh_key = host.get("ssh_key")
38 | my_host = Host(name=hostname, port=hostport)
39 | my_host.set_variable('ansible_ssh_host', hostip)
40 | my_host.set_variable('ansible_ssh_port', hostport)
41 | my_host.set_variable('ansible_ssh_user', username)
42 | my_host.set_variable('ansible_ssh_pass', password)
43 | my_host.set_variable('ansible_ssh_private_key_file', ssh_key)
44 | for key, value in host.items():
45 | if key not in ["hostname", "port", "username", "password"]:
46 | my_host.set_variable(key, value)
47 | my_group.add_host(my_host)
48 |
49 | self.inventory.add_group(my_group)
50 |
51 | def dynamic_inventory(self):
52 | if isinstance(self.resource, list):
53 | self.add_dynamic_group(self.resource, 'default_group')
54 | elif isinstance(self.resource, dict):
55 | for groupname, hosts_and_vars in self.resource.items():
56 | self.add_dynamic_group(hosts_and_vars.get("hosts"), groupname, hosts_and_vars.get("vars"))
57 |
58 |
59 | class ModelResultsCollector(CallbackBase):
60 | def __init__(self, *args, **kwargs):
61 | super(ModelResultsCollector, self).__init__(*args, **kwargs)
62 | self.host_ok = {}
63 | self.host_unreachable = {}
64 | self.host_failed = {}
65 |
66 | def v2_runner_on_unreachable(self, result):
67 | self.host_unreachable[result._host.get_name()] = result
68 |
69 | def v2_runner_on_ok(self, result, *args, **kwargs):
70 | self.host_ok[result._host.get_name()] = result
71 |
72 | def v2_runner_on_failed(self, result, *args, **kwargs):
73 | self.host_failed[result._host.get_name()] = result
74 |
75 |
76 | class PlayBookResultsCollector(CallbackBase):
77 | CALLBACK_VERSION = 2.0
78 |
79 | def __init__(self, taskList, *args, **kwargs):
80 | super(PlayBookResultsCollector, self).__init__(*args, **kwargs)
81 | self.task_ok = {}
82 | self.task_skipped = {}
83 | self.task_failed = {}
84 | self.task_status = {}
85 | self.task_unreachable = {}
86 |
87 | def v2_runner_on_ok(self, result, *args, **kwargs):
88 | if result._host.get_name() in taskList:
89 | data = {}
90 | data['task'] = str(result._task).replace("TASK: ", "")
91 | taskList[result._host.get_name()].get('ok').append(data)
92 | self.task_ok[result._host.get_name()] = taskList[result._host.get_name()]['ok']
93 |
94 | def v2_runner_on_failed(self, result, *args, **kwargs):
95 | data = {}
96 | msg = result._result.get('stderr')
97 | if result._host.get_name() in taskList:
98 | data['task'] = str(result._task).replace("TASK: ", "")
99 | if msg is None:
100 | results = result._result.get('results')
101 | if results:
102 | task_item = {}
103 | for rs in results:
104 | msg = rs.get('msg')
105 | if msg:
106 | task_item[rs.get('item')] = msg
107 | data['msg'] = task_item
108 | taskList[result._host.get_name()]['failed'].append(data)
109 | else:
110 | msg = result._result.get('msg')
111 | data['msg'] = msg
112 | taskList[result._host.get_name()].get('failed').append(data)
113 | else:
114 | data['msg'] = msg
115 | taskList[result._host.get_name()].get('failed').append(data)
116 | self.task_failed[result._host.get_name()] = taskList[result._host.get_name()]['failed']
117 |
118 | def v2_runner_on_unreachable(self, result):
119 | self.task_unreachable[result._host.get_name()] = result
120 |
121 | def v2_runner_on_skipped(self, result):
122 | if result._host.get_name() in taskList:
123 | data = {}
124 | data['task'] = str(result._task).replace("TASK: ", "")
125 | taskList[result._host.get_name()].get('skipped').append(data)
126 | self.task_ok[result._host.get_name()] = taskList[result._host.get_name()]['skipped']
127 |
128 | def v2_playbook_on_stats(self, stats):
129 | hosts = sorted(stats.processed.keys())
130 | for h in hosts:
131 | t = stats.summarize(h)
132 | self.task_status[h] = {
133 | "ok": t['ok'],
134 | "changed": t['changed'],
135 | "unreachable": t['unreachable'],
136 | "skipped": t['skipped'],
137 | "failed": t['failures']
138 | }
139 |
140 |
141 | class CallbackModule(CallbackBase):
142 | """
143 | This callback module tells you how long your plays ran for.
144 | """
145 | CALLBACK_VERSION = 2.0
146 | CALLBACK_TYPE = 'aggregate'
147 | CALLBACK_NAME = 'timer'
148 | CALLBACK_NEEDS_WHITELIST = True
149 |
150 | def __init__(self):
151 | super(CallbackModule, self).__init__()
152 |
153 | self.start_time = datetime.now()
154 |
155 | def days_hours_minutes_seconds(self, runtime):
156 | minutes = (runtime.seconds // 60) % 60
157 | r_seconds = runtime.seconds - (minutes * 60)
158 | return runtime.days, runtime.seconds // 3600, minutes, r_seconds
159 |
160 | def playbook_on_stats(self, stats):
161 | self.v2_playbook_on_stats(stats)
162 |
163 | def v2_playbook_on_stats(self, stats):
164 | end_time = datetime.now()
165 | runtime = end_time - self.start_time
166 | self._display.display(
167 | "Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime)))
168 |
169 |
170 | class ANSRunner(object):
171 | def __init__(self, resource, *args, **kwargs):
172 | self.resource = resource
173 | self.inventory = None
174 | self.variable_manager = None
175 | self.loader = None
176 | self.options = None
177 | self.passwords = None
178 | self.callback = None
179 | self.callback_plugins = None
180 | self.__initializeData()
181 | self.results_raw = {}
182 |
183 | def __initializeData(self):
184 | Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'timeout', 'remote_user',
185 | 'ask_pass', 'private_key_file', 'ssh_common_args', 'ssh_extra_args',
186 | 'sftp_extra_args',
187 | 'scp_extra_args', 'become', 'become_method', 'become_user', 'ask_value_pass',
188 | 'verbosity',
189 | 'check', 'listhosts', 'listtasks', 'listtags', 'syntax'])
190 |
191 | self.variable_manager = VariableManager()
192 | self.loader = DataLoader()
193 | self.options = Options(connection='smart', module_path=None, forks=100, timeout=10,
194 | remote_user='root', ask_pass=False, private_key_file=None, ssh_common_args=None,
195 | ssh_extra_args=None,
196 | sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None,
197 | become_user='root', ask_value_pass=False, verbosity=None, check=False, listhosts=False,
198 | listtasks=False, listtags=False, syntax=False)
199 |
200 | self.passwords = dict(sshpass=None, becomepass=None)
201 | self.inventory = MyInventory(self.resource, self.loader, self.variable_manager).inventory
202 | self.variable_manager.set_inventory(self.inventory)
203 |
204 | def run_model(self, host_list, module_name, module_args):
205 | """
206 | run module from ansible ad-hoc.
207 | module_name: ansible module_name
208 | module_args: ansible module args
209 | """
210 | play_source = dict(
211 | name="Ansible Play",
212 | hosts=host_list,
213 | gather_facts='no',
214 | tasks=[dict(action=dict(module=module_name, args=module_args))]
215 | )
216 | play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
217 | tqm = None
218 | self.callback = ModelResultsCollector()
219 | try:
220 | tqm = TaskQueueManager(
221 | inventory=self.inventory,
222 | variable_manager=self.variable_manager,
223 | loader=self.loader,
224 | options=self.options,
225 | passwords=self.passwords,
226 | )
227 | tqm._callback_plugins = [self.callback]
228 | result = tqm.run(play)
229 | finally:
230 | if tqm is not None:
231 | tqm.cleanup()
232 |
233 | def run_playbook(self, host_list, playbook_path):
234 | """
235 | run ansible playbook
236 | """
237 | global taskList
238 | taskList = {}
239 | for host in host_list:
240 | taskList[host] = {}
241 | taskList[host]['ok'] = []
242 | taskList[host]['failed'] = []
243 | taskList[host]['skipped'] = []
244 | try:
245 | self.callback = PlayBookResultsCollector(taskList)
246 | # self.callback_plugins = CallbackModule()
247 | executor = PlaybookExecutor(
248 | playbooks=[playbook_path], inventory=self.inventory, variable_manager=self.variable_manager,
249 | loader=self.loader,
250 | options=self.options, passwords=self.passwords,
251 | )
252 | # executor._tqm._callback_plugins = [self.callback_plugins]
253 | executor._tqm._callback_plugins = [self.callback]
254 | executor._tqm._callback_plugins += [CallbackModule()]
255 | executor.run()
256 | except Exception as e:
257 | print(e)
258 | return False
259 |
260 | def get_model_result(self):
261 | self.results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
262 | for host, result in self.callback.host_ok.items():
263 | self.results_raw['success'][host] = result._result
264 |
265 | for host, result in self.callback.host_failed.items():
266 | self.results_raw['failed'][host] = result._result
267 |
268 | for host, result in self.callback.host_unreachable.items():
269 | self.results_raw['unreachable'][host] = result._result
270 | return json.dumps(self.results_raw)
271 |
272 | def get_playbook_result(self):
273 | self.results_raw = {'skipped': {}, 'failed': {}, 'ok': {}, "status": {}, 'unreachable': {}}
274 |
275 | for host, result in self.callback.task_ok.items():
276 | self.results_raw['ok'][host] = result
277 |
278 | for host, result in self.callback.task_failed.items():
279 | self.results_raw['failed'][host] = result
280 |
281 | for host, result in self.callback.task_status.items():
282 | self.results_raw['status'][host] = result
283 |
284 | for host, result in self.callback.task_skipped.items():
285 | self.results_raw['skipped'][host] = result
286 |
287 | for host, result in self.callback.task_unreachable.items():
288 | self.results_raw['unreachable'][host] = result._result
289 | return json.dumps(self.results_raw)
290 |
291 |
292 | def compose_dynamic_hosts(host_list):
293 | hosts = []
294 | for i in host_list:
295 | hosts.append({'hostname': i})
296 | dic = {}
297 | ret = {}
298 | dic['hosts'] = hosts
299 | ret['dynamic_host'] = dic
300 | return ret
301 | logFile="/tmp/test.log"
302 | def logger(logContent,logFile):
303 | with open(logFile,'a') as f:
304 | f.write(logContent+'\n')
305 |
306 | def get_today_date():
307 | now_time = datetime.datetime.now()
308 | yes_time = now_time + datetime.timedelta(days=0)
309 | yes_time_nyr = yes_time.strftime('%Y-%m-%d')
310 | result = str(yes_time_nyr)
311 | return result
312 |
313 | def insertpass(ip,passwd):
314 | update_sql = "INSERT INTO password(ip,passwd,submission_date) VALUES ('%s','%s','%s');" % (ip,passwd,get_today_date())
315 | conn=MySQLdb.connect(host="127.0.0.1",user="root",passwd="eju@china",db="passroot")
316 | cur =conn.cursor()
317 | cur.execute(update_sql)
318 | cur.close()
319 | conn.commit()
320 | conn.close()
321 | return True
322 |
323 | def GenPassword(length=8,chars=string.ascii_letters+string.digits):
324 | return ''.join([choice(chars) for i in range(length)])
325 | if __name__ == '__main__':
326 | with open('test','r') as fp:
327 | for i in fp.readlines():
328 | ip =i.strip()
329 | resource = [{"hostname": ip, "username": "root", "ssh_key": "/root/.ssh/id_rsa"}]
330 | passwd = GenPassword(32)
331 | shell_name = "echo %s | passwd --stdin root" % passwd
332 | rbt = ANSRunner(resource) # resource can be a list or a dict; with ssh-key auth in place, password auth is not used
333 | rbt.run_model(host_list=[ip], module_name='shell', module_args=shell_name)
334 | data = rbt.get_model_result()
335 | data = json.loads(data)
336 | print data
337 | DA={}
338 | if data.get('success'):
339 | insertpass(ip, passwd)
340 | else:
341 | logger(str(ip), logFile)
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/bash/bash:
--------------------------------------------------------------------------------
1 | # Place the bash binary your systems need here; download a version suitable for your environment from the official site
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/bash/bash.sls:
--------------------------------------------------------------------------------
1 | /bin/bash:
2 | file.managed:
3 | - source: salt://bash/bash
4 | - user: root
5 | - group: root
6 | - mode: 755
7 |
8 | bash:
9 | cmd.run:
10 | - name: 'source /etc/profile'
11 | - watch:
12 | - file: /bin/bash
13 | # Watch the /bin/bash file; if it changes, run the source command so the environment variables in the profile take effect immediately
14 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/saferm/safe-rm:
--------------------------------------------------------------------------------
1 | # Place the safe-rm file your systems need here; download a version suitable for your environment from the official site
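2 | # (safe-rm is a wrapper around rm that refuses to delete the paths listed in /etc/safe-rm.conf;
3 | # the upstream project is typically found at https://launchpad.net/safe-rm)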
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/saferm/safe-rm.conf:
--------------------------------------------------------------------------------
1 | /
2 | /*
3 | /bin
4 | /boot
5 | /dev
6 | /etc
7 | /home
8 | /initrd
9 | /lib
10 | /lib32
11 | /lib64
12 | /proc
13 | /root
14 | /sbin
15 | /sys
16 | /usr
17 | /usr/bin
18 | /usr/include
19 | /usr/lib
20 | /usr/local
21 | /usr/local/bin
22 | /usr/local/include
23 | /usr/local/sbin
24 | /usr/local/share
25 | /usr/sbin
26 | /usr/share
27 | /usr/src
28 | /var
29 | /etc/salt/minion.d/_schedule.conf
30 | /etc/salt/minion
31 | /usr/bin/salt-minion
32 | /etc/init.d/salt-minion
33 | /data/logs
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/saferm/saferm.sls:
--------------------------------------------------------------------------------
1 | /usr/local/bin/safe-rm:
2 |   file.managed:
3 |     - source: salt://saferm/safe-rm
4 |     - user: root
5 |     - group: root
6 |     - mode: 755
7 |
8 | /etc/safe-rm.conf:
9 |   file.managed:
10 |     - source: salt://saferm/safe-rm.conf
11 |     - user: root
12 |     - group: root
13 |     - mode: 644
14 |     - backup: minion
15 |
16 | saferm:
17 |   cmd.run:
18 |     - name: 'ln -s /usr/local/bin/safe-rm /usr/local/bin/rm; sed -i "s/PATH=/PATH=\/usr\/local\/bin:/" /root/.bash_profile; source /root/.bash_profile;'
19 |     - watch:
20 |       - file: /usr/local/bin/safe-rm
21 |       - file: /etc/safe-rm.conf
22 | # watch both /usr/local/bin/safe-rm and /etc/safe-rm.conf on the Minion; if either changes, run the Shell commands defined under name
23 |
--------------------------------------------------------------------------------
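Note: a quick hypothetical check that the wrapper is active on a Minion (it needs a fresh login shell so the PATH change in /root/.bash_profile applies):

    which rm       # should now resolve to /usr/local/bin/rm (the safe-rm symlink)
    rm -rf /etc    # safe-rm refuses to delete paths listed in /etc/safe-rm.conf

--------------------------------------------------------------------------------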
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/saltcheck/salt_agent.cron:
--------------------------------------------------------------------------------
1 | */5 * * * * root /usr/bin/saltcheck.sh >>/var/log/salt.log
2 | # i.e. run /usr/bin/saltcheck.sh every 5 minutes to check on the salt-minion service
3 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/saltcheck/saltcheck.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Salt-minion process check
4 | # restart the service when no salt-minion process is found
5 |
6 | export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin
7 | salt_client=`ps -ef |grep 'salt-minion' |grep -v grep|wc -l`
8 |
9 | salt_check() {
10 |     if [ $salt_client -ge 1 ]
11 |     then
12 |         echo "ok"
13 |     else
14 |         /etc/init.d/salt-minion restart
15 |     fi
16 | }
17 |
18 | salt_check
19 |
20 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/base/saltcheck/saltcheck.sls:
--------------------------------------------------------------------------------
1 | /usr/bin/saltcheck.sh:
2 |   file.managed:
3 |     - source: salt://saltcheck/saltcheck.sh
4 |     - user: root
5 |     - group: root
6 |     - mode: 755
7 |     - backup: minion
8 |
9 | /etc/cron.d/salt_agent.cron:
10 |   file.managed:
11 |     - source: salt://saltcheck/salt_agent.cron
12 |     - user: root
13 |     - group: root
14 |     - mode: 644
15 |     - backup: minion
16 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/host/hosts.allow:
--------------------------------------------------------------------------------
1 | # This file defines the IPs from which the Hadoop and ES nodes are allowed to connect to each other; SSH connections between any other machines are not allowed.
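2 |
3 | # Hypothetical example entries (replace the network with your own cluster segment):
4 | sshd: 10.0.0.0/255.255.255.0 : allow
5 | sshd: ALL : deny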
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/host/hosts.sls:
--------------------------------------------------------------------------------
1 | /etc/hosts.allow:
2 |   file.managed:
3 |     - source: salt://host/hosts.allow
4 |     - user: root
5 |     - group: root
6 |     - mode: 644
7 |     - backup: minion
8 |
9 | hosts_allow:
10 |   cmd.run:
11 |     - name: 'md5sum /etc/hosts.allow'
12 |     - watch:
13 |       - file: /etc/hosts.allow
14 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/keepalived/keepalived.sh:
--------------------------------------------------------------------------------
1 | # This is the keepalived.sh startup script; its content is fairly simple, so write one to fit your own environment
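2 |
3 | # A minimal hypothetical sketch (not the book's original): install keepalived
4 | # from an internal YUM repository, start it and make it survive reboots.
5 | yum -y install keepalived
6 | /etc/init.d/keepalived start
7 | chkconfig keepalived on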
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/keepalived/keepalived.sls:
--------------------------------------------------------------------------------
1 | keepalived:
2 |   file.managed:
3 |     - name: /root/keepalived.sh
4 |     # note: name here gives the full path where the file is saved on the Minion
5 |     - source: salt://keepalived/keepalived.sh
6 |     - user: root
7 |     - group: root
8 |     - mode: 644
9 |     - backup: minion
10 |   cmd.run:
11 |     - name: bash /root/keepalived.sh
12 |     # this name gives the exact Shell command executed on the Minion
13 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/nginx/nginx.sls:
--------------------------------------------------------------------------------
1 | nginx_install:
2 |   file.managed:
3 |     - source: salt://nginx/nginx_install.sh
4 |     - name: /root/anzhuang.sh
5 |     - user: root
6 |     - group: root
7 |     - mode: 644
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/nginx/nginx_install.sh:
--------------------------------------------------------------------------------
1 | # nginx_install.sh is our own script for installing the Nginx service; it installs from an internal YUM server and is straightforward, so it is omitted here.
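2 |
3 | # A hypothetical sketch of such a script: install Nginx from the internal
4 | # YUM repository, start it and enable it at boot.
5 | yum -y install nginx
6 | service nginx start
7 | chkconfig nginx on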
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/rsyslog/rsyslog.conf:
--------------------------------------------------------------------------------
1 | # This is a self-defined rsyslog.conf; write it to match your own environment
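2 |
3 | # Hypothetical example: forward all logs to a central collector over TCP
4 | # (replace loghost with the real address of your log server).
5 | *.* @@loghost:514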
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/rsyslog/rsyslog.sls:
--------------------------------------------------------------------------------
1 | /etc/rsyslog.conf:
2 |   file.managed:
3 |     - source: salt://rsyslog/rsyslog.conf
4 |     - user: root
5 |     - group: root
6 |     - mode: 644
7 |     - backup: minion
8 |
9 | rsyslog:
10 |   service.running:
11 |     - enable: True
12 |     - watch:
13 |       - file: /etc/rsyslog.conf
14 | # watch monitors /etc/rsyslog.conf on the Minion; if its content changes, the rsyslog service is restarted as well
15 |
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/waf/config.lua:
--------------------------------------------------------------------------------
1 | -- config.lua is our business Lua file, omitted here
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/prod/waf/waf.sls:
--------------------------------------------------------------------------------
1 | /usr/local/waf/lualib/ng/config.lua:
2 |   file.managed:
3 |     - source: salt://waf/config.lua
4 |     - user: root
5 |     - group: root
6 |     - mode: 644
7 |     - backup: minion
8 |
9 | nginxluareload:
10 |   cmd.run:
11 |     - name: '/usr/local/waf/nginx/sbin/nginx -c /usr/local/waf/nginx/conf/nginx.conf -t && /usr/local/waf/nginx/sbin/nginx -s reload'
12 |     - watch:
13 |       - file: /usr/local/waf/lualib/ng/config.lua
14 | # the Minion watches /usr/local/waf/lualib/ng/config.lua; if it changes, the Shell commands defined under name are executed
--------------------------------------------------------------------------------
/《DevOps和自动化运维实践》第6章/6.3/srv/salt/top.sls:
--------------------------------------------------------------------------------
1 | # Our small CDN project already runs a mature CMDB asset-management system; a Python script syncs with it every 30 minutes and keeps these node groups up to date
2 | base:
3 |   '*':
4 |     - bash.bash
5 |     - saferm.saferm
6 |     - snmp.snmpd
7 |     - saltcheck.saltcheck
8 |
9 | prod:
10 |   'host':
11 |     - match: nodegroup
12 |     - host.hosts
13 |   'waf':
14 |     - match: nodegroup
15 |     - waf.waf
16 |     - host.hosts
17 |   'hadoop':
18 |     - match: nodegroup
19 |     - rsyslog.rsyslog
20 |     - bash.bash
21 |   'nginx':
22 |     - match: nodegroup
23 |     - host.hosts
24 |     - nginx.nginx
25 |   'gitlab':
26 |     - match: nodegroup
27 |     - gitlab.gitlab
28 |   'rsyslog':
29 |     - match: nodegroup
30 |     - host.hosts
31 |     - rsyslog.rsyslog
32 |   'keepalived':
33 |     - match: nodegroup
34 |     - keepalived.keepalived
--------------------------------------------------------------------------------
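Note: the node groups targeted above (host, waf, hadoop, nginx, gitlab, rsyslog, keepalived) are assumed to be defined under nodegroups: in the Master configuration and kept current by the sync script mentioned in the comment. With those in place, the whole tree is applied with:

    salt '*' state.highstate         # apply base plus the matching prod states everywhere
    salt -N nginx state.highstate    # or limit the run to a single node group

--------------------------------------------------------------------------------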
/《DevOps和自动化运维实践》第7章/7.7.1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openjdk:8-jdk
2 | LABEL MAINTAINER="Nicolas De Loof "
3 | ARG user=jenkins
4 | ARG group=jenkins
5 | ARG uid=1000
6 | ARG gid=1000
7 | ARG JENKINS_AGENT_HOME=/home/${user}
8 | ENV JENKINS_AGENT_HOME ${JENKINS_AGENT_HOME}
9 |
10 | RUN groupadd -g ${gid} ${group} \
11 | && useradd -d "${JENKINS_AGENT_HOME}" -u "${uid}" -g "${gid}" -m -s /bin/bash "${user}"
12 | # setup SSH server
13 | RUN apt-get update \
14 | && apt-get install --no-install-recommends -y openssh-server \
15 | && rm -rf /var/lib/apt/lists/*
16 | RUN sed -i /etc/ssh/sshd_config \
17 | -e 's/#PermitRootLogin.*/PermitRootLogin no/' \
18 | -e 's/#RSAAuthentication.*/RSAAuthentication yes/' \
19 | -e 's/#PasswordAuthentication.*/PasswordAuthentication no/' \
20 | -e 's/#SyslogFacility.*/SyslogFacility AUTH/' \
21 | -e 's/#LogLevel.*/LogLevel INFO/' && \
22 | mkdir /var/run/sshd
23 |
24 | VOLUME "${JENKINS_AGENT_HOME}" "/tmp" "/run" "/var/run"
25 |
26 | WORKDIR "${JENKINS_AGENT_HOME}"
27 | COPY setup-sshd /usr/local/bin/setup-sshd
28 | EXPOSE 22
29 | ENTRYPOINT ["setup-sshd"]
30 |
--------------------------------------------------------------------------------
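Note: the setup-sshd script referenced by the COPY instruction must be present in the build context; the image then builds as usual (the tag below is only an example):

    docker build -t yuhongchun/jenkins-slave-sshd:v1.0 .

--------------------------------------------------------------------------------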
/《DevOps和自动化运维实践》第7章/7.7.2/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 |   jenkins-master:
4 |     image: jenkins
5 |     container_name: jenkins-master
6 |     ports:
7 |       - "8080:8080"
8 |     environment:
9 |       - TZ=Asia/Shanghai
10 |     volumes:
11 |       - /work/jenkins_master:/var/jenkins_home
12 |     extra_hosts:
13 |       - "gitlab.bmkp.cn:10.186.6.170"
14 |   jenkins-slave-jmeter1:
15 |     image: yuhongchun/jenkins-slave-docker-jmeter:v1.0
16 |     container_name: jenkins-slave-jmeter1
17 |     ports:
18 |       - "2226:22"
19 |     environment:
20 |       - TZ=Asia/Shanghai
21 |     volumes:
22 |       - /var/run/docker.sock:/var/run/docker.sock
23 |     extra_hosts:
24 |       - "gitlab.bmkp.cn:10.186.6.170"
25 |     command:
26 |       - "ssh-rsa B3NzaC1yc2EAAAADAQABAAABAQDQZb1vjKLkWAUOJaua/8CSFSID6L+8MbgutffdBqIeyoUvLUPpH2NkFAxKf8hW3Dj0lGkQ36hutsM23Jcs8b7rjhScmx2obyp7J1s7wic1GE3xaQY1Y0qwaxL3LIkmkrqkTdYyiVnD0Qv4PCx5GBTLQT2Xhf7wjE6oQcTOaIVu/RkooBv7sfEbcGMLwZmzFqqGtY0zEv/tbsvusVg7GPMFTvMw1r9l1C1G5Rxgcz76Vy+4MNskxdBsOZDWoX4gGulkbCBNP5Hf4WsvfH1HzPaoc3PTPUwht7/U2OtLNzO2C1rphRA6A4Eksyc3KI8OCSbku0KnGyM836QKtOv6UyR3 jenkins@1314520d"
27 |   jenkins-slave-mvn1:
28 |     image: yuhongchun/jenkins-slave-docker-maven:v1.0
29 |     container_name: jenkins-slave-mvn1
30 |     ports:
31 |       - "2222:22"
32 |     environment:
33 |       - TZ=Asia/Shanghai
34 |     volumes:
35 |       - /var/run/docker.sock:/var/run/docker.sock
36 |     extra_hosts:
37 |       - "gitlab.bmkp.cn:10.186.6.170"
38 |     command:
39 |       - "ssh-rsa B3NzaC1yc2EAAAADAQABAAABAQDQZb1vjKLkWAUOJaua/8CSFSID6L+8MbgutffdBqIeyoUvLUPpH2NkFAxKf8hW3Dj0lGkQ36hutsM23Jcs8b7rjhScmx2obyp7J1s7wic1GE3xaQY1Y0qwaxL3LIkmkrqkTdYyiVnD0Qv4PCx5GBTLQT2Xhf7wjE6oQcTOaIVu/RkooBv7sfEbcGMLwZmzFqqGtY0zEv/tbsvusVg7GPMFTvMw1r9l1C1G5Rxgcz76Vy+4MNskxdBsOZDWoX4gGulkbCBNP5Hf4WsvfH1HzPaoc3PTPUwht7/U2OtLNzO2C1rphRA6A4Eksyc3KI8OCSbku0KnGyM836QKtOv6UyR3 jenkins@1314520d"
40 |   jenkins-slave-sbt1:
41 |     image: yuhongchun/jenkins-slave-sbt:1.0
42 |     container_name: jenkins-slave-sbt1
43 |     ports:
44 |       - "2224:22"
45 |     environment:
46 |       - TZ=Asia/Shanghai
47 |     volumes:
48 |       - /var/run/docker.sock:/var/run/docker.sock
49 |     extra_hosts:
50 |       - "gitlab.bmkp.cn:10.186.6.170"
51 |     networks:
52 |       - default
53 |     command:
54 |       - "ssh-rsa B3NzaC1yc2EAAAADAQABAAABAQDQZb1vjKLkWAUOJaua/8CSFSID6L+8MbgutffdBqIeyoUvLUPpH2NkFAxKf8hW3Dj0lGkQ36hutsM23Jcs8b7rjhScmx2obyp7J1s7wic1GE3xaQY1Y0qwaxL3LIkmkrqkTdYyiVnD0Qv4PCx5GBTLQT2Xhf7wjE6oQcTOaIVu/RkooBv7sfEbcGMLwZmzFqqGtY0zEv/tbsvusVg7GPMFTvMw1r9l1C1G5Rxgcz76Vy+4MNskxdBsOZDWoX4gGulkbCBNP5Hf4WsvfH1HzPaoc3PTPUwht7/U2OtLNzO2C1rphRA6A4Eksyc3KI8OCSbku0KnGyM836QKtOv6UyR3 jenkins@1314520d"
--------------------------------------------------------------------------------
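Note: once the images referenced above are available locally (or pullable), the master and the three slaves are started and checked with:

    docker-compose up -d    # start jenkins-master and the three slave containers
    docker-compose ps       # verify that all four containers are running

--------------------------------------------------------------------------------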
/《DevOps和自动化运维实践》第7章/7.8/1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #coding=utf-8
3 | import urllib
4 | import urllib2
5 | import json
6 | import sys
7 | import socket
8 |
9 | reload(sys)
10 | sys.setdefaultencoding('utf8')
11 |
12 | # read the message to be sent to DingTalk (here: the accumulated Jenkins log)
13 | def extractionMessage():
14 |     with open('/home/jenkins/jenkins.log','r') as f:
15 |         return f.read()
16 |     # alternatively, assemble the message to send by hand:
17 |     # return "##### DingTalk message "
18 |
19 | # send the message to the DingTalk webhook
20 | def sendDingDingMessage(url, data):
21 |     req = urllib2.Request(url)
22 |     req.add_header("Content-Type", "application/json; charset=utf-8")
23 |     opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
24 |     response = opener.open(req, json.dumps(data))
25 |     return response.read()
26 | # main entry: post the Jenkins log to the DingTalk robot webhook
27 | def main():
28 |     posturl = "https://oapi.dingtalk.com/robot/send?access_token=token_key"
29 |     data = {"msgtype": "markdown", "markdown": {"text": extractionMessage(),"title":"Jenkins","isAtAll": "false"}}
30 |     sendDingDingMessage(posturl, data)
31 |
32 | if __name__ == '__main__':
33 |     main()
--------------------------------------------------------------------------------
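Note: before wiring 1.py into a Jenkins job, the robot webhook can be smoke-tested from the shell (token_key is a placeholder here, just as in the script):

    curl -s -H 'Content-Type: application/json' \
         -d '{"msgtype": "text", "text": {"content": "Jenkins test"}}' \
         "https://oapi.dingtalk.com/robot/send?access_token=token_key"

--------------------------------------------------------------------------------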