├── README.md
├── cmd.sh
├── du.sh
└── proc.sh

/README.md:
--------------------------------------------------------------------------------
# falcon-scripts

Metric-collection scripts for [open-falcon](https://github.com/open-falcon/falcon-plus), Xiaomi's open-source monitoring system.

## du.sh

Monitors the size of selected directories.

Edit the `dirs` or `parent_dirs` variable in the script to choose what is monitored:

- `dirs` lists explicit directories to monitor.
- `parent_dirs` is for monitoring directories in bulk: list the parent directories, and every immediate subdirectory of each is monitored.

| Metric | Description |
|--------|-------------|
| du.bytes.used | Directory size, in bytes |

## proc.sh

Monitors the CPU, memory, and I/O usage of selected processes.

Edit `ports` or `cmds` to choose which processes are monitored:

- `ports` finds processes by listening port.
- `cmds` finds processes by command name.

| Metric | Description |
|--------|-------------|
| proc.cpu | CPU usage of the process, in percent |
| proc.mem | Memory usage of the process, in bytes |
| proc.io.in | Bytes read by the process |
| proc.io.out | Bytes written by the process |

## cmd.sh

Starts, stops, and checks all of the collection scripts with one command.

List the script names in the `scripts` variable, then:

```bash
bash cmd.sh start   # start all collectors
bash cmd.sh stop    # stop all collectors
bash cmd.sh status  # show collector status
```

Remember to replace the IP and port of the push endpoint in each script with your own falcon-agent address.
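The scripts report data through open-falcon's push API: a JSON array of samples POSTed to the agent's `/v1/push` endpoint. As a sanity check before running the collectors, you can push one sample by hand; a minimal sketch, assuming a falcon-agent listening on 127.0.0.1:1988 (substitute your own address, as the scripts hard-code theirs):

```bash
# Push a single hand-written du.bytes.used sample to a local falcon-agent.
curl -X POST http://127.0.0.1:1988/v1/push -d "[{
  \"endpoint\": \"$(hostname)\", \"metric\": \"du.bytes.used\",
  \"value\": 1024, \"step\": 60, \"counterType\": \"GAUGE\",
  \"timestamp\": $(date +%s), \"tags\": \"mount=/tmp\"
}]"
```

If the agent is up, it replies with `success`, and the sample becomes visible in the dashboard after one step.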
--------------------------------------------------------------------------------

/cmd.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# export scripts=$PWD
# cd $scripts && bash cmd.sh stop && svn update && cd $scripts && bash cmd.sh start

# Log directory
LOG_DIR=/bgi/logs/open-falcon
mkdir -p $LOG_DIR

# Collection scripts to manage
scripts=(du.sh proc.sh)

function start(){
    # Use the scripts given as arguments, or all of them by default.
    arr=("$@")
    if [ ${#arr[@]} -eq 0 ]; then
        arr=("${scripts[@]}")
    fi

    for sh in "${arr[@]}"; do
        echo start $sh
        nohup bash $sh $LOG_DIR >${LOG_DIR}/$sh.out 2>&1 &
        sleep 1
    done
}

function stop(){
    arr=("$@")
    if [ ${#arr[@]} -eq 0 ]; then
        arr=("${scripts[@]}")
    fi

    for sh in "${arr[@]}"; do
        # Find the collector's PIDs, excluding grep itself and this script.
        pids=$(ps aux | grep -v grep | grep -v cmd.sh | grep "$sh" | awk '{print $2}' | tr -s '\n' ' ')
        echo stop $sh $pids
        if [ -n "$pids" ]; then
            kill -9 $pids
        fi
    done
}

function status(){
    arr=("$@")
    if [ ${#arr[@]} -eq 0 ]; then
        arr=("${scripts[@]}")
    fi

    for sh in "${arr[@]}"; do
        pids=$(ps aux | grep -v grep | grep -v cmd.sh | grep "$sh" | awk '{print $2}' | tr -s '\n' ' ')
        echo status $sh $pids
    done
}

# Dispatch: the first argument names the function (start/stop/status),
# the rest are passed through as script names.
"$@"
--------------------------------------------------------------------------------

/du.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Directory for log output; defaults to the current directory.
logdir=$1
: ${logdir:=.}

# Directories to monitor explicitly (leave empty to disable).
# dirs="/bgi/redis_data /bgi/docker_image_container"
dirs=""

# Parent directories whose immediate subdirectories are monitored in bulk.
# If parent directories are nested, the outer directory must come last.
parent_dirs="/bgi/blockchain_data /bgi/kblockchain_data /bgi/redis_data /bgi/logs /bgi"

hostname=`hostname`
step=60
# printf template for one sample; the leading comma is stripped below.
metric=",{\"endpoint\":\"$hostname\",\"metric\":\"du.bytes.used\",\"value\":%d,\"step\":$step,\"counterType\":\"GAUGE\",\"timestamp\":%d,\"tags\":\"mount=%s\"}"
olddate=`date +%Y%m%d`
while true; do
    # Collect directory sizes into a temporary file.
    > du.tmp
    ts=`date +%s`
    if [ -n "$dirs" ]; then
        du -b -s $dirs --exclude=overlay >> du.tmp
    fi
    du -b -d 1 $parent_dirs --exclude=overlay >> du.tmp
    metrics=`sort -u du.tmp | awk -v metric="$metric" -v ts=$ts '{v=$1; mount=$2; printf metric, v, ts, mount}'`
    rm du.tmp

    # Wrap the samples in a JSON array, dropping the leading comma.
    if [ "$metrics" != "" ]; then
        metrics='['${metrics:1}']'
    fi

    # Rotate the log once a day.
    newdate=`date +%Y%m%d`
    if [ $newdate != $olddate ]; then
        mv ${logdir}/du.log ${logdir}/du${olddate}.log
        olddate=$newdate
    fi
    echo "$metrics" >> ${logdir}/du.log

    curl -X POST -d "$metrics" http://192.168.29.244:1988/v1/push
    sleep $step
done
--------------------------------------------------------------------------------

/proc.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Directory for log output; defaults to the current directory.
logdir=$1
: ${logdir:=.}

# Processes found by listening port.
ports=(22 80 8080)
# Processes found by command name.
cmds=(client_bc redis java dockerd zookeeper kafka orderer peer nginx)

hostname=`hostname`
step=60
olddate=`date +%Y%m%d`

function send(){
    tags=$1   # e.g. port=$port or pid=$pid,cmdline=$cmd
    pid=$2
    ts=`date +%s`

    cpu=`ps --no-heading --pid=$pid -o pcpu | sed s/[[:space:]]//g`   # CPU usage, percent
    ios=`cat /proc/$pid/io`
    ioin=`echo "$ios" | grep read_bytes | awk '{print $2}'`           # bytes read
    ioout=`echo "$ios" | grep -v cancelled_write_bytes | grep write_bytes | awk '{print $2}'`   # bytes written
    : ${cpu:=0} ${ioin:=0} ${ioout:=0}   # default to 0 if the process vanished mid-read
    mem=`cat /proc/$pid/status | grep -e VmRSS | awk '{print $2}'`    # resident memory, kB
    if [ "$mem" == "" ]; then
        mem=0
    fi
    mem=$(( mem * 1024 ))   # convert kB to bytes
    metrics="[{\"endpoint\":\"$hostname\",\"metric\":\"proc.cpu\",\"value\":$cpu,\"step\":$step,\"counterType\":\"GAUGE\",\"timestamp\":$ts,\"tags\":\"${tags}\"},{\"endpoint\":\"$hostname\",\"metric\":\"proc.mem\",\"value\":$mem,\"step\":$step,\"counterType\":\"GAUGE\",\"timestamp\":$ts,\"tags\":\"${tags}\"},{\"endpoint\":\"$hostname\",\"metric\":\"proc.io.in\",\"value\":$ioin,\"step\":$step,\"counterType\":\"GAUGE\",\"timestamp\":$ts,\"tags\":\"${tags}\"},{\"endpoint\":\"$hostname\",\"metric\":\"proc.io.out\",\"value\":$ioout,\"step\":$step,\"counterType\":\"GAUGE\",\"timestamp\":$ts,\"tags\":\"${tags}\"}]"

    # Rotate the log once a day.
    newdate=`date +%Y%m%d`
    if [ $newdate != $olddate ]; then
        mv ${logdir}/proc.log ${logdir}/proc${olddate}.log
        olddate=$newdate
    fi
    echo "$metrics" >> ${logdir}/proc.log

    curl -X POST -d "$metrics" http://192.168.29.244:1988/v1/push
    echo
}

while true; do

    for p in "${ports[@]}"; do
        pid=`netstat -anp | grep ":$p " | grep LISTEN | awk '{print $7}' | awk -F"/" '{print $1}' | uniq`
        if [ "$pid" != "" ]; then
            echo port=$p $pid
            send port=$p $pid
        fi
    done

    for cmd in "${cmds[@]}"; do
        pids=`ps -ef | grep $cmd | grep -v docker-containerd | grep -v grep | awk '{print $2}' | tr -s '\n' ' '`
        if [ "$pids" == "" ]; then
            continue
        fi

        for pid in $pids; do
            echo pid=$pid,cmdline=$cmd "$pid"
            send pid=$pid,cmdline=$cmd "$pid"
        done
    done

    sleep $step
done
--------------------------------------------------------------------------------
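For reference, a minimal sketch of what an additional collector managed by cmd.sh could look like; the script name (load.sh) and metric name (load.1min) are hypothetical, and the agent address should be replaced with your own. Like du.sh and proc.sh, it takes the log directory as its first argument, loops once per step, and pushes one JSON sample to the agent:

```bash
#!/bin/bash
# Hypothetical collector: reports the 1-minute load average.
logdir=$1
: ${logdir:=.}

hostname=`hostname`
step=60
while true; do
    ts=`date +%s`
    load=`awk '{print $1}' /proc/loadavg`   # 1-minute load average
    metrics="[{\"endpoint\":\"$hostname\",\"metric\":\"load.1min\",\"value\":$load,\"step\":$step,\"counterType\":\"GAUGE\",\"timestamp\":$ts,\"tags\":\"\"}]"
    echo "$metrics" >> ${logdir}/load.log
    curl -X POST -d "$metrics" http://192.168.29.244:1988/v1/push   # replace with your falcon-agent
    sleep $step
done
```

To have cmd.sh start, stop, and report on it alongside the others, add load.sh to the `scripts` array in cmd.sh.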