├── .gitignore ├── env ├── bashrc └── inputrc └── scripts ├── MegaRAID_SUM ├── block_attack_ips.sh ├── change_password.sh ├── check_appconn.sh ├── check_disks_sum.sh ├── check_dyn_qps.sh ├── check_failed_mountpoints.sh ├── check_ipmi_sensor_summary ├── check_megaraid_status ├── check_n_rerun_process.sh ├── clean_opensearch_indices.sh ├── create_iam_console_policy ├── create_iam_console_policy.sh └── policy_data │ ├── HEY_AWS_MFAPolicy.json │ └── HEY_AWS_SourcePolicy.json ├── create_iam_user_aksk.sh ├── create_tunnel.sh ├── deadnodes_watcher.sh ├── delete.sh ├── delete_hdfs_trash.sh ├── dmidecode_count.sh ├── dns_ops.sh ├── excludedn.sh ├── fastping.sh ├── getopts_example.sh ├── hadoop.sh ├── hdfs_clean ├── data.txt └── hdfs_clean.sh ├── icinga_schedule_downtime.sh ├── init_rhel_1.sh ├── initd_service.sh ├── install_python27.sh ├── k8s_sync_logs_s3.sh ├── macos_user_service ├── com.user.foobar.service.plist └── foobar.service.sh ├── openvpn_client ├── checkstatus.py └── vpnclient.sh ├── processes_cleanup.sh ├── rename_mysqldb.sh ├── reposync.sh ├── restrict_s3_bucket_policy.sh ├── restrict_s3_bucket_policy_tags.sh ├── s3fs_mountpoint.sh ├── s3usage.sh ├── sftp_hdfs ├── data.txt └── sftp_hdfs.sh ├── sftpuser ├── sftpuser.sh └── sshd_config_update ├── shell_expect_remote ├── config │ ├── commands.txt │ ├── commands.txt.example │ ├── hosts.txt │ └── hosts.txt.example ├── expect_run.exp ├── log │ └── run.log ├── main_shell.sh └── ssh_key │ └── id_rsa ├── smartroutes.sh ├── sync_ecr_repo.sh ├── try_lottery ├── my_balls.txt └── try_lottery.sh ├── xcp_bootstrap ├── bootstrap.sh └── configure.txt ├── xcp_extendlv.sh ├── xcp_getvnc.sh ├── xcp_ksinstvm ├── centos-6.4-x86_64-minimal.ks └── ksinstvm.sh ├── xcp_monitor.sh └── zcron /.gitignore: -------------------------------------------------------------------------------- 1 | # macOS 2 | .DS_Store 3 | 4 | # VIM 5 | *.swp 6 | 7 | # Pyc 8 | *.pyc 9 | 10 | # .vscode directory 11 | .vscode 12 | 13 | # .idea directory 14 | .idea 
-------------------------------------------------------------------------------- /env/bashrc: -------------------------------------------------------------------------------- 1 | # .bashrc 2 | 3 | # Source global definitions 4 | if [[ -f /etc/bashrc ]]; then 5 | . /etc/bashrc 6 | fi 7 | 8 | # Suppress the warning messages of default interactive shell 9 | export BASH_SILENCE_DEPRECATION_WARNING=1 10 | 11 | # User specific aliases and functions 12 | alias ll='ls -l' 13 | alias la='ls -A' 14 | alias lrt='ls -lrth' 15 | alias lvh='ls -lvh' 16 | alias tree='tree --dirsfirst' 17 | alias rm='rm -i' 18 | alias cp='cp -i' 19 | alias mv='mv -i' 20 | alias grep='grep --color=auto' 21 | alias watch='watch ' 22 | alias ccat='pygmentize -g -O style=inkpot' 23 | 24 | # LANG 25 | export LANG=en_US.UTF-8 26 | 27 | # Log the history commands with time stamps 28 | export HISTFILESIZE="1000000000" 29 | export HISTSIZE="1000000" 30 | export HISTTIMEFORMAT="%Y-%m-%d_%H:%M:%S " 31 | export HISTIGNORE="history*:pwd:ls:ll:la:lrt:lvh:clear:exit" 32 | export HISTCONTROL="ignoredups" 33 | 34 | # Enable auto-completion via sudo 35 | complete -cf sudo 36 | 37 | # Enable auto-completion of aws 38 | complete -cf aws 39 | 40 | # Enable color codes and pager for Git commands 41 | export LESS='-R' 42 | export GIT_PAGER='less -R' 43 | 44 | # Readline library accepts \001 and \002 as non-visible text delimiters 45 | # The bash-specific \[ and \] are translated to \001 and \002 46 | _nvt_open=$'\001' # non-visible text open 47 | _nvt_close=$'\002' # non-visible text close 48 | 49 | # Enable color in terminal with tput and non-visible text delimiters 50 | _tput_black=${_nvt_open}$(tput setaf 0)${_nvt_close} 51 | _tput_red=${_nvt_open}$(tput setaf 1)${_nvt_close} 52 | _tput_green=${_nvt_open}$(tput setaf 2)${_nvt_close} 53 | _tput_yellow=${_nvt_open}$(tput setaf 3)${_nvt_close} 54 | _tput_blue=${_nvt_open}$(tput setaf 4)${_nvt_close} 55 | _tput_magenta=${_nvt_open}$(tput setaf 5)${_nvt_close} 56 | 
_tput_cyan=${_nvt_open}$(tput setaf 6)${_nvt_close} 57 | _tput_white=${_nvt_open}$(tput setaf 7)${_nvt_close} 58 | _tput_bold=${_nvt_open}$(tput bold)${_nvt_close} 59 | _tput_reset=${_nvt_open}$(tput sgr0)${_nvt_close} 60 | 61 | # Check current public IP 62 | function publicip(){ 63 | local website_list=( 64 | ipinfo.io 65 | checkip.amazonaws.com 66 | ifconfig.co 67 | ifconfig.me 68 | ifconfig.io 69 | ) 70 | 71 | local website 72 | for website in ${website_list[@]}; do 73 | echo "${website}: ${_tput_yellow}$(curl -s ${website})${_tput_reset}" 74 | done 75 | } 76 | 77 | # Check specific public IP 78 | function checkip(){ 79 | curl -s ipinfo.io/$1 80 | } 81 | 82 | # Filesystem Markers & Jump 83 | export MARKPATH=$HOME/.marks 84 | 85 | function jump(){ 86 | cd -P $MARKPATH/$1 2>/dev/null || echo "No such mark: $1" 87 | } 88 | 89 | function mark(){ 90 | mkdir -p $MARKPATH; ln -s $(pwd) $MARKPATH/$1 91 | } 92 | 93 | function unmark(){ 94 | /bin/rm -i $MARKPATH/$1 95 | } 96 | 97 | function _marks(){ 98 | COMPREPLY=() 99 | local cur=${COMP_WORDS[COMP_CWORD]}; 100 | local com=${COMP_WORDS[COMP_CWORD-1]}; 101 | case $com in 102 | 'jump') 103 | local marks=($(ls ${MARKPATH})) 104 | COMPREPLY=($(compgen -W '${marks[@]}' -- $cur)) 105 | ;; 106 | 'unmark') 107 | local marks=($(ls ${MARKPATH})) 108 | COMPREPLY=($(compgen -W '${marks[@]}' -- $cur)) 109 | ;; 110 | esac 111 | } 112 | 113 | complete -F _marks jump 114 | complete -F _marks unmark 115 | 116 | # Faster change directory up N times 117 | function up(){ 118 | local arg="" 119 | local num=$1 120 | local idx 121 | if [[ ! -z "${num##*[!0-9]*}" ]]; then 122 | for idx in $(seq 1 $num); do 123 | arg+="../" 124 | done 125 | local cmd="cd ${arg}" 126 | eval "$cmd;pwd" 127 | else 128 | local cmd="cd .." 
129 | eval "$cmd;pwd" 130 | fi 131 | } 132 | -------------------------------------------------------------------------------- /env/inputrc: -------------------------------------------------------------------------------- 1 | # Don't require two tabs to show all completions 2 | set show-all-if-ambiguous on 3 | 4 | # Enable case-insensitive tab completion 5 | set completion-ignore-case on 6 | 7 | # Allow the use the "up" and "down" keys to 8 | # incrementally search through command history 9 | "\e[A": history-search-backward 10 | "\e[B": history-search-forward 11 | 12 | # Allow the use of the Home/End keys 13 | "\e[1~": beginning-of-line 14 | "\e[4~": end-of-line 15 | 16 | # Mappings for "page up" and "page down" to 17 | # step to the beginning/end of the history 18 | "\e[5~": beginning-of-history 19 | "\e[6~": end-of-history 20 | 21 | # Allow the use of the Delete/Insert keys 22 | "\e[3~": delete-char 23 | "\e[2~": quoted-insert 24 | 25 | # Mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving, 26 | # Option-left-arrow and Option-right-arrow on macOS 27 | "\e[1;5C": forward-word 28 | "\e[1;5D": backward-word 29 | "\e[5C": forward-word 30 | "\e[5D": backward-word 31 | "\e\e[C": forward-word 32 | "\e\e[D": backward-word 33 | -------------------------------------------------------------------------------- /scripts/MegaRAID_SUM: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to check MegaRaidCLI status and Critical/Failed drives 4 | # 5 | # Example: 6 | # ------------------------------------ 7 | # Extra options: ./MegaRAID_SUM --help 8 | # ------------------------------------ 9 | # 10 | # Device Present 11 | # ================ 12 | # Virtual Drives : 16 13 | # Degraded : 0 14 | # Offline : 2 15 | # Physical Devices : 19 16 | # Disks : 17 17 | # Critical Disks : 0 18 | # Failed Disks : 2 19 | # 20 | # 21 | # Virtual Drive: 0 | RAIDlvl: 1 | #Drives: 2 | State: Optimal | Span Ref: 00 | Size: 
278.0 GB 22 | # Virtual Drive: 1 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 01 | Size: 2.727 TB 23 | # Virtual Drive: 2 | RAIDlvl: 0 | #Drives: 1 | State: Offline | Span Ref: 02 | Size: 2.727 TB 24 | # Virtual Drive: 3 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 03 | Size: 2.727 TB 25 | # Virtual Drive: 4 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 04 | Size: 2.727 TB 26 | # Virtual Drive: 5 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 05 | Size: 2.727 TB 27 | # Virtual Drive: 6 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 06 | Size: 2.727 TB 28 | # Virtual Drive: 7 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 07 | Size: 2.727 TB 29 | # Virtual Drive: 8 | RAIDlvl: 0 | #Drives: 1 | State: Offline | Span Ref: 08 | Size: 3.637 TB 30 | # Virtual Drive: 9 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 09 | Size: 2.727 TB 31 | # Virtual Drive: 10 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 0a | Size: 2.727 TB 32 | # Virtual Drive: 16 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 0b | Size: 2.727 TB 33 | # Virtual Drive: 11 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 0c | Size: 3.637 TB 34 | # Virtual Drive: 12 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 0d | Size: 3.637 TB 35 | # Virtual Drive: 14 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 0e | Size: 3.637 TB 36 | # Virtual Drive: 15 | RAIDlvl: 0 | #Drives: 1 | State: Optimal | Span Ref: 0f | Size: 2.727 TB 37 | # 38 | # Slot: 0 - Drive's position: DiskGroup: 1, Span: 0, Arm: 0 - Online, Spun Up 39 | # Slot: 1 - Drive's position: DiskGroup: 2, Span: 0, Arm: 0 - Failed 40 | # Slot: 2 - Drive's position: DiskGroup: 3, Span: 0, Arm: 0 - Online, Spun Up 41 | # Slot: 3 - Drive's position: DiskGroup: 4, Span: 0, Arm: 0 - Online, Spun Up 42 | # Slot: 4 - Drive's position: DiskGroup: 5, Span: 0, Arm: 0 - Online, Spun Up 43 | # Slot: 5 - Drive's position: DiskGroup: 6, Span: 0, Arm: 0 - Online, Spun Up 44 
| # Slot: 6 - Drive's position: DiskGroup: 7, Span: 0, Arm: 0 - Online, Spun Up 45 | # Slot: 7 - Drive's position: DiskGroup: 8, Span: 0, Arm: 0 - Failed 46 | # Slot: 8 - Drive's position: DiskGroup: 9, Span: 0, Arm: 0 - Online, Spun Up 47 | # Slot: 9 - Drive's position: DiskGroup: 10, Span: 0, Arm: 0 - Online, Spun Up 48 | # Slot: 10 - Drive's position: DiskGroup: 12, Span: 0, Arm: 0 - Online, Spun Up 49 | # Slot: 11 - Drive's position: DiskGroup: 13, Span: 0, Arm: 0 - Online, Spun Up 50 | # Slot: 13 - Drive's position: DiskGroup: 14, Span: 0, Arm: 0 - Online, Spun Up 51 | # Slot: 14 - Drive's position: DiskGroup: 15, Span: 0, Arm: 0 - Online, Spun Up 52 | # Slot: 15 - Drive's position: DiskGroup: 11, Span: 0, Arm: 0 - Online, Spun Up 53 | # Slot: 0 - Drive's position: DiskGroup: 0, Span: 0, Arm: 0 - Online, Spun Up 54 | # Slot: 1 - Drive's position: DiskGroup: 0, Span: 0, Arm: 1 - Online, Spun Up 55 | # 56 | 57 | replace_failed(){ 58 | faild=$(sudo /opt/MegaRAID/MegaCli/MegaCli64 -PDList -aALL | /bin/egrep "Failed" | wc -l) 59 | 60 | if [ "$faild" -ge 1 ]; then 61 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -CfgDsply -aALL > Cfgdsply-text 62 | mapfile -t failed_line < <( cat Cfgdsply-text | /bin/egrep -n "Failed" | cut -d':' -f1 ) 63 | 64 | for (( i = 0; i < ${#failed_line[@]}; i++)); do 65 | sed -n "1,${failed_line[$i]}p" Cfgdsply-text > Cfgdsply-tofailed-text 66 | tac Cfgdsply-tofailed-text > backw-Cfgdsplytext 67 | fadpt=$(/bin/egrep -m 1 "Adapter" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 68 | enclID=$(/bin/egrep -m 1 "Enclosure Device ID" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 69 | slot=$(/bin/egrep -m 1 "Slot Number" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 70 | spanref=$(/bin/egrep -m 1 "Span Reference" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g" | cut -d'x' -f2 | cut -c 2) 71 | row=$(/bin/egrep -m 1 "Physical Disk:" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 72 | 73 | echo "Replace Drive at: adapter: 
# $fadpt - enclID: $enclID - slot: $slot | Span ref: $spanref | Row: $row ??" 74 | echo "(yes) or (no)" 75 | read REPLACE 76 | 77 | if [ "$REPLACE" == "yes" ]; then 78 | echo "Setting disk offline..." 79 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -pdoffline -physdrv[$enclID:$slot] -a$fadpt 80 | echo "Marking disk as missing..." 81 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -pdmarkmissing -physdrv[$enclID:$slot] -a$fadpt 82 | echo "Preparing for removal..." 83 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -pdprprmv -physdrv[$enclID:$slot] -a$fadpt 84 | echo 85 | echo "Replace disk now..." 86 | echo "Done? (yes) or (no)" 87 | read DONE 88 | if [ "$DONE" == "yes" ]; then 89 | echo "Replace missing and start rebuilding..." 90 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -PdReplaceMissing -PhysDrv[$enclID:$slot] -Array$spanref -row$row -a$fadpt 91 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -PDRbld -Start -PhysDrv [$enclID:$slot] -a$fadpt 92 | else 93 | exit 94 | fi 95 | else 96 | continue 97 | fi 98 | done 99 | 100 | # cleanup tmp files 101 | rm -f Cfgdsply-text 102 | rm -f Cfgdsply-tofailed-text 103 | rm -f backw-Cfgdsplytext 104 | 105 | else 106 | echo "No Failed drives" 107 | fi 108 | } 109 | 110 | show_failed(){ 111 | faild=$(sudo /opt/MegaRAID/MegaCli/MegaCli64 -PDList -aALL | /bin/egrep "Failed" | wc -l) 112 | 113 | if [ "$faild" -ge 1 ]; then 114 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -CfgDsply -aALL > Cfgdsply-text 115 | mapfile -t failed_line < <( cat Cfgdsply-text | /bin/egrep -n "Failed" | cut -d':' -f1 ) 116 | 117 | for (( i = 0; i < ${#failed_line[@]}; i++)); do 118 | sed -n "1,${failed_line[$i]}p" Cfgdsply-text > Cfgdsply-tofailed-text 119 | tac Cfgdsply-tofailed-text > backw-Cfgdsplytext 120 | fadpt=$(/bin/egrep -m 1 "Adapter" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 121 | enclID=$(/bin/egrep -m 1 "Enclosure Device ID" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 122 | slot=$(/bin/egrep -m 1 "Slot Number" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g")
123 | spanref=$(/bin/egrep -m 1 "Span Reference" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g" | cut -d'x' -f2 | cut -c 2) 124 | row=$(/bin/egrep -m 1 "Physical Disk:" backw-Cfgdsplytext | cut -d':' -f2 | sed -e "s/ //g") 125 | 126 | echo "Failed at:" 127 | echo " adapter: # $fadpt - enclID: $enclID - slot: $slot | Span ref: $spanref | Row: $row" 128 | echo 129 | 130 | echo "Start blinking LED on drives?(start) or Stop blinking (stop)" 131 | read STARTB 132 | 133 | if [ "$STARTB" == "start" ]; then 134 | echo "sudo /opt/MegaRAID/MegaCli/MegaCli64 -PdLocate -start -physdrv \[$enclID:$slot\] -a$fadpt" 135 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -PdLocate -start -physdrv \[$enclID:$slot\] -a$fadpt 136 | 137 | elif [ "$STARTB" == "stop" ]; then 138 | echo "sudo /opt/MegaRAID/MegaCli/MegaCli64 -PdLocate -stop -physdrv \[$enclID:$slot\] -a$fadpt" 139 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -PdLocate -stop -physdrv \[$enclID:$slot\] -a$fadpt 140 | else 141 | continue 142 | fi 143 | done 144 | 145 | # cleanup tmp files 146 | rm -f Cfgdsply-text 147 | rm -f Cfgdsply-tofailed-text 148 | rm -f backw-Cfgdsplytext 149 | 150 | else 151 | echo "No Failed drives" 152 | fi 153 | } 154 | 155 | arg=$1 156 | 157 | case $arg in 158 | -h|--help) 159 | echo "-h show options" 160 | echo "-a show all MegaRAID info" 161 | echo "-p show physical drives info " 162 | echo "-v show virtual drive & physical info" 163 | echo "-f show failed drive info" 164 | echo "-r replace failed drive" 165 | ;; 166 | -a) 167 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -AdpAllInfo -aAll | less 168 | ;; 169 | -p) 170 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -PDList -aALL | less 171 | ;; 172 | -v) 173 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -CfgDsply -aALL | less 174 | ;; 175 | -f) 176 | show_failed 177 | ;; 178 | -r) 179 | replace_failed 180 | ;; 181 | esac 182 | 183 | if [ $# -eq 0 ]; then 184 | echo "------------------------------------" 185 | echo "Extra options: ${0} --help" 186 | echo
"------------------------------------" 187 | echo 188 | 189 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -AdpAllInfo -aAll > Adpallinfo-text 190 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -CfgDsply -aALL > Cfgdsply-text 191 | sudo /opt/MegaRAID/MegaCli/MegaCli64 -PDList -aALL > Pdlist-text 192 | 193 | degrade=$(cat Adpallinfo-text | /bin/egrep 'Degrade' | awk '/[0-9]/ {print $3}') 194 | offline=$(cat Adpallinfo-text | /bin/egrep '[[:space:]][[:space:]]Offline' | awk '/[0-9]/ {print $3}') 195 | critical=$(cat Adpallinfo-text | /bin/egrep 'Critical' | awk '/[0-9]/ {print $4}') 196 | failed=$(cat Adpallinfo-text | /bin/egrep '[[:space:]][[:space:]]Failed' | awk '/[0-9]/ {print $4}') 197 | 198 | mapfile -t VIRTdrives < <(cat Cfgdsply-text | /bin/egrep 'Virtual Drive:'| cut -d'(' -f1) 199 | mapfile -t RAIDlvl < <(cat Cfgdsply-text | /bin/egrep 'RAID Level' | awk ' {print $4}'| cut -d'-' -f2 | cut -c1) 200 | mapfile -t drivenum < <(cat Cfgdsply-text | /bin/egrep 'Drives' | cut -d':' -f2) 201 | mapfile -t RAIDstate < <(cat Cfgdsply-text | /bin/egrep 'State\s\s\s\s\s\s\s\s\s\s\s\s\s\s\s' | cut -d':' -f2) 202 | mapfile -t RAIDsize < <(cat Cfgdsply-text | /bin/egrep 'Size\s\s\s\s\s\s\s\s\s\s\s\s\s\s\s\s:'| cut -d':' -f2) 203 | mapfile -t SPANref < <(cat Cfgdsply-text | /bin/egrep 'Span Reference' | cut -d':' -f2 | cut -d'x' -f2) 204 | 205 | cat Adpallinfo-text | /bin/egrep -A 9 'Device Present' 206 | echo 207 | 208 | for (( i = 0; i < ${#VIRTdrives[@]}; i++)); do 209 | echo "${VIRTdrives[$i]} | RAIDlvl: ${RAIDlvl[$i]} | #Drives:${drivenum[$i]} | State:${RAIDstate[$i]} | Span Ref: ${SPANref[$i]} | Size:${RAIDsize[$i]}" 210 | done 211 | 212 | mapfile -t slotnum < <(cat Pdlist-text | /bin/grep 'Slot Number'| cut -d':' -f2 | cut -c2-3) 213 | mapfile -t firmstate < <(cat Pdlist-text | /bin/grep "Firmware state" | cut -d':' -f2) 214 | mapfile -t position < <(cat Pdlist-text | /bin/grep "Drive's position" | cut -f2,4) 215 | echo 216 | for (( i = 0; i < ${#slotnum[@]}; i++)); do 217 | echo 
"Slot: ${slotnum[$i]} - ${position[$i]} - ${firmstate[$i]}" 218 | done 219 | 220 | # cleanup tmp files 221 | rm -f Adpallinfo-text 222 | rm -f Cfgdsply-text 223 | rm -f Pdlist-text 224 | fi 225 | -------------------------------------------------------------------------------- /scripts/block_attack_ips.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | logfiles=( 4 | /blog/logs/foo_access.log 5 | /blog/logs/bar_access.log 6 | ) 7 | 8 | whitelist=$(last | awk '{print $3}' | grep ^[1-9] | sort | uniq | xargs) 9 | whitelist+=" 127.0.0.1 10.1.2.3 1.2.3.4" 10 | 11 | function check_root(){ 12 | if [ $EUID -ne 0 ]; then 13 | echo "This script must be run as root" 14 | exit 1 15 | fi 16 | } 17 | 18 | function block_ips(){ 19 | blacklist=$@ 20 | if [ ! -z "${blacklist}" ]; then 21 | for ip in ${blacklist} 22 | do 23 | if ! $(echo ${whitelist} | grep -wq ${ip}); then 24 | if ! $(/sbin/iptables-save | grep -wq ${ip}); then 25 | echo "Blocked ${ip}" 26 | /sbin/iptables -I INPUT -s ${ip}/32 -p tcp -m tcp --dport 80 -j DROP 27 | fi 28 | fi 29 | done 30 | fi 31 | } 32 | 33 | function check_post(){ 34 | page=$1 35 | tailnum=$2 36 | retry=$3 37 | 38 | command="grep -w POST ${logfile} |tail -n ${tailnum} |grep -w ${page} |awk '{print \$1}' |sort |uniq -c |awk '(\$1 > ${retry}){print \$2}'" 39 | blacklist=$(eval ${command}) 40 | block_ips ${blacklist} 41 | } 42 | 43 | function check_all(){ 44 | tailnum=$1 45 | retry=$2 46 | 47 | command="tail -n ${tailnum} ${logfile} |awk '{print \$1}' |sort |uniq -c |awk '(\$1 > ${retry}){print \$2}'" 48 | blacklist=$(eval ${command}) 49 | block_ips ${blacklist} 50 | } 51 | 52 | check_root 53 | for logfile in ${logfiles[@]} 54 | do 55 | check_post wp-login.php 10000 50 56 | check_post wp-comments-post.php 10000 50 57 | check_all 10000 500 58 | done 59 | -------------------------------------------------------------------------------- /scripts/change_password.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/expect 2 | # ./change_password.sh host user oldpass newpass 3 | 4 | set timeout 5 5 | set host [lindex $argv 0] 6 | set user [lindex $argv 1] 7 | set oldpass [lindex $argv 2] 8 | set newpass [lindex $argv 3] 9 | 10 | spawn ssh -tq ${user}@${host} 11 | expect "ssword:" 12 | send "${oldpass}\r" 13 | expect "${user}@" 14 | send "passwd\r" 15 | expect "current" 16 | send "${oldpass}\r" 17 | expect { 18 | "New password:" { 19 | send "${newpass}\r" 20 | expect { 21 | "BAD PASSWORD" { 22 | exit 1 } 23 | "Password unchanged" { 24 | exit 1 } 25 | "Authentication token manipulation error" { 26 | exit 1 } 27 | "Retype new password:" { 28 | send "${newpass}\r" 29 | expect "${user}@" 30 | send "exit\r" 31 | exit 0 } 32 | eof { 33 | exit 0 } 34 | } 35 | } 36 | "You must wait longer to change" { 37 | exit 1 } 38 | } 39 | -------------------------------------------------------------------------------- /scripts/check_appconn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check App Role Related Connections 4 | # By Dong Guo 2016/08/10 5 | 6 | declare -A abbr_dict=( 7 | ["cb"]="Couchbase" 8 | ["mc"]="Memcache" 9 | ["db"]="MySQLDB" 10 | ["zk"]="Zookeeper" 11 | ["kb"]="KafkaBroker" 12 | ) 13 | 14 | function print_help(){ 15 | echo "Usage:" 16 | echo " ${0} -r ads -t cb|mc|db|zk|kb [-c /path/to/conf]" 17 | echo " ${0} -r api -t cb|mc|db|kb [-c /path/to/conf]" 18 | echo "" 19 | echo "Examples:" 20 | echo " ${0} -r ads -t zk" 21 | echo " ${0} -r ads -t kb" 22 | echo " ${0} -r api -t kb -c /path/to/kafkabroker.conf" 23 | echo "" 24 | echo "Abbreviations:" 25 | for key in "${!abbr_dict[@]}"; do 26 | raw_output="${raw_output}${key}:\'${abbr_dict["${key}"]}\' " 27 | done 28 | echo ${raw_output} | xargs -n 4 echo " " 29 | exit 1 30 | } 31 | 32 | function check_port(){ 33 | host=$1 34 | port=$2 35 | conn_time=3 36 | 37 | /usr/bin/nc -w 
${conn_time} -z ${host} ${port} 38 | if [[ $? -ne 0 ]]; then 39 | echo "${host}:${port}" 40 | fi 41 | } 42 | 43 | function check_mysql(){ 44 | host=$1 45 | port=$2 46 | 47 | /usr/bin/mysqladmin -uuser -ppass -h${host} -P${port} ping | grep 'is alive' 48 | if [[ $? -ne 0 ]]; then 49 | echo "${host}:${port}" 50 | fi 51 | } 52 | 53 | function check_conn(){ 54 | if ! $(echo ${allowed_conn} |grep -wq ${conn}); then 55 | echo "ERROR: The '${conn}' is not in the allowed_conn:'${allowed_conn}' for role:'${role}'" 56 | exit 1 57 | fi 58 | 59 | case "${conn}" in 60 | "mc") 61 | if [[ -z "${conf}" ]]; then 62 | conf="/path/to/memcache.conf" 63 | fi 64 | conn_list=$(grep -E '.*.memcache.hosts=' ${conf} |grep -v '^#' |awk -F '=' '{print $2}' |sed 's/,/ /g' |xargs -n 1 |sort |uniq) 65 | ;; 66 | "cb") 67 | if [[ -z "${conf}" ]]; then 68 | conf="/path/to/couchbase.conf" 69 | fi 70 | conn_list=$(grep 'couchbase.hosts=' ${conf} |grep -v '^#' |awk -F '=' '{print $2}' |sed 's/,/ /g' |xargs -n 1 |sort |uniq) 71 | ;; 72 | "db") 73 | if [[ -z "${conf}" ]]; then 74 | conf="/path/to/mysqldb.conf" 75 | fi 76 | conn_list=$(grep -E 'db.*.url' ${conf} |grep -v '^#' |awk -F 'jdbc:mysql://' '{print $2}' |cut -d/ -f1) 77 | ;; 78 | "zk") 79 | if [[ -z "${conf}" ]]; then 80 | conf="/path/to/zookeeper.conf" 81 | fi 82 | conn_list=$(grep 'zookeeper.connect=' ${conf} |grep -v '^#' |awk -F '=' '{print $2}' |cut -d/ -f1 |sed 's/,/ /g') 83 | ;; 84 | "kb") 85 | if [[ -z "${conf}" ]]; then 86 | conf="/path/to/kafkabroker.conf" 87 | fi 88 | conn_list=$(grep 'metadata.broker.list=' ${conf} |grep -v '^#' |awk -F '=' '{print $2}' |sed 's/,/ /g') 89 | ;; 90 | esac 91 | 92 | for conn_item in ${conn_list}; do 93 | host=$(echo ${conn_item} |cut -d: -f1) 94 | port=$(echo ${conn_item} |cut -d: -f2) 95 | raw_cmdout=$(check_port ${host} ${port}) 96 | if ! 
$(echo ${raw_cmdout} |grep -wq succeeded); then 97 | if [[ -z "${err_cmdout}" ]]; then 98 | err_cmdout="${raw_cmdout}" 99 | else 100 | err_cmdout="${err_cmdout},${raw_cmdout}" 101 | fi 102 | fi 103 | done 104 | 105 | if [[ -z ${err_cmdout} ]]; then 106 | if $(echo "${conn}" |grep -Ewq 'db'); then 107 | for conn_item in ${conn_list}; do 108 | host=$(echo ${conn_item} |cut -d: -f1) 109 | port=$(echo ${conn_item} |cut -d: -f2) 110 | sql_cmdout=$(check_mysql ${host} ${port} |xargs) 111 | if ! $(echo ${sql_cmdout} |grep -q "is alive"); then 112 | if [[ -z "${err_cmdout}" ]]; then 113 | err_cmdout="${sql_cmdout}" 114 | else 115 | err_cmdout="${err_cmdout},${sql_cmdout}" 116 | fi 117 | fi 118 | done 119 | fi 120 | fi 121 | 122 | if [[ ! -z ${err_cmdout} ]]; then 123 | echo "CRIT. Failed to connect to ${abbr_dict["${conn}"]}:'${err_cmdout}'" 124 | exit 2 125 | else 126 | echo "OK. No failed ${abbr_dict["${conn}"]} connection" 127 | exit 0 128 | fi 129 | } 130 | 131 | while getopts "r:t:c:" opts; do 132 | case "${opts}" in 133 | "r") 134 | role=${OPTARG} 135 | ;; 136 | "t") 137 | conn=${OPTARG} 138 | ;; 139 | "c") 140 | conf=${OPTARG} 141 | ;; 142 | *) 143 | print_help 144 | ;; 145 | esac 146 | done 147 | 148 | if [[ -z "${role}" ]] || [[ -z "${conn}" ]]; then 149 | print_help 150 | else 151 | case "${role}" in 152 | "ads") 153 | allowed_conn="cb|mc|db|zk|kb" 154 | ;; 155 | "api") 156 | allowed_conn="cb|mc|db|kb" 157 | ;; 158 | *) 159 | print_help 160 | ;; 161 | esac 162 | 163 | check_conn 164 | fi 165 | -------------------------------------------------------------------------------- /scripts/check_disks_sum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script checks all alerts for check_mk-df on Check_MK, 4 | # then summarises the output.
5 | 6 | function get_info(){ 7 | retval=$1 8 | echo "GET services 9 | Columns: host_display_name state description 10 | Separators: 10 32 44 124 11 | Filter: check_command = check_mk-df 12 | Filter: active_checks_enabled = 0 13 | Filter: scheduled_downtime_depth = 0 14 | Filter: host_scheduled_downtime_depth = 0 15 | Filter: acknowledged = 0 16 | Filter: state = ${retval}" |unixcat /var/spool/icinga/cmd/live\ 17 | |awk '{print $1}' |sort |uniq -c\ 18 | |awk '{print " "$1 " disk(s) on " $2}'|tr '\n' ';' 19 | } 20 | 21 | warn_msg="$(get_info 1)" 22 | crit_msg="$(get_info 2)" 23 | unkn_msg="$(get_info 3)" 24 | 25 | [[ "$warn_msg" ]] && { retval=1; echo -n "WARN.$warn_msg "; } 26 | [[ "$crit_msg" ]] && { retval=2; echo -n "CRIT.$crit_msg "; } 27 | [[ "$unkn_msg" ]] && { retval=2; echo -n "UNKN.$unkn_msg "; } 28 | [[ ${retval} > 0 ]] && exit ${retval} 29 | 30 | echo "OK. No failed disk found"; exit 0 31 | -------------------------------------------------------------------------------- /scripts/check_dyn_qps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Get QPS report from Dyn via REST API 4 | # 5 | # Author: Dong Guo 6 | # Last Modified: 2013/09/18 by Dong Guo 7 | 8 | STATE_OK=0 9 | STATE_WARNING=1 10 | STATE_CRITICAL=2 11 | STATE_UNKNOWN=3 12 | STATE_DEPENDENT=4 13 | 14 | # Threshold Numbers 15 | warn=$2 16 | crit=$3 17 | 18 | customer_name="YOUR-CUSTOMER-NAME" 19 | user_name="YOUR-USER-NAME" 20 | password="YOUR-PASSWORD" 21 | 22 | domain="YOUR.DOMAIN.COM" 23 | format="Content-Type: application/yaml" 24 | api_url="https://api.dynect.net" 25 | session_api="https://api.dynect.net/REST/Session/" 26 | report_api="https://api.dynect.net/REST/QPSReport/" 27 | 28 | end_ts=$(date +%s) 29 | 30 | if [ $# != 3 ]; then 31 | echo $"Usage: $0 {daily|weekly|monthly} {warn} {crit}" 32 | exit 0 33 | fi 34 | 35 | case "$1" in 36 | daily) 37 | start_ts=$(($end_ts-86400)) 38 | ;; 39 | weekly) 40 | 
start_ts=$(($end_ts-7*86400)) 41 | ;; 42 | monthly) 43 | start_ts=$(($end_ts-30*86400)) 44 | ;; 45 | *) 46 | echo $"Usage: $0 {daily|weekly|monthly} {warn} {crit}" 47 | exit 0 48 | ;; 49 | esac 50 | 51 | tmpfile=/tmp/qps_report_from_dyn_tmp.txt 52 | > $tmpfile 53 | 54 | token=$(curl -s -H "$format" -X POST $session_api -d "{customer_name: $customer_name,user_name: $user_name,password: $password}" | grep token | awk -F ",|:" '{print $3}') 55 | curl -s -H "$format" -H "Auth-Token: $token" -X POST $report_api -d "{start_ts: $start_ts,end_ts: $end_ts,breakdown: zones}" > $tmpfile.raw 56 | 57 | grep -q '/REST/Job/' $tmpfile.raw 58 | if [ $? -eq 0 ]; then 59 | get_url=$(cat $tmpfile.raw) 60 | sleep 2 61 | curl -s -H "$format" -H "Auth-Token: $token" -X GET $api_url$get_url > $tmpfile.raw 62 | fi 63 | 64 | for item in $(grep $domain $tmpfile.raw) 65 | do 66 | value=$(echo $item | cut -d "," -f 3 | cut -d "'" -f 1) 67 | qps=$(($value/300)) 68 | echo $qps >> $tmpfile 69 | done 70 | 71 | countall=$(wc -l $tmpfile | awk '{print $1}') 72 | tailnum=$(($countall*95/100)) 73 | sort -rn $tmpfile | tail -n $tailnum | grep -vw 0 > ${tmpfile}.tailnum 74 | 75 | count=$(wc -l ${tmpfile}.tailnum | awk '{print $1}') 76 | sum=$(awk '{i+=$1}END{print i}' ${tmpfile}.tailnum) 77 | max_qps=$(head -n 1 ${tmpfile}.tailnum) 78 | min_qps=$(tail -n 1 ${tmpfile}.tailnum) 79 | avg_qps=$(($sum/$count)) 80 | perfdata="max_qps=$max_qps;$warn;$crit min_qps=$min_qps avg_qps=$avg_qps" 81 | 82 | if [ "$max_qps" -gt "$crit" ]; then 83 | echo -n "CRIT. max_qps $max_qps is greater than $crit | $perfdata" 84 | exit $STATE_CRITICAL 85 | elif [ "$max_qps" -gt "$warn" ]; then 86 | echo -n "WARN. max_qps $max_qps is greater than $warn | $perfdata" 87 | exit $STATE_WARNING 88 | else 89 | echo -n "OK. 
max_qps is $max_qps | $perfdata" 90 | exit $STATE_OK 91 | fi 92 | -------------------------------------------------------------------------------- /scripts/check_failed_mountpoints.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Check failed mountpoints by writing temporary empty files 4 | # 5 | 6 | excluded_mountpoints_file=/var/tmp/excluded_mountpoints.txt 7 | if [[ ! -f ${excluded_mountpoints_file} ]]; then 8 | touch ${excluded_mountpoints_file} 9 | fi 10 | 11 | all_mountpoints=$(/usr/bin/timeout 2 /bin/mount | grep -E '^/dev|:/' | awk '{print $2}' | sort | xargs) 12 | for mountpoint_item in ${all_mountpoints}; do 13 | if ! $(grep -Eq ^${mountpoint_item}\$ ${excluded_mountpoints_file}); then 14 | touch ${mountpoint_item}/mountpoint.check.tmp 2>/dev/null 15 | if [[ $? -ne 0 ]]; then 16 | if [[ -z "${failed_mountpoints}" ]]; then 17 | failed_mountpoints="${mountpoint_item}" 18 | else 19 | failed_mountpoints="${failed_mountpoints},${mountpoint_item}" 20 | fi 21 | else 22 | rm ${mountpoint_item}/mountpoint.check.tmp 23 | fi 24 | fi 25 | done 26 | 27 | if [[ ! -z "${failed_mountpoints}" ]]; then 28 | echo "CRIT. Found Failed Mountpoints: ${failed_mountpoints}" 29 | exit 2 30 | else 31 | excluded_mountpoints=$(cat ${excluded_mountpoints_file} | xargs | sed s/' '/,/g) 32 | if [[ ! -z "${excluded_mountpoints}" ]]; then 33 | echo "OK. Excluded mountpoints: ${excluded_mountpoints} in ${excluded_mountpoints_file}" 34 | else 35 | echo "OK" 36 | fi 37 | exit 0 38 | fi 39 | -------------------------------------------------------------------------------- /scripts/check_ipmi_sensor_summary: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Check All IPMI Sensors 4 | # 5 | 6 | if $(grep -Eq 'flags.*hypervisor' /proc/cpuinfo); then 7 | echo "OK. 
This is a virtual machine" 8 | exit 0 9 | fi 10 | 11 | if [[ $EUID -ne 0 ]]; then 12 | echo "This script must be run as root" 1>&2 13 | exit 1 14 | fi 15 | 16 | ipmitool_bin=$(which ipmitool 2> /dev/null) 17 | if [[ $? -ne 0 ]]; then 18 | echo "ERROR: No such ipmitool command, try 'yum install ipmitool'." 19 | exit 2 20 | fi 21 | 22 | ipmi_sensor_list=$(${ipmitool_bin} sensor list | grep -v 'command failed' | awk -F '|' '{print $1"|"$2"|"$3"|"$4"|"}' | sed -e 's/ *| */|/g' -e 's/ /_/g' | grep -Evw 'ns|nc|na|discrete') 23 | ipmi_sensor_count=$(echo ${ipmi_sensor_list} | xargs -n 1 | wc -l) 24 | for ipmi_sensor_item in ${ipmi_sensor_list}; do 25 | ipmi_sensor_name=$(echo ${ipmi_sensor_item} | cut -d'|' -f1) 26 | ipmi_sensor_value=$(echo ${ipmi_sensor_item} | cut -d'|' -f2) 27 | ipmi_sensor_unit=$(echo ${ipmi_sensor_item} | cut -d'|' -f3) 28 | ipmi_sensor_status=$(echo ${ipmi_sensor_item} | cut -d'|' -f4) 29 | if [[ ${ipmi_sensor_status} != 'ok' ]]; then 30 | if [[ -z "${crit_msg}" ]]; then 31 | crit_msg="${ipmi_sensor_name} is ${ipmi_sensor_value} ${ipmi_sensor_unit}" 32 | else 33 | crit_msg="${crit_msg}, ${ipmi_sensor_name} is ${ipmi_sensor_value} ${ipmi_sensor_unit}" 34 | fi 35 | fi 36 | done 37 | 38 | if [[ -z "${crit_msg}" ]]; then 39 | echo "OK. All ${ipmi_sensor_count} Sensors are OK" 40 | exit 0 41 | else 42 | echo "CRIT. 
${crit_msg}" 43 | exit 2 44 | fi 45 | -------------------------------------------------------------------------------- /scripts/check_megaraid_status: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to check MegaRaidCLI Failed drives 4 | # Works on servers with ONE RAID controller 5 | # 6 | # Example: 7 | # CRIT - Virtual Drives: {Degraded: 0, Offline: 2}, Physical Disks: {Critical: 0, Failed: 2}, 8 | # Bad Drives: [{adapter: 0, enclID: 2, slot: 7, Span ref: 8, Row: 0}, {adapter: 0, enclID: 2, slot: 1, Span ref: 2, Row: 0}] 9 | 10 | if [[ -x /opt/MegaRAID/MegaCli/MegaCli64 ]]; then 11 | megaraid_bin="sudo /opt/MegaRAID/MegaCli/MegaCli64" 12 | elif [[ -x /opt/MegaRAID/MegaCli/MegaCli ]]; then 13 | megaraid_bin="sudo /opt/MegaRAID/MegaCli/MegaCli" 14 | else 15 | echo "ERROR. No such MegaCli command" 16 | exit 1 17 | fi 18 | 19 | anyissue=$(${megaraid_bin} -AdpAllInfo -aAll | grep -E 'Degrade|[[:space:]][[:space:]]Failed|[[:space:]][[:space:]]Offline' | awk '/[1-9]/ {print $0}' | wc -l) 20 | 21 | degrade=$(${megaraid_bin} -AdpAllInfo -aAll | grep -E 'Degrade' | awk '/[0-9]/ {print $3}') 22 | critical=$(${megaraid_bin} -AdpAllInfo -aAll | grep -E 'Critical' | awk '/[0-9]/ {print $4}') 23 | offline=$(${megaraid_bin} -AdpAllInfo -aAll | grep -E '[[:space:]][[:space:]]Offline' | awk '/[0-9]/ {print $3}') 24 | failed=$(${megaraid_bin} -AdpAllInfo -aAll | grep -E '[[:space:]][[:space:]]Failed' | awk '/[0-9]/ {print $4}') 25 | 26 | if [[ ${anyissue} -ge 1 ]]; then 27 | ${megaraid_bin} -CfgDsply -aALL > /tmp/Cfgdsply.txt 28 | failed_lines=$(grep -n "Failed" /tmp/Cfgdsply.txt | cut -d':' -f1) 29 | 30 | for failed_line in ${failed_lines}; do 31 | sed -n "1,${failed_line}p" /tmp/Cfgdsply.txt > /tmp/Cfgdsply_tofailed.txt 32 | tac /tmp/Cfgdsply_tofailed.txt > /tmp/backw_Cfgdsply.txt 33 | 34 | fadpt=$(grep -m 1 "Adapter" /tmp/backw_Cfgdsply.txt | cut -d':' -f2 | sed -e "s/ //g") 35 | enclID=$(grep -m 1 "Enclosure 
Device ID" /tmp/backw_Cfgdsply.txt | cut -d':' -f2 | sed -e "s/ //g") 36 | slot=$(grep -m 1 "Slot Number" /tmp/backw_Cfgdsply.txt | cut -d':' -f2 | sed -e "s/ //g") 37 | spanref=$(grep -m 1 "Span Reference" /tmp/backw_Cfgdsply.txt | cut -d':' -f2 | sed -e "s/ //g" | cut -d'x' -f2 | cut -c 2) 38 | row=$(grep -m 1 "Physical Disk:" /tmp/backw_Cfgdsply.txt | cut -d':' -f2 | sed -e "s/ //g") 39 | 40 | if [[ -z "${bad_drives_info}" ]]; then 41 | bad_drives_info="{adapter: "${fadpt}", enclID: "${enclID}", slot: "${slot}", Span ref: "${spanref}", Row: "${row}"}" 42 | else 43 | bad_drives_info="{adapter: "${fadpt}", enclID: "${enclID}", slot: "${slot}", Span ref: "${spanref}", Row: "${row}"}, ${bad_drives_info}" 44 | fi 45 | done 46 | 47 | echo "CRIT. Virtual Drives: {Degraded: "${degrade}", Offline: "${offline}"}, Physical Disks: {Critical: "${critical}", Failed: "${failed}"}, Bad Drives: [${bad_drives_info}]" 48 | 49 | # clean up temp files 50 | rm -f /tmp/Cfgdsply.txt 51 | rm -f /tmp/Cfgdsply_tofailed.txt 52 | rm -f /tmp/backw_Cfgdsply.txt 53 | rm -f MegaSAS.log* 54 | rm -f CmdTool.log* 55 | 56 | exit 2 57 | else 58 | if [[ -z "${degrade}" ]] || [[ -z "${critical}" ]] || [[ -z "${offline}" ]] || [[ -z "${failed}" ]]; then 59 | echo "OK. No disk issue" 60 | else 61 | echo "OK. No disk issue. 
Virtual Drives: { Degraded: "${degrade}", Offline: "${offline}" }, Physical Disks: {Failed: "${failed}"}" 62 | fi 63 | 64 | # clean up temp files 65 | rm -f MegaSAS.log* 66 | rm -f CmdTool.log* 67 | 68 | exit 0 69 | fi 70 | -------------------------------------------------------------------------------- /scripts/check_n_rerun_process.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Nagios style check script for Process Check and Rerun 4 | # By Dong Guo 5 | # 6 | 7 | function print_help(){ 8 | echo "Usage: ${0} -p 'ps_string_for_Program' -n proc_num -r 'rerun_cmd' [-m max_rerun]" 9 | echo "Examples:" 10 | echo "${0} -p 'kafka.Kafka' -n 1 -r 'service kafka-server start'" 11 | echo "${0} -p 'kafka.Kafka' -n 1 -r 'service kafka-server start' -m 3" 12 | exit 1 13 | } 14 | 15 | function check_n_rerun(){ 16 | run_proc_num=$(ps aux | grep -w "${proc_grep}" | grep -Ewv "nohup|grep|${base_name}" | wc -l | xargs) 17 | if [[ ${run_proc_num} -eq ${proc_num} ]];then 18 | echo "OK" 19 | echo 0 > ${rerun_count_sign} 20 | exit 0 21 | elif [[ ${run_proc_num} -gt ${proc_num} ]];then 22 | echo "UNKN. ${run_proc_num} running '${proc_grep}', expected: ${proc_num}" 23 | exit 3 24 | elif [[ ${run_proc_num} -lt ${proc_num} ]];then 25 | rerun_count=$(cat ${rerun_count_sign}) 26 | if [[ ${rerun_count} -lt ${max_rerun} ]];then 27 | sudo ${rerun_cmd} 28 | rerun_count=$((${rerun_count}+1)) 29 | echo ${rerun_count} > ${rerun_count_sign} 30 | echo "WARN. ${run_proc_num} running '${proc_grep}', expected: ${proc_num}, rerun: ${rerun_count}/${max_rerun}" 31 | exit 1 32 | else 33 | echo "CRIT. 
${run_proc_num} running '${proc_grep}', expected: ${proc_num}, rerun: ${rerun_count}/${max_rerun}" 34 | exit 2 35 | fi 36 | fi 37 | } 38 | 39 | while getopts "p:n:r:m:" opts; do 40 | case "$opts" in 41 | "p") 42 | proc_grep=$OPTARG 43 | ;; 44 | "n") 45 | proc_num=$OPTARG 46 | ;; 47 | "r") 48 | rerun_cmd=$OPTARG 49 | ;; 50 | "m") 51 | max_rerun=$OPTARG 52 | ;; 53 | *) 54 | print_help 55 | ;; 56 | esac 57 | done 58 | 59 | if [[ -z "$proc_grep" ]] || [[ -z "$proc_num" ]] || [[ -z "$rerun_cmd" ]]; then 60 | print_help 61 | else 62 | base_name=$(basename ${0}) 63 | proc_grep_formatted=$(echo "${proc_grep}" | sed 's/\//_/g' | sed 's/ /_/g') 64 | rerun_count_sign=/var/tmp/${proc_grep_formatted}.max_rerun.count 65 | if [[ ! -f ${rerun_count_sign} ]];then 66 | echo 0 > ${rerun_count_sign} 67 | fi 68 | if [[ -z "$max_rerun" ]]; then 69 | max_rerun=3 70 | fi 71 | check_n_rerun 72 | fi 73 | -------------------------------------------------------------------------------- /scripts/clean_opensearch_indices.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | opensearch_url="https://opensearch.heylinux.com" 4 | user_pass="username:password" 5 | 6 | indices_items=$(curl -s -XGET -k -u ${user_pass} "${opensearch_url}/_cat/indices?pretty=true" | awk '{print $3}' | grep -E '^[a-z]|^[A-Z]' | xargs) 7 | clean_date=$(date --date="30 days ago" +%Y%m%d) 8 | 9 | for index_item in ${indices_items}; do 10 | index_item_date=$(echo ${index_item} | awk -F '-' '{print $NF}' | sed s/[.]//g | grep -Ev '[a-z]|[A-Z]') 11 | if [ ! 
-z "${index_item_date}" ] && [ ${index_item_date} -lt ${clean_date} ]; then 12 | echo "Deleting index: ${index_item}" 13 | curl -s -XDELETE -k -u ${user_pass} "${opensearch_url}/${index_item}" 14 | fi 15 | done 16 | -------------------------------------------------------------------------------- /scripts/create_iam_console_policy/create_iam_console_policy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | declare -A group_policy_dict=( 4 | ["HEY_AWS_DataSteward"]="AWSGlueConsoleFullAccess AwsGlueDataBrewFullAccessPolicy CloudWatchFullAccess" 5 | ) 6 | 7 | secure_policies="HEY_AWS_MFAPolicy HEY_AWS_SourcePolicy" 8 | 9 | aws_account_id=857857857857 10 | aws_cmd="/usr/local/bin/aws" 11 | srv_path=heysrv 12 | srv_policies="$(${aws_cmd} iam list-policies --path /${srv_path}/ | grep -w PolicyName | xargs)" 13 | 14 | for secure_policy_name in ${secure_policies}; do 15 | if $(echo ${srv_policies} | grep -wq ${secure_policy_name}); then 16 | echo "Found Policy: /${srv_path}/${secure_policy_name}" 17 | else 18 | ${aws_cmd} iam create-policy --path /${srv_path}/ --policy-name ${secure_policy_name} --policy-document file://policy_data/${secure_policy_name}.json 19 | fi 20 | done 21 | 22 | for key in "${!group_policy_dict[@]}"; do 23 | group_name=${key} 24 | group_policies="${group_policy_dict["${key}"]}" 25 | 26 | echo "" 27 | echo "INFO: Checking group: ${key}..." 
28 | 29 | if $(${aws_cmd} iam get-group --group-name ${group_name} 1>/dev/null 2>/dev/null); then 30 | echo "Found Group: ${group_name}" 31 | else 32 | echo "ERROR: No such group: ${group_name}" 33 | exit 1 34 | fi 35 | 36 | attached_group_policies=$(${aws_cmd} iam list-attached-group-policies --group-name ${group_name} | grep -w PolicyName | xargs) 37 | 38 | for secure_policy_name in ${secure_policies}; do 39 | if $(echo ${attached_group_policies} | grep -wq ${secure_policy_name}); then 40 | echo "Found Policy: /${srv_path}/${secure_policy_name} in Group: ${group_name}" 41 | else 42 | echo "Attaching Policy: /${srv_path}/${secure_policy_name} to Group: ${group_name}..." 43 | ${aws_cmd} iam attach-group-policy --policy-arn arn:aws:iam::${aws_account_id}:policy/${srv_path}/${secure_policy_name} --group-name ${group_name} 44 | fi 45 | done 46 | 47 | for policy_name in ${group_policies}; do 48 | if $(echo ${attached_group_policies} | grep -wq ${policy_name}); then 49 | echo "Found Policy: ${policy_name} in Group: ${group_name}" 50 | else 51 | echo "Attaching Policy: ${policy_name} to Group: ${group_name}..." 
52 | ${aws_cmd} iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/${policy_name} --group-name ${group_name} 53 | fi 54 | done 55 | done 56 | -------------------------------------------------------------------------------- /scripts/create_iam_console_policy/policy_data/HEY_AWS_MFAPolicy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Id": "AllowUsersMFASelfService", 4 | "Statement": [ 5 | { 6 | "Sid": "AllowAllUsersToListAccounts", 7 | "Effect": "Allow", 8 | "Action": [ 9 | "iam:ListAccountAliases", 10 | "iam:ListUsers" 11 | ], 12 | "Resource": [ 13 | "arn:aws:iam::857857857857:user/*" 14 | ] 15 | }, 16 | { 17 | "Sid": "AllowIndividualUserToSeeTheirAccountInformation", 18 | "Effect": "Allow", 19 | "Action": [ 20 | "iam:GetAccountPasswordPolicy", 21 | "iam:GetAccountSummary", 22 | "iam:GetLoginProfile" 23 | ], 24 | "Resource": [ 25 | "arn:aws:iam::857857857857:user/${aws:username}", 26 | "arn:aws:iam::857857857857:user/heysrv/${aws:username}" 27 | ] 28 | }, 29 | { 30 | "Sid": "AllowIndividualUserToListTheirMFA", 31 | "Effect": "Allow", 32 | "Action": [ 33 | "iam:ListVirtualMFADevices", 34 | "iam:ListMFADevices" 35 | ], 36 | "Resource": [ 37 | "arn:aws:iam::857857857857:mfa/*", 38 | "arn:aws:iam::857857857857:user/${aws:username}", 39 | "arn:aws:iam::857857857857:user/heysrv/${aws:username}" 40 | ] 41 | }, 42 | { 43 | "Sid": "AllowIndividualUserToManageThierMFA", 44 | "Effect": "Allow", 45 | "Action": [ 46 | "iam:CreateVirtualMFADevice", 47 | "iam:DeactivateMFADevice", 48 | "iam:DeleteVirtualMFADevice", 49 | "iam:EnableMFADevice", 50 | "iam:ResyncMFADevice" 51 | ], 52 | "Resource": [ 53 | "arn:aws:iam::857857857857:mfa/${aws:username}", 54 | "arn:aws:iam::857857857857:user/${aws:username}", 55 | "arn:aws:iam::857857857857:user/heysrv/${aws:username}" 56 | ] 57 | }, 58 | { 59 | "Sid": "DoNotAllowAnythingOtherThanAboveUnlessMFAd", 60 | "Effect": "Deny", 61 | "NotAction": "iam:*", 
62 | "Resource": "*", 63 | "Condition": { 64 | "Bool": { 65 | "aws:MultiFactorAuthPresent": "false", 66 | "aws:ViaAWSService": "false" 67 | }, 68 | "StringNotEqualsIfExists": { 69 | "aws:SourceVpc": [ 70 | "vpc-857abc857abc875aa", 71 | "vpc-857cba857cba875bb" 72 | ] 73 | } 74 | } 75 | } 76 | ] 77 | } 78 | -------------------------------------------------------------------------------- /scripts/create_iam_console_policy/policy_data/HEY_AWS_SourcePolicy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Id": "RestrictVPCsAndSourceIPs", 4 | "Statement": [ 5 | { 6 | "Sid": "SourceIpVpcWhitelist", 7 | "Effect": "Deny", 8 | "Action": "*", 9 | "Resource": "*", 10 | "Condition": { 11 | "Bool": { 12 | "aws:ViaAWSService": "false" 13 | }, 14 | "NotIpAddressIfExists": { 15 | "aws:SourceIp": [ 16 | "8.5.7.11/32", 17 | "8.5.7.22/32", 18 | "8.5.7.33/32" 19 | ] 20 | }, 21 | "StringNotEqualsIfExists": { 22 | "aws:SourceVpc": [ 23 | "vpc-857abc857abc875aa", 24 | "vpc-857cba857cba875bb" 25 | ] 26 | } 27 | } 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /scripts/create_iam_user_aksk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Description: Create IAM User, Group, Policy and genreate AKSK 3 | # Author: Damon Guo 4 | # Content: 5 | # . 
6 | # ├── create_iam_user_aksk.sh 7 | # ├── generated_aksk 8 | # │   ├── HeyApp.json 9 | # └── policy_data 10 | # └── HeyAppServicePolicy.json 11 | 12 | user_group_policy=( 13 | HeyApp,HeyAppGroup,HeyAppServicePolicy 14 | ) 15 | 16 | aws_account_id=857857857857 17 | aws_cmd="/usr/local/bin/aws" 18 | srv_path="heysrv" 19 | all_users="$(${aws_cmd} iam list-users --path /${srv_path}/ | grep -w UserName | xargs)" 20 | all_groups="$(${aws_cmd} iam list-groups --path /${srv_path}/ | grep -w GroupName | xargs)" 21 | all_policies="$(${aws_cmd} iam list-policies --path /${srv_path}/ | grep -w PolicyName | xargs)" 22 | 23 | for item in ${user_group_policy[@]}; do 24 | user_name=$(echo ${item} | cut -d, -f1) 25 | group_name=$(echo ${item} | cut -d, -f2) 26 | policy_name=$(echo ${item} | cut -d, -f3) 27 | 28 | echo "" 29 | echo "INFO: Checking User_Group_Policy: ${item}..." 30 | 31 | if $(echo ${all_groups} | grep -wq ${group_name}); then 32 | echo "INFO: Found Group: /${srv_path}/${group_name}" 33 | else 34 | ${aws_cmd} iam create-group --path /${srv_path}/ --group-name ${group_name} 35 | fi 36 | 37 | if $(echo ${all_policies} | grep -wq ${policy_name}); then 38 | echo "INFO: Found Policy: /${srv_path}/${policy_name}" 39 | else 40 | ${aws_cmd} iam create-policy --path /${srv_path}/ --policy-name ${policy_name} --policy-document file://policy_data/${policy_name}.json 41 | fi 42 | 43 | group_policies=$(${aws_cmd} iam list-attached-group-policies --group-name ${group_name} | grep -w PolicyName | xargs) 44 | if $(echo ${group_policies} | grep -wq ${policy_name}); then 45 | echo "INFO: Found Policy: /${srv_path}/${policy_name} in Group: /${srv_path}/${group_name}" 46 | else 47 | ${aws_cmd} iam attach-group-policy --policy-arn arn:aws:iam::${aws_account_id}:policy/${srv_path}/${policy_name} --group-name ${group_name} 48 | fi 49 | 50 | if $(echo ${all_users} | grep -wq ${user_name}); then 51 | echo "INFO: Found User: /${srv_path}/${user_name}" 52 | else 53 | ${aws_cmd} iam 
create-user --path /${srv_path}/ --user-name ${user_name} 54 | fi 55 | 56 | user_groups=$(${aws_cmd} iam list-groups-for-user --user-name ${user_name} | grep -w GroupName | xargs) 57 | if $(echo ${user_groups} | grep -wq ${group_name}); then 58 | echo "INFO: Found Group: /${srv_path}/${group_name} of User: /${srv_path}/${user_name}" 59 | else 60 | ${aws_cmd} iam add-user-to-group --group-name ${group_name} --user-name ${user_name} 61 | fi 62 | 63 | user_all_access_keys=$(${aws_cmd} iam list-access-keys --user-name ${user_name} | grep -w AccessKeyId | cut -d: -f2 | cut -d\" -f2 | xargs) 64 | if [ -a generated_aksk/${user_name}.json ]; then 65 | access_key=$(grep -w AccessKeyId generated_aksk/${user_name}.json | cut -d: -f2 | cut -d\" -f2) 66 | fi 67 | if [ ! -z "${access_key}" ]; then 68 | if $(echo ${user_all_access_keys} | grep -q ${access_key}); then 69 | echo "INFO: Found Access Key: ${access_key} in generated_aksk/${user_name}.json" 70 | continue 71 | fi 72 | fi 73 | echo "INFO: Generated AKSK of User: /${srv_path}/${user_name} in generated_aksk/${user_name}.json" 74 | ${aws_cmd} iam create-access-key --user-name ${user_name} > generated_aksk/${user_name}.json 75 | done 76 | -------------------------------------------------------------------------------- /scripts/create_tunnel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create a SSH tunnel to access an unreachable web URL via another reachable host 3 | # For Mac OS only 4 | 5 | tunnel_host=${1} 6 | dest_host_full_url=${2} 7 | 8 | ssh_user="trump" 9 | ssh_port=22 10 | ssh_key="/Users/trump/.ssh/id_rsa" 11 | 12 | if [[ $# -ne 2 ]] && [[ "${1}" != "list" ]]; then 13 | echo "Usage: ${0} tunnel_host dest_host_full_url" 14 | echo " ${0} trump.heylinux.com http://tiffany.heylinux.com/#/pretty/girl" 15 | echo " ${0} trump.heylinux.com https://ivanka.heylinux.com:8443/#/pretty/girl" 16 | echo " ${0} list" 17 | exit 1 18 | elif [[ "${1}" == "list" ]]; then 19 
| if ! $(ps -e -o pid,command | grep -q '[-]f -N -T -L'); then # [-]f keeps grep from matching its own entry in the ps output 20 | echo "No SSH tunnel process running" 21 | else 22 | echo "PID COMMAND" 23 | ps -e -o pid,command | grep '[-]f -N -T -L' 24 | fi 25 | exit 0 26 | fi 27 | 28 | dest_host_protocol=$(echo ${dest_host_full_url} | cut -d: -f1) 29 | dest_host_name=$(echo ${dest_host_full_url} | cut -d: -f2 | cut -d/ -f3) 30 | dest_host_port=$(echo ${dest_host_full_url} | cut -d: -f3 | cut -d/ -f1) 31 | dest_host_short_url=$(echo ${dest_host_full_url} | cut -d: -f3 | cut -d/ -f2-) 32 | 33 | if ! $(echo "${dest_host_protocol}" | grep -Ewq 'http|https'); then 34 | echo "Need to specify the protocol in dest_host_full_url: 'http://' or 'https://'" 35 | exit 1 36 | fi 37 | 38 | if [[ -z "${dest_host_port}" ]]; then 39 | if [[ ${dest_host_protocol} == "http" ]]; then 40 | dest_host_port=80 41 | elif [[ ${dest_host_protocol} == "https" ]]; then 42 | dest_host_port=443 43 | fi 44 | dest_host_short_url=$(echo ${dest_host_full_url} | cut -d: -f2 | cut -d/ -f4-) 45 | fi 46 | 47 | # Use a different port on localhost for ports which less than 10,000 48 | if [[ ${dest_host_port} -lt 10000 ]]; then 49 | local_host_port=$((${dest_host_port}+10000)) 50 | else 51 | local_host_port=${dest_host_port} 52 | fi 53 | 54 | ssh -i ${ssh_key} -p ${ssh_port} -l ${ssh_user} -f -N -T -L ${local_host_port}:${dest_host_name}:${dest_host_port} ${tunnel_host} 55 | 56 | echo "Opening ${dest_host_protocol}://localhost:${local_host_port}/${dest_host_short_url}" 57 | open ${dest_host_protocol}://localhost:${local_host_port}/${dest_host_short_url} 58 | -------------------------------------------------------------------------------- /scripts/deadnodes_watcher.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ]; then 4 | echo Usage: 5 | echo "${0} " 6 | echo Example: 7 | echo "${0} 540" 8 | exit 1 9 | fi 10 | 11 | TIMEOUT_SECOND=$1 12 | BACK_TIME_SECOND=${TIMEOUT_SECOND} 13 |
BASE_DIR=$(dirname ${0}) 14 | SSH_KEY_FILE=${BASE_DIR}/key-root 15 | REPORT_TMP=${BASE_DIR}/dfsadmin_report.tmp 16 | RESTARTED_LOG=${BASE_DIR}/restarted_datanodes.log 17 | WHITELIST="idc1-datanode11 idc1-datanode13" 18 | 19 | hdfs dfsadmin -report | grep -E 'Hostname|Last contact' > ${REPORT_TMP} 20 | 21 | function restart_datanode_service(){ 22 | hostname=$1 23 | last_restarted_timestamp=$(grep -w ${hostname} ${RESTARTED_LOG} |tail -n 1 |awk '{print $2}') 24 | last_restarted_timestamp_back_second=$((${last_restarted_timestamp}+${BACK_TIME_SECOND})) 25 | now_timestamp=$(date +%s) 26 | if [ ${last_restarted_timestamp_back_second} -lt ${now_timestamp} ]; then 27 | echo "Restarting the DN on ${hostname} due to timeout ${last_contact_timeout_second} >= ${TIMEOUT_SECOND} second(s)" 28 | ssh -i ${SSH_KEY_FILE} -o StrictHostKeyChecking=no root@${hostname} 'pkill -kill -f proc_datanode' 29 | ssh -i ${SSH_KEY_FILE} -o StrictHostKeyChecking=no root@${hostname} 'ps aux | grep proc_datanode | grep -v grep || service hadoop-hdfs-datanode start' 30 | echo "${hostname} ${now_timestamp}" >> ${RESTARTED_LOG} 31 | fi 32 | } 33 | 34 | for hostname in $(grep -w Hostname ${REPORT_TMP} |awk '{print $NF}') 35 | do 36 | if [ ! -z "$hostname" ]; then 37 | hostname_short=$(echo ${hostname} |cut -d. -f1) 38 | if ! 
$(echo ${WHITELIST} |grep -wq ${hostname_short}); then 39 | last_contact_time=$(grep -A1 ${hostname} ${REPORT_TMP} |grep 'Last contact' |awk -F "Last contact: " '{print $NF}') 40 | last_contact_timestamp=$(date -d "${last_contact_time}" +%s) 41 | now_timestamp=$(date +%s) 42 | last_contact_timeout_second=$((${now_timestamp}-${last_contact_timestamp})) 43 | if [ ${last_contact_timeout_second} -ge ${TIMEOUT_SECOND} ]; then 44 | restart_datanode_service ${hostname} 45 | fi 46 | fi 47 | fi 48 | done 49 | -------------------------------------------------------------------------------- /scripts/delete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | trash_dir=${HOME}/.Trash/$(date +%Y%m%d%H%M%S) 4 | 5 | function move_item(){ 6 | item=$1 7 | full_path=$2 8 | full_dir=$(dirname ${full_path}) 9 | mkdir -p ${trash_dir}${full_dir} 10 | mv ${item} ${trash_dir}${full_path} 11 | if [[ $? -eq 0 ]]; then 12 | echo "Moved ${item} to ${trash_dir}${full_path}" 13 | fi 14 | } 15 | 16 | if [[ $# -eq 0 ]] || $(echo "$1" |grep -Ewq '\-h|\-\-help'); then 17 | echo "${0} [-f] [*|FILE]" 18 | exit 2 19 | fi 20 | 21 | for item in $@; do 22 | if $(echo ${item} |grep -vq '^-'); then 23 | if $(echo ${item} |grep -q '^/'); then 24 | full_path=${item} 25 | else 26 | full_path=$(pwd)/${item} 27 | fi 28 | if $(echo $@ |grep -Ewq '\-f|\-rf|\-fr'); then 29 | move_item ${item} ${full_path} 30 | else 31 | echo -n "Move ${item} to ${trash_dir}${full_path}? 
[y/n] " 32 | read yorn 33 | if $(echo ${yorn} |grep -Ewq 'y|Y|yes|YES'); then 34 | move_item ${item} ${full_path} 35 | fi 36 | fi 37 | fi 38 | done 39 | -------------------------------------------------------------------------------- /scripts/delete_hdfs_trash.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function print_help(){ 4 | echo "Usage:" 5 | echo " ${0} show" 6 | echo " ${0} /user/root/.Trash/Current" 7 | exit 2 8 | } 9 | 10 | if [[ -z "$1" ]];then 11 | print_help 12 | fi 13 | 14 | if [[ "$1" == "show" ]];then 15 | hadoop fs -du -h /user/*/.Trash | awk '($2=="T") {print}' | sort -rn 16 | exit 0 17 | fi 18 | 19 | if ! $(echo "$1" | grep -Eq "/user/.*/.Trash/"); then 20 | print_help 21 | else 22 | hadoop fs -rm -r $1 23 | fi 24 | -------------------------------------------------------------------------------- /scripts/dmidecode_count.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Output example: idc1-server1, X8STi, 1x4096 kB, 2x4096 MB, 4xNo Module Installed 4 | 5 | hostname=$(hostname -s) 6 | motherboard=$(dmidecode |grep "Product Name" |awk -F ': ' '{print $2}' |uniq |xargs) 7 | ram_all=$(dmidecode |grep 'Memory Device' -A 5 |grep Size: |grep -v Range |awk -F ': ' '{print "x"$2}' |grep -v 'No Module Installed' |sort |uniq -c) 8 | result="" 9 | for i in ${ram_all} 10 | do 11 | if $(echo $i |grep -q 'x'); then 12 | result="$result$i" 13 | elif $(echo $i |grep -q 'B'); then 14 | result="$result $i, " 15 | else 16 | result="$result$i" 17 | fi 18 | done 19 | 20 | slots_open=$(dmidecode |grep 'Memory Device' -A 5 |grep Size: |grep -v Range |awk -F ': ' '{print $2}' |grep -c 'No Module Installed') 21 | 22 | echo "${hostname}, ${motherboard}, ${result}${slots_open}xNo Module Installed" 23 | -------------------------------------------------------------------------------- /scripts/dns_ops.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # DNS records management tool for Bind9 4 | # Supports multiple Keys and Zone Files 5 | # By Dong Guo from heylinux.com 6 | 7 | base_dir="/var/named" 8 | server_ipaddr="172.16.2.221" 9 | domain="heylinux.com" 10 | sub_domains=".cn|.jp|.us" 11 | dnsaddfile="${base_dir}/dnsadd" 12 | 13 | declare -A private_keys_dict=( 14 | ["A"]="Kheylinux.com.+178+63254.private" 15 | ["CNAME"]="Kheylinux.com.+157+59510.private" 16 | ["PTR"]="Kheylinux.com.+165+98364.private" 17 | ) 18 | 19 | function check_root(){ 20 | if [[ $EUID -ne 0 ]]; then 21 | echo "This script must be run as root" 1>&2 22 | exit 1 23 | fi 24 | } 25 | 26 | function print_help(){ 27 | echo "Usage: ${0} -t A|CNAME|PTR -u add|del -n servername -p record_value [-s ttl_seconds]" 28 | echo "Examples:" 29 | echo "${0} -t A -u add -n ns1 -p 172.16.2.222" 30 | echo "${0} -t A -u del -n ns1 -p 172.16.2.222" 31 | echo "" 32 | echo "${0} -t A -u add -n ns1.cn -p 172.16.2.223" 33 | echo "${0} -t A -u add -n ns1.jp -p 172.16.2.224" 34 | echo "${0} -t A -u add -n ns1.us -p 172.16.2.225" 35 | echo "${0} -t A -u del -n ns1.cn -p 172.16.2.223" 36 | echo "" 37 | echo "${0} -t CNAME -u add -n ns3 -p ns1.heylinux.com" 38 | echo "${0} -t CNAME -u add -n ns3 -p ns1.heylinux.com -s 30" 39 | echo "${0} -t CNAME -u del -n ns3 -p ns1.heylinux.com" 40 | echo "" 41 | echo "${0} -t CNAME -u add -n ns3.cn -p ns1.cn.heylinux.com" 42 | echo "${0} -t CNAME -u del -n ns3.cn -p ns1.cn.heylinux.com" 43 | echo "" 44 | echo "${0} -t PTR -u add -n 172.16.2.222 -p ns1.heylinux.com" 45 | echo "${0} -t PTR -u del -n 172.16.2.222 -p ns1.heylinux.com" 46 | echo "" 47 | echo "${0} -t PTR -u add -n 172.16.2.223 -p ns1.cn.heylinux.com" 48 | echo "${0} -t PTR -u del -n 172.16.2.223 -p ns1.cn.heylinux.com" 49 | exit 1 50 | } 51 | 52 | function check_servername(){ 53 | echo $servername | grep -wq ${domain} 54 | if [[ $? 
-eq 0 ]]; then 55 | hostname=$(echo $servername | sed s/.${domain}//g) 56 | echo "ERROR: '${servername}' is malformed. Servername should be just '${hostname}' without the '${domain}'" 57 | exit 1 58 | fi 59 | } 60 | 61 | function check_fqdn(){ 62 | echo $record_value | grep -q '\.' 63 | if [[ $? -ne 0 ]]; then 64 | echo "ERROR: '${record_value}' is malformed. Should be a FQDN" 65 | exit 1 66 | fi 67 | } 68 | 69 | function check_prereq(){ 70 | # Check if the prerequisite is satisfied, such as duplicate and nonexistent 71 | if [[ $action == "add" ]]; then 72 | if [[ $record_type == "PTR" ]]; then 73 | echo "prereq nxrrset ${servername}.${domain} ${record_type} ${record_value}" >> ${dnsaddfile} 74 | else 75 | echo "prereq nxdomain ${servername}.${domain}" >> ${dnsaddfile} 76 | fi 77 | fi 78 | if [[ $action == "delete" ]]; then 79 | echo "prereq yxrrset ${servername}.${domain} ${record_type} ${record_value}" >> ${dnsaddfile} 80 | fi 81 | } 82 | 83 | function update_record(){ 84 | if [[ -z "${ttl_seconds}" ]]; then 85 | ttl_seconds=86400 86 | fi 87 | 88 | echo "server ${server_ipaddr}" >> ${dnsaddfile} 89 | 90 | sub_domain_string=$(echo ${sub_domains} | sed s/[.]/'\\\.'/g) 91 | eval_command="echo \"${servername}\" | grep -Erq '${sub_domain_string}'" 92 | if $(eval ${eval_command}); then 93 | sub_domain=$(echo ${servername} | awk -F '.' '{print $NF}') 94 | zone=${sub_domain}.${domain} 95 | else 96 | zone=${domain} 97 | fi 98 | echo "zone ${zone}" >> ${dnsaddfile} 99 | 100 | check_prereq 101 | echo "update $action ${servername}.${domain} ${ttl_seconds} ${record_type} ${record_value}" >> ${dnsaddfile} 102 | echo "send" >> ${dnsaddfile} 103 | 104 | echo "update $action ${servername}.${domain} ${ttl_seconds} ${record_type} ${record_value}" 105 | 106 | private_key=${private_keys_dict["${record_type}"]} 107 | /usr/bin/nsupdate -k ${private_key} ${dnsaddfile} 108 | if [[ $? -eq 0 ]]; then 109 | echo "OK. 
Successful" 110 | else 111 | if [[ $action == "add" ]]; then 112 | echo "ERROR: Failed because duplicate record" 113 | elif [[ $action == "delete" ]]; then 114 | echo "ERROR: Failed because nonexistent/protected record" 115 | fi 116 | exit 1 # was 'exit $?': at this point $? holds the status of the echo above (0), so the script exited success on nsupdate failure 117 | fi 118 | 119 | # Write DNS records into zone file immediately, by default it does every 15 minutes 120 | #/usr/sbin/rndc freeze ${zone} 121 | #/usr/sbin/rndc reload ${zone} 122 | #/usr/sbin/rndc thaw ${zone} 123 | } 124 | 125 | check_root 126 | while getopts "t:u:n:p:s:" opts; do 127 | case "$opts" in 128 | "t") 129 | record_type=$OPTARG 130 | ;; 131 | "u") 132 | action=$OPTARG 133 | ;; 134 | "n") 135 | servername=$OPTARG 136 | ;; 137 | "p") 138 | record_value=$OPTARG 139 | ;; 140 | "s") 141 | ttl_seconds=$OPTARG 142 | ;; 143 | *) 144 | print_help 145 | ;; 146 | esac 147 | done 148 | 149 | if [[ -z "$record_type" ]] || [[ -z "$action" ]] || [[ -z "$servername" ]] || [[ -z "$record_value" ]]; then 150 | print_help 151 | else 152 | > ${dnsaddfile} 153 | case "$action" in 154 | "add") 155 | action=add 156 | ;; 157 | "del") 158 | action=delete 159 | ;; 160 | *) 161 | print_help 162 | ;; 163 | esac 164 | case "$record_type" in 165 | "A") 166 | check_servername 167 | update_record 168 | ;; 169 | "CNAME") 170 | check_servername 171 | check_fqdn 172 | update_record 173 | ;; 174 | "PTR") 175 | check_fqdn 176 | a=$(echo $servername |cut -d. -f1 |grep -Ev '[a-z]|[A-Z]') 177 | b=$(echo $servername |cut -d. -f2 |grep -Ev '[a-z]|[A-Z]') 178 | c=$(echo $servername |cut -d. -f3 |grep -Ev '[a-z]|[A-Z]') 179 | d=$(echo $servername |cut -d. -f4 |grep -Ev '[a-z]|[A-Z]') 180 | if [[ -z "$a" ]] || [[ -z "$b" ]] || [[ -z "$c" ]] || [[ -z "$d" ]]; then 181 | echo "ERROR: '${servername}' is malformed. Should be a IP address" 182 | else 183 | domain=$c.$b.$a.in-addr.arpa 184 | servername=$d 185 | if [[ !
-f ${base_dir}/${domain}.zone ]]; then 186 | echo "ERROR: ${base_dir}/${domain}.zone does not exist" 187 | exit 1 188 | else 189 | update_record 190 | fi 191 | fi 192 | ;; 193 | *) 194 | print_help 195 | ;; 196 | esac 197 | fi 198 | -------------------------------------------------------------------------------- /scripts/excludedn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Exclude or recover a datanode from Hadoop cluster 4 | 5 | conf=/etc/hadoop/conf/datanodes.exclude 6 | 7 | function check_ha_status(){ 8 | ha_status=$( hdfs haadmin -getServiceState ${1} ) 9 | if [[ $? -ne 0 ]]; then 10 | echo "Error running 'hdfs haadmin -getServiceState ${1}'." 11 | exit 1 12 | fi 13 | 14 | if [[ "${ha_status}" == "active" ]]; then 15 | echo "${1} active" 16 | return 17 | fi 18 | 19 | echo "${1} standby" 20 | return 21 | } 22 | 23 | function get_excluded_datanodes(){ 24 | for item in $(cat ${conf} | cut -d: -f1) 25 | do 26 | excluded_datanodes="${excluded_datanodes} $(host ${item} |awk '{print $NF}' |awk -F '.heylinux.com' '{print $1}')" 27 | done 28 | echo ${excluded_datanodes} |xargs -n 5 echo ' ' 29 | } 30 | 31 | function usage(){ 32 | echo "Please run as root on ${master}." 33 | echo "" 34 | echo "Usage:" 35 | echo " Exclude a datanode:" 36 | echo " $0 " 37 | echo " Recover a datanode:" 38 | echo " $0 -r " 39 | echo "" 40 | echo "Current excluded datanodes:" 41 | get_excluded_datanodes 42 | exit 1 43 | } 44 | 45 | ha_status_a=( $( check_ha_status "idc1-hnn1" ) ) 46 | ha_status_b=( $( check_ha_status "idc1-hnn2" ) ) 47 | 48 | if [[ ${#ha_status_a[@]} -eq 0 || ${#ha_status_b[@]} -eq 0 ]]; then 49 | echo "Missing namenode(s), exiting..." 50 | exit 1 51 | elif [[ "${ha_status_a[1]}" == "active" && "${ha_status_b[1]}" == "active" ]]; then 52 | echo "Both namenodes show active status, exiting..." 
53 | exit 1 54 | elif [[ "${ha_status_a[1]}" == "standby" && "${ha_status_b[1]}" == "standby" ]]; then 55 | echo "Both namenodes show standby status, exiting..." 56 | exit 1 57 | fi 58 | 59 | if [[ "${ha_status_a[1]}" == "active" ]]; then 60 | master="${ha_status_a[0]}" 61 | standby="${ha_status_b[0]}" 62 | else 63 | master="${ha_status_b[0]}" 64 | standby="${ha_status_a[0]}" 65 | fi 66 | 67 | [[ "$(whoami)" == "root" ]] || usage 68 | [[ "$(hostname -s)" == "${master}" ]] || usage 69 | do_remove="false" 70 | 71 | while getopts "hr:" option 72 | do 73 | case $option in 74 | h) 75 | usage 76 | ;; 77 | r) 78 | dn=$OPTARG 79 | do_remove="true" 80 | ;; 81 | ?) 82 | usage 83 | ;; 84 | esac 85 | done 86 | 87 | if [[ -z "${dn}" ]]; then 88 | dn=${1} 89 | fi 90 | if [[ -z "${dn}" ]]; then 91 | usage 92 | fi 93 | 94 | if [[ "${do_remove}" == "false" ]]; then 95 | echo "Adding $dn to ${conf}..." 96 | dn_ip=$(host ${dn} |awk '{print $NF":50010"}') 97 | if ! $(grep -wq ${dn_ip} ${conf}); then 98 | echo "${dn_ip}" >> $conf 99 | fi 100 | else 101 | dn_ip=$(host ${dn} |awk '{print $NF":50010"}') 102 | echo "Removing ${dn} from ${conf}..." 103 | sed -i "/^${dn_ip}/d" ${conf} 104 | fi 105 | 106 | if [[ "${standby}" != "NONE" ]]; then 107 | echo "Syncing the ${conf} to ${standby}" 108 | scp ${conf} root@${standby}:${conf} 109 | if [[ $? -eq 0 ]]; then 110 | echo "Refreshing the nodes..." 111 | hdfs dfsadmin -refreshNodes 112 | echo "Current excluded datanodes:" 113 | get_excluded_datanodes 114 | else 115 | echo "Failed to sync the ${conf} to ${standby}" 116 | exit 2 117 | fi 118 | else 119 | echo "Refreshing the nodes..." 
120 | hdfs dfsadmin -refreshNodes 121 | echo "Current excluded datanodes:" 122 | get_excluded_datanodes 123 | fi 124 | -------------------------------------------------------------------------------- /scripts/fastping.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # default settings 4 | retry=2 # retry times 5 | timeout=3 # timeout seconds 6 | output=/tmp/ping.output # output file 7 | subnet=$1 # C type subnet 8 | 9 | # function print_help 10 | function print_help(){ 11 | echo "Examples:" 12 | echo ${0} 172.17.32 13 | echo ${0} 192.168.1 unable 14 | exit 1 15 | } 16 | 17 | # check the parameter 18 | if [[ $# -lt 1 ]]; then 19 | print_help 20 | fi 21 | 22 | # check the network parameter's format 23 | count=0 24 | for i in $(echo $1 |sed 's/\./ /g'); do 25 | count=$((${count}+1)) 26 | done 27 | if [[ ${count} -ne 3 ]]; then 28 | print_help 29 | fi 30 | 31 | # clean the output file 32 | > ${output} 33 | 34 | function runping(){ 35 | if $(echo $OSTYPE |grep -q darwin); then 36 | # Mac OS 37 | ping -t ${retry} -W ${timeout}000 -q ${subnet}.${i} 38 | else 39 | # Linux/BSD 40 | ping -c ${retry} -w ${timeout} -q ${subnet}.${i} 41 | fi 42 | } 43 | 44 | function pingable(){ 45 | runping &> /dev/null && echo ${i} >> ${output} 46 | } 47 | 48 | function unpingable(){ 49 | runping &> /dev/null || echo ${i} >> ${output} 50 | } 51 | 52 | # get the check type 53 | if [[ "$2" == "unable" ]]; then 54 | status="unpingable" 55 | else 56 | status="pingable" 57 | fi 58 | 59 | # ping as paraller mode and write output into file 60 | for i in {1..255}; do 61 | ${status} & 62 | done 63 | 64 | # wait for all ping processes done 65 | wait 66 | 67 | # print output with better order 68 | sum=$(wc -l ${output} |awk '{print $1}') 69 | echo "There are '${sum}' '${status}' ips begin with '${subnet}.' 
:" 70 | cat ${output} |sort -V |xargs -n 20 echo " " 71 | -------------------------------------------------------------------------------- /scripts/getopts_example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function print_help(){ 4 | echo "Usage: ${0} -u username -p password [ -h hostname ]" 5 | exit 1 6 | } 7 | 8 | while getopts "u:p:h:" opts 9 | do 10 | case "$opts" in 11 | "u") 12 | username=$OPTARG 13 | ;; 14 | "p") 15 | password=$OPTARG 16 | ;; 17 | "h") 18 | hostname=$OPTARG 19 | ;; 20 | *) 21 | print_help 22 | ;; 23 | esac 24 | done 25 | 26 | if [ -z "$username" ] || [ -z "$password" ]; then 27 | print_help 28 | else 29 | echo "Username: $username Password: $password" 30 | fi 31 | 32 | if [ ! -z "$hostname" ]; then 33 | echo "Hostname: $hostname" 34 | fi 35 | -------------------------------------------------------------------------------- /scripts/hadoop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # List all protected directories here 4 | # For example: 5 | # To protect all the directories in /user and the directory /user, use "/user/*" 6 | # To protect the directory /user/dong/workspace and its parent directory /user/dong, use "/user/dong/workspace" 7 | # 8 | # NOTE: "/*" is not allowed in "protected_dirs" 9 | 10 | protected_dirs=( 11 | "/user/*" 12 | "/user/oozie/*" 13 | /user/dong/workspace 14 | /rawlogs 15 | ) 16 | 17 | hadoop_cmd="/usr/bin/hadoop" 18 | 19 | function generate_detailed_protected_dirs(){ 20 | for dir_item in "${protected_dirs[@]}"; do 21 | if [[ "${dir_item}" =~ "*" ]]; then 22 | dir_path=$(/usr/bin/dirname "${dir_item}") 23 | detailed_dir_item=$(${hadoop_cmd} fs -ls "${dir_path}" | grep '^dr' | awk '{print $NF}') 24 | else 25 | detailed_dir_item="${dir_item}" 26 | fi 27 | detailed_protected_dirs="${detailed_protected_dirs} ${detailed_dir_item}" 28 | done 29 | 30 | echo "${detailed_protected_dirs}" 31 | } 32 | 33 | 
if [[ "$1" == "fs" ]]; then 34 | if [[ "$2" =~ "rm" ]]; then 35 | detailed_protected_dirs=$(generate_detailed_protected_dirs) 36 | detailed_protected_dirs_list=$(echo "${detailed_protected_dirs}" | xargs -n1) 37 | dir_items="${@:3}" 38 | for dir_item in ${dir_items}; do 39 | if [[ "${dir_item}" != "-*" ]]; then 40 | if $(echo "${detailed_protected_dirs_list}" | grep -wq "^${dir_item}"); then 41 | echo "ERROR: The directory \"${dir_item}\" is PROTECTED, PLEASE DO NOT DELETE IT." 42 | return_value=2 43 | fi 44 | fi 45 | done 46 | fi 47 | fi 48 | 49 | if [[ ! -z "${return_value}" ]]; then 50 | exit ${return_value} 51 | fi 52 | 53 | ${hadoop_cmd} ${@:1} 54 | -------------------------------------------------------------------------------- /scripts/hdfs_clean/data.txt: -------------------------------------------------------------------------------- 1 | drwxr-xr-x - oozie supergroup 0 2013-04-25 00:17 /user/oozie/data/dpp/log/lotame-offline/2013-04-24-23-50 2 | drwxr-xr-x - oozie supergroup 0 2013-04-25 01:29 /user/oozie/data/dpp/log/lotame-offline/2013-04-25-00-50 3 | drwxr-xr-x - oozie supergroup 0 2013-05-08 18:46 /user/oozie/data/dpp/ad_groups_property_stats/dt=2013-05-08-17-00 4 | drwxr-xr-x - oozie supergroup 0 2013-05-08 18:59 /user/oozie/data/dpp/ad_groups_property_stats/dt=2013-05-08-18-00 5 | drwxr-xr-x - oozie supergroup 0 2013-05-15 03:34 /user/oozie/data/dpp/circuitbreaker2/2013-05-14-18-00 6 | drwxr-xr-x - oozie supergroup 0 2013-05-15 04:15 /user/oozie/data/dpp/circuitbreaker2/2013-05-14-19-00 7 | drwxr-xr-x - oozie supergroup 0 2013-05-21 20:57 /user/oozie/data/dpp/hive-prep/ad-reports-rollup 8 | drwxr-xr-x - oozie supergroup 0 2013-05-23 21:16 /user/oozie/data/dpp/hive-prep/ad_reports 9 | drwxr-xr-x - oozie supergroup 0 2013-06-30 00:32 /user/oozie/data/dpp/hive/ad_bid_requests2/dt=2013-06-30-00-00 10 | drwxr-xr-x - oozie supergroup 0 2013-06-30 00:36 /user/oozie/data/dpp/hive/ad_bid_requests2/dt=2013-06-30-00-10 11 | drwxr-xr-x - oozie supergroup 0 
2013-05-01 00:29 /user/oozie/data/dpp/hive/ad_charge_events2/dt=2013-05-01-00-00 12 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:32 /user/oozie/data/dpp/hive/ad_charge_events2/dt=2013-05-01-00-10 13 | drwxr-xr-x - oozie supergroup 0 2013-06-30 00:32 /user/oozie/data/dpp/hive/ad_group_ecpms/dt=2013-06-30-00-00 14 | drwxr-xr-x - oozie supergroup 0 2013-06-30 00:37 /user/oozie/data/dpp/hive/ad_group_ecpms/dt=2013-06-30-00-10 15 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:29 /user/oozie/data/dpp/hive/ad_imp_click_events2/dt=2013-05-01-00-00 16 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:32 /user/oozie/data/dpp/hive/ad_imp_click_events2/dt=2013-05-01-00-10 17 | drwxr-xr-x - oozie supergroup 0 2013-07-21 00:31 /user/oozie/data/dpp/hive/ad_no_bid_requests2/dt=2013-07-21-00-00 18 | drwxr-xr-x - oozie supergroup 0 2013-07-21 00:36 /user/oozie/data/dpp/hive/ad_no_bid_requests2/dt=2013-07-21-00-10 19 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:40 /user/oozie/data/dpp/hive/ad_reports/ad_stats2/dt=2013-04-08-21-50 20 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:50 /user/oozie/data/dpp/hive/ad_reports/ad_stats2/dt=2013-04-08-22-00 21 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:40 /user/oozie/data/dpp/hive/ad_reports/adv_device_stats2/dt=2013-04-08-21-50 22 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:49 /user/oozie/data/dpp/hive/ad_reports/adv_device_stats2/dt=2013-04-08-22-00 23 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:40 /user/oozie/data/dpp/hive/ad_reports/adv_geo_stats2/dt=2013-04-08-21-50 24 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:50 /user/oozie/data/dpp/hive/ad_reports/adv_geo_stats2/dt=2013-04-08-22-00 25 | drwxr-xr-x - oozie supergroup 0 2013-04-22 05:01 /user/oozie/data/dpp/hive/ad_reports/adv_join_base2/_distcp_logs_10rcu3 26 | drwxr-xr-x - oozie supergroup 0 2013-04-20 14:17 /user/oozie/data/dpp/hive/ad_reports/adv_join_base2/_distcp_logs_11k6cw 27 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:41 
/user/oozie/data/dpp/hive/ad_reports/adv_pub_stats2/dt=2013-04-08-21-50 28 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:50 /user/oozie/data/dpp/hive/ad_reports/adv_pub_stats2/dt=2013-04-08-22-00 29 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:42 /user/oozie/data/dpp/hive/ad_reports/pub_partner_stats2/dt=2013-04-08-21-50 30 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:51 /user/oozie/data/dpp/hive/ad_reports/pub_partner_stats2/dt=2013-04-08-22-00 31 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:41 /user/oozie/data/dpp/hive/ad_reports/pub_property_stats2/dt=2013-04-08-21-50 32 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:50 /user/oozie/data/dpp/hive/ad_reports/pub_property_stats2/dt=2013-04-08-22-00 33 | drwxr-xr-x - oozie supergroup 0 2013-04-10 02:53 /user/oozie/data/dpp/hive/ad_reports/testbu_stats/dt=2013-04-08-22 34 | drwxr-xr-x - oozie supergroup 0 2013-04-15 07:47 /user/oozie/data/dpp/hive/ad_reports/testbu_stats/dt=2013-04-13-00 35 | -rw-r--r-- 3 oozie supergroup 0 2013-04-09 16:22 /user/oozie/data/dpp/hive/ad_reports/testbu_stats_daily/000000_0 36 | drwxr-xr-x - oozie supergroup 0 2013-04-10 02:54 /user/oozie/data/dpp/hive/ad_reports/testbu_stats_daily/dt=2013-04-08 37 | drwxr-xr-x - oozie supergroup 0 2013-04-22 19:52 /user/oozie/data/dpp/hive/campaign_history/dt=2013-04-22-06 38 | drwxr-xr-x - oozie supergroup 0 2013-04-22 19:56 /user/oozie/data/dpp/hive/campaign_history/dt=2013-04-22-07 39 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:32 /user/oozie/data/dpp/hive/conversion_prospects3/dt=2013-05-01-00-00 40 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:35 /user/oozie/data/dpp/hive/conversion_prospects3/dt=2013-05-01-00-10 41 | drwxr-xr-x - oozie supergroup 0 2013-04-22 11:10 /user/oozie/data/dpp/hive/conversions2/_distcp_logs_10da8p 42 | drwxr-xr-x - oozie supergroup 0 2013-04-20 02:17 /user/oozie/data/dpp/hive/conversions2/_distcp_logs_11mewx 43 | drwxr-xr-x - oozie supergroup 0 2013-04-09 22:01 
/user/oozie/data/dpp/hive/conversions_max_days2/dt=2013-04-07-00 44 | drwxr-xr-x - oozie supergroup 0 2013-04-09 14:17 /user/oozie/data/dpp/hive/conversions_max_days2/dt=2013-04-08-00 45 | drwxr-xr-x - oozie supergroup 0 2013-04-18 21:54 /user/oozie/data/dpp/hive/pub_profit2/dt=2013-04-18-21-00 46 | drwxr-xr-x - oozie supergroup 0 2013-04-18 21:54 /user/oozie/data/dpp/hive/pub_profit2/dt=2013-04-18-21-10 47 | drwxr-xr-x - oozie supergroup 0 2013-04-20 01:22 /user/oozie/data/dpp/hive/pub_profit_daily/dt=2013-04-19 48 | drwxr-xr-x - oozie supergroup 0 2013-04-21 01:23 /user/oozie/data/dpp/hive/pub_profit_daily/dt=2013-04-20 49 | drwxr-xr-x - oozie supergroup 0 2013-04-09 06:50 /user/oozie/data/dpp/hive/retargeting_cookie_effectiveness/cpi 50 | drwxr-xr-x - oozie supergroup 0 2013-04-09 06:49 /user/oozie/data/dpp/hive/retargeting_cookie_effectiveness/ctr 51 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:32 /user/oozie/data/dpp/hive/retargeting_prospects3/dt=2013-05-01-00-00 52 | drwxr-xr-x - oozie supergroup 0 2013-05-01 00:35 /user/oozie/data/dpp/hive/retargeting_prospects3/dt=2013-05-01-00-10 53 | -------------------------------------------------------------------------------- /scripts/hdfs_clean/hdfs_clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Generic HDFS clean up script 4 | # Author: Dong Guo 5 | # Last modified: 2013/07/25 6 | # Email: dong.guo@symbio.com 7 | # 8 | # Use "./hdfs_clean.sh delete" to trigger the action 9 | 10 | # Given above list, create a script to find directories older than X days 11 | DATA_FILE="data.txt" 12 | 13 | # 2. 
Script should take a configuration file, with delete days threshold and list of directories 14 | DELETE_DAYS_THRESHOLD=9 15 | DIRECTORIES_LIST=( 16 | /user/oozie/data/dpp/hive 17 | /user/oozie/data/dpp/log 18 | /user/oozie/data/dpp/ad_groups_property_stats 19 | /user/oozie/data/dpp/circuitbreaker2 20 | ) 21 | 22 | NOW_TIMESTAMP=$(date +%s) 23 | THRESHOLD_TIMESTAMP=$(($NOW_TIMESTAMP-$DELETE_DAYS_THRESHOLD*86400)) 24 | 25 | # 4. Directory depth under /user/oozie/data/dpp/hive/ can be variable 26 | DIRECTORY_DEPTH=2 27 | function check_dir_withdepth(){ 28 | DEPTH=$(($DIRECTORY_DEPTH+6)) 29 | SUBDIRECTORIES=$(grep -w ${DIRECTORY} ${DATA_FILE} | grep "drwxr" | awk '{print $8}' | cut -d / -f 1-${DEPTH} | grep [2][0-9][0-9][0-9]-[0-9][0-9]) 30 | } 31 | 32 | function check_dir_formated(){ 33 | echo "Here are the directories older than ${DELETE_DAYS_THRESHOLD} days:" 34 | for DIRECTORY in ${DIRECTORIES_LIST[@]} 35 | do 36 | if [ ${DIRECTORY} == "/user/oozie/data/dpp/hive" ]; then 37 | check_dir_withdepth 38 | else 39 | # 3. Ignore files, look at directories only 40 | SUBDIRECTORIES=$(grep -w ${DIRECTORY} ${DATA_FILE} | grep "drwxr" | awk '{print $8}' | grep [2][0-9][0-9][0-9]-[0-9][0-9]) 41 | fi 42 | if [ ! -z "${SUBDIRECTORIES}" ]; then 43 | for SUBDIRECTORY in ${SUBDIRECTORIES[@]} 44 | do 45 | # 5. Use directory name instead of last modified time 46 | DIRECTORIES_TIME=$(echo ${SUBDIRECTORY} | awk -F "/" '{print $NF}' | awk -F "=" '{print $NF}' | awk -F "-" '{print $1"-"$2"-"$3}') 47 | DIRECTORIES_TIMESTAMP=$(date -d ${DIRECTORIES_TIME} +%s) 48 | if [ ${DIRECTORIES_TIMESTAMP} -lt ${THRESHOLD_TIMESTAMP} ]; then 49 | # 1. Script should run in DEBUG mode by default (print only) and has a switch to actually run delete 50 | if [ "$1" == "delete" ]; then 51 | echo "sudo -u oozie hadoop fs -rmr ${SUBDIRECTORY}" 52 | else 53 | echo "${DIRECTORIES_TIME} ${SUBDIRECTORY}" 54 | fi 55 | fi 56 | done 57 | fi 58 | done 59 | } 60 | 61 | # 6. 
Directory date can be in the format of dt=2013-06-30-00-00 or dt=2013-04-19 or 2013-05-14-18-00; print directories not matching these pattern 62 | function check_dir_noformat(){ 63 | echo "" 64 | echo "Here are the directories without data format:" 65 | for DIRECTORY in ${DIRECTORIES_LIST[@]} 66 | do 67 | NODATA_DIRECTORIES=$(grep -w ${DIRECTORY} ${DATA_FILE} | grep "drwxr" | awk '{print $8}' | grep -v [2][0-9][0-9][0-9]-[0-9][0-9]*) 68 | if [ ! -z "${NODATA_DIRECTORIES}" ]; then 69 | echo "${NODATA_DIRECTORIES}" 70 | fi 71 | done 72 | } 73 | 74 | check_dir_formated "$@" 75 | check_dir_noformat 76 | -------------------------------------------------------------------------------- /scripts/icinga_schedule_downtime.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Adjust variables to fit your environment as necessary. 3 | 4 | commandfile='/var/spool/icinga/cmd/icinga.cmd' 5 | 6 | if [ "$#" -ne 1 ]; then 7 | echo "$0 hostname" 8 | exit 1 9 | fi 10 | 11 | declare -i start=$(date +%s) 12 | declare -i end=$start+900 13 | /usr/bin/printf "[%lu] SCHEDULE_HOST_DOWNTIME;$1;$start;$end;1;0;1;AS_spindown;AS spinning down\n" $start > $commandfile 14 | -------------------------------------------------------------------------------- /scripts/init_rhel_1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function check_root(){ 4 | if [[ $EUID -ne 0 ]]; then 5 | echo "This script must be run as root" 1>&2 6 | exit 1 7 | fi 8 | } 9 | 10 | function disable_service(){ 11 | echo "1. Disable services: NetworkManager,iptables,ip6tables" 12 | chkconfig NetworkManager off 13 | chkconfig iptables off 14 | chkconfig ip6tables off 15 | 16 | service NetworkManager stop 17 | service iptables stop 18 | service ip6tables stop 19 | } 20 | 21 | function disable_selinux(){ 22 | echo "2. 
Disable SELinux" 23 | sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config 24 | setenforce 0 25 | } 26 | 27 | function configure_network(){ 28 | echo "3. Specify the IPADDR/NETMASK/GATEWAY settings" 29 | echo -n "IPADDR: " 30 | read ipaddr 31 | 32 | echo -n "NETMASK: " 33 | read netmask 34 | 35 | echo -n "GATEWAY: " 36 | read gateway 37 | 38 | if [[ -z "${ipaddr}" ]] || [[ -z "${netmask}" ]] || [[ -z "${gateway}" ]]; then 39 | echo "ERROR: Incorrect IPADDR/NETMASK/GATEWAY" 40 | exit 1 41 | fi 42 | 43 | echo "4. Backup /etc/sysconfig/network-scripts/{ifcfg-bond0,ifcfg-eth0,ifcfg-eth1}" 44 | if [[ -f /etc/sysconfig/network-scripts/ifcfg-bond0 ]]; then 45 | cp /etc/sysconfig/network-scripts/ifcfg-bond0 /etc/sysconfig/network-scripts/ifcfg-bond0.$(date +%Y%m%d%H%M%S) 46 | fi 47 | 48 | if [[ -f /etc/sysconfig/network-scripts/ifcfg-eth0 ]]; then 49 | cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth0.$(date +%Y%m%d%H%M%S) 50 | fi 51 | 52 | if [[ -f /etc/sysconfig/network-scripts/ifcfg-eth1 ]]; then 53 | cp /etc/sysconfig/network-scripts/ifcfg-eth1 /etc/sysconfig/network-scripts/ifcfg-eth1.$(date +%Y%m%d%H%M%S) 54 | fi 55 | 56 | echo "5. Configure /etc/sysconfig/network-scripts/{ifcfg-bond0,ifcfg-eth0,ifcfg-eth1}" 57 | cat > /etc/sysconfig/network-scripts/ifcfg-bond0 < /etc/sysconfig/network-scripts/ifcfg-eth0 < /etc/sysconfig/network-scripts/ifcfg-eth1 <> /etc/modprobe.conf 88 | fi 89 | 90 | if !
$(grep -q 'options bond0 miimon=100 mode=1' /etc/modprobe.conf); then 91 | echo 'options bond0 miimon=100 mode=1' >> /etc/modprobe.conf 92 | fi 93 | } 94 | 95 | check_root 96 | disable_service 97 | disable_selinux 98 | configure_network 99 | -------------------------------------------------------------------------------- /scripts/initd_service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # chkconfig: - 99 03 4 | # description: Starts and stops SERVICE_NAME Service 5 | # 6 | 7 | RUNUSER=service_user 8 | BASEDIR=/path/to/service/home/dir 9 | EXECBIN=${BASEDIR}/bin/service_exec_bin 10 | CONF=${BASEDIR}/config/service_name.conf 11 | LOGFILE=${BASEDIR}/logs/service_name.out 12 | PIDFILE=${BASEDIR}/run/service_name.pid 13 | RUNGREP="${BASEDIR}/bin/service_exec_bin" 14 | RUNUID=$(id -u ${RUNUSER}) 15 | 16 | function check_root(){ 17 | if [ $EUID -ne 0 ] && [ $EUID -ne "${RUNUID}" ]; then 18 | echo "This script must be run as root or ${RUNUSER}" 1>&2 19 | exit 1 20 | fi 21 | } 22 | 23 | status(){ 24 | PID=$(ps aux | grep -w ${RUNGREP} | grep -Ewv 'nohup|grep' | awk '{print $2}' | xargs) 25 | if [ ! -z "${PID}" ]; then 26 | echo "SERVICE_NAME Service is running (PID:${PID})" 27 | exit 0 28 | else 29 | echo "SERVICE_NAME Service is not running" 30 | exit 2 31 | fi 32 | } 33 | 34 | start(){ 35 | PID=$(ps aux | grep -w ${RUNGREP} | grep -Ewv 'nohup|grep' | awk '{print $2}' | xargs) 36 | if [ ! -z "${PID}" ]; then 37 | echo "SERVICE_NAME Service is already running" 38 | else 39 | echo -n "Starting SERVICE_NAME Service" 40 | if [ $EUID -eq "${RUNUID}" ]; then 41 | nohup ${EXECBIN} >> ${LOGFILE} 2>&1 & 42 | else 43 | sudo -u ${RUNUSER} nohup ${EXECBIN} >> ${LOGFILE} 2>&1 & 44 | fi 45 | sleep 1 46 | PID=$(ps aux | grep -w ${RUNGREP} | grep -Ewv 'nohup|grep' | awk '{print $2}' | xargs) 47 | if [ ! -z "${PID}" ]; then 48 | echo ". 
OK" 49 | echo "${PID}" > ${PIDFILE} 50 | chown ${RUNUSER}:${RUNUSER} ${PIDFILE} 51 | else 52 | echo ". FAILED" 53 | exit 2 54 | fi 55 | fi 56 | } 57 | 58 | stop(){ 59 | PID=$(ps aux | grep -w ${RUNGREP} | grep -Ewv 'nohup|grep' | awk '{print $2}' | xargs) 60 | if [ -z "${PID}" ]; then 61 | echo "SERVICE_NAME Service is aready stopped" 62 | else 63 | echo -n "Stopping SERVICE_NAME Service" 64 | kill -TERM ${PID} 65 | rm ${PIDFILE} 66 | attempt=1 67 | while true 68 | do 69 | PID=$(ps aux | grep -w ${RUNGREP} | grep -Ewv 'nohup|grep' | awk '{print $2}' | xargs) 70 | if [ ! -z "${PID}" ]; then 71 | sleep 5 72 | echo -n "." 73 | if [ "${attempt}" -eq 10 ]; then 74 | echo " FAILED" 75 | exit 2 76 | fi 77 | else 78 | echo " OK" 79 | break 80 | fi 81 | attempt=$((${attempt}+1)) 82 | done 83 | fi 84 | } 85 | 86 | check_root 87 | case "$1" in 88 | start) 89 | start 90 | ;; 91 | stop) 92 | stop 93 | ;; 94 | restart) 95 | stop 96 | sleep 1 97 | start 98 | ;; 99 | status) 100 | status 101 | ;; 102 | *) 103 | echo $"Usage: $0 {start|stop|restart|status}" 104 | exit 2 105 | esac 106 | -------------------------------------------------------------------------------- /scripts/install_python27.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Install the latest verion of Python 2.7 4 | # 5 | 6 | tmp_dir="/tmp" 7 | 8 | function check_root(){ 9 | if [[ $EUID -ne 0 ]]; then 10 | echo "This script must be run as root" 1>&2 11 | exit 1 12 | fi 13 | } 14 | 15 | function update_pip_conf(){ 16 | mkdir -p ~/.pip 17 | 18 | if ! $(grep -wq trusted-host ~/.pip/pip.conf); then 19 | echo "Updating ~/.pip/pip.conf..." 20 | cat > ~/.pip/pip.conf <> /etc/ld.so.conf.d/python27.conf 36 | ldconfig 37 | fi 38 | } 39 | 40 | function install_python(){ 41 | latest_version=$(curl -s https://www.python.org/ftp/python/ | grep 'href="2.7' | tail -n 1 | cut -d\" -f 2 | cut -d\/ -f1) 42 | 43 | if [[ ! 
-f /usr/local/bin/python2.7 ]]; then 44 | echo "Installing the Python ${latest_version}..." 45 | cd ${tmp_dir} 46 | /usr/bin/wget --no-check-certificate https://www.python.org/ftp/python/${latest_version}/Python-${latest_version}.tgz 47 | if [[ $? -eq 0 ]]; then 48 | tar xzf Python-${latest_version}.tgz 49 | cd Python-${latest_version} 50 | ./configure --with-threads --enable-shared && make && make altinstall 51 | else 52 | echo "Failed to download the Python-${latest_version}.tgz package, exiting..." 53 | exit 1 54 | fi 55 | fi 56 | 57 | if [[ ! -f /usr/local/bin/pip2.7 ]]; then 58 | echo "Installing the easy_install-2.7 and pip commands..." 59 | cd ${tmp_dir} 60 | /usr/bin/wget --no-check-certificate https://bootstrap.pypa.io/ez_setup.py -O - | /usr/local/bin/python2.7 61 | if [[ $? -eq 0 ]]; then 62 | /usr/local/bin/easy_install-2.7 pip 63 | rm -rf ${tmp_dir}/Python-${latest_version}* 64 | else 65 | echo "Failed to install the easy_install-2.7 command, exiting..." 66 | exit 1 67 | fi 68 | fi 69 | 70 | echo "Linking the python2.7 and pip2.7 commands from /usr/local/bin to /usr/bin..." 71 | for binfile in python2.7 pip2.7; do 72 | ln -sf /usr/local/bin/${binfile} /usr/bin/${binfile} 73 | done 74 | } 75 | 76 | function install_pip_packages(){ 77 | echo "Installing the Python modules packages by pip2.7 command..." 
78 | for package in scipy sklearn sklearn-pandas panda execnet xgboost numpy; do 79 | /usr/local/bin/pip2.7 install -b ${tmp_dir} ${package} 80 | done 81 | } 82 | 83 | check_root 84 | update_pip_conf # For Chinese users only 85 | install_rpm_packages 86 | update_ldconfig 87 | install_python 88 | install_pip_packages # Just for example 89 | -------------------------------------------------------------------------------- /scripts/k8s_sync_logs_s3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl_cmd="/usr/bin/kubectl --kubeconfig /opt/devops/.kube/eks-app" 4 | 5 | master_pod="app-master-0" 6 | executor_pods=$(${kubectl_cmd} get pods | grep app-executor | awk '{print $1}') 7 | 8 | pod_logs_dir="/opt/app/logs" 9 | sync_logs_dir="/opt/backups/app_sync_logs" 10 | mkdir -p ${sync_logs_dir} 11 | 12 | ${kubectl_cmd} exec -i ${master_pod} -- mkdir -p ${pod_logs_dir} 13 | for i in $(${kubectl_cmd} exec -i ${master_pod} -- find ${pod_logs_dir} -type f -mtime -1); do 14 | echo "INFO: Copying ${i}..." 15 | i_basename=$(echo ${i} | sed "s|${pod_logs_dir}||g") 16 | ${kubectl_cmd} cp ${master_pod}:${i} ${sync_logs_dir}/${master_pod}${i_basename} 17 | done 18 | 19 | for j in ${executor_pods}; do 20 | ${kubectl_cmd} exec -i ${j} -- mkdir -p ${pod_logs_dir} 21 | for k in $(${kubectl_cmd} exec -i ${j} -- find ${pod_logs_dir} -type f -mmin -120); do 22 | echo "INFO: Copying ${k}..." 23 | k_basename=$(echo ${k} | sed "s|${pod_logs_dir}||g") 24 | ${kubectl_cmd} cp ${j}:${k} ${sync_logs_dir}/${j}${k_basename} 25 | done 26 | done 27 | 28 | /usr/local/bin/aws --profile app s3 sync --size-only ${sync_logs_dir}/ s3://heylinux-backups/app/app_sync_logs/ 29 | if [ $? 
-eq 0 ]; then 30 | find ${sync_logs_dir} -type f -mtime +7 -delete 31 | fi 32 | -------------------------------------------------------------------------------- /scripts/macos_user_service/com.user.foobar.service.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Label 6 | com.user.foobar.service 7 | Program 8 | /Users/username/services/bin/foobar.service.sh 9 | RunAtLoad 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /scripts/macos_user_service/foobar.service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script is used to start the FooBar service 4 | # 5 | # Location: /Users/username/services/bin/foobar.service.sh 6 | # Depends_on: launchctl load /Users/username/Library/LaunchAgents/com.user.foobar.service.plist 7 | # 8 | 9 | # Homebrew 10 | eval "$(/opt/homebrew/bin/brew shellenv)" 11 | 12 | # Service 13 | service_base=/Users/username/services 14 | service_name=foobar.service 15 | 16 | export FOOBAR_START=yes 17 | 18 | service_cmd="/opt/homebrew/bin/foobar tom \ 19 | --target=jerry \ 20 | --catch" 21 | 22 | cmd_basestr=$(echo ${service_cmd} | cut -d" " -f1-2) 23 | cmd_found=$(ps aux | grep "${cmd_basestr}" | grep -v grep) 24 | 25 | if [[ ! 
-z "${cmd_found}" ]]; then 26 | echo -e "ERROR: Found the Running Command:\n ${cmd_found}" 27 | exit 1 28 | fi 29 | 30 | ${service_cmd} >> ${service_base}/logs/${service_name}.log 2>&1 31 | -------------------------------------------------------------------------------- /scripts/openvpn_client/checkstatus.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | 4 | import time 5 | import commands 6 | import wx 7 | 8 | class App(wx.App): 9 | def __init__(self, redirect=True, filename=None): 10 | wx.App.__init__(self, redirect, filename) 11 | 12 | def OnInit(self): 13 | dlg = wx.MessageDialog(None, 14 | '- WARNING - \nDisconnected to vpnserver', 15 | 'OpenVPN Status', 16 | wx.OK | wx.ICON_WARNING) 17 | result = dlg.ShowModal() 18 | dlg.Destroy() 19 | return True 20 | 21 | while True: 22 | time.sleep(30) 23 | (status, output) = commands.getstatusoutput('route -n | grep -q 10.20.') 24 | if status != 0: 25 | app = App(False, "Output") 26 | app.MainLoop() 27 | exit() 28 | -------------------------------------------------------------------------------- /scripts/openvpn_client/vpnclient.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | openvpn=/usr/sbin/openvpn 4 | home_dir=/etc/openvpn/vpnserver 5 | log_file=$home_dir/vpnserver.log 6 | pid_file=$home_dir/vpnserver.pid 7 | conf_file=$home_dir/vpnserver.ovpn 8 | 9 | function check_root() 10 | { 11 | if [ $EUID -ne 0 ]; then 12 | echo "This script must be run as root" 1>&2 13 | exit 1 14 | fi 15 | } 16 | 17 | function copy_vpn(){ 18 | cmp -s $home_dir/resolv.conf.vpnserver /etc/resolv.conf 19 | if [ $? -ne 0 ]; then 20 | cp $home_dir/resolv.conf.vpnserver /etc/resolv.conf 21 | echo "Replaced resolv.conf as vpnserver" 22 | fi 23 | } 24 | 25 | function copy_local(){ 26 | cmp -s $home_dir/resolv.conf.localhost /etc/resolv.conf 27 | if [ $? 
-ne 0 ]; then 28 | cp $home_dir/resolv.conf.localhost /etc/resolv.conf 29 | echo "Replaced resolv.conf as localhost" 30 | fi 31 | } 32 | 33 | function check_status(){ 34 | nohup $home_dir/checkstatus.py > $home_dir/nohup.out 2>&1 & 35 | } 36 | 37 | function kill_status(){ 38 | stauts_pid=$(ps aux | grep checkstatus.py | grep -v grep | awk '{print $2}') 39 | if [ -n "$stauts_pid" ]; then 40 | kill -9 $stauts_pid 41 | fi 42 | } 43 | 44 | check_root 45 | 46 | case $1 in 47 | on) 48 | route -n | grep -q 10.20. 49 | if [ $? -eq 0 ]; then 50 | echo "OK - Already connected to vpnserver" 51 | exit 0 52 | fi 53 | $openvpn --daemon --cd $home_dir --log $log_file --writepid $pid_file --config $conf_file --auth-nocache 54 | echo -n "Connecting to vpnserver" 55 | attempt=1 56 | while true 57 | do 58 | route -n | grep -q 10.20. 59 | if [ $? -ne 0 ]; then 60 | sleep 4 61 | echo -n "." 62 | if [ "$attempt" -eq 10 ]; then 63 | echo " FAILED" 64 | kill -9 `cat $pid_file` 65 | exit 1 66 | fi 67 | else 68 | echo " OK" 69 | copy_vpn 70 | check_status 71 | exit 0 72 | fi 73 | attempt=$(expr $attempt + 1) 74 | done 75 | ;; 76 | 77 | off) 78 | route -n | grep -q 10.20. 79 | if [ $? -ne 0 ]; then 80 | echo "Already disconnected to vpnserver" 81 | exit 0 82 | fi 83 | kill -9 `cat $pid_file` 84 | echo "Disconnected to vpnserver" 85 | copy_local 86 | kill_status 87 | ;; 88 | 89 | log) 90 | tailf $log_file 91 | ;; 92 | 93 | *) 94 | route -n | grep -q 10.20. 95 | if [ $? 
-ne 0 ]; then 96 | echo "Disconnected to vpnserver" 97 | copy_local 98 | else 99 | echo "OK - Connected to vpnserver" 100 | copy_vpn 101 | fi 102 | ;; 103 | esac 104 | -------------------------------------------------------------------------------- /scripts/processes_cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # To kill the running processes older than 3 days 4 | 5 | days_keep=3 6 | days_keep_in_seconds=$((${days_keep}*24*3600)) 7 | 8 | echo "DATE: $(date)" 9 | 10 | pid_items=$(/bin/ps -eo pid,args | grep -E '/path/to/program|ps.string.for.Program' | grep -vw grep | awk '{print $1}' | xargs) 11 | for pid in ${pid_items}; do 12 | info_started=$(/bin/ps -p ${pid} -o lstart | grep -vw STARTED) 13 | info_user=$(/bin/ps -p ${pid} -o user | grep -vw USER) 14 | info_mem_k=$(/bin/ps -p ${pid} -o rss | grep -vw RSS) 15 | info_mem_g=$(echo "scale=2;${info_mem_k}/1024/1024" | /usr/bin/bc | sed 's/^\./0./') 16 | info_elapsed_in_seconds=$(/bin/ps -p ${pid} -o etime | grep -vw ELAPSED | tr '-' ':' | awk -F: '{total=0; m=1;} {for (i=0;i=2?24:60}} {print total}') 17 | if [[ ${info_elapsed_in_seconds} -gt ${days_keep_in_seconds} ]]; then 18 | echo "KILLED: USER:'${info_user}' PID:'${pid}' MEM:'${info_mem_g}G' STARTED:'${info_started}'" 19 | /bin/kill -9 ${pid} 20 | else 21 | echo "PASSED: USER:'${info_user}' PID:'${pid}' MEM:'${info_mem_g}G' STARTED:'${info_started}'" 22 | fi 23 | done 24 | -------------------------------------------------------------------------------- /scripts/rename_mysqldb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2013 Percona LLC and/or its affiliates 3 | 4 | USER="" 5 | PASS="" 6 | 7 | set -e 8 | if [ -z "$3" ]; then 9 | echo "${0} " 10 | exit 1 11 | fi 12 | db_exists=`mysql -u$USER -p$PASS -h $1 -e "show databases like '$3'" -sss` 13 | if [ -n "$db_exists" ]; then 14 | echo "ERROR: New database already exists $3" 
15 | exit 1 16 | fi 17 | TIMESTAMP=`date +%s` 18 | character_set=`mysql -u$USER -p$PASS -h $1 -e "show create database $2\G" -sss | grep ^Create | awk -F'CHARACTER SET ' '{print $2}' | awk '{print $1}'` 19 | TABLES=`mysql -u$USER -p$PASS -h $1 -e "select TABLE_NAME from information_schema.tables where table_schema='$2' and TABLE_TYPE='BASE TABLE'" -sss` 20 | STATUS=$? 21 | if [ "$STATUS" != 0 ] || [ -z "$TABLES" ]; then 22 | echo "Error retrieving tables from $2" 23 | exit 1 24 | fi 25 | echo "create database $3 DEFAULT CHARACTER SET $character_set" 26 | mysql -u$USER -p$PASS -h $1 -e "create database $3 DEFAULT CHARACTER SET $character_set" 27 | TRIGGERS=`mysql -u$USER -p$PASS -h $1 $2 -e "show triggers\G" | grep Trigger: | awk '{print $2}'` 28 | VIEWS=`mysql -u$USER -p$PASS -h $1 -e "select TABLE_NAME from information_schema.tables where table_schema='$2' and TABLE_TYPE='VIEW'" -sss` 29 | if [ -n "$VIEWS" ]; then 30 | mysqldump -u$USER -p$PASS -h $1 $2 $VIEWS > /tmp/${2}_views${TIMESTAMP}.dump 31 | fi 32 | mysqldump -u$USER -p$PASS -h $1 $2 -d -t -R -E > /tmp/${2}_triggers${TIMESTAMP}.dump 33 | for TRIGGER in $TRIGGERS; do 34 | echo "drop trigger $TRIGGER" 35 | mysql -u$USER -p$PASS -h $1 $2 -e "drop trigger $TRIGGER" 36 | done 37 | for TABLE in $TABLES; do 38 | echo "rename table $2.$TABLE to $3.$TABLE" 39 | mysql -u$USER -p$PASS -h $1 $2 -e "SET FOREIGN_KEY_CHECKS=0; rename table $2.$TABLE to $3.$TABLE" 40 | done 41 | if [ -n "$VIEWS" ]; then 42 | echo "loading views" 43 | mysql -u$USER -p$PASS -h $1 $3 < /tmp/${2}_views${TIMESTAMP}.dump 44 | fi 45 | echo "loading triggers, routines and events" 46 | mysql -u$USER -p$PASS -h $1 $3 < /tmp/${2}_triggers${TIMESTAMP}.dump 47 | TABLES=`mysql -u$USER -p$PASS -h $1 -e "select TABLE_NAME from information_schema.tables where table_schema='$2' and TABLE_TYPE='BASE TABLE'" -sss` 48 | if [ -z "$TABLES" ]; then 49 | echo "Dropping database $2" 50 | mysql -u$USER -p$PASS -h $1 $2 -e "drop database $2" 51 | fi 52 | if [ `mysql 
-u$USER -p$PASS -h $1 -e "select count(*) from mysql.columns_priv where db='$2'" -sss` -gt 0 ]; then 53 | COLUMNS_PRIV=" UPDATE mysql.columns_priv set db='$3' WHERE db='$2';" 54 | fi 55 | if [ `mysql -u$USER -p$PASS -h $1 -e "select count(*) from mysql.procs_priv where db='$2'" -sss` -gt 0 ]; then 56 | PROCS_PRIV=" UPDATE mysql.procs_priv set db='$3' WHERE db='$2';" 57 | fi 58 | if [ `mysql -u$USER -p$PASS -h $1 -e "select count(*) from mysql.tables_priv where db='$2'" -sss` -gt 0 ]; then 59 | TABLES_PRIV=" UPDATE mysql.tables_priv set db='$3' WHERE db='$2';" 60 | fi 61 | if [ `mysql -u$USER -p$PASS -h $1 -e "select count(*) from mysql.db where db='$2'" -sss` -gt 0 ]; then 62 | DB_PRIV=" UPDATE mysql.db set db='$3' WHERE db='$2';" 63 | fi 64 | if [ -n "$COLUMNS_PRIV" ] || [ -n "$PROCS_PRIV" ] || [ -n "$TABLES_PRIV" ] || [ -n "$DB_PRIV" ]; then 65 | echo "IF YOU WANT TO RENAME the GRANTS YOU NEED TO RUN ALL OUTPUT BELOW:" 66 | if [ -n "$COLUMNS_PRIV" ]; then echo "$COLUMNS_PRIV"; fi 67 | if [ -n "$PROCS_PRIV" ]; then echo "$PROCS_PRIV"; fi 68 | if [ -n "$TABLES_PRIV" ]; then echo "$TABLES_PRIV"; fi 69 | if [ -n "$DB_PRIV" ]; then echo "$DB_PRIV"; fi 70 | echo " flush privileges;" 71 | fi 72 | -------------------------------------------------------------------------------- /scripts/reposync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | repo_items=( 4 | base,centos/7/os/x86_64 5 | updates,centos/7/updates/x86_64 6 | extras,centos/7/extras/x86_64 7 | centosplus,centos/7/centosplus/x86_64 8 | lux,lux/centos/7 9 | epel,epel/7/x86_64/ 10 | docker-ce-stable,docker-ce/centos/7/x86_64/stable 11 | ) 12 | 13 | repo_path=/var/www/html/repos 14 | 15 | for item in ${repo_items[@]};do 16 | repo_id=$(echo $item|cut -d, -f1) 17 | sub_dir=$(echo $item|cut -d, -f2) 18 | 19 | reposync -l -d -m --repoid=$repo_id --download-metadata --norepopath --download_path=$repo_path/$sub_dir 20 | 21 | cd $repo_path/$sub_dir 22 | if [ -f 
$repo_path/$sub_dir/comps.xml ];then 23 | createrepo $repo_path/$sub_dir/ -g comps.xml 24 | else 25 | createrepo $repo_path/$sub_dir/ 26 | fi 27 | done 28 | -------------------------------------------------------------------------------- /scripts/restrict_s3_bucket_policy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Apply a S3 bucket policy to each S3 bucket 4 | # To limit the access only from specific VPCs and ARNs and SourceIPs 5 | # By Damon Guo at 20220706 6 | 7 | aws_cli="/usr/local/bin/aws" 8 | policy_json_data=' 9 | { 10 | "Version": "2012-10-17", 11 | "Id": "RestrictVPCsARNsSourceIPs", 12 | "Statement": [ 13 | { 14 | "Sid": "VPCsARNsSourceIPs", 15 | "Effect": "Deny", 16 | "Principal": "*", 17 | "Action": "s3:*", 18 | "Resource": [ 19 | "arn:aws:s3:::BUCKET_NAME", 20 | "arn:aws:s3:::BUCKET_NAME/*" 21 | ], 22 | "Condition": { 23 | "Bool": { 24 | "aws:ViaAWSService": "false" 25 | }, 26 | "StringNotEqualsIfExists": { 27 | "aws:SourceVpc": [ 28 | "vpc-857abc857abc875aa", 29 | "vpc-857cba857cba875bb" 30 | ] 31 | }, 32 | "ArnNotLikeIfExists": { 33 | "aws:PrincipalArn": [ 34 | "arn:aws:iam::857857857857:role/*", 35 | "arn:aws:iam::361361361361:role/*" 36 | ] 37 | }, 38 | "NotIpAddressIfExists": { 39 | "aws:SourceIp": [ 40 | "8.5.7.11/32", 41 | "8.5.7.22/32" 42 | ] 43 | } 44 | } 45 | } 46 | ] 47 | } 48 | ' 49 | 50 | bucket_list=$(${aws_cli} s3 ls | awk '{print $NF}' | grep -E '^heylinux|damonguo') 51 | for bucket in ${bucket_list}; do 52 | echo "Updating the bucket policy of s3://${bucket} ..." 
53 | echo "${policy_json_data}" | sed s/BUCKET_NAME/${bucket}/g > /tmp/bucket_policy.json 54 | ${aws_cli} s3api put-bucket-policy --bucket ${bucket} --policy file:///tmp/bucket_policy.json 55 | done 56 | -------------------------------------------------------------------------------- /scripts/restrict_s3_bucket_policy_tags.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Apply S3 bucket policy to the S3 buckets which have specific tags 4 | # To limit the access only from specific VPCs and ARNs and SourceIPs 5 | 6 | aws_cli="/usr/bin/aws" 7 | policy_json_data=' 8 | { 9 | "Version": "2012-10-17", 10 | "Id": "RestrictVPCsARNsSourceIPs", 11 | "Statement": [ 12 | { 13 | "Sid": "VPCsARNsSourceIPs", 14 | "Effect": "Deny", 15 | "Principal": "*", 16 | "Action": "s3:*", 17 | "Resource": [ 18 | "arn:aws:s3:::BUCKET_NAME", 19 | "arn:aws:s3:::BUCKET_NAME/*" 20 | ], 21 | "Condition": { 22 | "Bool": { 23 | "aws:ViaAWSService": "false" 24 | }, 25 | "StringNotEqualsIfExists": { 26 | "aws:SourceVpc": [ 27 | "vpc-xxxxxxxxxxxxxxxxa", 28 | "vpc-xxxxxxxxxxxxxxxxb" 29 | ] 30 | }, 31 | "ForAllValues:ArnNotLike": { 32 | "aws:PrincipalArn": [ 33 | "arn:aws:iam::xxxxxxxxxxxa:role/XxxxXxxc", 34 | "arn:aws:iam::xxxxxxxxxxxb:role/XxxxXxxc", 35 | "arn:aws:iam::xxxxxxxxxxxa:role/Xxxd*", 36 | "arn:aws:iam::xxxxxxxxxxxb:role/Xxxd*" 37 | ] 38 | }, 39 | "NotIpAddressIfExists": { 40 | "aws:SourceIp": [ 41 | "1.1.1.1/32", 42 | "1.1.1.2/32" 43 | ] 44 | } 45 | } 46 | } 47 | ] 48 | } 49 | ' 50 | 51 | bucket_list=$(${aws_cli} s3 ls | awk '{print $NF}') 52 | for bucket in ${bucket_list}; do 53 | if $(${aws_cli} s3api get-bucket-tagging --bucket ${bucket} 2>/dev/null | grep -w "Xxxx" -A1 | grep -Ewq "Xxxa|Xxxb"); then 54 | echo "Updating the bucket policy of s3://${bucket} ..."
55 | echo "${policy_json_data}" | sed s/BUCKET_NAME/${bucket}/g > /tmp/bucket_policy.json 56 | ${aws_cli} s3api put-bucket-policy --bucket ${bucket} --policy file:///tmp/bucket_policy.json 57 | fi 58 | done 59 | -------------------------------------------------------------------------------- /scripts/s3fs_mountpoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | log_file=/opt/data/logs/mountpoint.txt 4 | 5 | touch $log_file 6 | chmod 644 $log_file 7 | 8 | bucket_list=$(df -hP 2>&1 | grep -i "transport endpoint is not connected" | awk -F : '{print $2}' | sort | uniq | sed -n "s/\ \‘\//\//p" | sed -n "s/\’//p" | xargs) 9 | for bucket in ${bucket_list};do 10 | num=$(ps aux | grep -c ${bucket}) 11 | num_retry=$(($num+6)) 12 | for n in $(seq $num_retry); do 13 | /bin/umount -f $bucket 2>&1 | tee -a $log_file 14 | done 15 | echo "Mountpoint: $bucket was not running well and remounted at $(date)" | tee -a $log_file 16 | done 17 | 18 | for i in $(cat /etc/fstab | grep s3fs | awk '{print $2}' | awk -F / '{print $NF}'); do 19 | if ! $(df -hP | grep -q $i); then 20 | echo "Mountpoint: $i was lost and remounted at $(date)" | tee -a $log_file 21 | s3fs $i /opt/data/sync/$i -o rw,allow_other,use_path_request_style,nonempty,url=https://s3-ap-east-1.amazonaws.com,dev,suid 22 | fi 23 | done 24 | 25 | if [ !
-z "${bucket_list}" ]; then 26 | sleep 30 27 | systemctl restart lsyncd 28 | echo "Restarted lsyncd at $(date)" | tee -a $log_file 29 | fi 30 | -------------------------------------------------------------------------------- /scripts/s3usage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | bucket_list=$(s3cmd ls|awk '{print $NF}' |xargs) 4 | 5 | for bucket in $bucket_list; do 6 | bucket_name=$(echo $bucket |cut -d/ -f3) 7 | command="aws s3api list-objects --bucket $bucket_name --output json --query \"[sum(Contents[].Size)]\" |grep [0-9] |awk '{print \$1}'" 8 | bucket_size=$(eval $command) 9 | bucket_size_mb=$(($bucket_size/1024/1024)) 10 | if [[ ${bucket_size_mb} -gt 1024 ]]; then 11 | bucket_size_gb=$(($bucket_size_mb/1024)) 12 | echo "${bucket_name} ${bucket_size_gb}GB" 13 | else 14 | echo "${bucket_name} ${bucket_size_mb}MB" 15 | fi 16 | done 17 | -------------------------------------------------------------------------------- /scripts/sftp_hdfs/data.txt: -------------------------------------------------------------------------------- 1 | LiveShow_AAID_Cross_Device_HeyLinux_20150105_130229.txt.gz 2 | LiveShow_UDID_S_Cross_Device_HeyLinux_20150105_130158.txt.gz 3 | LiveShow_Cookie_Cross_Device_HeyLinux_20150105_130753.txt.gz 4 | LiveShow_IDFA_Cross_Device_HeyLinux_20150105_130118.txt.gz 5 | -------------------------------------------------------------------------------- /scripts/sftp_hdfs/sftp_hdfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATE=$(date +%Y-%m-%d) 4 | HOST=sftp.heylinux.com 5 | USER=username 6 | PASS=password 7 | PORT=22 8 | LOCAL_PATH=/root/sftp_hdfs/data 9 | REMOTE_PATH=/downloads 10 | HDFS_PATH=/data/downloads 11 | FILTER_STRING=HeyLinux 12 | NUM_FILES_PER_DAY=4 13 | 14 | echo "###########${DATE}###########" 15 | 16 | list=$(lftp -u ${USER},${PASS} -p ${PORT} sftp://${HOST} <&2 19 | exit 1 20 | fi 21 | } 22 | 23 | 
function print_help(){ 24 | #Print help messages then exit 25 | echo "Usage: $0 {create|disable|enable|passwd|sshkey|delete} {username}" >&2 26 | exit 1 27 | } 28 | 29 | function check_usergroup(){ 30 | #Create usergroup if NOT exist 31 | cut -d : -f 1 $groupfile | grep -wq $groupname 32 | if [ $? -ne 0 ];then 33 | groupadd $groupname 34 | fi 35 | } 36 | 37 | function check_homedir(){ 38 | #Create homedir if NOT exist 39 | if [ ! -d "$homedir" ];then 40 | mkdir $homedir 41 | fi 42 | } 43 | 44 | function check_username_exist(){ 45 | #Check if user already exist 46 | cut -d : -f 1 $userfile | grep -wq $username 47 | if [ $? -eq 0 ];then 48 | echo "User $username ALREADY exist." && exit 49 | fi 50 | } 51 | 52 | function check_username_notexist() { 53 | #Check if user not exist 54 | cut -d : -f 1 $userfile | grep -wq $username 55 | if [ $? -ne 0 ];then 56 | echo "User $username NOT exist." && exit 57 | fi 58 | } 59 | 60 | function check_user_disabled(){ 61 | #Check if user ALREADY disabled 62 | lockfile=$homedir/$username/sftpuser.locked 63 | if [ -a "$lockfile" ]; then 64 | echo "User $username ALREADY disabled." && exit 65 | fi 66 | } 67 | 68 | function update_sshkey(){ 69 | #Get the sshkey 70 | echo -n "Input ssh public key: " 71 | read sshkey 72 | #Check if sshkey is empty 73 | if [ -z "$sshkey" ];then 74 | echo "Empty ssh public key." && exit 75 | fi 76 | #Check if sshkey not correct 77 | echo $sshkey | grep -Ewq '^ssh-rsa|^ssh-dss' 78 | if [ $? -ne 0 ];then 79 | echo "String \"ssh-rsa\" or \"ssh-dss\" NOT found." 
&& exit 80 | fi 81 | mkdir $homedir/$username/.ssh 82 | chmod 700 $homedir/$username/.ssh 83 | echo "$sshkey" > $homedir/$username/.ssh/authorized_keys 84 | chmod 600 $homedir/$username/.ssh/authorized_keys 85 | chown -R $username:$groupname $homedir/$username/.ssh 86 | } 87 | 88 | 89 | if [ $# != 2 ];then 90 | print_help 91 | fi 92 | 93 | check_root 94 | check_usergroup 95 | check_homedir 96 | 97 | case "$1" in 98 | 'create') 99 | check_username_exist 100 | useradd -m -d "$homedir/$username" -g $groupname -s $loginshell -c "$username sftp" $username 101 | chmod 755 $homedir/$username 102 | chown $username:$groupname $homedir/$username 103 | if [ $? -eq 0 ]; then 104 | echo "User $username was created." 105 | fi 106 | ;; 107 | 108 | 'disable') 109 | check_username_notexist 110 | passwd -l $username 111 | touch $homedir/$username/sftpuser.locked 112 | authfile=$homedir/$username/.ssh/authorized_keys 113 | if [ -a "$authfile" ]; then 114 | mv $authfile $authfile.disabled 115 | fi 116 | if [ $? -eq 0 ]; then 117 | echo "User $username was disabled." 118 | fi 119 | ;; 120 | 121 | 'enable') 122 | check_username_notexist 123 | passwd -u $username 124 | rm -f $homedir/$username/sftpuser.locked 125 | authfile=$homedir/$username/.ssh/authorized_keys 126 | if [ -a "$authfile.disabled" ]; then 127 | mv $authfile.disabled $authfile 128 | fi 129 | if [ $? -eq 0 ]; then 130 | echo "User $username was enabled." 131 | fi 132 | ;; 133 | 134 | 'delete') 135 | check_username_notexist 136 | echo -n "Delete all the data and account of user $username? [yes|no] " 137 | read yesorno 138 | if [ "$yesorno" == "yes" ];then 139 | userdel -rf $username 140 | if [ $? -eq 0 ]; then 141 | echo "User $username was deleted." 142 | fi 143 | fi 144 | ;; 145 | 146 | 'passwd') 147 | check_username_notexist 148 | check_user_disabled 149 | passwd $username 150 | ;; 151 | 152 | 'sshkey') 153 | check_username_notexist 154 | check_user_disabled 155 | update_sshkey 156 | if [ $? 
-eq 0 ]; then 157 | echo "The sshkey of user $username was updated." 158 | fi 159 | ;; 160 | 161 | *) 162 | print_help 163 | ;; 164 | esac 165 | -------------------------------------------------------------------------------- /scripts/sftpuser/sshd_config_update: -------------------------------------------------------------------------------- 1 | # override default of no subsystems 2 | #Subsystem sftp /usr/libexec/openssh/sftp-server 3 | Subsystem sftp internal-sftp 4 | 5 | Match Group sftpusers 6 | ChrootDirectory /home/sftp/%u 7 | ForceCommand internal-sftp 8 | -------------------------------------------------------------------------------- /scripts/shell_expect_remote/config/commands.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mcsrainbow/shell-scripts/e8a9e87bc661a2dbb07fce41e719597acf07c8dd/scripts/shell_expect_remote/config/commands.txt -------------------------------------------------------------------------------- /scripts/shell_expect_remote/config/commands.txt.example: -------------------------------------------------------------------------------- 1 | cd /var 2 | ls 3 | sleep 5 4 | tail -n 5 log/messages 5 | -------------------------------------------------------------------------------- /scripts/shell_expect_remote/config/hosts.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mcsrainbow/shell-scripts/e8a9e87bc661a2dbb07fce41e719597acf07c8dd/scripts/shell_expect_remote/config/hosts.txt -------------------------------------------------------------------------------- /scripts/shell_expect_remote/config/hosts.txt.example: -------------------------------------------------------------------------------- 1 | 192.168.1.100|22|username|userpasswd|rootpasswd 2 | example.com|6022|username|userpasswd|rootpasswd 3 | -------------------------------------------------------------------------------- 
/scripts/shell_expect_remote/expect_run.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -f 2 | # heylinux.com 3 | 4 | # Check 5 | if { $argc<6 } { 6 | send_user "usage: $argv0 \n" 7 | exit 8 | } 9 | 10 | # TIMEOUT 11 | set timeout 20 12 | 13 | # Login parameters 14 | set server [lindex $argv 0] 15 | set port [lindex $argv 1] 16 | set user [lindex $argv 2] 17 | set passwd [lindex $argv 3] 18 | set rootpasswd [lindex $argv 4] 19 | set cmdfile [ open [lindex $argv 5] ] 20 | 21 | # Logfile 22 | log_file log/run.log 23 | 24 | # Login Server 25 | spawn ssh -p $port $user@$server 26 | 27 | ## Enable this and Disable the "spawn ssh ..." above if you are using ssh_key. 28 | #spawn ssh -i ssh_key/id_rsa -p $port $user@$server 29 | 30 | expect { 31 | "yes/no)?\ " {send "yes\r";exp_continue} 32 | 33 | "*assword:\ " {send "$passwd\r"} 34 | 35 | ## Disable the "*assword:\ ..." above if you are using ssh_key, and Enable this if your ssh_key has passphrase. 
36 | # "id_rsa':\ " {send "$passwd\r"} 37 | } 38 | 39 | # Login as Root 40 | expect "*]$\ " {send "su - root\r"} 41 | expect "*assword:\ " {send "$rootpasswd\r"} 42 | 43 | # Run Commands 44 | expect "*]#\ " { 45 | while {[gets $cmdfile cmd] >= 0} { 46 | send "$cmd\r" 47 | } 48 | } 49 | 50 | # Exit Root 51 | expect "*]#\ " {send "exit\r"} 52 | 53 | # Exit User 54 | expect "*]$\ " {send "exit\r"} 55 | 56 | # Close File 57 | close $cmdfile 58 | 59 | # Exit Expect 60 | expect eof 61 | -------------------------------------------------------------------------------- /scripts/shell_expect_remote/log/run.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mcsrainbow/shell-scripts/e8a9e87bc661a2dbb07fce41e719597acf07c8dd/scripts/shell_expect_remote/log/run.log -------------------------------------------------------------------------------- /scripts/shell_expect_remote/main_shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for i in `cat config/hosts.txt` 4 | do 5 | export server=`echo $i | awk -F "|" '{print $1}'` 6 | export port=`echo $i | awk -F "|" '{print $2}'` 7 | export user=`echo $i | awk -F "|" '{print $3}'` 8 | export passwd=`echo $i | awk -F "|" '{print $4}'` 9 | export rootpasswd=`echo $i | awk -F "|" '{print $5}'` 10 | 11 | export cmdfile="config/commands.txt" 12 | 13 | ./expect_run.exp $server $port $user $passwd $rootpasswd $cmdfile 14 | done 15 | -------------------------------------------------------------------------------- /scripts/shell_expect_remote/ssh_key/id_rsa: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mcsrainbow/shell-scripts/e8a9e87bc661a2dbb07fce41e719597acf07c8dd/scripts/shell_expect_remote/ssh_key/id_rsa -------------------------------------------------------------------------------- /scripts/smartroutes.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Add static routes for all the public IP ranges of the current country to local default gateway, 4 | # to make data only goes through VPN when accessing other countries. 5 | 6 | basedir=$(dirname $0) 7 | country_abbr="CN" 8 | apnic_data_url="http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest" 9 | apnic_data="${basedir}/apnic.data" 10 | subnet_exceptions=( 11 | 8.5.7.0/24 12 | ) # subnets not in apnic_data $country_abbr section, just some examples, change/remove them 13 | 14 | function check_root(){ 15 | if [[ $EUID -ne 0 ]]; then 16 | echo "This script must be run as root" 1>&2 17 | exit 1 18 | fi 19 | } 20 | 21 | function print_help(){ 22 | echo "Usage:" 23 | echo " ${0} {on|off|update|status}" 24 | echo " ${0} [force|exception] {on|off|status}" 25 | } 26 | 27 | function check_data(){ 28 | if [[ ! -f ${apnic_data} ]]; then 29 | update_data 30 | fi 31 | 32 | rawnet_sign=$(grep ${country_abbr} ${apnic_data} |grep ipv4 |head -n 1 |awk -F '|' '{print $4"/"$5}') 33 | rawnet_sign_formatted=$(format_subnet ${rawnet_sign}) 34 | subnet_sign=$(format_subnet_netstat ${rawnet_sign_formatted}) 35 | } 36 | 37 | function check_size(){ 38 | apnic_data_size=$(curl --head -s ${apnic_data_url} |grep Content-Length |awk '{print $2}' |col -b) 39 | apnic_data_size_local=$(ls -l ${apnic_data} |awk '{print $5}') 40 | 41 | if [[ "${apnic_data_size}" != "${apnic_data_size_local}" ]]; then 42 | update_data 43 | else 44 | echo "The APNIC data file is up-to-date" 45 | fi 46 | } 47 | 48 | function update_data(){ 49 | echo "Downloading the latest APNIC data as ${apnic_data}..." 50 | 51 | curl --progress-bar -o ${apnic_data} ${apnic_data_url} 52 | if [[ $? 
-ne 0 ]]; then 53 | exit 1 54 | fi 55 | } 56 | 57 | function format_subnet(){ 58 | local subnet=${1} 59 | subneti=$(echo ${subnet} |cut -d/ -f1) 60 | rawnetm=$(echo ${subnet} |cut -d/ -f2) 61 | subnetm=$(awk -v c=${rawnetm} 'function log2(x){if(x<2)return(pow);pow--;return(log2(x/2))}BEGIN{pow=32;print log2(c)}') 62 | 63 | echo ${subneti}/${subnetm} 64 | } 65 | 66 | function check_status(){ 67 | netstat -rn |grep -Eq "^${subnet_sign}" 68 | if [[ $? -ne 0 ]]; then 69 | echo "SmartRoutes is OFF" 70 | else 71 | echo "SmartRoutes is ON" 72 | fi 73 | 74 | if [[ ! -z "${subnet_exceptions[0]}" ]]; then 75 | subnet_exception_sign=$(format_subnet_netstat ${subnet_exceptions[0]}) 76 | 77 | netstat -rn |grep -Eq "^${subnet_exception_sign}" 78 | if [[ $? -ne 0 ]]; then 79 | echo "SmartRoutes Exception is OFF" 80 | else 81 | echo "SmartRoutes Exception is ON" 82 | fi 83 | fi 84 | } 85 | 86 | function add_routes(){ 87 | oldgw=$(netstat -nr |grep '^default' |grep -v 'ppp' |sed 's/default *\([0-9\.]*\) .*/\1/' |grep -Ev '^$') 88 | dscacheutil -flushcache 89 | 90 | all_subs=$(grep ${country_abbr} ${apnic_data} |grep ipv4 |awk -F '|' '{print $4"/"$5}') 91 | sum_subs=$(grep ${country_abbr} ${apnic_data} |grep ipv4 |wc -l |awk '{print $NF}') 92 | local pos_subs=0 93 | for subnet in ${all_subs}; do 94 | subnet_formatted=$(format_subnet ${subnet}) 95 | route add ${subnet_formatted} "${oldgw}" > /dev/null 96 | let pos_subs+=1 97 | if [[ ${pos_subs} -eq ${sum_subs} ]]; then 98 | echo -ne "Adding the routes..." 99 | else 100 | echo -ne "Adding the routes... 
${pos_subs}/${sum_subs}\033[0K\r" 101 | fi 102 | done 103 | echo " Done " # more blank spaces added to cover all previous output 104 | } 105 | 106 | function del_routes(){ 107 | all_subs=$(grep ${country_abbr} ${apnic_data} |grep ipv4 |awk -F '|' '{print $4"/"$5}') 108 | sum_subs=$(grep ${country_abbr} ${apnic_data} |grep ipv4 |wc -l |awk '{print $NF}') 109 | local pos_subs=0 110 | for subnet in ${all_subs}; do 111 | subnet_formatted=$(format_subnet ${subnet}) 112 | route delete ${subnet_formatted} > /dev/null 113 | let pos_subs+=1 114 | if [[ ${pos_subs} -eq ${sum_subs} ]]; then 115 | echo -ne "Deleting the routes..." 116 | else 117 | echo -ne "Deleting the routes... ${pos_subs}/${sum_subs}\033[0K\r" 118 | fi 119 | done 120 | echo " Done " # more blank spaces added to cover all previous output 121 | } 122 | 123 | function run_smartroutes(){ 124 | netstat -rn |grep -Eq "^${subnet_sign}" 125 | if [[ $? -ne 0 ]]; then 126 | add_routes 127 | else 128 | echo "SmartRoutes is already ON" 129 | fi 130 | } 131 | 132 | function del_smartroutes(){ 133 | netstat -rn |grep -Eq "^${subnet_sign}" 134 | if [[ $? -eq 0 ]]; then 135 | del_routes 136 | else 137 | echo "SmartRoutes is already OFF" 138 | fi 139 | } 140 | 141 | function format_subnet_netstat(){ 142 | local subnet=${1} 143 | a=$(echo ${subnet} |cut -d/ -f1 |cut -d. -f1) 144 | b=$(echo ${subnet} |cut -d/ -f1 |cut -d. -f2) 145 | c=$(echo ${subnet} |cut -d/ -f1 |cut -d. -f3) 146 | d=$(echo ${subnet} |cut -d/ -f1 |cut -d. -f4) 147 | m=$(echo ${subnet} |cut -d/ -f2) 148 | 149 | if [[ $m -gt 24 ]]; then 150 | echo "$a.$b.$c.$d/$m" 151 | elif [[ $m -le 24 ]] && [[ $m -gt 16 ]]; then 152 | echo "$a.$b.$c/$m" 153 | elif [[ $m -le 16 ]] && [[ $m -gt 8 ]]; then 154 | echo "$a.$b/$m" 155 | elif [[ $m -le 8 ]]; then 156 | echo "$a/$m" 157 | fi 158 | } 159 | 160 | function add_exception(){ 161 | if [[ ! 
-z "${subnet_exceptions[0]}" ]]; then 162 | subnet_exception_sign=$(format_subnet_netstat ${subnet_exceptions[0]}) 163 | 164 | netstat -rn |grep -Eq "^${subnet_exception_sign}" 165 | if [[ $? -ne 0 ]]; then 166 | oldgw=$(netstat -nr |grep '^default' |grep -v 'ppp' |sed 's/default *\([0-9\.]*\) .*/\1/' |grep -Ev '^$') 167 | dscacheutil -flushcache 168 | 169 | echo -n "Adding the routes..." 170 | for subnet_exception in ${subnet_exceptions[@]}; do 171 | route add ${subnet_exception} "${oldgw}" > /dev/null 172 | done 173 | echo " Done" 174 | else 175 | echo "SmartRoutes Exception is already ON" 176 | fi 177 | fi 178 | } 179 | 180 | function del_exception(){ 181 | if [[ ! -z "${subnet_exceptions[0]}" ]]; then 182 | subnet_exception_sign=$(format_subnet_netstat ${subnet_exceptions[0]}) 183 | 184 | netstat -rn |grep -Eq "^${subnet_exception_sign}" 185 | if [[ $? -ne 0 ]]; then 186 | echo "SmartRoutes Exception is already OFF" 187 | else 188 | echo -n "Deleting the routes..." 189 | for subnet_exception in ${subnet_exceptions[@]}; do 190 | route delete ${subnet_exception} > /dev/null 191 | done 192 | echo " Done" 193 | fi 194 | fi 195 | } 196 | 197 | check_root 198 | check_data 199 | case $1 in 200 | on) 201 | run_smartroutes 202 | ;; 203 | off) 204 | del_smartroutes 205 | ;; 206 | update) 207 | check_size 208 | ;; 209 | status) 210 | check_status 211 | ;; 212 | force) 213 | case $2 in 214 | on) 215 | add_routes 216 | ;; 217 | off) 218 | del_routes 219 | ;; 220 | *) 221 | print_help 222 | ;; 223 | esac 224 | ;; 225 | exception) 226 | case $2 in 227 | on) 228 | add_exception 229 | ;; 230 | off) 231 | del_exception 232 | ;; 233 | *) 234 | print_help 235 | ;; 236 | esac 237 | ;; 238 | *) 239 | print_help 240 | ;; 241 | esac 242 | -------------------------------------------------------------------------------- /scripts/sync_ecr_repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | aws_region="us-west-1" 4 | 
aws_cli="/usr/bin/aws --region ${aws_region}" 5 | image_prefix="dkr.ecr.${aws_region}.amazonaws.com" 6 | 7 | image_list=( 8 | redis:latest 9 | openjdk:8-jdk 10 | ) 11 | 12 | policy_json='{"Version":"2008-10-17", 13 | "Statement": 14 | [{"Sid":"AllowCrossAccountPull", 15 | "Effect":"Allow", 16 | "Principal": 17 | {"AWS":"arn:aws:iam::857857857857:root"}, 18 | "Action": 19 | ["ecr:BatchCheckLayerAvailability", 20 | "ecr:BatchGetImage", 21 | "ecr:GetDownloadUrlForLayer"]}]}' 22 | 23 | function check_params(){ 24 | if [ $# -ne 2 ]; then 25 | echo "Usage: ${0} src_aws_id dst_aws_id" 26 | echo " ${0} 233233233233 857857857857" 27 | exit 1 28 | else 29 | src_aws_id=${1} 30 | dst_aws_id=${2} 31 | docker_password=$(${aws_cli} ecr get-login-password --region ${aws_region}) 32 | fi 33 | } 34 | 35 | function pull_ecr_images(){ 36 | docker login --username AWS --password $docker_password ${src_aws_id}.${image_prefix} 37 | for image in ${image_list[@]}; do 38 | docker pull ${src_aws_id}.${image_prefix}/${image} 39 | done 40 | } 41 | 42 | function push_ecr_images(){ 43 | docker login --username AWS --password $docker_password ${dst_aws_id}.${image_prefix} 44 | repo_list=$(${aws_cli} ecr describe-repositories --output text | awk '($1=="REPOSITORIES"){print $6}') 45 | for image in ${image_list[@]}; do 46 | repo_name=$(echo $image | cut -d: -f1) 47 | if ! 
$(echo ${repo_list} | grep -wq "${repo_name}"); then 48 | ${aws_cli} ecr create-repository --repository-name ${repo_name} 49 | ${aws_cli} ecr set-repository-policy --repository-name ${repo_name} --policy-text "${policy_json}" 50 | fi 51 | docker tag ${src_aws_id}.${image_prefix}/${image} ${dst_aws_id}.${image_prefix}/${image} 52 | docker push ${dst_aws_id}.${image_prefix}/${image} 53 | done 54 | } 55 | 56 | check_params $@ 57 | pull_ecr_images 58 | push_ecr_images 59 | -------------------------------------------------------------------------------- /scripts/try_lottery/my_balls.txt: -------------------------------------------------------------------------------- 1 | 1 2 | 5 3 | 10 4 | 15 5 | 16 6 | 26 7 | 9 8 | -------------------------------------------------------------------------------- /scripts/try_lottery/try_lottery.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Let's see how lucky you are 4 | # 5 | # The rule is same as a lottery in China: 6 | # The red balls are between 1 to 33 7 | # The blue ball is between 1 to 16 8 | # A ticket has 6 red balls and 1 blue ball 9 | # If you predict them all you can get 500W RMB 10 | # 11 | # Just write down your balls in file "my_balls.txt": 12 | # each ball in a single line 13 | # red balls first, from small to large 14 | # then put the blue ball at the bottom line 15 | # 16 | # Run this script, it will stop when you get the money 17 | 18 | # Check the balls in file my_balls.txt 19 | function check_my_balls(){ 20 | # Check the file 21 | if [ ! -f "my_balls.txt" ]; then 22 | echo "The file my_balls.txt does not exist." 23 | exit 1 24 | fi 25 | # Check how many balls in the file 26 | MY_BALLS_NUMBER=`wc -l my_balls.txt | awk '{print $1}'` 27 | if [ $MY_BALLS_NUMBER -ne 7 ]; then 28 | echo "Just put 7 balls in my_balls.txt." 
29 | exit 1 30 | fi 31 | # Check if balls are not integers or begin with 0 32 | for BALL in `cat my_balls.txt` 33 | do 34 | echo $BALL | grep "[^0-9]" 35 | if [ $? -eq 0 ]; then 36 | echo "BALL: $BALL is not an integer." 37 | exit 1 38 | fi 39 | echo $BALL | grep "[0][0-9]" 40 | if [ $? -eq 0 ]; then 41 | echo "Don't put 0 before numbers 1-9." 42 | exit 1 43 | fi 44 | done 45 | # Check if red balls are greater than 33 46 | for BALL in `head -n 6 my_balls.txt` 47 | do 48 | if [ $BALL -gt 33 ]; then 49 | echo "RED BALL: $BALL is greater than 33." 50 | exit 1 51 | fi 52 | done 53 | # Check if blue ball is greater than 16 54 | LAST_BALL=`tail -n 1 my_balls.txt` 55 | if [ $LAST_BALL -gt 16 ]; then 56 | echo "BLUE BALL: $LAST_BALL is greater than 16." 57 | exit 1 58 | fi 59 | } 60 | 61 | # Generate a red ball 62 | function get_red_ball(){ 63 | RED_BALL=`expr $RANDOM % 33 + 1` 64 | for EXISTING_BALL in `cat red_balls.txt` ; do 65 | if [ $EXISTING_BALL == $RED_BALL ]; then 66 | return 67 | fi 68 | done 69 | echo "RED: $RED_BALL" 70 | echo $RED_BALL >> red_balls.txt 71 | } 72 | 73 | # Generate all red balls 74 | function get_all_red_balls(){ 75 | RED_BALL_NUMBER=`wc -l red_balls.txt | awk '{print $1}'` 76 | MAX_NUMBER=6 77 | while [[ ${RED_BALL_NUMBER} -lt ${MAX_NUMBER} ]] 78 | do 79 | get_red_ball 80 | RED_BALL_NUMBER=`wc -l red_balls.txt | awk '{print $1}'` 81 | done 82 | } 83 | 84 | # Generate blue ball 85 | function get_blue_ball(){ 86 | BLUE_BALL=`expr $RANDOM % 16 + 1` 87 | echo "BLUE: $BLUE_BALL" 88 | echo $BLUE_BALL > blue_ball.txt 89 | } 90 | 91 | # Sort all balls and put them together 92 | function sort_balls(){ 93 | cat red_balls.txt | sort -n >> sorted_balls.txt 94 | cat blue_ball.txt >> sorted_balls.txt 95 | # Clean red balls 96 | cat /dev/null > red_balls.txt 97 | } 98 | 99 | ############################### 100 | # Begin to generate the balls # 101 | ############################### 102 | # Check the balls in my_balls.txt 103 | check_my_balls 104 | # Create an 
empty file - red_balls.txt 105 | touch red_balls.txt 106 | # Get the MD5SUM value of my_balls.txt 107 | MY_BALLS_MD5SUM=`md5sum my_balls.txt | awk '{print $1}'` 108 | # Initialize the times with 1 109 | TIMES=1 110 | # Start 111 | while true 112 | do 113 | get_all_red_balls 114 | get_blue_ball 115 | sort_balls 116 | # Check the MD5SUM values of my_balls.txt and sorted_balls.txt 117 | SORTED_BALLS_MD5SUM=`md5sum sorted_balls.txt | awk '{print $1}'` 118 | if [ $MY_BALLS_MD5SUM == $SORTED_BALLS_MD5SUM ]; then 119 | echo "You finally got 500W RMB after you bought this $TIMES times!" 120 | exit 0 121 | fi 122 | # Display the number of times it ran 123 | echo "You have tried $TIMES times." 124 | TIMES=`expr $TIMES + 1` 125 | # Clean all balls 126 | cat /dev/null > sorted_balls.txt 127 | done 128 | -------------------------------------------------------------------------------- /scripts/xcp_bootstrap/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Bootstrap Script for Hostname,Network... 
4 | # 5 | # Author: Dong Guo 6 | # Last Modified: 2013/10/24 by Dong Guo 7 | 8 | options=$(cat /proc/cmdline|sed 's/.*rhgb quiet //g') 9 | config=/etc/sysconfig/network-scripts/ifcfg-eth0 10 | failed=/root/bootstrap.failed 11 | 12 | function check_root(){ 13 | if [ $EUID -ne 0 ]; then 14 | echo "This script must be run as root" 15 | exit 1 16 | fi 17 | } 18 | 19 | function configure_os(){ 20 | echo "DEVICE=eth0" > $config 21 | echo "ONBOOT=yes" >> $config 22 | echo "BOOTPROTO=none" >> $config 23 | 24 | for i in $options 25 | do 26 | option=$(echo $i|cut -d "=" -f 1) 27 | value=$(echo $i|cut -d "=" -f 2) 28 | if [ "${option:0:1}" = "_" ]; then 29 | case "$option" in 30 | _hostname) 31 | oldname=$(hostname) 32 | newname=$value 33 | sed -i s/"$oldname"/"$newname"/g /etc/sysconfig/network 34 | hostname $newname 35 | ;; 36 | _ipaddr) 37 | echo "IPADDR=$value" >> $config 38 | ;; 39 | _netmask) 40 | echo "NETMASK=$value" >> $config 41 | ;; 42 | _gateway) 43 | echo "GATEWAY=$value" >> $config 44 | ;; 45 | esac 46 | fi 47 | done 48 | } 49 | 50 | function restart_network(){ 51 | /etc/init.d/network restart 52 | } 53 | 54 | function check_status(){ 55 | gateway=$(grep -w GATEWAY $config|cut -d "=" -f 2) 56 | route -n | grep -wq $gateway 57 | if [ $? 
-eq 0 ]; then 58 | sed -i /bootstrap/d /etc/rc.local 59 | if [ -a $failed ]; then 60 | rm -f $failed 61 | fi 62 | else 63 | touch $failed 64 | fi 65 | } 66 | 67 | check_root 68 | configure_os 69 | restart_network 70 | check_status 71 | -------------------------------------------------------------------------------- /scripts/xcp_bootstrap/configure.txt: -------------------------------------------------------------------------------- 1 | [root@xcphost ~]# xe vm-param-set uuid=c77040ae-3a50-9217-ff03-41992c34d1ec HVM-boot-policy="" 2 | [root@xcphost ~]# xe vm-param-set uuid=c77040ae-3a50-9217-ff03-41992c34d1ec PV-bootloader="pygrub" 3 | [root@xcphost ~]# xe vm-param-set uuid=c77040ae-3a50-9217-ff03-41992c34d1ec PV-args="_hostname=xcpvm _ipaddr=192.168.1.121 _netmask=255.255.255.0 _gateway=192.168.1.1" 4 | 5 | [root@xcpvm ~]# cat /proc/cmdline 6 | ro root=/dev/mapper/vg_t-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD SYSFONT=latarcyrheb-sun16 rd_LVM_LV=vg_t/lv_root crashkernel=129M@0M KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet _hostname=xcpvm _ipaddr=192.168.1.121 _netmask=255.255.255.0 _gateway=192.168.1.1 7 | -------------------------------------------------------------------------------- /scripts/xcp_extendlv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | disk=/dev/xvda 4 | num=3 5 | oldsize=12G 6 | failed=/root/extendlv.failed 7 | rebooted=/root/extendlv.rebooted 8 | 9 | function check_root(){ 10 | if [ $EUID -ne 0 ]; then 11 | echo "This script must be run as root" 12 | exit 1 13 | fi 14 | } 15 | 16 | function extend_lv(){ 17 | echo "root filesystem:" 18 | df -hP / | grep -v Filesystem 19 | 20 | if [ ! 
-f "${rebooted}" ]; then
        # First pass: create partition ${num} with default start/end sectors
        # (the two blank lines accept the defaults), then reboot so the
        # kernel rereads the partition table.  A here-doc is clearer and
        # less fragile than the original echo -e pipeline.
        fdisk "${disk}" <<EOF
n
p
${num}


w
q
EOF
        touch "${rebooted}"
        reboot
        exit 1
    fi

    # Second pass (after reboot): derive VG/LV names from the root mount's
    # device-mapper path, e.g. /dev/mapper/vg_t-lv_root -> vg_t / lv_root.
    vg=$(df -h | grep root | cut -d/ -f4 | cut -d- -f1)
    lv=$(df -h | grep root | cut -d/ -f4 | cut -d- -f2)

    echo "resizing ${vg}-${lv}"
    pvcreate "${disk}${num}"
    pvresize "${disk}${num}"
    vgextend "${vg}" "${disk}${num}"
    # Free PE count of the VG; used to extend the LV by all free extents.
    free=$(vgdisplay | grep Free | awk '{print $5}')
    lvextend -l +"${free}" "/dev/${vg}/${lv}"
    resize2fs "/dev/mapper/${vg}-${lv}"

    echo "new root filesystem:"
    df -hP / | grep -v Filesystem
}

# Success test: root filesystem size changed from the old size.
function check_status(){
    root_size=$(df -hP / | grep -v Filesystem | awk '{print $2}')
    if [ "${root_size}" != "${oldsize}" ]; then
        sed -i /extendlv/d /etc/rc.local
        if [ -f "${failed}" ]; then
            rm -f "${failed}"
        fi
    else
        touch "${failed}"
    fi
}

check_root
extend_lv
check_status
# ---------- /scripts/xcp_getvnc.sh ----------
#!/bin/bash

vm=$1
# Fix: quote ${vm} — an empty/whitespace argument breaks the unquoted test.
if [ -z "${vm}" ]; then
    echo "Usage: $0 vm_name"
    echo "VMs found:"
    xl list-vm | awk '{print $3}' | grep -vw name
    exit 1
fi

# Validate the VM name against xe's inventory (grep -q instead of >/dev/null).
xe vm-list params=name-label name-label="${vm}" | grep -q "${vm}"
if [ $?
-gt 0 ]; then
    echo "Error: invalid VM name"
    exit 1
fi

# Resolve where the VM runs, its domain id, the VNC port xenstore assigned,
# and the management IP of the hosting server.
host=$(xe vm-list params=resident-on name-label="${vm}" | grep resident-on | awk '{print $NF}')
dom=$(xe vm-list params=dom-id name-label="${vm}" | grep dom-id | awk '{print $NF}')
port=$(xenstore-read "/local/domain/${dom}/console/vnc-port")
ip=$(xe pif-list management=true params=IP host-uuid="${host}" | awk '{print $NF}')

echo "run this on laptop and connect via vnc to localhost:${port}"
echo "--> ssh -L ${port}:localhost:${port} root@${ip}"
# ---------- /scripts/xcp_ksinstvm/centos-6.4-x86_64-minimal.ks ----------
cmdline
skipx
install
cdrom
lang en_US.UTF-8
keyboard us

network --onboot=yes --device=eth0 --bootproto=static --ip=10.100.1.254 --netmask=255.255.255.0 --gateway=10.100.1.1 --nameserver=10.100.1.2 --noipv6

rootpw password

firewall --disabled
authconfig --enableshadow --passalgo=sha512
selinux --disabled
timezone --utc Etc/UTC

bootloader --location=mbr --driveorder=xvda --append="crashkernel=auto"

zerombr
clearpart --all --initlabel
autopart

reboot

%packages --nobase
@core
%end
# ---------- /scripts/xcp_ksinstvm/ksinstvm.sh ----------
#!/bin/bash
# Dong Guo
# Last Modified: 2013/11/28

# Note:
# The IP address configs in "ks_args" and the remote kickstart file must match.
# The IP address must also be in the same subnet as the current xenserver,
# otherwise the install fails when it has to go through the gateway.

vm_name=t_c64_min
repo_url=http://10.100.1.2/repo/centos/6/
ks_args="ip=10.100.1.254 netmask=255.255.255.0 
gateway=10.100.1.1 ns=10.100.1.10 noipv6 ks=http://10.100.1.2/repo/ks/centos-6.4-x86_64-minimal.ks ksdevice=eth0"
cpu_cores=4
mem_size=8G
disk_size=20G

echo "Creating an empty vm:${vm_name}..."
# Pick the "Local storage" SR that lives on this host.
hostname=$(hostname -s)
sr_uuid=$(xe sr-list | grep -A 2 -B 1 "Local storage" | grep -B 3 -w "${hostname}" | grep uuid | awk -F ": " '{print $2}')
vm_uuid=$(xe vm-install new-name-label="${vm_name}" sr-uuid="${sr_uuid}" template=Other\ install\ media)

echo "Setting up the bootloader,cpu,memory..."
# Fix: quote the command-substituted values handed to xe so an unexpected
# empty/multi-word result cannot be word-split into extra arguments.
xe vm-param-set VCPUs-max="${cpu_cores}" uuid="${vm_uuid}"
xe vm-param-set VCPUs-at-startup="${cpu_cores}" uuid="${vm_uuid}"
xe vm-memory-limits-set uuid="${vm_uuid}" dynamic-min="${mem_size}iB" dynamic-max="${mem_size}iB" static-min="${mem_size}iB" static-max="${mem_size}iB"
# Empty HVM-boot-policy + eliloader = PV install boot (network kickstart).
xe vm-param-set HVM-boot-policy="" uuid="${vm_uuid}"
xe vm-param-set PV-bootloader="eliloader" uuid="${vm_uuid}"

echo "Setting up the kickstart..."
xe vm-param-set other-config:install-repository="${repo_url}" uuid="${vm_uuid}"
xe vm-param-set PV-args="${ks_args}" uuid="${vm_uuid}"

echo "Setting up the disk..."
xe vm-disk-add uuid="${vm_uuid}" sr-uuid="${sr_uuid}" device=0 disk-size="${disk_size}iB"
vbd_uuid=$(xe vbd-list vm-uuid="${vm_uuid}" userdevice=0 params=uuid --minimal)
xe vbd-param-set bootable=true uuid="${vbd_uuid}"

echo "Setting up the network..."
network_uuid=$(xe network-list bridge=xenbr0 --minimal)
xe vif-create vm-uuid="${vm_uuid}" network-uuid="${network_uuid}" mac=random device=0

echo "Starting the vm:${vm_name}"
xe vm-start vm="${vm_name}"
# ---------- /scripts/xcp_monitor.sh ----------
#!/bin/bash

# Print running/halted/suspended VMs of one host plus its free/total
# memory, local-SR disk space and vCPU headroom.
function get_info(){
    host_name=$1
    host_uuid=$(xe host-list | grep -w "${host_name}" -B1 | grep -w uuid | awk '{print $NF}')
    running_vm=$(xe vm-list resident-on="${host_uuid}" is-control-domain=false | grep -w name-label | awk -F ": " '{print $NF}' | sort -n | xargs)
    halted_vm=$(xe vm-list affinity="${host_uuid}" is-control-domain=false power-state=halted | grep -w name-label | awk -F ": " '{print $NF"(halted)"}' | sort -n | xargs)
    suspended_vm=$(xe vm-list affinity="${host_uuid}" is-control-domain=false power-state=suspended | grep -w name-label | awk -F ": " '{print $NF"(suspended)"}' | sort -n | xargs)

    t_mem_b=$(xe host-param-list uuid="${host_uuid}" | grep -w memory-total | awk '{print $NF}')
    f_mem_b=$(xe host-param-list uuid="${host_uuid}" | grep -w memory-free-computed | awk '{print $NF}')
    t_mem_g=$(($t_mem_b/1024/1024/1024))
    f_mem_g=$(($f_mem_b/1024/1024/1024))

    # Collect the names of this host's local (lvm/ext) SRs.
    xe sr-list | grep -A2 -B3 -w "${host_name}" | grep -A1 -B4 -Ew 'lvm|ext' | grep -w name-label | awk -F ': ' '{print $2}' > /tmp/sr_items.tmp
    disk_info=""
    while read sr_name
    do
        # BUG FIX: the original filtered with "$host", a variable that is
        # never set in this script, so disk_uuid was wrong whenever the same
        # SR name existed on several hosts; the host variable is $host_name.
        disk_uuid=$(xe sr-list | grep -A 2 -B 1 "$sr_name" | grep -B 3 -w "$host_name" | grep uuid | awk -F ": " '{print $2}')
        t_disk_b=$(xe sr-param-list uuid="$disk_uuid" | grep physical-size | cut -d : -f 2)
        u_disk_b=$(xe sr-param-list uuid="$disk_uuid" | grep physical-utilisation | cut -d : -f 2)
        f_disk_b=$(($t_disk_b-$u_disk_b))
        t_disk_g=$(($t_disk_b/1024/1024/1024))
        f_disk_g=$(($f_disk_b/1024/1024/1024))
        disk_info="${f_disk_g}/${t_disk_g}G $disk_info"
done < /tmp/sr_items.tmp

    # vCPU headroom = physical cpu_count minus the sum of vCPUs of the
    # VMs currently resident on this host.
    t_cpu_num=$(xe host-param-list uuid="${host_uuid}" | grep -w 'cpu_count' | awk '{print $4}' | cut -d";" -f1)
    v_cpu_sum=0
    for vm in $running_vm
    do
        vm_uuid=$(xe vm-list | grep -B 1 -w "$vm" | head -n 1 | awk -F ": " '{print $2}')
        v_cpu_num=$(xe vm-list params=VCPUs-number uuid="${vm_uuid}" | grep -w VCPUs | awk -F ": " '{print $2}')
        v_cpu_sum=$(($v_cpu_sum+$v_cpu_num))
    done
    f_cpu_num=$(($t_cpu_num-$v_cpu_sum))

    echo -n "Host $host_name: \"$running_vm\" "
    if [ ! -z "$halted_vm" ]; then
        echo -n "\"$halted_vm\" "
    fi
    if [ ! -z "$suspended_vm" ]; then
        echo -n "\"$suspended_vm\" "
    fi
    echo ""
    echo "Available: \"Mem=${f_mem_g}/${t_mem_g}G Disk=${disk_info} CPU=${f_cpu_num}/${t_cpu_num}Cores\""
}

# No argument: report every host in the pool; otherwise just the given host.
if [ $# == 0 ]; then
    host_list=$(xe host-list | grep name-label | awk '{print $4}' | cut -d. -f1 | sort -n)
    for host_name in $host_list
    do
        echo "-------------------------------------------------------"
        get_info "${host_name}"
    done
else
    get_info "$1"
fi
# ---------- /scripts/zcron ----------
#!/bin/bash
#
# Inspired by Check_MK mk-job script
# By Dong Guo at 20170316
#

function print_help() {
    echo "Usage: zcron IDENT PROGRAM [ARGS...]"
    echo ""
    echo "Execute PROGRAM as subprocess while measuring exec_time and return_code information"
    echo "about the running process and writing it to an output file."
    echo "Zabbix checks the job status based on the content of output file via low-level discovery items"
    exit 1
}

if [[ $# -lt 2 ]]; then
    print_help >&2
fi

OUTPUT_PATH=/var/tmp
IDENT=$1
shift

if [[ !
-d "$OUTPUT_PATH" ]]; then
    mkdir -p "$OUTPUT_PATH"
fi

# Fix: quote $1 so a program name containing spaces/globs is tested as-is.
if ! type "$1" >/dev/null 2>&1; then
    echo -e "ERROR: Cannot run $1. Command not found.\n" >&2
    print_help >&2
fi

# Run PROGRAM under /usr/bin/time, appending exec_time/return_code as YAML.
# BUG FIX: "$@" must be quoted — the original unquoted $@ re-split any
# argument that contained whitespace before handing it to the program.
/usr/bin/time -o "${OUTPUT_PATH}/.${IDENT}.running" --append -f "${IDENT}:\n exec_time: %e\n return_code: %x" "$@"
RC=$?

# Publish atomically: rename the hidden in-progress file to the final name
# Zabbix discovers.
mv "${OUTPUT_PATH}/.${IDENT}.running" "${OUTPUT_PATH}/zcron_${IDENT}.yml"
exit $RC