├── .gitattributes ├── .tmux.conf ├── .vimrc ├── .zshrc ├── BSD-License ├── README.md ├── auto-remote-shell ├── LICENSE ├── README.md ├── autossh_script └── install.sh ├── bashrc ├── bluesun-setup ├── bluesun-setup.sh ├── bluesun-wrapper.sh └── server.conf ├── download_file_with_exponential_backoff_function.sh ├── dynamic-dns-route53 ├── LICENSE ├── README.md ├── external.json ├── update-dns-external.conf └── update-dns-external.sh ├── ec2-classic-deprecation-related-command-snippets.txt ├── empty-bash-short.sh ├── find-mount-point-for-dir.sh ├── gather-public-ssh-keys.sh ├── get_server_ssl_certs.sh ├── highlight-debs.sh ├── linux-network-setup └── setup-4g-lte-notes.txt ├── loadtest-commands.sh ├── make-ephemeral-swap.sh ├── make-s3-psurl.py ├── nginx └── http_codes_nginx.conf ├── nuke-instances-utterly.sh ├── page.py ├── pihole ├── env.sh ├── setup-pihole.sh └── update-pihole.sh ├── reset-ssh-keys-list-of-hosts.sh ├── reset-ssh-keys-prefix-suffix.sh ├── retry ├── s3-du.sh ├── set-sysctl-challenge-ack.sh ├── setup-ec2-raid-0.sh ├── setup-new-zsh.sh ├── sshsetup.sh ├── sysctl-config.sh ├── sysctl.conf ├── tags ├── tcpcheck-bulk.py ├── tcpcheck.py ├── test_regular_expression.py ├── test_tls_ciphers.sh ├── upload_ssh_keys_to_ec2.sh ├── zabbix-setup.sh └── zsh_aws_aliases.sh /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | 4 | # Explicitly declare text files you want to always be normalized and converted 5 | # to native line endings on checkout. 
6 | *.vim text 7 | *.md text -------------------------------------------------------------------------------- /.tmux.conf: -------------------------------------------------------------------------------- 1 | # unbind C-b 2 | # set -g prefix C-t 3 | 4 | unbind h 5 | bind h split-window -v 6 | unbind v 7 | bind v split-window -h 8 | 9 | unbind r 10 | bind r source-file ~/.tmux.conf\; display 'Reloaded tmux config' 11 | 12 | set -g mode-keys vi 13 | # Set mouse mode on by default 14 | setw -g mouse on 15 | # Since mouse mode messes with middle click paste, and I love that, toggle w/m 16 | # toggle mouse mode to allow mouse copy/paste, by setting mouse on with prefix m 17 | bind-key -T prefix m set -g mouse\; display 'Mouse: #{?mouse,ON,OFF}' 18 | 19 | # List of plugins 20 | # set -g @plugin 'tmux-plugins/tpm' 21 | # set -g @plugin 'tmux-plugins/tmux-sensible' 22 | # set -g @plugin 'dracula/tmux' 23 | # set -g @dracula-plugins 'cpu-usage ram-usage' 24 | 25 | # Other examples: 26 | # set -g @plugin 'github_username/plugin_name' 27 | # set -g @plugin 'github_username/plugin_name#branch' 28 | # set -g @plugin 'git@github.com:user/plugin' 29 | # set -g @plugin 'git@bitbucket.com:user/plugin' 30 | 31 | # Initialize TMUX plugin manager (keep this line at the very bottom of tmux.conf) 32 | # run '~/.tmux/plugins/tpm/tpm' 33 | -------------------------------------------------------------------------------- /.vimrc: -------------------------------------------------------------------------------- 1 | set nocompatible " Do not care about old versions yo 2 | set tabstop=4 softtabstop=2 3 | set shiftwidth=4 4 | set expandtab 5 | set smartindent 6 | set nowrap 7 | 8 | " neeed this first - curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim 9 | " enable syntax and plugins (for netrw) 10 | syntax enable 11 | filetype plugin on 12 | 13 | " Custom executations for each project, if you open 'vim .' 
project this read .vimrc local dir 14 | set exrc 15 | " Disable fancy cursors 16 | set guicursor= 17 | " Make line numbers relative to current line 18 | set relativenumber 19 | " Set the line number in the relative current line 20 | set number 21 | " No highlighted search after searching 22 | set nohlsearch 23 | " Keeps any buffer you've been editing, you can navigate away from it without saving it 24 | set hidden 25 | " smartcase searches for capital with uppercase 26 | set smartcase 27 | " smartcase works with ignorecase, case-sensitive searching 28 | set ignorecase 29 | 30 | set noswapfile 31 | set nobackup 32 | set undodir=~/.vim/undodir 33 | 34 | " scroll when you are 8 lines away from bottom 35 | set scrolloff=8 36 | 37 | " we set gruvbox instead 38 | " set termguicolors 39 | set noshowmode 40 | set completeopt=menuone,noinsert,noselect 41 | 42 | set colorcolumn=120 43 | " for linting (disabled) 44 | " set signcolumn=yes 45 | 46 | call plug#begin('~/.vim/plugged') 47 | Plug 'vim-scripts/vim-plug' 48 | Plug 'junegunn/seoul256.vim' 49 | Plug 'junegunn/vim-easy-align' 50 | Plug 'scrooloose/nerdtree', { 'on': 'NERDTreeToggle' } 51 | Plug 'tpope/vim-fireplace', { 'for': 'clojure' } 52 | Plug 'gruvbox-community/gruvbox' 53 | Plug 'tpope/vim-fugitive' 54 | Plug 'vim-python/python-syntax' " Python highlighting 55 | Plug 'ap/vim-css-color' " Color previews for CSS 56 | Plug 'powerline/powerline' 57 | Plug 'vim-airline/vim-airline' " https://github.com/vim-airline/vim-airline 58 | Plug 'vim-airline/vim-airline-themes' 59 | Plug 'bling/vim-bufferline' " https://github.com/bling/vim-bufferline 60 | Plug 'vifm/vifm.vim' " Vifm 61 | 62 | "Neovim only? 63 | " Plug 'nvim-lua/telescope.nvim' 64 | " ... 65 | call plug#end() 66 | 67 | colorscheme gruvbox 68 | " none isn't a color? maybe needs NeoVim? 69 | " highlight Normal guibg=none 70 | 71 | " Neovim only? 
72 | " mode lhs rhs 73 | "nnoremap ps 74 | 75 | 76 | " https://github.com/vim-airline/vim-airline 77 | " AirlineTheme solarized 78 | " let g:airline_solarized_bg='dark' 79 | " let g:airline_theme='solarized' 80 | let g:airline_theme='badwolf' 81 | " let g:airline_theme='solarized' 82 | " let g:airline_solarized_bg='dark' 83 | 84 | " Remaps things into functions, start with a space on the ex: line? 85 | let mapleader = " " 86 | 87 | " actions time 88 | " first a function we will call from our auto group auto commands to trim 89 | " whitespace 90 | fun! TrimWhiteSpace() 91 | let l:save = winsaveview() 92 | keeppatterns %s/\s\+$//e 93 | call winrestview(l:save) 94 | endfun 95 | 96 | " auto group of commands 97 | augroup ZOB 98 | " first we clear the listeners, so we don't duplicate and have tons of 99 | " fork madness, wat 100 | autocmd! 101 | autocmd BufWritePre * :call TrimWhiteSpace() 102 | augroup END 103 | 104 | set wcm= 105 | cnoremap ss so $vim/sessions/*.vim 106 | set wildignore=*.o,*~,*.pyc " Ignore compiled files 107 | set cmdheight=2 " Height of the command bar 108 | set incsearch " Makes search act like search in modern browsers 109 | 110 | " coding things 111 | set showmatch " show matching brackets when text indicator is over them 112 | 113 | 114 | set spelllang=en_us 115 | set spell 116 | 117 | 118 | " From YouTube https://www.youtube.com/watch?v=XA2WjJbmmoM 119 | " https://github.com/changemewtf/no_plugins 120 | " FINDING FILES: 121 | " Search down into subfolders 122 | " Provides tab-completion for all file-related tasks 123 | set path+=** 124 | " Display all matching files when we tab complete 125 | set wildmenu 126 | " NOW WE CAN: 127 | " - Hit tab to :find by partial match 128 | " - Use * to make it fuzzy 129 | " THINGS TO CONSIDER: 130 | " - :b lets you autocomplete any open buffer 131 | " TAG JUMPING: 132 | " Create the `tags` file (may need to install ctags first) 133 | command! MakeTags !ctags -R . 
134 | " NOW WE CAN: 135 | " - Use ^] to jump to tag under cursor 136 | " - Use g^] for ambiguous tags 137 | " - Use ^t to jump back up the tag stack 138 | " THINGS TO CONSIDER: 139 | " - This doesn't help if you want a visual list of tags 140 | " AUTOCOMPLETE: 141 | " The good stuff is documented in |ins-completion| 142 | " HIGHLIGHTS: 143 | " - ^x^n for JUST this file 144 | " - ^x^f for filenames (works with our path trick!) 145 | " - ^x^] for tags only 146 | " - ^n for anything specified by the 'complete' option 147 | " NOW WE CAN: 148 | " - Use ^n and ^p to go back and forth in the suggestion list 149 | 150 | 151 | " FILE BROWSING: 152 | " Tweaks for browsing 153 | let g:netrw_banner=0 " disable annoying banner 154 | let g:netrw_browse_split=4 " open in prior window 155 | let g:netrw_altv=1 " open splits to the right 156 | let g:netrw_liststyle=3 " tree view 157 | let g:netrw_list_hide=netrw_gitignore#Hide() 158 | let g:netrw_list_hide.=',\(^\|\s\s\)\zs\.\S\+' 159 | " NOW WE CAN: 160 | " - :edit a folder to open a file browser 161 | " - /v/t to open in an h-split/v-split/tab 162 | " - check |netrw-browse-maps| for more mappings 163 | 164 | 165 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 166 | " Merging in some changes from DistroTube https://www.youtube.com/DistroTube 167 | " per https://www.youtube.com/watch?v=Zir28KFCSQw 168 | " https://gitlab.com/dwt1/dotfiles/-/blob/master/.vimrc 169 | 170 | " => NERDTree 171 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 172 | " Uncomment to autostart the NERDTree 173 | " autocmd vimenter * NERDTree 174 | map :NERDTreeToggle 175 | let g:NERDTreeDirArrowExpandable = '►' 176 | let g:NERDTreeDirArrowCollapsible = '▼' 177 | let NERDTreeShowLineNumbers=1 178 | let NERDTreeShowHidden=1 179 | let NERDTreeMinimalUI = 1 180 | let g:NERDTreeWinSize=38 181 | 182 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 183 | " => Vifm - file manager, kinda meh, should disable 184 | 
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 185 | map vv :Vifm 186 | map vs :VsplitVifm 187 | map sp :SplitVifm 188 | map dv :DiffVifm 189 | map tv :TabVifm 190 | 191 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 192 | " => Splits and Tabbed Files 193 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 194 | set splitbelow splitright 195 | 196 | " Remap splits navigation to just CTRL + hjkl 197 | nnoremap h 198 | nnoremap j 199 | nnoremap k 200 | nnoremap l 201 | 202 | " Make adjusing split sizes a bit more friendly 203 | noremap :vertical resize +3 204 | noremap :vertical resize -3 205 | noremap :resize +3 206 | noremap :resize -3 207 | 208 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 209 | " => Open terminal inside Vim 210 | """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" 211 | " map tt :vnew term://zsh 212 | " map tt :vnew term://zsh 213 | " let &shell='/bin/zsh -i' 214 | autocmd vimenter * let &shell='/bin/zsh -i' 215 | 216 | " Change 2 split windows from vert to horiz or horiz to vert 217 | map th tH 218 | map tk tK 219 | 220 | " Removes pipes | that act as seperators on splits 221 | set fillchars+=vert:\ 222 | 223 | 224 | 225 | " set t_Co=256 " Set if term supports 256 colors. 226 | " Always show statusline 227 | " set laststatus=2 228 | " Uncomment to prevent non-normal modes showing in powerline and below powerline. 
229 | " set noshowmode 230 | 231 | 232 | """ powerline but no worky https://linuxconfig.org/introduction-to-powerline-the-statusline-plugin-for-vim 233 | "python3 from powerline.vim import setup as powerline_setup 234 | "python3 powerline_setup() 235 | "python3 del powerline_setup 236 | 237 | 238 | -------------------------------------------------------------------------------- /.zshrc: -------------------------------------------------------------------------------- 1 | ################################## 2 | # Author : Jon Zobrist 3 | # Homepage : http://www.jonzobrist.com 4 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 5 | # Copyright (c) 2019, Jon Zobrist 6 | # All rights reserved. 7 | # 8 | # Redistribution and use in source and binary forms, with or without 9 | # modification, are permitted provided that the following conditions are met: 10 | # 11 | # 1. Redistributions of source code must retain the above copyright notice, this 12 | # list of conditions and the following disclaimer. 13 | # 2. Redistributions in binary form must reproduce the above copyright notice, 14 | # this list of conditions and the following disclaimer in the documentation 15 | # and/or other materials provided with the distribution. 16 | # 17 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | # 28 | ################################## 29 | # .zshrc is sourced in interactive shells. 30 | # It should contain commands to set up aliases, 31 | # functions, options, key bindings, etc. 32 | # 33 | # To get this working to the max do these steps after installing ZSH 34 | # 35 | # Put this file (.zshrc) in your home dir 36 | # $ curl -o ${HOME}/.zshrc https://raw.githubusercontent.com/jonzobrist/Bash-Admin-Scripts/master/.zshrc 37 | # Setup zpresto from https://github.com/sorin-ionescu/prezto 38 | # $ git clone --recursive https://github.com/sorin-ionescu/prezto.git "${ZDOTDIR:-$HOME}/.zprezto" 39 | # $ setopt EXTENDED_GLOB 40 | # $ for rcfile in "${ZDOTDIR:-$HOME}"/.zprezto/runcoms/^README.md(.N); do 41 | # $ ln -s "$rcfile" "${ZDOTDIR:-$HOME}/.${rcfile:t}" 42 | # $ done 43 | # 44 | # Now change your default shell to zsh 45 | # $ chsh -s `which zsh` 46 | # Now logout and back in 47 | ################################## 48 | 49 | # Source Prezto. 
50 | if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then 51 | source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" 52 | fi 53 | UNAME=$(uname) 54 | 55 | # HISTORY settings 56 | setopt EXTENDED_HISTORY # store time in history 57 | setopt HIST_EXPIRE_DUPS_FIRST # unique events are more usefull to me 58 | setopt HIST_VERIFY # Make those history commands nice 59 | setopt INC_APPEND_HISTORY # immediatly insert history into history file 60 | HISTSIZE=160000 # spots for duplicates/uniques 61 | SAVEHIST=150000 # unique events guaranteed 62 | HISTFILE=~/.history 63 | 64 | autoload -U compinit 65 | compinit 66 | bindkey "^[[3~" delete-char 67 | bindkey "^[OH" beginning-of-line 68 | bindkey "^[OF" end-of-line 69 | bindkey "^H" backward-delete-word 70 | 71 | #allow tab completion in the middle of a word 72 | setopt COMPLETE_IN_WORD 73 | 74 | #Fix zsh being stupid and not printing lines without newlines 75 | setopt nopromptcr 76 | 77 | ## keep background processes at full speed 78 | setopt NOBGNICE 79 | ## restart running processes on exit 80 | #setopt HUP 81 | 82 | ## never ever beep ever 83 | setopt NO_BEEP 84 | 85 | ## disable mail checking 86 | MAILCHECK=0 87 | 88 | autoload -U colors 89 | 90 | # I really hate when BSD vs. *Nix (Linux) 91 | # Crap like this comes up, c'mon guys 92 | # let's all MD5 the *right* way 93 | if [ "${UNAME}" = "Darwin" ] 94 | then 95 | alias md5sum='md5 -r ' 96 | fi 97 | 98 | jitter() { 99 | # Use like this in shell 100 | # sleep $(jitter) 101 | unset J1 102 | J1=${RANDOM} 103 | unset J2 104 | J2=${RANDOM} 105 | unset M1 106 | M1=$(echo "${J1} * ${J2}" | bc) 107 | JIT=$(echo "${M1} % 10 * .1" | bc) 108 | echo "${JIT}" 109 | # tests to see how it works: 110 | # Run it 10 times, just see the variety 111 | # You should see single-digit numbers between 0 and 1, e.g. 
'.4' 112 | # $ for x in {1..10}; do jitter; done 113 | } 114 | 115 | retry() { 116 | i=1 117 | mi=60 118 | while true 119 | do 120 | if [ "${DEBUG}" ]; then echo "trying $@ at `date` [attempt: ${i}]"; fi 121 | $@ 122 | let "sleep_time = ${i} * ${i}" 123 | echo "sleeping ${sleep_time}" 124 | sleep ${sleep_time} 125 | sleep $(jitter) 126 | if [ ${i} -gt ${mi} ]; then i=1; fi 127 | ((i++)) 128 | done 129 | } 130 | 131 | # System aliases 132 | alias sshrm="ssh-keygen -R " 133 | alias lsort="sort | uniq -c | sort -n" 134 | alias auxyul="retry ssh ${yul}" 135 | alias ll='ls -FAlh' 136 | alias l='ls -FAlh' 137 | alias lg='ls -FAlh | grep -i ' 138 | alias lh='ls -FAlht | head -n 20 ' 139 | alias grep="grep --color=auto" 140 | alias gvg=" grep -v 'grep' " 141 | alias ducks='du -chs * | sort -rn | head' 142 | alias duckx='du -chsx * | sort -rn | head' 143 | pskill() { 144 | ps -efl | grep $1 | grep -v grep | awk '{print $4}' | paste -sd " " 145 | } 146 | 147 | # Net aliases 148 | alias p6='ping -c 6 -W 100 ' 149 | alias ra="dig +short -x " 150 | alias ns="dig +short " 151 | alias ds="dig +short " 152 | alias wa="whois -h whois.arin.net " 153 | alias tcurl='curl -w "%{remote_ip} time_namelookup: %{time_namelookup} tcp: %{time_connect} ssl:%{time_appconnect} start_transfer:%{time_starttransfer} total:%{time_total}\n" -sk -o /dev/null' 154 | alias tcurlc='curl -w "%{remote_ip} time_namelookup: %{time_namelookup} tcp: %{time_connect} ssl:%{time_appconnect} start_transfer:%{time_starttransfer} total:%{time_total}\n" -sk -o /dev/null --cookie ${cookie} ' 155 | alias tcurlo='curl -w "%{remote_ip} time_namelookup: %{time_namelookup} tcp: %{time_connect} ssl:%{time_appconnect} start_transfer:%{time_starttransfer} total:%{time_total}\n" -sk ' 156 | alias tcurloc='curl -w "%{remote_ip} time_namelookup: %{time_namelookup} tcp: %{time_connect} ssl:%{time_appconnect} start_transfer:%{time_starttransfer} total:%{time_total}\n" -sk --cookie ${cookie} ' 157 | alias curlc='curl -skL --cookie 
${cookie} ' 158 | alias watip="curl -s -X GET "https://www.dangfast.com/ip" -H "accept: application/json" | jq -r '.origin'" 159 | alias watproxyip="curl -s -X GET "http://www.dangfast.com/ip" -H "accept: application/json" | jq -r '.origin'" 160 | # Retry ssh as EC2 user! Get it!? 161 | rse() { 162 | retry ssh ec2-user@${1} 163 | } 164 | # Retry ssh as Ubuntu user! Get it!? 165 | # Also RSU = stocks = money 166 | # This function is money 167 | rsu() { 168 | retry ssh ubuntu@${1} 169 | } 170 | 171 | # Git aliases 172 | alias gl='git lol' 173 | alias gbl='git branch --list' 174 | 175 | # Dev aliases 176 | alias ipy="ipython -i ~/helpers.py" 177 | nosetests () { 178 | ./runpy -m nose.core "$@" --verbose --nocapture 179 | } 180 | 181 | # Math aliases and functions 182 | function is_int() { return $(test "$@" -eq "$@" > /dev/null 2>&1); } 183 | 184 | autoload -U promptinit 185 | promptinit 186 | declare -x PATH="${HOME}/bin:/usr/local/bin:/usr/bin:/bin:/sbin:/usr/sbin:/usr/local/sbin:${HOME}/.local/bin" 187 | 188 | possible_path_dirs=("/usr/local/app1/bin" "${HOME}/app2/bin") 189 | for path_dir in ${possible_path_dirs[@]} 190 | do 191 | if [ -d ${path_dir} ]; then 192 | declare -x PATH=$PATH:${path_dir} 193 | fi 194 | done 195 | 196 | # Workflow aliases & functions 197 | alias quicklinks="cat ${HOME}/notes/quicklinks" 198 | alias notes="cd ${HOME}/notes" 199 | alias src="cd ${HOME}/src" 200 | alias elbdate="date -u +%FT%H:%M:%SZ " 201 | function get_dropped_hosts_dmesg() { 202 | for S in $(dmesg | grep DROPPED | awk '{ print $8, "\n", $9 }' | sed -e 's/ //g' | sed -e 's/DST=//' | sed -e 's/SRC=//' | sort | uniq); do echo $(dig +short -x ${S} | sed -e 's/\.$//'); done | sort | uniq 203 | } 204 | 205 | function get_dropped_hosts_kernlog() { 206 | for S in $(grep DROPPED /var/log/kern.log* | awk '{ print $13, "\n", $14 }' | sed -e 's/ //g' | sed -e 's/DST=//' | sed -e 's/SRC=//' | sort | uniq); do echo $(dig +short -x ${S} | sed -e 's/\.$//'); done | sort | uniq 207 | } 
208 | 209 | # I keep my local ssh agent info in this file, and if it's there we should source it 210 | # I use https://github.com/jonzobrist/Bash-Admin-Scripts/blob/master/sshsetup.sh 211 | # Which I run on boot like 212 | # sshsetup 213 | # source ~/.ssh/myagent 214 | # ssh-add 215 | if [ -f "${HOME}/.ssh/myagent" ] 216 | then 217 | source ${HOME}/.ssh/myagent 218 | fi 219 | 220 | # Keep the HTTP address for my S3 bucket handy 221 | declare -x ZS3="http://mybucket.s3-website-us-east-1.amazonaws.com" 222 | 223 | # Common Iterables 224 | UPPER="A B C D E F G H I J K L M N O P Q R S T U V W X Y Z" 225 | lower="a b c d e f g h i j k l m n o p q r s t u v w x y z" 226 | nums="0 1 2 3 4 5 6 7 8 9" 227 | nums_and_such="0 1 2 3 4 5 6 7 8 9 - _" 228 | hex_upper="0 1 2 3 4 5 6 7 8 9 A B C D E F" 229 | HEX="0 1 2 3 4 5 6 7 8 9 A B C D E F" 230 | hex="0 1 2 3 4 5 6 7 8 9 a b c d e f" 231 | hour_list="00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 22 23" 232 | 233 | ################################################################### 234 | # TICKET WORKING ALIASES & FUNCTIONS 235 | ################################################################### 236 | # We all work ticket like things, right? 
237 | # I like to keep information about specific tickets 238 | # in the same place 239 | # This brings consistency and I can later find information easily 240 | # when looking for related info 241 | # I also often backup this data to encrypted S3 buckets 242 | # these functions enable this type of workflow 243 | function nott() { 244 | # TT = working ticket identifier, matches dir name 245 | # TD = working directory for that ticket, dir name matches ticket id/name 246 | unset TT 247 | unset TD 248 | } 249 | 250 | function tt() { 251 | # Function to jump into a working directory for $1 (${TT}) if it exists 252 | # If it doesn't exist, create it, and an env.sh file 253 | if [ "${TT}" ] 254 | then 255 | declare -x TD="${HOME}/work/${TT}" 256 | else 257 | declare -x TD="${HOME}/work/${1}" 258 | declare -x TT=${1} 259 | fi 260 | if [ ! -d "${TD}" ] 261 | then 262 | mkdir -p ${TD} 263 | fi 264 | ENV_FILE="${TD}/env.sh" 265 | if [ ! -f ${ENV_FILE} ] 266 | then 267 | echo "declare -x TT=\"${TT}\"" > ${ENV_FILE} 268 | fi 269 | if [ ! -x ${ENV_FILE} ] 270 | then 271 | chmod uog+x ${ENV_FILE} 272 | fi 273 | DEBUG "Changing dir to ~/${TD}. TT=${TT}" 274 | cd ${TD} 275 | . 
${ENV_FILE} 276 | } 277 | 278 | # I frequently use the pattern of setting F to the current file 279 | # (like a pointer) 280 | # I'm working on, so these aliases let me do common things with 281 | # the current file 282 | # 283 | # less the file, display it in my pager less 284 | function lf() { 285 | if [ "${F}" ]; then less ${F} 2>/dev/null 286 | elif [ "${1}" ]; then less ${1} 2>/dev/null 287 | else echo "Usage lf filename, or export F=filename" 288 | fi 289 | } 290 | 291 | # wireshark the file 292 | # often times I'm doing tshark -nn -r ${F} 293 | # so this makes it easy to jump into wireshark 294 | function wf() { 295 | if [ "${F}" ]; then wireshark ${F} 2>/dev/null & 296 | elif [ "${1}" ]; then wireshark ${1} 2>/dev/null & 297 | else echo "Usage wf filename.pcap, or export F=filename.pcap" 298 | fi 299 | } 300 | 301 | function rtt() { 302 | # Function to 'return-to-ticket' 303 | # looks at only your env variable ${TT} 304 | # if it's set, it cd's to it 305 | if [ "${TT}" ] && [ -d "${TD}" ] 306 | then 307 | cd "${TD}" 308 | else 309 | echo "No active work item" 310 | fi 311 | } 312 | 313 | ################################################################### 314 | # EC2 / AWS helper functions & aliases 315 | ################################################################### 316 | # 317 | function get_am2_ami() { 318 | # Searches for the latest Amazon Linux 2 x86 64-bit ami 319 | if [ "${1}" ] && [ ! "${R}" ] 320 | then 321 | R=${1} 322 | fi 323 | if [ "${R}" ] 324 | then 325 | aws ec2 describe-images --owners amazon --region ${R} --filters 'Name=name,Values=amzn2-ami-hvm-2.0.????????-x86_64-gp2' 'Name=state,Values=available' --output json | jq -r '.Images | sort_by(.CreationDate) | last(.[]).ImageId' 326 | else 327 | echo "Usage: ${0} region; or export R=region; ${0}" 328 | fi 329 | } 330 | 331 | function get_ubuntu_ami() { 332 | # Searches for the latest Ubuntu x86 64-bit ami 333 | if [ "${1}" ] && [ ! 
"${R}" ] 334 | then 335 | R=${1} 336 | fi 337 | if [ "${UBU}" ] 338 | then 339 | case ${UBU} in 340 | 12) 341 | UBUNTU="precise-12.04" 342 | ;; 343 | 14) 344 | UBUNTU="trusty-14.04" 345 | ;; 346 | 16) 347 | UBUNTU="xenial-16.04" 348 | ;; 349 | 18) 350 | UBUNTU="bionic-18.04" 351 | ;; 352 | 19) 353 | UBUNTU="disco-19.04" 354 | ;; 355 | esac 356 | else 357 | UBUNTU="bionic-18.04" 358 | fi 359 | if [ "${R}" ] 360 | then 361 | aws ec2 describe-images --owners 099720109477 --region ${R} --filters "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-${UBUNTU}-amd64-server-????????" 'Name=state,Values=available' --output json | jq -r '.Images | sort_by(.CreationDate) | last(.[]).ImageId' 362 | else 363 | echo "Usage: ${0} region; or export R=region; ${0}" 364 | fi 365 | } 366 | 367 | S3_BACKUP_BUCKET="my-s3-bucket" # Obviously you should change this 368 | function ttup() { 369 | # Given a work ticket (TT) you're working on, uploading to S3 bucket ${S3_BACKUP_BUCKET} 370 | # This uses aws s3 sync, which should de-dupe uploads 371 | # but will clobber objects 372 | # Useful if you want to work on a ticket in multiple places 373 | # or share data from a ticket with others 374 | # Dont' forget to enable encryption on the bucket! 375 | if [ ! "${TD}" ] 376 | then 377 | TD="${PWD##*/}" 378 | fi 379 | DEBUG "Backing up tt dir ${TT} at $(date)" | tee -a ~/tt-backup-$(date +%F).log 380 | sleep 1.5 381 | aws s3 sync ${TD} s3://${S3_BACKUP_BUCKET}/${TT} 382 | } 383 | 384 | function ttdown() { 385 | # Given a TT, download it to ${TD} 386 | if [ ! "${TT}" ] 387 | then 388 | TT="${PWD##*/}" 389 | fi 390 | DEBUG "Download tt dir ${TT} at $(date)" | tee -a ~/tt-download-$(date +%F).log 391 | sleep 1.5 392 | aws s3 sync s3://${S3_BACKUP_BUCKET}/${TT} ${TD} 393 | } 394 | 395 | function gt() { 396 | # gt = Get Ticket 397 | # Get a ticket directory from a host 398 | if [ ! "${1}" ] || [ ! -d "${TD}" ] || [ ! 
"${TT}" ] 399 | then 400 | echo "Get TT, Usage ${0} host" 401 | echo "Must have TD and TT envs set, and TD must be an existing directory" 402 | else 403 | TT="${PWD##*/}" 404 | S=${2} 405 | rsync -avz ${S}:${TD} ${TD} 406 | fi 407 | } 408 | 409 | function pt() { 410 | if [ ! "${1}" ] || [ ! -d "${TD}" ] || [ ! "${TT}" ] 411 | then 412 | echo "Push TT, Usage ${0} region" 413 | echo "Must have TD and TT envs set, and TD must be an existing directory" 414 | else 415 | rsync -avz ${TD} ${S}:${TD} 416 | fi 417 | } 418 | 419 | # I keep a list of regions in a local file 420 | # This enables me to iterate easily over all AWS regions without calling the describe-regions API 421 | update_ec2_regions() { 422 | MY_TMP_FILE=$(mktemp) 423 | aws ec2 describe-regions |grep 'RegionName' | awk -F'"' '{ print $4 }' | tee ${MY_TMP_FILE} 424 | OC=$(cat ~/regions-ec2 | wc -l | sed -e 's/ //g') 425 | NC=$(cat ${MY_TMP_FILE} | wc -l | sed -e 's/ //g') 426 | if (( ${NC} >= ${OC})) 427 | then 428 | /bin/mv ${MY_TMP_FILE} ~/regions-ec2 429 | else 430 | echo "new file (${MY_TMP_FILE}) is not larger, did we lose regions?" 
431 | fi 432 | } 433 | 434 | # I often find myself on remote systems with files 435 | # that I want locally 436 | # instead of figuring out the hostname or exiting and pasting 437 | # things to make an scp command 438 | # just use this (if you use real hostnames) 439 | # example: you want to get http80.pcap 440 | # ph http80.pcap 441 | # This will print out "scp username@my-server-name:http80.pcap ./ 442 | # Which I can copy and paste easily 443 | # I tried having it push the file, but prefer this way 444 | # as often I can connect to a remote system, but it cannot connect to me 445 | # ala NAT 446 | ph() { 447 | FILE=$1 448 | if [ -f $(pwd)/${FILE} ] 449 | then 450 | echo "scp ${USER}@$(hostname):$(pwd)/${FILE} ./" 451 | elif [ -d ${FILE} ] 452 | then 453 | echo "scp -r ${USER}@$(hostname):${FILE} ./${FILE}" 454 | else 455 | echo "scp -r ${USER}@$(hostname):$(pwd) ./" 456 | fi 457 | } 458 | 459 | 460 | # I often copy paths that I want to expore 461 | # And a lot of the time the paths have a file at the end 462 | # Some applications & computers handle this better than others 463 | # But it's enough of a PITA that I use cdd 464 | # Which looks to see if the thing I pasted is a file 465 | # and cd's to its dirname if it is 466 | # 467 | function cdd() { 468 | if [ "${1}" ] && [ -f "${F}" ] 469 | then 470 | DEBUG "switching dir to ${F}, base of ${1}" 471 | pushd $(dirname ${F}) 472 | elif [ "${1}" ] && [ -d "${F}" ] 473 | then 474 | DEBUG "switching dir to ${F}, (was directory)" 475 | pushd ${F} 476 | fi 477 | } 478 | 479 | 480 | # What is with Apple lately? 481 | # I feel like OS X is now as reliable as Windows 98 at its peak 482 | # This puts me into a VIM temp file that I can rant into 483 | # and then :x and easily save these tirades 484 | # it has never come to anything 485 | # and I honestly thing Apple doesn't care anymore 486 | # RIP Steve Jobs 487 | function newcrash() { 488 | CRASH_DIR="${HOME}/mac-crashes-$(date +%Y)" 489 | if [ ! 
-d "{CRASH_DIR}" ] 490 | then 491 | mkdir -p ${CRASH_DIR} 492 | fi 493 | CRASH_FILE="mac-crash-ya-$(date +%F-%s).txt" 494 | vi ${CRASH_FILE} 495 | } 496 | 497 | # I like to leave old code around 498 | # in case the new version turns on me while I'm sleeping 499 | # function try_get { 500 | # FILE=$1 501 | # URL=$2 502 | # TRIES=$3 503 | # I=0 504 | # if [ -z ${TRIES} ] || [ ${TRIES} -eq 0 ]; then TRIES=3; fi 505 | # while [ ! -f ${FILE} ] 506 | # do 507 | # curl -s -o ${FILE} ${URL} 508 | # let "SLEEP_TIME = ${I} * ${I}" 509 | # sleep ${SLEEP_TIME} 510 | # ((I++)) 511 | # done 512 | # } 513 | 514 | # Ever want to download something and NOT slip up and overwrite it 515 | # while wildly CTRL+R'ing through your shell history? 516 | # Also maybe you're cool and want to respect servers 517 | # and try to get things with exponential backoff? 518 | function try_get { 519 | URL=$1 520 | FILE=$2 521 | TRIES=$3 522 | START=$(date +%s) 523 | I=0 524 | if [ -z ${2} ]; then FILE_PREFIX=${URL##*/}; FILE=${FILE_PREFIX%%\?*}; fi 525 | if [ -z ${TRIES} ] || [ ${TRIES} -eq 0 ]; then TRIES=3; fi 526 | if [ "${DEBUG}" ]; then echo "Getting ${URL} to ${FILE} max ${TRIES} attempts at $(date)"; fi 527 | while [ ! -f ${FILE} ] 528 | do 529 | if [ "${DEBUG}" ]; then echo "calling curl for attempt ${I}"; fi 530 | CURL="curl -s -o ${FILE} ${URL}" 531 | if [ "${DEBUG}" ]; then echo "${CURL}"; fi 532 | ${CURL} 533 | RETURN=$? 534 | if [ "${DEBUG}" ]; then echo "Return code: ${RETURN}"; fi 535 | let "SLEEP_TIME = ${I} * ${I}" 536 | if [ "${DEBUG}" ]; then echo "sleeping ${SLEEP_TIME}"; fi 537 | sleep ${SLEEP_TIME} 538 | ((I++)) 539 | done 540 | END=$(date +%s) 541 | let "ELAPSED_TIME = ${END} - ${START}" 542 | if [ ! 
${RETURN} ] 543 | then 544 | echo "file exists" 545 | /bin/ls -hl ${FILE} 546 | elif [ ${RETURN} -gt 0 ] 547 | then 548 | echo "Failed to get ${FILE} from ${URL} after ${I} attempts and ${ELAPSED_TIME} seconds" 549 | cat ${FILE} 550 | else 551 | if [ "${DEBUG}" ] 552 | then 553 | echo "Got $(/bin/ls -1 ${FILE}) ${I} attempts after and ${ELAPSED_TIME} seconds" 554 | else 555 | echo "${FILE} ${I}" 556 | fi 557 | fi 558 | } 559 | 560 | # Ansible is a great way to easily control a bunch of hosts 561 | # I put them in ~/hosts and use this to remote ssh to all of them 562 | alias arun="ansible -m shell --user ubuntu --become -i ~/hosts -a " 563 | alias arun2="ansible -m shell --user ec2-user --become -i ~/hosts -a " 564 | 565 | # I often want to just have a specific string *highlighted* 566 | # while preserving the original contents of a text file 567 | # grepe string filename 568 | # just like you would grep string filename, but with more file 569 | function grepe { 570 | grep --color -E "$1|$" $2 571 | } 572 | 573 | # I often want to know the time in UTC vs Pacific 574 | ddu() { 575 | if [ ${TZ} ]; then PTZ=${TZ}; else PTZ=":US/Pacific"; fi 576 | export TZ=":US/Pacific" 577 | echo "Pacific time: $(date)" 578 | export TZ=":UTC" 579 | echo "UTC: $(date)" 580 | export TZ=${PTZ} 581 | } 582 | 583 | # I often want to know the time in Pacific vs Eastern 584 | edu() { 585 | if [ ${TZ} ]; then PTZ=${TZ}; else PTZ=":US/Pacific"; fi 586 | export TZ=":US/Pacific" 587 | echo "Pacific time: $(date)" 588 | export TZ=":US/Eastern" 589 | echo "Eastern time: $(date)" 590 | export TZ=${PTZ} 591 | } 592 | 593 | # Print current date in MY time format rounded to an hour 594 | # YYYY-MM-DDTHH:00:00Z 595 | # I use this for getting things like dates for metric analysis 596 | my-time() { 597 | echo "NOW $(date +%FT%H:00:00Z)" 598 | echo "24HR AGO $(date +%FT%H:00:00Z)" 599 | } 600 | 601 | # This makes the left side of your command prompt so much cleaner 602 | function collapse_pwd { 603 | echo 
$(pwd | sed -e "s,^$HOME,~,") 604 | } 605 | 606 | # Everyone loves PRINT (errrr echo) statements right! 607 | # I do 608 | # I love DEBUG variables even more though 609 | # and with this I can use this in my bash scripts like: 610 | # DEBUG "this process just did something I really cared about when I was writing the code at $(date) on $(hostname)" 611 | # and not be bugged by it after I'm less interested 612 | function DEBUG() { 613 | if [ "${DEBUG}" ] 614 | then 615 | echo "${1}" 616 | fi 617 | } 618 | 619 | function used_mem() { 620 | # This function reports the anon allocated memory 621 | # Interesting because this is used by #@%REDACTED#@#@%@# 622 | # Use like this to monitor % used memory 623 | # while true; do echo "$(date) $(used_anon_mem)%"; sleep .1; done 624 | MEM_TOT=$(grep MemTotal /proc/meminfo | awk '{ print $2 }') 625 | MEM_FREE=$(grep MemFree /proc/meminfo | awk '{ print $2 }') 626 | COMMIT_MEM=$(grep Committed_AS /proc/meminfo | awk '{ print $2 }') 627 | ANON_MEM=$(grep AnonPages /proc/meminfo | awk '{ print $2 }') 628 | ANON_USED_PERCENT=$(echo "scale=2; (${ANON_MEM} / ${MEM_TOT}) * 100" | bc -l) 629 | COMMIT_USED_PERCENT=$(echo "scale=2; (${COMMIT_MEM} / ${MEM_TOT}) * 100" | bc -l) 630 | echo "Mem Anon: ${ANON_USED_PERCENT}%, Commit ${COMMIT_USED_PERCENT}%, ${MEM_FREE} free of ${MEM_TOT} Total" 631 | } 632 | 633 | function used_anon_mem() { 634 | # This function reports the anon allocated memory 635 | # Interesting because this is used by #@%REDACTED#@#@%@# 636 | # Use like this to monitor % used memory 637 | # while true; do echo "$(date) $(used_anon_mem)%"; sleep .1; done 638 | MEM_TOT=$(grep MemTotal /proc/meminfo | awk '{ print $2 }') 639 | ANON_MEM=$(grep AnonPages /proc/meminfo | awk '{ print $2 }') 640 | USED_PERCENT=$(echo "scale=2; (${ANON_MEM} / ${MEM_TOT}) * 100" | bc -l) 641 | echo ${USED_PERCENT} 642 | } 643 | 644 | function aws_random_subnet() { 645 | # Cuz' sometimes you just need a subnet... 
646 | # This works with your AWS CLI, needs to be setup and all that 647 | # given region ${1} or ${R} or none 648 | # describe VPC subnets, sort random, pick 1 649 | if [ "${1}" ] 650 | then 651 | MY_REGION=${1} 652 | elif [ "${R}" ] 653 | then 654 | MY_REGION=${R} 655 | fi 656 | if [ "${MY_REGION}" ] 657 | then 658 | MY_SUBNET=$(aws ec2 describe-subnets --region ${MY_REGION} | jq -r '.Subnets[].SubnetId' | sort -r | head -n 1) 659 | else 660 | MY_SUBNET=$(aws ec2 describe-subnets | jq -r '.Subnets[].SubnetId' | sort -r | head -n 1) 661 | fi 662 | echo "${MY_SUBNET}" 663 | } 664 | 665 | 666 | despace () { 667 | # Given file as arg, rename it, replacing ' ' with '_' 668 | # Cuz files should not have spaces in them, or anything that requires escaping to use 669 | if [ -f "${1}" ] 670 | then 671 | F=$(ls -1 ${1} | sed -e 's/ /_/g') 672 | mv -i ${1} ${F} 673 | fi 674 | } 675 | 676 | # global ZSH aliases 677 | # SUS - get top 25 of whatever with counts 678 | alias -g SUS=" | sort | uniq -c | sort -nr | head -n 25" 679 | # More from https://grml.org/zsh/zsh-lovers.html 680 | # alias -g ...='../..' 681 | # alias -g ....='../../..' 682 | # alias -g .....='../../../..' 
683 | # alias -g CA="2>&1 | cat -A" 684 | # alias -g C='| wc -l' 685 | # alias -g D="DISPLAY=:0.0" 686 | # alias -g DN=/dev/null 687 | # alias -g ED="export DISPLAY=:0.0" 688 | # alias -g EG='|& egrep' 689 | # alias -g EH='|& head' 690 | # alias -g EL='|& less' 691 | # alias -g ELS='|& less -S' 692 | # alias -g ETL='|& tail -20' 693 | # alias -g ET='|& tail' 694 | # alias -g F=' | fmt -' 695 | # alias -g G='| egrep' 696 | # alias -g H='| head' 697 | # alias -g HL='|& head -20' 698 | # alias -g Sk="*~(*.bz2|*.gz|*.tgz|*.zip|*.z)" 699 | # alias -g LL="2>&1 | less" 700 | # alias -g L="| less" 701 | # alias -g LS='| less -S' 702 | # alias -g MM='| most' 703 | # alias -g M='| more' 704 | # alias -g NE="2> /dev/null" 705 | # alias -g NS='| sort -n' 706 | # alias -g NUL="> /dev/null 2>&1" 707 | # alias -g PIPE='|' 708 | # alias -g R=' > /c/aaa/tee.txt ' 709 | # alias -g RNS='| sort -nr' 710 | # alias -g S='| sort' 711 | # alias -g TL='| tail -20' 712 | # alias -g T='| tail' 713 | # alias -g US='| sort -u' 714 | # alias -g VM=/var/log/messages 715 | # alias -g X0G='| xargs -0 egrep' 716 | # alias -g X0='| xargs -0' 717 | # alias -g XG='| xargs egrep' 718 | # alias -g X='| xargs' 719 | 720 | alias sag="sudo apt-get install " 721 | alias sac="sudo apt-cache search " 722 | 723 | if [ -e "$HOME/.cargo/env" ] 724 | then 725 | source "$HOME/.cargo/env" 726 | fi 727 | 728 | WORK_ZSHRC="${WORK_ZSHRC:-${HOME}/.zshrc-work}" 729 | if [ -f "${WORK_ZSHRC}" ]; then source "${WORK_ZSHRC}"; fi 730 | 731 | -------------------------------------------------------------------------------- /BSD-License: -------------------------------------------------------------------------------- 1 | # 2 | # Author : Jon Zobrist 3 | # Homepage : http://www.jonzobrist.com 4 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 5 | # Copyright (c) 2012, Jon Zobrist 6 | # All rights reserved. 
7 | # 8 | # Redistribution and use in source and binary forms, with or without 9 | # modification, are permitted provided that the following conditions are met: 10 | # 11 | # 1. Redistributions of source code must retain the above copyright notice, this 12 | # list of conditions and the following disclaimer. 13 | # 2. Redistributions in binary form must reproduce the above copyright notice, 14 | # this list of conditions and the following disclaimer in the documentation 15 | # and/or other materials provided with the distribution. 16 | # 17 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | # 28 | # Purpose : This script aims to gather all public ssh keys on a server and put them in a directory, with appropriate names 29 | # Usage : gather-public-ssh-keys.sh [Directory] 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Collection of bash snippets, scripts, and utilities I've accumulated/written/altered/impaired over the years ## 2 | 3 | This collection is authored by Jon Zobrist 4 | My website - http://www.jonzobrist.com/ 5 | Follow me on Twitter @jonzobrist 6 | 7 | All included files are licensed under the BSD License 8 | No warranty express or implied 9 | 10 | ``` 11 | ###################################################### 12 | # Author : Jon Zobrist 13 | # Homepage : http://www.jonzobrist.com 14 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 15 | # Copyright (c) 2022, Jon Zobrist 16 | # All rights reserved. 17 | 18 | # Redistribution and use in source and binary forms, with or without 19 | # modification, are permitted provided that the following conditions are met: 20 | # 21 | # 1. Redistributions of source code must retain the above copyright notice, this 22 | # list of conditions and the following disclaimer. 23 | # 2. Redistributions in binary form must reproduce the above copyright notice, 24 | # this list of conditions and the following disclaimer in the documentation 25 | # and/or other materials provided with the distribution. 26 | # 27 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 28 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 29 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 30 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 31 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 32 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 33 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 34 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 36 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 | # 38 | ###################################################### 39 | ``` 40 | 41 | -------------------------------------------------------------------------------- /auto-remote-shell/LICENSE: -------------------------------------------------------------------------------- 1 | This collection is authored by Jon Zobrist 2 | My website - http://www.jonzobrist.com/ 3 | Follow me on Twitter @jonzobrist 4 | 5 | All included files are licensed under the BSD License 6 | 7 | ###################################################### 8 | # Author : Jon Zobrist 9 | # Homepage : http://www.jonzobrist.com 10 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 11 | # Copyright (c) 2022, Jon Zobrist 12 | # All rights reserved. 13 | 14 | # Redistribution and use in source and binary forms, with or without 15 | # modification, are permitted provided that the following conditions are met: 16 | # 17 | # 1. Redistributions of source code must retain the above copyright notice, this 18 | # list of conditions and the following disclaimer. 19 | # 2. Redistributions in binary form must reproduce the above copyright notice, 20 | # this list of conditions and the following disclaimer in the documentation 21 | # and/or other materials provided with the distribution. 
22 | # 23 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 24 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 27 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | # 34 | ###################################################### 35 | 36 | 37 | -------------------------------------------------------------------------------- /auto-remote-shell/README.md: -------------------------------------------------------------------------------- 1 | ## Auto Remote Shell ## 2 | 3 | What is this? 4 | Auto remote shell is a simple set of tools to configure a Linux box to keep a shell open to a configured remote server. This is useful if you need to troubleshoot the Linux boxes of your friends and families, or for keeping devices in the field updated as they move about with varying connectivity. 5 | 6 | ### Notes ### 7 | 8 | This script assumes you use the same username on the target machine as the homebase server. 9 | No warranty implied or otherwise should be assumed, this is open source software and you assume full responsibility for using it. 10 | For my use I also setup NordVPN, PiHole (in Docker), and a remote Dynamic DNS using Amazon Route 53. 11 | Having Dynamic DNS is nice, but this helps when the remote network doesn't allow ingress to the public IPs things egress as. 
12 | 13 | ### Fast Setup on Raspberry Pi running Raspbian 11 (bullseye) ### 14 | Not done yet 15 | ``` 16 | git clone https://github.com/jonzobrist/Bash-Admin-Scripts 17 | cd Bash-Admin-Scripts/auto-remote-shell 18 | ./install.sh 19 | ``` 20 | 21 | ### Configure ### 22 | To set this up you need to: 23 | 1. Install the required dependencies / programs 24 | 1. Create the user 25 | 1. Configure SSH keys and access 26 | 1. Configure & install autossh script in watchdog 27 | 28 | ``` 29 | SSH_USER="ubuntu" 30 | # This is the FQDN or IP of the place your SSH client will connect to 31 | TARGET_SERVER="homebase.example.com" 32 | # The remote port is what the shell will open a listener on 33 | R_PORT="2222" 34 | ``` 35 | 36 | ### Manual Setup on Generic Linux w/dpkg ### 37 | 38 | Walk through of the steps the install script tries to do 39 | 40 | ``` 41 | git clone https://github.com/jonzobrist/Bash-Admin-Scripts 42 | cd Bash-Admin-Scripts/auto-remote-shell 43 | sudo apt-get install autossh watchdog 44 | 45 | sudo cp autossh_script /etc/watchdog.d/autossh_script 46 | perl -pi -e "s/SSH_USER/${SSH_USER}/g" /etc/watchdog.d/autossh_script 47 | perl -pi -e "s/TARGET_SERVER/${TARGET_SERVER}/g" /etc/watchdog.d/autossh_script 48 | ``` 49 | 50 | ### Confirm your setup is working ### 51 | With the above default / example settings you would do this to connect to your remote host 52 | ``` 53 | $ ssh homebase.example.com 54 | homebase $ ssh -p 2222 localhost 55 | ``` 56 | 57 | -------------------------------------------------------------------------------- /auto-remote-shell/autossh_script: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Lifted most of this from https://askubuntu.com/questions/850388/making-a-crontab-reversed-ssh-connection-using-script 3 | # But the ssh args did not work for me or what I wanted, so I tweaked them 4 | targetuser=SSH_USER 5 | 6 | runTest=false 7 | runRepair=false 8 | 9 | case $1 in 10 | test) 
11 | runTest=true 12 | ;; 13 | repair) 14 | runRepair=true 15 | repairExitCode=$2 16 | ;; 17 | *) 18 | echo 'Error: script needs to be run by watchdog' 1>&2 19 | exit 1 20 | ;; 21 | esac 22 | 23 | if ${runTest}; then 24 | #run a test here which will tell the status of your process 25 | #the exit code of this script will be the repairExitCode if it is non-zero 26 | if ! pgrep autossh -u ${targetuser} &> /dev/null; then 27 | #autossh not running; notify watchdog to repair 28 | exit 1 29 | else 30 | #autossh running; no action necessary 31 | exit 0 32 | fi 33 | fi 34 | 35 | if ${runRepair}; then 36 | #take an action to repair the affected item 37 | #use a case statement on $repairExitCode to handle different failure cases 38 | # su - ${targetuser} -c 'nohup autossh -f -CNR 127.0.0.1:4222:127.0.0.1:22 HOMEBASE_SERVER_ADDRESS' 39 | su - ${targetuser} -c 'nohup autossh -f -CNR 127.0.0.1:R_PORT:127.0.0.1:22 HOMEBASE_SERVER_ADDRESS' 40 | exit 0 41 | fi 42 | -------------------------------------------------------------------------------- /auto-remote-shell/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ### Configure ### 4 | # Overview 5 | # 1. Install the required dependencies / programs 6 | # 2. Create the user 7 | # 3. Configure SSH keys and access 8 | # 4. Configure & install autossh script in watchdog 9 | 10 | 11 | # 1. Install the required dependencies / programs 12 | sudo apt-get -y install autossh watchdog 13 | 14 | # 2. Create the user if they don't exist 15 | R_USER="ubuntu" 16 | R_PORT="2222" 17 | AUTO_SSH_SCRIPT="/etc/watchdog.d/autossh_script" 18 | 19 | if id "$R_USER" &>/dev/null; then 20 | echo 'user ${R_USER} already exists, not creating at $(date)' 21 | else 22 | echo 'user ${R_USER} does not exists, creating at $(date)' 23 | useradd -D ${R_USER} 24 | fi 25 | 26 | # now that the user was created, we get the path to their home dir 27 | USER_HOME=$(eval echo "~${R_USER}" 28 | if [ ! 
-d ${USER_HOME} ] 29 | then 30 | echo "defaults did not create user dir at ${USER_HOME}, making one from /etc/skel" 31 | cp -R /etc/skel ${USER_HOME} 32 | chown -R ${R_USER}:${R_USER} ${USER_HOME} 33 | else 34 | # If you're having problems and you saw this on first run, check it's permissions, other users don't need access to it 35 | echo "User ssh dir exists at $(date)" 36 | fi 37 | 38 | # Maybe we shouldn't do this, as it should be created by ssh-keygen? 39 | USER_SSH_DIR="${USER_HOME}/.ssh" 40 | if [ ! -d ${USER_SSH_DIR} ] 41 | then 42 | echo "User ssh dir does not exist, creating at $(date)" 43 | sudo mkdir -p ${USER_SSH_DIR} 44 | sudo chown -R ${R_USER}:${R_USER} ${USER_SSH_DIR} 45 | sudo chmod -R og-rwx ${USER_SSH_DIR} 46 | else 47 | echo "User ssh dir exists, not creating at $(date)" 48 | fi 49 | 50 | # 3. Configure SSH keys and access 51 | USER_SSH_SKEY="${USER_SSH_DIR}/id_rsa" 52 | USER_SSH_PKEY="${USER_SSH_DIR}/id_rsa.pub" 53 | if [ ! -e ${USER_SSH_SKEY} ] && [ ! -e ${USER_SSH_PKEY} ] && [ -d ${USER_SSH_DIR} ] 54 | then 55 | echo "User does not have SSH keys at $(date)" 56 | # related: https://unix.stackexchange.com/questions/69314/automated-ssh-keygen-without-passphrase-how#69318 57 | # ssh-keygen -b 2048 -t rsa -f /tmp/sshkey -q -N "" 58 | # pr1="sudo -u user ssh-keygen -t rsa -N '' <<<''; echo '$ID' | sudo -u user tee -a ~user/.ssh/authorized_keys" 59 | sudo -u ${R_USER} ssh-keygen -t rsa -N '' <<<'' 60 | else 61 | echo "User ${R_USER} has SSH public and private keys at $(date)" 62 | fi 63 | # Dump the public key to variable ${ID} for later use 64 | ID=$(cat ${USER_SSH_DIR}/id_rsa.pub) 65 | 66 | # 4. Configure & install autossh script in watchdog 67 | echo "Changing the SSH_USER place holder text in the autossh script" 68 | sudo perl -pi -e "s/SSH_USER/${SSH_USER}/g" 69 | # change the TARGET_SERVER place holder text in the autossh script 70 | sudo perl -pi -e "s/TARGET_SERVER/${TARGET_SERVER}/g" /etc/watchdog.d/autossh_script 71 | 72 | # 5. 
Check everything looks right & restart 73 | # validate 74 | 75 | 76 | # start/restart 77 | 78 | 79 | # 5. Print what they need to do on target server 80 | echo "Done here, everything looks good. Now you need to go to your remote server and make sure the key is in the target user's authorized_keys file" 81 | echo "Target server is ${TARGET_SERVER}" 82 | echo "User on target server is ${R_USER}" 83 | echo "They need the next line in their ~/.ssh/authorized_keys file (~ means home dir of the user, like ~${R_USER}" 84 | echo "ID line to add to authorized_keys:" 85 | echo ${ID} 86 | echo "the file needs to be owned and readable by them, and nobody else, if problems try:" 87 | echo "sudo chown -R ${R_USER}:${R_USER} ${USER_SSH_DIR}" 88 | echo "sudo chmod u+rwx,og-rwx -R ${USER_SSH_DIR}" 89 | echo "sudo find ${USER_SSH_DIR} -type f -exec chmod u+rw,og-rwx {}\;" 90 | 91 | 92 | # 7. Profit! 93 | echo "Shold be done now, try to ssh to ${TARGET_SERVER} and the from there ssh to port ${R_PORT} on localhost, e.g.:" 94 | echo "ssh -t ${TARGET_SERVER} \"ssh -t -p ${R_PORT} localhost\"" 95 | -------------------------------------------------------------------------------- /bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc 2 | # Author: Jon Zobrist 3 | # Date: 2018-05-11 4 | 5 | # Path should prefer my ~/bin over all else 6 | # WARNING: This makes it easy for someone with access to your bin folder to trick you into giving up your secrets 7 | 8 | # Handy ls aliases 9 | # so often i'm looking for a file in cwd, alias it! 
10 | # Usage: lg 11 | # shows files that match 12 | # note this includes file data so rwx would show files with rwx perms 13 | alias lg='ls -halF | grep -i ' 14 | # looking for new files in a big directory 15 | alias lh="ls -Falht | head -n 15" 16 | # Make ll better 17 | alias ll='ls -alF' 18 | 19 | 20 | # ph: print hostname, path, file 21 | # often I want to grab a file or directory when I'm working remote 22 | # Usage: ph 23 | # Output: 24 | # scp -r @:/ 25 | # 26 | # File example: 27 | # ph capture-2018-05-10-1525911190.pcap.gz 28 | # scp ubuntu@server1.example.com:/home/ubuntu/capture-2018-05-10-1525911190.pcap.gz ./ 29 | # Dir example: 30 | # ph access-logs-2018-05-10 31 | # scp -r ubuntu@server1.example.com:/home/ubuntu/access-logs-2018-05-10 ./ 32 | # 33 | ph () { 34 | FILE=$1 35 | if [ -f $(pwd)/${FILE} ] 36 | then 37 | echo "scp ${USER}@$(hostname):$(pwd)/${FILE} ./" 38 | elif [ -d ${FILE} ] 39 | then 40 | echo "scp -r ${USER}@$(hostname):${FILE} ./${FILE}" 41 | else 42 | echo "scp -r ${USER}@$(hostname):$(pwd) ./" 43 | fi 44 | } 45 | 46 | 47 | # try_get: download file from url with exponential backoff and max tries 48 | # Everyone knows retries should include exponential backoff 49 | # Now everyone has an alias for it! 50 | # Usage: try_get [output-filename] [attempts] 51 | 52 | function try_get { 53 | URL=$1 54 | FILE=$2 55 | TRIES=$3 56 | START=$(date +%s) 57 | I=0 58 | if [ -z ${2} ]; then FILE_PREFIX=${URL##*/}; FILE=${FILE_PREFIX%%\?*}; fi 59 | if [ -z ${TRIES} ] || [ ${TRIES} -eq 0 ]; then TRIES=3; fi 60 | if [ "${DEBUG}" ]; then echo "Getting ${URL} to ${FILE} max ${TRIES} attempts at $(date)"; fi 61 | while [ ! -f ${FILE} ] 62 | do 63 | if [ "${DEBUG}" ]; then echo "calling curl for attempt ${I}"; fi 64 | CURL="curl -s -o ${FILE} ${URL}" 65 | if [ "${DEBUG}" ]; then echo "${CURL}"; fi 66 | ${CURL} 67 | RETURN=$? 
68 | if [ "${DEBUG}" ]; then echo "Return code: ${RETURN}"; fi 69 | let "SLEEP_TIME = ${I} * ${I}" 70 | if [ "${DEBUG}" ]; then echo "sleeping ${SLEEP_TIME}"; fi 71 | sleep ${SLEEP_TIME} 72 | ((I++)) 73 | done 74 | END=$(date +%s) 75 | let "ELAPSED_TIME = ${END} - ${START}" 76 | if [ ! ${RETURN} ] 77 | then 78 | echo "file exists" 79 | /bin/ls -hl ${FILE} 80 | elif [ ${RETURN} -gt 0 ] 81 | then 82 | echo "Failed to get ${FILE} from ${URL} after ${I} attempts and ${ELAPSED_TIME} seconds" 83 | cat ${FILE} 84 | else 85 | if [ "${DEBUG}" ] 86 | then 87 | echo "Got $(/bin/ls -1 ${FILE}) ${I} attempts after and ${ELAPSED_TIME} seconds" 88 | else 89 | echo "${FILE} ${I}" 90 | fi 91 | fi 92 | } 93 | 94 | 95 | # from https://www.networkworld.com/article/2694433/unix-good-coding-practices-for-bash.html 96 | function lower() 97 | { 98 | local str="$@" 99 | local output 100 | output=$(tr '[A-Z]' '[a-z]'<<<"${str}") 101 | echo $output 102 | } 103 | 104 | 105 | -------------------------------------------------------------------------------- /bluesun-setup/bluesun-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ### BEGIN INIT INFO 4 | # Provides: bluesun-setup 5 | # Short-Description: S3 file and DB checkout and deploy script 6 | ### END INIT INFO 7 | # 8 | # Author : Jon Zobrist 9 | # Homepage : http://www.jonzobrist.com 10 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 11 | # Copyright (c) 2012, Jon Zobrist 12 | # All rights reserved. 13 | # 14 | # Redistribution and use in source and binary forms, with or without 15 | # modification, are permitted provided that the following conditions are met: 16 | # 17 | # 1. Redistributions of source code must retain the above copyright notice, this 18 | # list of conditions and the following disclaimer. 19 | # 2. 
Redistributions in binary form must reproduce the above copyright notice, 20 | # this list of conditions and the following disclaimer in the documentation 21 | # and/or other materials provided with the distribution. 22 | # 23 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 24 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 27 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 | # 34 | # Purpose : This script backs up directories & MySQL to a bucket in S3, and restores them 35 | # Usage : bluesun-setup.sh [start|stop|updateS3] 36 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 37 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 38 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 39 | 40 | if [ -f /etc/bluesun-setup/server.conf ] 41 | then 42 | source /etc/bluesun-setup/server.conf 43 | else 44 | echo "No config file found, exiting at `date`" 45 | exit 1 46 | fi 47 | 48 | if [ ${DEBUG} -eq 1 ]; then echo "Starting ${0} at `date` using tmp dir ${TMPDIR}"; fi 49 | 50 | #Functions 51 | 52 | function check_mysql_ready { 53 | if [ ${DEBUG} -eq 1 ]; then echo "Waiting for MySQLD to be running."; fi 54 | COUNT=0 55 | #This did not work... 56 | #while ! mysql -e "show databases;" 2>&1 | grep -q information_schema 57 | while ! pgrep mysqld | grep -q [0-9] 58 | do 59 | if [ ${COUNT} -lt ${MAX_WAIT_FOR_MYSQL} ] 60 | then 61 | ((COUNT++)) 62 | sleep 1 63 | if [ ${DEBUG} -eq 1 ]; then echo -n "."; fi 64 | else 65 | echo "Waiting for MySQL Failed on timeout, waited longer than ${MAX_WAIT_FOR_MYSQL} seconds, exiting"; 66 | exit 1 67 | fi 68 | sleep 1 #We sleep for fun!!! 
69 | done 70 | MYSQL_IS_READY=1 71 | } 72 | 73 | 74 | 75 | 76 | 77 | case "${1}" in 78 | start) 79 | mkdir -p ${TMPDIR} 80 | if [ "${DIRS}" ] && [ -d ${TMPDIR} ] 81 | then 82 | for DIR in ${DIRS} 83 | do 84 | /bin/rm "${PINGFILE}" 85 | if [ ${DEBUG} -eq 1 ]; then echo "${DIR}"; fi 86 | /bin/rm -Rf ${DIR} 87 | mkdir -p ${DIR} 88 | PREFIX=`basename ${DIR}` 89 | FILENAME="${PREFIX}-current.tar.gz" 90 | if [ ${DEBUG} -eq 1 ]; then echo "Updating files from S3 ${S3_BUCKET} at `date`"; fi 91 | if [ ${DEBUG} -eq 1 ]; then echo "s3get ${S3_BUCKET}/${FILENAME} ${TMPDIR}/${FILENAME}"; fi 92 | s3get ${S3_BUCKET}/${FILENAME} ${TMPDIR}/${FILENAME} 93 | if [ ${DEBUG} -eq 1 ] 94 | then 95 | echo "tar -zxvf ${TMPDIR}/${FILENAME} -C ${DIR}" 96 | tar -zxvf ${TMPDIR}/${FILENAME} -C ${DIR} 97 | else 98 | if [ ${DEBUG} -eq 1 ]; then echo "tar -zxf ${TMPDIR}/${FILENAME} -C ${DIR}"; fi 99 | tar -zxf ${TMPDIR}/${FILENAME} -C ${DIR} 100 | fi 101 | sleep 1 #We sleep for fun!!! 102 | echo "[ok]" > "${PINGFILE}" 103 | done 104 | fi 105 | 106 | #Check our MySQL Variables, if both MYSQL_FILENAME *AND* MYSQL_DATABASES are set then we have an error case... 
107 | #MySQL Dump relies on having passwordless root/dumper acecss to mysql 108 | #MYSQL_FILENAME indicates the filename for a FULL MYSQLDUMP -A backup of an entire MYSQL server 109 | #I typically setup ~/.my.cnf for this 110 | if [ "${MYSQL_FILENAME}" ] 111 | then 112 | if [ ${DEBUG} -eq 1 ]; then echo "Downloading ${MYSQL_FILENAME} to ${TMPDIR} at `date`"; fi 113 | if [ ${DEBUG} -eq 1 ]; then echo "s3get ${S3_BUCKET}/${MYSQL_FILENAME} ${TMPDIR}/${MYSQL_FILENAME}"; fi 114 | s3get ${S3_BUCKET}/${MYSQL_FILENAME} ${TMPDIR}/${MYSQL_FILENAME} 115 | if [ ${DEBUG} -eq 1 ]; then echo "Restoring MySQL full DB `date`"; fi 116 | check_mysql_ready 117 | if [ ${DEBUG} -eq 1 ]; then echo "zcat ${TMPDIR}/${MYSQL_FILENAME} | mysql"; fi 118 | zcat ${TMPDIR}/${MYSQL_FILENAME} | sudo mysql 119 | fi 120 | 121 | #MYSQL_DATABASES 122 | # 123 | for DB in ${MYSQL_DATABASES} 124 | do 125 | if [ ${DEBUG} -eq 1 ]; then echo "Individual Database is ${DB}"; fi 126 | #DBNAME_TABLES check and processing 127 | if [ "`eval echo '$'${MYSQL_DATABASES}_TABLES`" ] 128 | then 129 | if [ ${DEBUG} -eq 1 ]; then echo "we have tables, `eval echo '$'${MYSQL_DATABASES}_TABLES`"; fi 130 | for TABLE in `eval echo '$'${MYSQL_DATABASES}_TABLES` 131 | do 132 | if [ ${DEBUG} -eq 1 ]; then echo "TABLE is ${TABLE}"; fi 133 | MYSQL_TABLE_FILENAME="${DB}-${TABLE}-${ENDFILENAME}" 134 | s3get ${S3_BUCKET}/${MYSQL_TABLE_FILENAME} ${TMPDIR}/${MYSQL_TABLE_FILENAME} 135 | if [ -f ${TMPDIR}/${MYSQL_TABLE_FILENAME} ] 136 | then 137 | check_mysql_ready 138 | if [ ${DEBUG} -eq 1 ]; then echo "zcat ${TMPDIR}/${MYSQL_TABLE_FILENAME} | mysql"; fi 139 | zcat ${TMPDIR}/${MYSQL_TABLE_FILENAME} | mysql ${DB} 140 | fi 141 | sleep 1 #We sleep for fun!!! 
142 | done 143 | else 144 | if [ ${DEBUG} -eq 1 ]; then echo "We have no tables"; fi 145 | if [ ${DEBUG} -eq 1 ]; then echo "Restoring full Database ${DB}"; fi 146 | MYSQL_DB_FILENAME="${DB}-${ENDFILENAME}" 147 | s3get ${S3_BUCKET}/${MYSQL_DB_FILENAME} ${TMPDIR}/${MYSQL_DB_FILENAME} 148 | check_mysql_ready 149 | if [ ${DEBUG} -eq 1 ]; then echo "zcat ${TMPDIR}/${MYSQL_DB_FILENAME} | mysql"; fi 150 | zcat ${TMPDIR}/${MYSQL_DB_FILENAME} | mysql ${DB} 151 | fi 152 | for TABLE in `eval echo '$'${MYSQL_DATABASES}_TABLES` 153 | do 154 | if [ ${DEBUG} -eq 1 ]; then echo "Specific TABLE for DB ${DB} is ${TABLE}"; fi 155 | sleep 1 #We sleep for fun!!! 156 | done 157 | sleep 1 #We sleep for fun!!! 158 | done 159 | 160 | if [ "${COMMANDS}" ] 161 | then 162 | for COMMAND in ${COMMANDS} 163 | do 164 | if [ ${DEBUG} -eq 1 ]; then echo "Running command ${COMMAND} from ${S3_BUCKET} at `date`"; fi 165 | if [ ${DEBUG} -eq 1 ]; then echo "s3get ${S3_BUCKET}/${COMMAND} ${TMPDIR}/${COMMAND}"; fi 166 | s3get ${S3_BUCKET}/${COMMAND} ${TMPDIR}/${COMMAND} 167 | chmod u+x ${TMPDIR}/${COMMAND} 168 | if [ -f ${TMPDIR}/${COMMAND} ] 169 | then 170 | if [ ${DEBUG} -eq 1 ]; then echo "${TMPDIR}/${COMMAND}"; fi 171 | ${TMPDIR}/${COMMAND} 172 | fi 173 | sleep 1 #We sleep for fun!!! 
174 | done 175 | fi 176 | if [ ${DEBUG} -eq 1 ]; then echo "[ok] > ${PINGFILE}"; fi 177 | echo "[ok]" > "${PINGFILE}" 178 | ;; 179 | stop) 180 | /bin/rm "${PINGFILE}" 181 | ;; 182 | updateS3) 183 | if [ ${DEBUG} -eq 1 ]; then echo "Updating archive files to S3 started at `date`"; fi 184 | mkdir -p ${TMPDIR} 185 | for DIR in ${DIRS} 186 | do 187 | if [ ${DEBUG} -eq 1 ]; then echo "${DIR}"; fi 188 | PREFIX=`basename ${DIR}` 189 | FILENAME="${PREFIX}-current.tar.gz" 190 | ARCHIVE_FILENAME="${PREFIX}-${HOSTNAME}-${TIMESTAMP}.tar.gz" 191 | if [ ${DEBUG} -eq 1 ]; then echo "Creating tar file ${FILENAME} at `date`"; fi 192 | cd ${DIR} 193 | if [ ${DEBUG} -eq 1 ] 194 | then 195 | if [ ${DEBUG} -eq 1 ]; then echo "tar -zvcf ${TMPDIR}/${FILENAME} *"; fi 196 | tar -zvcf ${TMPDIR}/${FILENAME} * 197 | else 198 | if [ ${DEBUG} -eq 1 ]; then echo "tar -zcf ${TMPDIR}/${FILENAME} *"; fi 199 | tar -zcf ${TMPDIR}/${FILENAME} * 200 | fi 201 | if [ ${DEBUG} -eq 1 ]; then echo "Uploading to S3 ${S3_BUCKET} at `date`"; fi 202 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${FILENAME} ${TMPDIR}/${FILENAME}"; fi 203 | s3put ${S3_BUCKET}/${FILENAME} ${TMPDIR}/${FILENAME} 204 | if [ ${ARCHIVE} -eq 1 ] 205 | then 206 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${ARCHIVE_FILENAME} ${TMPDIR}/${FILENAME}"; fi 207 | s3put ${S3_BUCKET}/${ARCHIVE_FILENAME} ${TMPDIR}/${FILENAME} 208 | fi 209 | sleep 1 #We sleep for fun!!! 
210 | done 211 | #MySQL Dump relies on having passwordless root/dumper acecss to mysql 212 | #I typically setup ~/.my.cnf for this 213 | if [ "${MYSQL_FILENAME}" ] 214 | then 215 | if [ ${DEBUG} -eq 1 ]; then echo "Dumping MySQL full DB `date`"; fi 216 | if [ ${DEBUG} -eq 1 ]; then echo "mysqldump -A | gzip -c - > ${TMPDIR}/${MYSQL_FILENAME}"; fi 217 | mysqldump -A | gzip -c - > ${TMPDIR}/${MYSQL_FILENAME} 218 | if [ ${DEBUG} -eq 1 ]; then echo "Uploading ${MYSQL_FILENAME} to S3 at `date`"; fi 219 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${MYSQL_FILENAME} ${TMPDIR}/${MYSQL_FILENAME}"; fi 220 | s3put ${S3_BUCKET}/${MYSQL_FILENAME} ${TMPDIR}/${MYSQL_FILENAME} 221 | if [ ${ARCHIVE} -eq 1 ] 222 | then 223 | MYSQL_ARCHIVE_FILENAME="mysqldump-${TIMESTAMP}-${ENDFILENAME}" 224 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${MYSQL_ARCHIVE_FILENAME} ${TMPDIR}/${MYSQL_FILENAME}"; fi 225 | s3put ${S3_BUCKET}/${MYSQL_ARCHIVE_FILENAME} ${TMPDIR}/${MYSQL_FILENAME} 226 | fi 227 | fi 228 | 229 | #Backup individual databases 230 | #MYSQL_DATABASES 231 | # 232 | for DB in ${MYSQL_DATABASES} 233 | do 234 | if [ ${DEBUG} -eq 1 ]; then echo "Backing Up Individual Database ${DB}"; fi 235 | #DBNAME_TABLES check and processing 236 | if [ "`eval echo '$'${DB}_TABLES`" ] 237 | then 238 | if [ ${DEBUG} -eq 1 ]; then echo "we have tables, `eval echo '$'${DB}_TABLES`"; fi 239 | for TABLE in `eval echo '$'${DB}_TABLES` 240 | do 241 | if [ ${DEBUG} -eq 1 ]; then echo "TABLE is ${TABLE}"; fi 242 | MYSQL_TABLE_FILENAME="${DB}-${TABLE}-${ENDFILENAME}" 243 | if [ ${DEBUG} -eq 1 ]; then echo "Dumping table ${TABLE} on ${DB} `date`"; fi 244 | check_mysql_ready 245 | if [ ${DEBUG} -eq 1 ]; then echo "mysqldump ${MYSQLDUMP_ARGS} ${DB} --tables ${TABLE} | gzip -c - > ${TMPDIR}/${MYSQL_TABLE_FILENAME}"; fi 246 | mysqldump ${MYSQLDUMP_ARGS} ${DB} --tables ${TABLE} | gzip -c - > ${TMPDIR}/${MYSQL_TABLE_FILENAME} 247 | if [ ${DEBUG} -eq 1 ]; then echo "s3put 
${S3_BUCKET}/${MYSQL_TABLE_FILENAME} ${TMPDIR}/${MYSQL_TABLE_FILENAME}"; fi 248 | s3put ${S3_BUCKET}/${MYSQL_TABLE_FILENAME} ${TMPDIR}/${MYSQL_TABLE_FILENAME} 249 | #ARCHIVE TABLE 250 | if [ ${ARCHIVE} -eq 1 ] 251 | then 252 | TABLE_ARCHIVE_FILENAME="table-${DB}-${TABLE}-${TIMESTAMP}.tar.gz" 253 | s3put ${S3_BUCKET}/${TABLE_ARCHIVE_FILENAME} ${TMPDIR}/${MYSQL_TABLE_FILENAME} 254 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${TABLE_ARCHIVE_FILENAME} ${TMPDIR}/${MYSQL_TABLE_FILENAME}"; fi 255 | fi 256 | sleep 1 #We sleep for fun!!! 257 | done 258 | else 259 | if [ ${DEBUG} -eq 1 ]; then echo "We have no tables"; fi 260 | if [ ${DEBUG} -eq 1 ]; then echo "Backing up full Database ${DB}"; fi 261 | MYSQL_DB_FILENAME="${DB}-${ENDFILENAME}" 262 | check_mysql_ready 263 | if [ ${DEBUG} -eq 1 ]; then echo "mysqldump ${MYSQLDUMP_ARGS} ${DB} | gzip -c - > ${TMPDIR}/${MYSQL_DB_FILENAME}"; fi 264 | mysqldump ${MYSQLDUMP_ARGS} ${DB} | gzip -c - > ${TMPDIR}/${MYSQL_DB_FILENAME} 265 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${MYSQL_DB_FILENAME} ${TMPDIR}/${MYSQL_DB_FILENAME}"; fi 266 | s3put ${S3_BUCKET}/${MYSQL_DB_FILENAME} ${TMPDIR}/${MYSQL_DB_FILENAME} 267 | #ARCHIVE DATABASE 268 | if [ ${ARCHIVE} -eq 1 ] 269 | then 270 | DB_ARCHIVE_FILENAME="DB-${DB}-${TIMESTAMP}.tar.gz" 271 | if [ ${DEBUG} -eq 1 ]; then echo "s3put ${S3_BUCKET}/${DB_ARCHIVE_FILENAME} ${TMPDIR}/${MYSQL_DB_FILENAME}"; fi 272 | s3put ${S3_BUCKET}/${DB_ARCHIVE_FILENAME} ${TMPDIR}/${MYSQL_DB_FILENAME} 273 | fi 274 | fi 275 | for TABLE in `eval echo '$'${DB}_TABLES` 276 | do 277 | if [ ${DEBUG} -eq 1 ]; then echo "Specific TABLE for DB ${DB} is ${TABLE}"; fi 278 | sleep 1 #We sleep for fun!!! 279 | done 280 | sleep 1 #We sleep for fun!!! 
281 | done 282 | ;; 283 | *) 284 | echo "Usage ${0} [start|stop|updateS3]" 285 | exit 1 286 | esac 287 | 288 | if [ ${DEBUG} -eq 1 ]; then echo "Cleaning up ${TMPDIR}"; fi 289 | /bin/rm -Rf ${TMPDIR} 290 | -------------------------------------------------------------------------------- /bluesun-setup/bluesun-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | # 29 | # Purpose : This script backs up directories & MySQL to a bucket in S3, and restores them 30 | # Usage : bluesun-setup.sh [start|stop|updateS3] 31 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 32 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 33 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 34 | 35 | echo "${0} called with ${@}" >> /var/log/bluesun-setup-init.log 2>&1 36 | case "${1}" in 37 | start) 38 | /etc/init.d/bluesun-setup.sh start >> /var/log/bluesun-setup-start.log 2>&1 39 | ;; 40 | stop) 41 | /etc/init.d/bluesun-setup.sh stop >> /var/log/bluesun-setup-stop.log 2>&1 42 | ;; 43 | *) 44 | #Catch any other way we're being called in a logfile 45 | echo "${0} called with ${@}" >> /var/log/bluesun-setup-wildcard.log 2>&1 46 | ;; 47 | esac 48 | -------------------------------------------------------------------------------- /bluesun-setup/server.conf: -------------------------------------------------------------------------------- 1 | #Config file 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution.
17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : This script backs up directories & MySQL to a bucket in S3, and restores them 30 | # Usage : bluesun-setup.sh [start|stop|updateS3] 31 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 32 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 33 | # WARNING : Running this script with start will DELETE your local versions of whatever you have it set to backup 34 | 35 | S3_BUCKET="www-backups.example.com" #S3 bucket named where you will store all the files 36 | PATH="${PATH}:/usr/local/bin:/usr/local/programs/aws/bin" 37 | DIRS="/var/www /etc/apache2" 38 | TMPDIR="/tmp/bluesun-update-`hostname`-`date +%F-%s`" 39 | 40 | ##Set this to any scripts/programs you want to download and run on a server during restore 41 | ##Note that this script does not put the commands in the list into the S3 bckup, you have to do that separately 42 | #COMMANDS="update-start.sh" 43 | PINGFILE="/var/www/example.com/ping.html" #Set this to where the Elastic Load Balancer checks for a ping file 44 | ENDFILENAME="latest.sql.gz" #Set 
this to the suffix you want for the end of the latest/current file. 45 | 46 | #*************************** MYSQL CONFIGURATION ***************************# 47 | # READ THESE COMMENTS THIS COULD BE DANGEROUS IF YOU FORGET TO CREATE CUSTOM VARIABLES PER DATABASE 48 | # 49 | # ALL of the MYSQL variables cascade 50 | # MYSQL_FILENAME points to an ALL DATABASES database dump to restore 51 | # If you set only this you will get a full mysql restore using the file set 52 | # MYSQL_DATABASES points to individual database names to be restored 53 | # If you set only this you will get a full restore of just the named databases (separated by spaces) 54 | # If you set this in addition to MYSQL_FILENAME you will get an ALL DATABASES restore, 55 | # then individual DATABASES named here will be restored AFTER the ALL DATABASES restore 56 | # databaseName_TABLES points to specific list of tables that will be restored in the named database 57 | # If you set this for a named database then only the individual tables in that database will be restored (separated by spaces) 58 | # 59 | # Example : You have a database server for your company named dbserver01, it has databases customers,employees these have tables people,access-list 60 | # 61 | # Situation 1 : You want to backup/restore the full database 62 | # You set the variable MYSQL_FILENAME="dbserver01-mysqldump${ENDFILENAME}" 63 | # 64 | # Situation 2 : You want to backup/restore only the customers database 65 | # You set the variable MYSQL_DATABASES="customers" 66 | # 67 | # Situation 3 : You want to backup/restore only the customers AND employees databases 68 | # You set the variable MYSQL_DATABASES="customers employees" 69 | # 70 | # Situation 4 : You want to backup/restore only the people table in the customers database 71 | # You set the variable MYSQL_DATABASES="customers" 72 | # AND you set the variable customers_TABLES="people" 73 | # 74 | # Situation 4 : You want to backup/restore both the people and access-list tables in 
both the customers and employees databases 75 | # You set the variable MYSQL_DATABASES="customers employees" 76 | # AND you set the variable customers_TABLES="people access-list" 77 | # 78 | # Situation 5 : You want to backup/restore a FULL BACKUP of ALL DATABASES, and THEN backup/restore on top of that 79 | # both the people and access-list tables in both the customers and employees databases 80 | # You set the variables 81 | # MYSQL_FILENAME="dbserver01-mysqldump${ENDFILENAME}" 82 | # MYSQL_DATABASES="customers employees" 83 | # AND you set the variable customers_TABLES="people access-list" 84 | # 85 | MYSQL_FILENAME="mysqldump-${ENDFILENAME}" #Filename of a FULL mysql database backup, is restored without a DATABASE name, mysql -u root < ${MYSQL_FILENAME} 86 | #MYSQL_DATABASES="centDB" #Enter DB names seperated by spaces, don't forget to CREATE variables for each DB for the specific tables to backup/restore 87 | #siloDB_TABLES="person" #dbname_TABLES variable will be checked for every entry in MYSQL_DATABASES, if there is NONE it will backup/restore the ENTIRE named DATABASE 88 | #centDB_TABLES="settingDef" 89 | ##******************************************** READ COMMENTS ABOVE!!!! DANGEROUS !!!!! *****#### 90 | DEBUG=1 # 0 disabled (silent), 1 enabled (DEBUG) 91 | ARCHIVE=1 #1 enables archiving every file that is put to S3, so there will be 1 file created, and uploaded twice each, once as the name PREFIX-current.tar.gz and once with the name PREFIX-timestamp.tar.gz . This will greatly increase the amount of space you have, but will save you from accidentally overwriting your only good backup. 
92 | TIMESTAMP=`date +%F-%s` #change here if you want a format other than 2011-07-23-1311447264, avoid spaces or white space 93 | MAX_WAIT_FOR_MYSQL=300 #Set this to the longest time you want to wait for MySQL before aborting 94 | MYSQLDUMP_ARGS="--single-transaction --add-drop-table=TRUE --lock-tables=TRUE --replace=true --extended-insert" #Set this to any special mysqldump arguments you want to use 95 | -------------------------------------------------------------------------------- /download_file_with_exponential_backoff_function.sh: -------------------------------------------------------------------------------- 1 | 2 | function try_get_old { 3 | FILE=$1 4 | URL=$2 5 | TRIES=$3 6 | I=0 7 | if [ -z ${TRIES} ] || [ ${TRIES} -eq 0 ]; then TRIES=3; fi 8 | while [ ! -f ${FILE} ] 9 | do 10 | curl -s -o ${FILE} ${URL} 11 | let "SLEEP_TIME = ${I} * ${I}" 12 | sleep ${SLEEP_TIME} 13 | ((I++)); if [ ${I} -ge ${TRIES} ]; then break; fi #honor TRIES so a curl that never creates the file cannot loop forever 14 | done 15 | } 16 | 17 | 18 | 19 | function try_get { 20 | FILE=$1 21 | URL=$2 22 | TRIES=$3 23 | I=0 24 | if [ -z ${TRIES} ] || [ ${TRIES} -eq 0 ]; then TRIES=3; fi 25 | while [ ! -f ${FILE} ] 26 | do 27 | let "SLEEP_TIME = ${I} * ${I}" 28 | sleep ${SLEEP_TIME} 29 | RESP=$(curl -s -w '%{http_code}\n' -o ${FILE} ${URL}) 30 | if [ ${RESP} -ne 200 ] 31 | then 32 | if [ "${DEBUG}" ]; then echo "Failed, HTTP ${RESP}, deleting file"; fi 33 | /bin/rm ${FILE} 34 | else 35 | if [ "${DEBUG}" ]; then echo "Return code was ${RESP}"; fi 36 | echo "Got ${FILE} from ${URL}" 37 | fi 38 | ((I++)) 39 | if [ ${I} -ge ${TRIES} ] 40 | then 41 | if [ "${DEBUG}" ]; then echo "Giving up after ${I} tries, last HTTP code was ${RESP}"; fi 42 | break 43 | fi 44 | done 45 | } 46 | 47 | -------------------------------------------------------------------------------- /dynamic-dns-route53/LICENSE: -------------------------------------------------------------------------------- 1 | This collection is authored by Jon Zobrist 2 | My website - http://www.jonzobrist.com/ 3 | Follow me on Twitter @jonzobrist 4 | 5 | All included files are licensed under the BSD License 6 | 7 | ###################################################### 8 | # Author : Jon Zobrist 9 | # Homepage : http://www.jonzobrist.com 10 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 11 | # Copyright (c) 2022, Jon Zobrist 12 | # All rights reserved. 13 | 14 | # Redistribution and use in source and binary forms, with or without 15 | # modification, are permitted provided that the following conditions are met: 16 | # 17 | # 1. Redistributions of source code must retain the above copyright notice, this 18 | # list of conditions and the following disclaimer. 19 | # 2. Redistributions in binary form must reproduce the above copyright notice, 20 | # this list of conditions and the following disclaimer in the documentation 21 | # and/or other materials provided with the distribution.
22 | # 23 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 24 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 27 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | # 34 | ###################################################### 35 | 36 | 37 | -------------------------------------------------------------------------------- /dynamic-dns-route53/README.md: -------------------------------------------------------------------------------- 1 | ## Dynamic DNS with Amazon Route 53 in Bash/command line ## 2 | 3 | ### Purpose : This script aims to provide dynamic DNS names using Amazon Route 53 ### 4 | 5 | It attempts to detect your public IP, track whether it has changed, and update it in Route 53 6 | ### Requires: ### 7 | aws command (awscli) - https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 8 | curl - https://curl.se/download.html 9 | And egrep, dig or nslookup 10 | 11 | ### Setup: ### 12 | Mark script executable, put where you want it, for me that's usually ${HOME}/bin/ (aka ~/bin), and the conf and template json in ${HOME}/etc/.
13 | 14 | ``` 15 | chmod uog+x update-dns-external.sh 16 | mkdir -p ${HOME}/bin/ 17 | cp update-dns-external.sh ${HOME}/bin/ 18 | mkdir -p ${HOME}/etc/ 19 | cp update-dns-external.conf ${HOME}/etc/ 20 | cp external.json ${HOME}/etc/ 21 | crontab -l > ${HOME}/cron 22 | mkdir -p ${HOME}/logs/ 23 | echo "*/15 * * * * ${HOME}/bin/update-dns-external.sh >> ${HOME}/logs/dns-external.log" >> ${HOME}/cron 24 | crontab ${HOME}/cron 25 | ``` 26 | 27 | Edit config file, or script to taste. You will need a working AWS account, to know your Route 53 hosted zone ID, change the aws CLI profile if you aren't using default. 28 | 29 | ``` 30 | aws route53 list-resource-record-sets --hosted-zone-id AWSHOSTEDZONEID --query "ResourceRecordSets[?Name == 'www.example.com.'].ResourceRecords[].Value" --output text 31 | ``` 32 | 33 | Usage : update-dns-external.sh 34 | Typically from crontab something like 35 | 36 | ``` 37 | */15 * * * * /home/ubuntu/bin/update-dns-external.sh >> ${HOME}/logs/dns-external.log 38 | ``` 39 | 40 | ### Troubleshooting ### 41 | If it isn't working, enable DEBUG either in the config or in your console, and re-run it and look through the output to see where it failed 42 | ``` 43 | declare -x DEBUG=1 44 | ``` 45 | 46 | If in DEBUG you see a variable called out as set or being used but the value doesn't appear, double-check your setup to make sure it's correct. 
47 | 48 | -------------------------------------------------------------------------------- /dynamic-dns-route53/external.json: -------------------------------------------------------------------------------- 1 | { 2 | "Comment": "UPSERT record", 3 | "Changes": [{ 4 | "Action": "UPSERT", 5 | "ResourceRecordSet": { 6 | "Name": "RECORD_NAME", 7 | "Type": "A", 8 | "TTL": 300, 9 | "ResourceRecords": [{ "Value": "RECORD_VALUE"}] 10 | } 11 | }] 12 | } 13 | -------------------------------------------------------------------------------- /dynamic-dns-route53/update-dns-external.conf: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Bash config file for update-dns-external.sh 3 | # It is 'sourced' from that script if it exists 4 | 5 | # This is a separate config file so you can keep it apart from code. 6 | ################################################################ 7 | # START OF CONFIGURATION SECTION 8 | ################################################################ 9 | 10 | # Do you want DEBUG on? Anything true will be on (1, true, randomstring), false (0/unset) will be off 11 | DEBUG=${DEBUG:-0} 12 | export DEBUG 13 | 14 | # FQDN can be static or use this hosts hostname output for it 15 | # FQDN="$(hostname)." 16 | # Don't forget to put a '.' at the end of hostname used 17 | FQDN="host.example.com." 18 | export FQDN 19 | 20 | # Host is everything left of the first '.' 21 | HOST=${FQDN%%.*} 22 | export HOST 23 | 24 | # Apex domain is everything after the first '.', note this won't work for all sub-domains/configurations 25 | # for example it won't work if your FQDN is www.subdomain.example.com and the apex record you want is example.com and not subdomain.example.com 26 | APEXD=${FQDN#*.} 27 | export APEXD 28 | 29 | # Hosted zone ID from Route53 30 | # aws --profile ${P} route53 list-hosted-zones-by-name --output text --dns-name "example.com." 
--query 'HostedZones[0].Id' 31 | HOSTED_ZONE_ID="AWSZONEID" 32 | export HOSTED_ZONE_ID 33 | 34 | # Location of a file to record the last public IP we saw 35 | LAST_IP_FILE="${HOME}/etc/lastpublicip" 36 | export LAST_IP_FILE 37 | 38 | # Which AWS profile you want to use, if you don't have one set to 'default' 39 | # AWS_PROFILE="default" 40 | AWS_PROFILE="default" 41 | export AWS_PROFILE 42 | 43 | # Do you want to use the Route 53 API to check your IP? 44 | # API calls are more expensive, potentially in $$$ than DNS queries 45 | USE_R53_API=0 46 | export USE_R53_API 47 | 48 | 49 | # This is the JSON template file for the Route 53 update 50 | # Ideally Route 53 wouldn't need this, but I did it this way years ago and am too lazy rn to change 51 | # Feel free to send me a PR with a single script version or change the API to a better one 52 | EXTERNAL_JSON_TEMPLATE="${HOME}/etc/external.json" 53 | export EXTERNAL_JSON_TEMPLATE 54 | 55 | # Where to get the IP from, can return almost any text 56 | # We'll grep -o out the IP address, and only use the first 1 57 | # This may break if your page has multiple IPs in it 58 | # I use www.dangfast.com/ip as it's my test site using httpbin.org 59 | # HTTPBin has /ip built in, and returns JSON format by default 60 | # No guarantee it'll be up forever. 
61 | # Note, using HTTPS is more reliable to get your actual IP 62 | # As HTTP is more commonly proxied than HTTPS, which is more commoly routed 63 | IP_HTTP_URL="https://www.dangfast.com/ip" 64 | export IP_HTTP_URL 65 | 66 | # print a debug of the CSV file to track changes over time to 67 | # Todo: make this a live google spreadsheet 68 | CSV_HISTORY="${HOME}/etc/ip-log-change-$(date +%Y).csv" 69 | export CSV_HISTORY 70 | 71 | ################################################################ 72 | # END OF CONFIGURATION SECTION 73 | ################################################################ 74 | -------------------------------------------------------------------------------- /dynamic-dns-route53/update-dns-external.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # 4 | # Author : Jon Zobrist 5 | # Homepage : http://www.jonzobrist.com 6 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 7 | # Copyright (c) 2012, Jon Zobrist 8 | # All rights reserved. 9 | # 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are met: 12 | # 13 | # 1. Redistributions of source code must retain the above copyright notice, this 14 | # list of conditions and the following disclaimer. 15 | # 2. Redistributions in binary form must reproduce the above copyright notice, 16 | # this list of conditions and the following disclaimer in the documentation 17 | # and/or other materials provided with the distribution. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 23 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | # 30 | # Purpose : This script aims to provide dynamic DNS names using Amazon Route 53 31 | # It attempts to detected your public IP, track if it has changed, 32 | # and update Route 53 if it has 33 | # Usage : update-dns-external.sh 34 | # Typically from crontab something like 35 | # */15 * * * * /home/ubuntu/bin/update-dns-external.sh >> ${HOME}/logs/dns-external.log 36 | 37 | # Everything should have a debug statement that is easily toggled 38 | # This will trip if you set DEBUG to 1 39 | # DEBUG=1 40 | DEBUG () { 41 | if [ "${DEBUG}" ] 42 | then 43 | if [ ${DEBUG} -eq 1 ] 44 | then 45 | echo "DEBUG $(date +%s): ${1}" 46 | fi 47 | fi 48 | } 49 | 50 | # If you want to separate config from code. If not, do'nt have config file and set values in the else block 51 | CONFIG_FILE="${HOME}/etc/update-dns-external.conf" 52 | if [ "${CONFIG_FILE}" ] 53 | then 54 | if [ -e "${CONFIG_FILE}" ] 55 | then 56 | DEBUG "Using config file" 57 | # shellcheck source=/dev/null 58 | source "${CONFIG_FILE}" 59 | fi 60 | else 61 | DEBUG "NOT using a config file, variables loaded from script" 62 | # Do you want DEBUG on? 
Anything true will be on (1, true, randomstring), false (0/unset) will be off 63 | DEBUG=${DEBUG:-0} 64 | export DEBUG 65 | 66 | # TODO: add test only mode 67 | # Maybe you only want to test the API call to Route 53, set this true 68 | TEST_ONLY_MODE=1 69 | export TEST_ONLY_MODE 70 | 71 | # FQDN can be static or use this hosts hostname output for it 72 | FQDN=$(hostname) 73 | export FQDN 74 | 75 | # Host is everything left of the first '.' 76 | HOST=${FQDN%%.*} 77 | export HOST 78 | 79 | # Apex domain is everything after the first '.', note this won't work for all sub-domains/configurations 80 | # for example it won't work if your FQDN is www.subdomain.example.com and the apex record you want is example.com and not subdomain.example.com 81 | APEXD=${FQDN#*.} 82 | export APEXD 83 | 84 | # Hosted zone ID from Route53 85 | # aws --profile ${P} route53 list-hosted-zones-by-name --output text --dns-name "example.com." --query 'HostedZones[0].Id' 86 | HOSTED_ZONE_ID="AWSZONEID" 87 | export HOSTED_ZONE_ID 88 | 89 | # Location of a file to record the last public IP we saw 90 | # This used to be used but I now check R53 setting instead of tracking 91 | LAST_IP_FILE="${HOME}/etc/lastpublicip" 92 | export LAST_IP_FILE 93 | 94 | # Which AWS profile you want to use, if you don't have one set to 'default' 95 | AWS_PROFILE="default" 96 | export AWS_PROFILE 97 | 98 | # This is the JSON template file for the Route 53 update 99 | # Ideally Route 53 wouldn't need this, but I did it this way years ago and am too lazy rn to change 100 | # Feel free to send me a PR with a single script version or change the API to a better one 101 | EXTERNAL_JSON_TEMPLATE="${HOME}/etc/external.json" 102 | export EXTERNAL_JSON_TEMPLATE 103 | 104 | # Where to get the IP from, can return almost any text 105 | # We'll grep -o out the IP address, and only use the first 1 106 | # This may break if your page has multiple IPs in it 107 | # I use www.dangfast.com/ip as it's my test site using httpbin.org 108 | # 
HTTPBin has /ip built in, and returns JSON format by default 109 | # No guarantee it'll be up forever. 110 | # Note, using HTTPS is more reliable to get your actual IP 111 | # As HTTP is more commonly proxied than HTTPS, which is more commoly routed 112 | IP_HTTP_URL="https://www.dangfast.com/ip" 113 | export IP_HTTP_URL 114 | 115 | # print a debug of the CSV file to track changes over time to 116 | # Todo: make this a live google spreadsheet 117 | CSV_HISTORY="${HOME}/ip-log-change-$(date +%Y).csv" 118 | export CSV_HISTORY 119 | 120 | # Do you want to use the Route 53 API to check your IP? 121 | # API calls are more expensive, potentially in $$$ than DNS queries 122 | USE_R53_API=1 123 | export USE_R53_API 124 | fi 125 | 126 | # I install awscli via PIP and prefer to specify --user, like 127 | # pip3 install --user awscli 128 | # And my cron doesn't source my .zshrc, where I set a path 129 | # So I use this for cron jobs 130 | PIP_LOCAL_DIR="${HOME}/.local/bin" 131 | 132 | if [ -d "${PIP_LOCAL_DIR}" ] 133 | then 134 | export PATH="${PATH}:${PIP_LOCAL_DIR}" 135 | fi 136 | 137 | ################################################################ 138 | # END OF CONFIGURATION SECTION 139 | ################################################################ 140 | 141 | ################################################################ 142 | # Dependency checking 143 | # This script depends on the AWS CLI being installed 144 | # https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 145 | AWS_CMD=$(command -v aws) 146 | # This script depends on the curl command being installed) 147 | # https://curl.se/download.html 148 | CURL_CMD=$(command -v curl) 149 | GREP_CMD=$(command -v egrep) 150 | DIG_CMD=$(command -v dig) 151 | NSLOOKUP_CMD=$(command -v nslookup) 152 | ################################################################ 153 | 154 | # Make sure we have our required executables 155 | if [ -x "${AWS_CMD}" ] || [ -x "${CURL_CMD}" ] || [ -x "${GREP_CMD}" 
] || { [ -x "${DIG_CMD}" ] || [ -x "${NSLOOKUP_CMD}" ]; } 156 | then 157 | 158 | ################################################################ 159 | # Determine our last known IP, the Route 53 IP (via R53 API or DNS), 160 | # and what we are seen as publicly/public IP 161 | ################################################################ 162 | # Do we have a last IP file? 163 | # If so, grep the first IP out of it! 164 | # re-bootstrapping can start without a LAST_IP_FILE 165 | # or be initiated by deleting the LAST_IP_FILE 166 | if [ -f "${LAST_IP_FILE}" ] 167 | then 168 | LAST_IP=$(${GREP_CMD} -o '(\b25[0-5]|\b2[0-4][0-9]|\b[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}' "${LAST_IP_FILE}" | head -n 1) 169 | DEBUG "${LAST_IP_FILE} exists at $(date), and the last IP was ${LAST_IP}" 170 | else 171 | # Don't have a LAST_FILE_FILE? 172 | # Move ahead with NONE 173 | LAST_IP='NONE' 174 | fi 175 | 176 | # Above we parsed our last IP out of the last IP file or set it to NONE 177 | # This should trigger if we had a LAST_IP_FILE but failed to match it in our regex 178 | # In which case we want to overwrite that file with a new, garbage IP 179 | if [ ! 
"${LAST_IP}" ] 180 | then 181 | LAST_IP='NONE' 182 | /bin/rm "${LAST_IP_FILE}" 183 | echo ${LAST_IP} > "${LAST_IP_FILE}" 184 | chmod og-rwx "${LAST_IP_FILE}" 185 | DEBUG "${LAST_IP_FILE} created with NONE at $(date)" 186 | fi 187 | 188 | # Route 53 API can tell us the current IP if we prefer it over DNS 189 | # Toggle w/USE_R53_API flag 190 | if [ "${USE_R53_API}" ] 191 | then 192 | # print our command if DEBUG is true 193 | DEBUG "We're using Route 53 API to check for the current IP, since USE_R53_API is true" 194 | DEBUG "${AWS_CMD} --profile ${AWS_PROFILE:-default} route53 list-resource-record-sets --hosted-zone-id ${HOSTED_ZONE_ID} --query \"ResourceRecordSets[?Name == '${FQDN}'].ResourceRecords[].Value\" --output text" 195 | 196 | # Execute our Route 53 API call to get the last IP 197 | R53IP=$("${AWS_CMD}" --profile ${AWS_PROFILE:-default} route53 list-resource-record-sets --hosted-zone-id ${HOSTED_ZONE_ID} --query "ResourceRecordSets[?Name == '${FQDN}'].ResourceRecords[].Value" --output text) 198 | DEBUG "Got R53IP from Route 53 API and it's value is ${R53IP}" 199 | else 200 | DEBUG "Using DNS and NOT using Route 53 API to find currently configured IP" 201 | if [ -x "${DIG_CMD}" ] 202 | then 203 | DEBUG "We have dig at ${DIG_CMD}, and USE_R53_API is not set" 204 | R53IP=$("${DIG_CMD}" +short "${FQDN}" | "${GREP_CMD}" -o '(\b25[0-5]|\b2[0-4][0-9]|\b[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}' | head -n 1) 205 | elif [ -x "${NSLOOKUP_CMD}" ] 206 | then 207 | DEBUG "We have no dig, but do have nslookup at ${NSLOOKUP_CMD}, and USE_R53_API is not set" 208 | R53IP=$("${NSLOOKUP_CMD}" "${FQDN}" | "${GREP_CMD}" -o '(\b25[0-5]|\b2[0-4][0-9]|\b[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}' | head -n 1) 209 | fi 210 | fi 211 | # print a debug summary of what we've got and what we're doing 212 | DEBUG "CURR R53 IP ${R53IP}, Last IP ${LAST_IP}, FQDN ${FQDN}, HOST ${HOST}, APEXD ${APEXD}" 213 | 214 | # Get our external / publicly 
viewable IP address 215 | IP=$(${CURL_CMD} -s ${IP_HTTP_URL} | ${GREP_CMD} -o '(\b25[0-5]|\b2[0-4][0-9]|\b[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}') 216 | DEBUG "Got IP:${IP} from ${IP_HTTP_URL} at $(date)" 217 | ################################################################ 218 | # END OF IP INFORMATION FINDING SECTION 219 | ################################################################ 220 | 221 | 222 | ################################################################ 223 | # Let's compare and update if we don't match 224 | # If they string match, we are golden (DNS/R53 IP matches our external/public IP) - no action required 225 | # If they DO NOT string match, we need to update the DNS w/Route 53 226 | if [ "${R53IP}" ] && [ "${IP}" ] && [ ! "${R53IP}" == "${IP}" ] 227 | then 228 | DEBUG "Route53/DNS IP ${R53IP} is not current IP ${IP} updating at $(date)" 229 | # JSON_RECORD=$(sed "s/RECORD_VALUE/${IP}/" < "${EXTERNAL_JSON_TEMPLATE}") 230 | JSON_RECORD=$(sed -e "s/RECORD_VALUE/${IP}/" -e "s/RECORD_NAME/${FQDN}/" < "${EXTERNAL_JSON_TEMPLATE}") 231 | DEBUG "JSON_RECORD" 232 | DEBUG "${JSON_RECORD}" 233 | 234 | if [ "${CSV_HISTORY}" ] 235 | then 236 | #Lets update our csv for long term IP tracking 237 | #CSV Format is: 238 | #DAY, Unix time, current IP, last IP 239 | DEBUG "CSV file is ${CSV_HISTORY}" 240 | echo "$(date +%F),$(date +%s),${IP},${R53IP}" >> "${CSV_HISTORY}" 241 | else 242 | DEBUG "CSV file is NOT set, not emitting" 243 | DEBUG "$(date +%F),$(date +%s),${IP},${R53IP}" 244 | fi 245 | 246 | # http://docs.aws.amazon.com/cli/latest/reference/route53/change-resource-record-sets.html 247 | # We want to use the string variable command so put the file contents (batch-changes file) in the following JSON 248 | INPUT_JSON="{ \"ChangeBatch\": $JSON_RECORD}" 249 | DEBUG "${AWS_CMD}" --profile ${AWS_PROFILE:-default} route53 change-resource-record-sets --hosted-zone-id "${HOSTED_ZONE_ID}" --cli-input-json "${INPUT_JSON}" 250 | "${AWS_CMD}" 
--profile ${AWS_PROFILE:-default} route53 change-resource-record-sets --hosted-zone-id "${HOSTED_ZONE_ID}" --cli-input-json "${INPUT_JSON}" 251 | /bin/rm "${LAST_IP_FILE}" 252 | echo "${IP}" > "${LAST_IP_FILE}" 253 | else 254 | DEBUG "Public IP ${IP} matches last public DNS ${R53IP}, skipping updating at $(date)" 255 | echo "IP (${IP}) unchanged from R53IP(${R53IP}), skipping updating at $(date)" 256 | exit 0 257 | fi 258 | ################################################################ 259 | # END OF COMPARE/ACTION SECTION 260 | ################################################################ 261 | 262 | # Dependency/settings ailure handling 263 | else 264 | echo "Missing dependencies or settings, this is what we have" 265 | echo "CURR R53 IP:${R53IP}, Last IP:${LAST_IP}, FQDN:${FQDN}, HOST:${HOST}, APEXD:${APEXD}" 266 | echo "curl is ${CURL_CMD}" 267 | echo "aws is ${AWS_CMD}" 268 | echo "grep is ${GREP_CMD}" 269 | echo "dig is ${DIG_CMD}" 270 | echo "nslookup is ${NSLOOKUP_CMD}" 271 | fi 272 | -------------------------------------------------------------------------------- /ec2-classic-deprecation-related-command-snippets.txt: -------------------------------------------------------------------------------- 1 | In 1 region for ELBs you can get a list of CLBs in EC2 classic by running: 2 | 3 | ``` 4 | R="us-east-1"; aws --region ${R} elb describe-load-balancers --query 'LoadBalancerDescriptions[?Subnets == `[]`].LoadBalancerName' 5 | ``` 6 | 7 | If you want to check all regions (not filtered for ones that only have VPC) 8 | (The tee and file are just to stop the aws cli from blocking output to your pager) 9 | 10 | ``` 11 | F=$(mktemp); for R in $(aws ec2 describe-regions --query "Regions[].RegionName" --output text); do echo "Classic load balancers in ${R}:"; aws --region ${R} elb describe-load-balancers --query 'LoadBalancerDescriptions[?Subnets == `[]`].LoadBalancerName' | tee -a ${F}; done; echo "results also in file ${F} at $(date)"; 12 | ``` 13 | 14 | This 
will tell you if your account has EC2 Classic:

```
aws ec2 describe-account-attributes --query 'AccountAttributes[?AttributeValues[?AttributeValue == `EC2`]]'
```

From the [migration from EC2 Classic to EC2 VPC](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html), this will tell you if you have any EC2 instances in EC2 Classic:

```
aws ec2 describe-instances --query 'Reservations[*].Instances[?VpcId==`null`]'
```


--------------------------------------------------------------------------------
/empty-bash-short.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Skeleton for new bash scripts: config-file loading, getopts, required args.
#Declare function before calling in Bash ;)
function PRINT_ERROR_HELP_AND_EXIT {
    echo "Usage : ${0} -a Required -b Required [-c Optional] [-d Optional]"
    exit 1
}

# Think about ourselves
# Set a directory our binary lives in
RUN_DIR="$(dirname ${0})"
# Set just our script or program name (strip of leading directories)
PROG="${0##*/}"
# Get a meta 'short' name, minus any extension
SHORT="${PROG%%.*}"

# Include system config file in /etc/
if [ -f "/etc/${SHORT}/${SHORT}.conf" ]
then
    source /etc/${SHORT}/${SHORT}.conf
fi

# Include system config file in /etc/ without a directory
if [ -f "/etc/${SHORT}.conf" ]
then
    source /etc/${SHORT}.conf
fi

# Include home dir config file
# BUG FIX: tilde does NOT expand inside double quotes, so the original
# test [ -f "~/.${SHORT}" ] could never be true; use ${HOME} instead.
if [ -f "${HOME}/.${SHORT}" ]
then
    source "${HOME}/.${SHORT}"
fi

# Check options for overriding values
while getopts "a:b:c:d:" optionName
do
    case "$optionName" in
        a) OPT_A="${OPTARG}";;
        b) OPT_B="${OPTARG}";;
        c) OPT_C="${OPTARG}";;
        d) OPT_D="${OPTARG}";;
        [?]) PRINT_ERROR_HELP_AND_EXIT;;
    esac
done

# Make sure our required args A and B are passed
# BUG FIX: -a and -b are BOTH required, so missing EITHER one is an error;
# the original '&&' only triggered usage when both were missing at once.
if [ ! "${OPT_A}" ] || [ ! "${OPT_B}" ]
then
    PRINT_ERROR_HELP_AND_EXIT
fi

# BUG FIX: the second inline conditional tested OPT_C but printed OPT_D
# (copy-paste); it now tests OPT_D.
echo "Running ${0} with OPT_A ${OPT_A} OPT_B ${OPT_B} $(if [ "${OPT_C}" ]; then echo "OPT C is ${OPT_C}"; fi) $(if [ "${OPT_D}" ]; then echo "OPT D is ${OPT_D}"; fi)"

--------------------------------------------------------------------------------
/find-mount-point-for-dir.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Author : Jon Zobrist
# Homepage : http://www.jonzobrist.com
# License : BSD http://en.wikipedia.org/wiki/BSD_license
# Copyright (c) 2012, Jon Zobrist
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : given a path, default is current working directory, find all files named .deb and highlight their name only 30 | # Usage : find-mount-point-for-dir.sh 31 | # Returns mount point 32 | # If DEBUG is set in ENV or uncommented below, output is more verbose. 33 | # DEBUG=1 34 | 35 | if [ "${1}" ] 36 | then 37 | DIR="${1}" 38 | MOUNT_POINT="$(df ${DIR} | grep '^\/dev' | awk '{ print $6 }')" 39 | if [ "${DEBUG}" ] 40 | then 41 | echo "${DIR} is mounted on ${MOUNT_POINT}" 42 | else 43 | echo "${MOUNT_POINT}" 44 | fi 45 | else 46 | echo "Usage ${0} " 47 | fi 48 | -------------------------------------------------------------------------------- /gather-public-ssh-keys.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. 
Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# Purpose : This script aims to gather all public ssh keys on a server and put them in a directory, with appropriate names
# Usage : gather-public-ssh-keys.sh [Directory]

if [ "${1}" ]
then
    OUTPUT_DIR="${1}"
else
    OUTPUT_DIR="./pubkeys"
fi

mkdir -p "${OUTPUT_DIR}"
echo "Writing keys to ${OUTPUT_DIR}"

HOME_DIR="/home"
CHOWN_USER="root:root"
CHMOD_PERMS="400"
KEYFILES="id_rsa.pub id_dsa.pub identity.pub id_ecdsa.pub authorized_keys"

# Walk every home directory and copy out each public key type it has.
for USER in $(/bin/ls -1 ${HOME_DIR})
do
    for KEY in ${KEYFILES}
    do
        if [ -f "${HOME_DIR}/${USER}/.ssh/${KEY}" ]
        then
            FILE=${OUTPUT_DIR}/${USER}-${KEY}
            echo "${USER} has public keys, copied to ${FILE}"
            touch ${FILE}
            chown ${CHOWN_USER} ${FILE}
            chmod ${CHMOD_PERMS} ${FILE}
            # BUG FIX: the original always copied authorized_keys here no
            # matter which KEYFILES entry matched, so id_rsa.pub etc. were
            # never actually collected; copy the matched ${KEY} instead.
            cp ${HOME_DIR}/${USER}/.ssh/${KEY} ${FILE}
        else
            echo "${USER} has no public keys"
        fi
    done
done

--------------------------------------------------------------------------------
/get_server_ssl_certs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Get ssl cert from a given IP:PORT
# writes to stdout
# Usage: get_server_ssl_certs.sh server port
#
# Useful if you are using self-signed certs and need a client to trust them
# Instead of finding your sysadmin and getting the public certs
# this just downloads them directly from the server
# Note: This collects PUBLIC certificate information ONLY, you will NOT get the private info
# This means you can use this to configure your client to TRUST a server's certificate
# But NOT to get what is required to use that certificate as a server
#
# Example usage for Rsyslog testing
# mkdir -p /etc/rsyslog.d/certs
# (host and port are two separate arguments, matching the Usage line above)
# get_server_ssl_certs.sh ssl.example.com 6514 > /etc/rsyslog.d/certs/example-com.pem
# chown -R syslog:syslog /etc/rsyslog.d/certs

TEMP_FILE=$(mktemp)
# BUG FIX: the original echoed "exiting" for both failed checks but never
# actually exited, then ran on without its dependencies; exit non-zero.
if [ ! -w "${TEMP_FILE}" ]; then echo "Failed to create temp file, exiting at $(date)"; exit 1; fi
if [ ! "$(which openssl)" ]; then echo "Missing openssl, exiting at $(date)"; exit 1; fi

HOST=${1}
PORT=${2}
if [ "${1}" ] && [ "${2}" ]
then
    # Empty stdin makes s_client disconnect immediately after the handshake.
    echo "" | openssl s_client -showcerts -connect ${HOST}:${PORT} > ${TEMP_FILE}
    # Emit only the PEM certificate blocks from the s_client transcript.
    sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' "${TEMP_FILE}"
    # Clean up the scratch file (the original leaked one per run).
    rm -f "${TEMP_FILE}"
else
    echo "Usage: ${0} HOST PORT"
    exit 1
fi

--------------------------------------------------------------------------------
/highlight-debs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Author : Jon Zobrist
# Homepage : http://www.jonzobrist.com
# License : BSD http://en.wikipedia.org/wiki/BSD_license
# Copyright (c) 2012, Jon Zobrist
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : given a path, default is current working directory, find all files named .deb and highlight their name only 30 | # Usage : highlight-dbs.sh [PATH] 31 | 32 | if [ "${1}" ] 33 | then 34 | FIND_PATH="${1}" 35 | else 36 | FIND_PATH="$(pwd)" 37 | fi 38 | 39 | find ${FIND_PATH} -iname "*deb" -type f | grep --color -oP '[A-z0-9-_.~]*\.deb' 40 | -------------------------------------------------------------------------------- /linux-network-setup/setup-4g-lte-notes.txt: -------------------------------------------------------------------------------- 1 | https://ubuntu.com/core/docs/networkmanager/configure-cellular-connections 2 | 3 | Basically i8nstall modem manager, this gives you mmcli (modem manager cli) 4 | You should have nmcli (network manager cli) 5 | 6 | # This isn't needed since you can use '*' instead of the specific device 7 | MODEM_PORT=$(mmcli -m 0 |grep 'primary port:' | awk '{ print $4 }'); echo ${MODEM_PORT} 8 | cdc-wdm0 9 | 10 | MODEM_DEV=$(mmcli -m 0 |grep 'device: ' | awk '{ print $4 }'); echo ${MODEM_DEV} 11 | /sys/devices/platform/soc/ffe09000.usb/ff500000.usb/xhci-hcd.2.auto/usb1/1-1/1-1.3 12 | 13 | # Just let network manager guess the only modem installed 14 | # and set it up for Ting as ting0 apn for their newer 5G where APN is wholesale 15 | nmcli c add type gsm ifname '*' con-name ting0 apn wholesale 16 | 17 | # show the network connections 18 | nmcli c 19 | 20 | # Setup wifi 21 
| 22 | up [id | uuid | path] ID [ifname ifname] [ap BSSID] [passwd-file file] 23 | Activate a connection. The connection is identified by its name, UUID or D-Bus path. If ID is 24 | ambiguous, a keyword id, uuid or path can be used. When requiring a particular device to 25 | activate the connection on, the ifname option with interface name should be given. If the ID is 26 | not given an ifname is required, and NetworkManager will activate the best available connection 27 | for the given ifname. In case of a VPN connection, the ifname option specifies the device of the 28 | base connection. The ap option specify what particular AP should be used in case of a Wi-Fi 29 | connection. 30 | passwd-file 31 | some networks may require credentials during activation. You can give these credentials 32 | using this option. Each line of the file should contain one password in the form: 33 | 34 | setting_name.property_name:the password 35 | 36 | For example, for WPA Wi-Fi with PSK, the line would be 37 | 38 | 802-11-wireless-security.psk:secret12345 39 | 40 | nmcli device status 41 | shows the status for all devices. 42 | 43 | nmcli dev disconnect em2 44 | disconnects a connection on interface em2 and marks the device as unavailable for 45 | auto-connecting. As a result, no connection will automatically be activated on the device until 46 | the device's 'autoconnect' is set to TRUE or the user manually activates a connection. 47 | 48 | nmcli -f GENERAL,WIFI-PROPERTIES dev show wlan0 49 | shows details for wlan0 interface; only GENERAL and WIFI-PROPERTIES sections will be shown. 50 | 51 | nmcli -f CONNECTIONS device show wlp3s0 52 | shows all available connection profiles for your Wi-Fi interface wlp3s0. 53 | 54 | nmcli dev wifi 55 | lists available Wi-Fi access points known to NetworkManager. 
56 | 57 | nmcli dev wifi con "Cafe Hotspot 1" password caffeine name "My cafe" 58 | creates a new connection named "My cafe" and then connects it to "Cafe Hotspot 1" SSID using 59 | password "caffeine". This is mainly useful when connecting to "Cafe Hotspot 1" for the first 60 | time. Next time, it is better to use nmcli con up id "My cafe" so that the existing connection 61 | profile can be used and no additional is created. 62 | 63 | nmcli -s dev wifi hotspot con-name QuickHotspot 64 | creates a hotspot profile and connects it. Prints the hotspot password the user should use to 65 | connect to the hotspot from other devices. 66 | 67 | 68 | # no wrky 69 | # nmcli c add type wifi ifname '*' ap WIFI_SSID 70 | 71 | nmcli c add type wifi ifname '*' ap WIFI_SSID 72 | nmcli dev wifi con "WIFI_SSID" password 'WIFII_PASS' name 'WIFI_SSID' 73 | 74 | # After setting them up, there are files in /etc/NetworkManager/system-connections 75 | # for each config, by name 76 | 77 | WIFI_SSID.nmconnection 78 | ting0.nmconnection 79 | 80 | -------------------------------------------------------------------------------- /loadtest-commands.sh: -------------------------------------------------------------------------------- 1 | # Just some bash snippets for load testing different things 2 | 3 | # x parallel (19k max), x^2 reqs 4 | echo 'S="www.example.com"; export S' | tee -a env.sh 5 | echo 'URI="/"; export URI' | tee -a env.sh 6 | echo 'P="443"; export P' | tee -a env.sh 7 | # URI="/images/test.png" 8 | S="www.example.com"; export S 9 | P="443"; export P 10 | URI="/"; export URI 11 | . ./env.sh; for x in {1000..100000..50}; do z=$((x*x)); date; if [ ${x} -gt 19000 ]; then C=19000; else C=${x}; fi; echo "Starting AB C ${C} N ${z} at $(date)"; ab -c ${C} -n ${z} https://${S}/${URI}; echo "Done at $(date)"; done | tee -a loadtest-${S}-$(date +%F-%s).log 12 | 13 | # x parallel (19k max), x*100 reqs 14 | . 
./env.sh; for x in {1000..100000..50}; do z=$((x*100)); date; if [ ${x} -gt 19000 ]; then C=19000; else C=${x}; fi; echo "Starting AB C ${C} N ${z} at $(date)"; ab -c ${C} -n ${z} https://${S}/${URI}; echo "Done at $(date)"; done | tee -a loadtest-${S}-$(date +%F-%s).log 15 | 16 | . ./env.sh; for x in {1..100}; do for y in {1..${x}}; do R=$((${RANDOM} % 109 + 1)); z=$((x*x*${R})); ab -c ${x} -n ${z} https://${S}/${URI}; done; done | tee -a ab-load-random-$(date +%F).log 17 | 18 | ; done | tee -a loadtest-${S}-$(date +%F-%s).log 19 | 20 | # Bash function for getting each AB to start at slightly different times to avoid hitting the same instances 21 | function randsleep() { 22 | if [ -z "${1}" ] 23 | then 24 | range=10 25 | else 26 | range=${1} 27 | fi 28 | R=$((${RANDOM} % ${range} + 1)); 29 | sleep ${R} 30 | } 31 | 32 | # run this in tmux to run the same commands on all panes 33 | set synchronize-panes off 34 | set synchronize-panes on 35 | 36 | # Make networking suck for all ports except SSH 37 | sudo iptables -A INPUT -m statistic --mode random --probability 0.1 -p tcp ! --dport 22 -j DROP 38 | 39 | # Make it suck bad for a specific container listener 40 | sudo iptables -A INPUT -m statistic --mode random --probability 0.8 -p tcp ! --dport ${P} -j DROP 41 | 42 | # Make it suck bad for a specific container listener 43 | 44 | function choose() { 45 | R=$((${RANDOM} % 2)) 46 | } 47 | sudo iptables -A INPUT -m statistic --mode random --probability 0.8 -p tcp ! --dport ${P} -j DROP 48 | 49 | -------------------------------------------------------------------------------- /make-ephemeral-swap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 
8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : given a path, default is current working directory, find all files named .deb and highlight their name only 30 | # Usage : make-ephemeral-swap.sh 31 | #DEBUG=1 32 | 33 | # Check options for overriding values 34 | while getopts "d:s:n:" optionName 35 | do 36 | case "$optionName" in 37 | d) BASEDIR="${OPTARG}";; 38 | s) SIZE="${OPTARG}";; 39 | n) NUM="${OPTARG}";; 40 | [?]) PRINT_ERROR_HELP_AND_EXIT;; 41 | esac 42 | done 43 | 44 | if [ ! "${BASEDIR}" ]; then BASEDIR="/mnt/swapfiles"; fi 45 | if [ ! "${NUM}" ]; then NUM=7; fi 46 | if [ ! "${SIZE}" ]; then SIZE=10240000; fi 47 | 48 | if [ ! 
-d "${BASEDIR}" ] 49 | then 50 | echo "Base dir ${BASEDIR} doesn't exist, creating" 51 | mkdir -p ${BASEDIR} 52 | fi 53 | 54 | MOUNT_POINT=$(df ${BASEDIR} | grep '^\/dev' | awk '{ print $6 }') 55 | FREE_SPACE_MOUNT_POINT=$(df -k | grep ${MOUNT_POINT} | head -n 1 | awk '{ print $4 }') 56 | let TOTAL_SPACE_NEEDED=(${NUM} * ${SIZE}) 57 | echo -n "${FREE_SPACE_MOUNT_POINT} free on ${MOUNT_POINT}" 58 | echo " Need a total of ${TOTAL_SPACE_NEEDED}" 59 | 60 | echo "${TOTAL_SPACE_NEEDED} -lt ${FREE_SPACE_MOUNT_POINT}" 61 | if [ ${TOTAL_SPACE_NEEDED} -lt ${FREE_SPACE_MOUNT_POINT} ] 62 | then 63 | if [ "${DEBUG}" ]; then echo "Have enough free space on ${MOUNT_POINT} for ${TOTAL_SPACE_NEEDED}"; fi 64 | else 65 | echo "Not enough free space on base mount ${MOUNT_POINT}, exiting" 66 | exit 1 67 | fi 68 | 69 | echo "Started at `date`" 70 | 71 | for (( i = 1 ; i <= ${NUM}; i++ )) 72 | do 73 | if [ ! -f ${BASEDIR}/swap-1.img ] 74 | then 75 | echo "Creating initial swapfile swap-${i}.img at `date`" 76 | if [ "${DEBUG}" ] 77 | then 78 | time dd if=/dev/zero of=${BASEDIR}/swap-${i}.img bs=1024 count=${SIZE} 79 | else 80 | dd if=/dev/zero of=${BASEDIR}/swap-${i}.img bs=1024 count=${SIZE} 81 | fi 82 | chmod og-rwx ${BASEDIR}/swap-${i}.img 83 | # echo "time dd if=/dev/zero of=${BASEDIR}/swap-${i}.img bs=1024 count=10240000" 84 | mkswap -f ${BASEDIR}/swap-${i}.img 85 | echo "${BASEDIR}/swap-${i}.img none swap sw 0 0" >> /etc/fstab 86 | if [ "${DEBUG}" ]; then echo "Created ${BASEDIR}/swap-${i}.img"; fi 87 | else 88 | if [ "${DEBUG}" ]; then echo "Initial swapfile exists, not re-creating"; fi 89 | fi 90 | 91 | if [ ! 
-f ${BASEDIR}/swap-${i}.img ] 92 | then 93 | if [ "${DEBUG}" ]; then echo "Creating swapfile swap-${i}.img at `date`"; fi 94 | cp ${BASEDIR}/swap-1.img ${BASEDIR}/swap-${i}.img 95 | chmod og-rwx ${BASEDIR}/swap-${i}.img 96 | mkswap -f ${BASEDIR}/swap-${i}.img 97 | echo "${BASEDIR}/swap-${i}.img none swap sw 0 0" >> /etc/fstab 98 | if [ "${DEBUG}" ]; then echo "Created ${BASEDIR}/swap-${i}.img"; fi 99 | else 100 | if [ "${DEBUG}" ]; then echo "Swapfile ${BASEDIR}/swap-${i}.img already exists, skipping!"; fi 101 | fi 102 | done 103 | 104 | if [ "${DEBUG}" ]; then echo "Done creating swapfiles at `date`, swapping on"; fi 105 | swapon -a 106 | if [ "${DEBUG}" ]; then echo "Done swapping on `date`, swapfile status :"; swapon -s; fi 107 | -------------------------------------------------------------------------------- /make-s3-psurl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # https://aws.amazon.com/premiumsupport/knowledge-center/presigned-url-s3-bucket-expiration/ 3 | # Copy their example code, slap an argparse and main on it 4 | # ship it! 
#
# Usage:
#   make-s3-psurl.py -b my-bucket-name -k /path/to/key/in/bucket.zip
#   make-s3-psurl.py -bucket my-bucket-name -key /path/to/key/in/bucket.zip -expiry 86400
#
# Prints the pre-signed URL
# Requires your AWS environment is setup for authentication:
#   pip3 install awscli boto3
#   aws configure
#   (enter your AWS credentials, follow prompts)

# BUG FIX: the original imported the legacy 'boto' SDK but used the boto3
# Session/client API below; boto has no Session, so the script raised
# AttributeError on every run.  Import boto3 explicitly.
import boto3
import argparse
from botocore.client import Config


def presign_url(bucket, key, expiry, profile, region):
    """Return a SigV4 pre-signed GET URL for s3://bucket/key.

    bucket  -- S3 bucket name
    key     -- object key inside the bucket
    expiry  -- URL lifetime in seconds
    profile -- AWS CLI profile name used for credentials
    region  -- AWS region for the S3 client
    """
    # Build a session from the named profile so credentials come from
    # 'aws configure', then get an S3 client pinned to SigV4 (required for
    # pre-signed URLs in newer regions and for KMS-encrypted objects).
    session = boto3.Session(region_name=region, profile_name=profile)
    s3 = session.client('s3', config=Config(signature_version='s3v4'), region_name=region)
    # Generate the URL to get 'key' from 'bucket'.
    # URL expires in expiry seconds (default 86400 seconds or 1 day).
    url = s3.generate_presigned_url(
        ClientMethod='get_object',
        Params={
            'Bucket': bucket,
            'Key': key
        },
        ExpiresIn=int(expiry),  # coerce defensively; callers may pass a string
    )
    return url


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Make a S3 presigned URL for key in bucket.')
    parser.add_argument('-b', '-bucket', dest='s3_bucket', type=str, help='Bucket the key is in', required=True)
    parser.add_argument('-k', '-key', dest='s3_key', type=str, help='Key to create PSU for', required=True)
    # BUG FIX: expiry is a duration in seconds, not a key; parse it as int
    # (the original type=str help text was copy-pasted from -key).
    parser.add_argument('-t', '-expiry', dest='s3_expiry', type=int, help='URL expiry in seconds (default 86400 = 1 day)', required=False, default=86400)
    parser.add_argument('-p', '-profile', dest='profile', type=str, help='AWS CLI profile to use', required=False, default="default")
    parser.add_argument('-r', '-region', dest='region', type=str, help='AWS Region to use', required=False, default="us-east-1")
args = parser.parse_args() 49 | s3_url = presign_url(args.s3_bucket, args.s3_key, args.s3_expiry, args.profile, args.region) 50 | print(s3_url) 51 | -------------------------------------------------------------------------------- /nuke-instances-utterly.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# DOC FIX: the original Purpose/Usage header was copy-pasted from
# gather-public-ssh-keys.sh and described the wrong script entirely.
# Purpose : Fully destroy the EC2 instances listed in a file: disable
#           termination protection, stop each instance, delete its EBS
#           volumes, then terminate it.
# Usage : nuke-instances-utterly.sh FILENAME   (one instance ID per line)


if [ -f "${1}" ]
then
    echo "ARE YOU SURE YOU WANT TO NUKE the following instances in file ${1} at `date`"
    cat ${1}
    read -p "Do you want to proceed? --[Will cancel automatically in 15 seconds]- (yes/no)? " -t 15 ANSWER

    # read exits non-zero on timeout; treat no answer as "no".
    if [ "$?" != "0" ]
    then
        echo
        echo "You didn't answer in 15 seconds, cancelling"
        ANSWER="no"
    fi

    if [ "${ANSWER}" == "no" ] || [ "${ANSWER}" == "n" ] || [ "${ANSWER}" == "NO" ] || [ "${ANSWER}" == "N" ]
    then
        echo "NO: Exiting."
        exit 0
    elif [ "${ANSWER}" == "yes" ] || [ "${ANSWER}" == "y" ] || [ "${ANSWER}" == "YES" ] || [ "${ANSWER}" == "Y" ]
    then
        echo "YES: Proceeding."
    else
        # BUG FIX: the original called an undefined 'confirm' function here,
        # which failed with "command not found" and then FELL THROUGH to the
        # destructive loop below -- an unrecognized answer nuked everything.
        # Fail safe instead: anything other than an explicit yes aborts.
        echo "Did not understand your choice, exiting without doing anything"
        exit 1
    fi

    sleep 5
    for I in $(cat ${1})
    do
        #TO DO, add checking to see if string looks like an instance ID and that the instance is available
        echo "${I}"
        ec2-modify-instance-attribute --disable-api-termination false ${I}
        ec2-stop-instances -f ${I}
        # Poll until the instance reports stopped before touching volumes.
        while ! ec2-describe-instances ${I} | grep -q stopped; do echo -n "."; sleep 1; done
        for V in $(ec2-describe-instances ${I} | grep '^BLOCKDEVICE' | awk '{ print $3 }')
        do
            echo "nuking volume ${V}"
            echo -n "Detaching."
            # NOTE(review): this loops WHILE the output says 'detached', which
            # looks inverted (cf. the '!'-guarded stop loop above) -- left
            # as-is; confirm intent against ec2-detach-volume's output format.
            while ec2-detach-volume ${V} 2>&1 | grep -q detached; do echo -n "."; sleep 1; done
            echo -n "Deleting."
72 | while ec2-delete-volume ${V} 2>&1 | grep -q 'does not exist'; do echo -n "."; sleep 1; done 73 | done 74 | echo "disabling termination protection on ${I}" 75 | ec2-modify-instance-attribute --disable-api-termination false ${I} 76 | sleep 5 77 | echo "terminating instance ${I}" 78 | ec2-terminate-instances ${I} 79 | done 80 | else 81 | echo "Usage : ${0} - where filename has instances one per line that will be nuked" 82 | fi 83 | -------------------------------------------------------------------------------- /page.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #Author : jon@jonzobrist.com 3 | #License : BSD/public/freeware 4 | 5 | import smtplib 6 | import sys 7 | 8 | def prompt(prompt): 9 | return raw_input(prompt).strip() 10 | 11 | fromaddr = "noreply@example.com" 12 | #toaddrs = ['userA@example.com','userB@example.com','Phone1@txt.att.net','Phone2@txt.att.net','userC@example.com'] 13 | toaddrs = ['userA@example.com'] 14 | subject = "[ALERT] Alert from localhost" 15 | 16 | 17 | msg = ("From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n" 18 | % (fromaddr,toaddrs,subject)) 19 | msg = msg + sys.argv[1] 20 | server = smtplib.SMTP('server.ip.or.hostname') 21 | #server.set_debuglevel(1) 22 | server.sendmail(fromaddr, toaddrs, msg) 23 | server.quit() 24 | 25 | -------------------------------------------------------------------------------- /pihole/env.sh: -------------------------------------------------------------------------------- 1 | # This should be sourced before running docker pihole commands 2 | 3 | declare -x WEB_PASS="UseARealPassword${RANDOM}" 4 | 5 | declare -x PI_BASE="${HOME}/pihole" 6 | declare -x PI_LOG="${PI_BASE}/var-log-pihole:/var/log" 7 | declare -x PI_ETC="${PI_BASE}/etc-pihole/:/etc/pihole" 8 | declare -x PI_DNSM="${PI_BASE}/etc-dnsmasq.d:/etc/dnsmasq.d" 9 | declare -x PI_LIGHTTPD="${PI_BASE}/etc-lighttpd:/etc/lighttpd" 10 | 11 | declare -x VIRTUAL_HOST="pihole.local" 12 | declare -x 
SERVER_PORT="8080" 13 | declare -x M_DNS="127.0.0.1" 14 | declare -x N_DNS="1.1.1.1" 15 | declare -x TZ="US/Pacific" 16 | declare -x IP_LOOKUP="$(ip route get 8.8.8.8 | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')" 17 | declare -x IP="${IP_LOOKUP}" 18 | 19 | # declare -x IP="192.168.1.254" #Or set it statically 20 | # declare -x IPV4_ADDRESS="192.168.1.254" #Override IPv4 manually 21 | # declare -x IPV6_ADDRESS="::" #Override IPv6 manually 22 | 23 | declare -x NET_TYPE="bridge" # can change to host if you want to use pihole DHCP 24 | -------------------------------------------------------------------------------- /pihole/setup-pihole.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Pulls latest pihole scripts and starts 3 | # Requires you have docker running already, and you're using a linux host 4 | 5 | # Get the env.sh, and source it 6 | TF=$(mktemp) 7 | curl -o ${TF} https://raw.githubusercontent.com/jonzobrist/Bash-Admin-Scripts/master/pihole/env.sh 8 | perl -pi -e "s/pihole.local/$(hostname)/g" ${TF} 9 | . 
. "${TF}"
mkdir -p "${PI_BASE}"
pushd "${PI_BASE}"
mv "${TF}" ./env.sh

curl -O https://raw.githubusercontent.com/jonzobrist/Bash-Admin-Scripts/master/pihole/update-pihole.sh
chmod uog+x update-pihole.sh

# Start (or update) the dockerized pihole using the env.sh fetched above.
./update-pihole.sh

popd

# -------------------------------------------------- /pihole/update-pihole.sh:
#!/bin/bash
# This script updates a running pihole, but it can also start a new one
# Requires Docker to be installed (see install-docker.sh) and the current user to have permissions
# Also needs to listen on the port you configure, and the DNS ports (53 on TCP & UDP)
# Disable or stop any other DNS servers or caches before running / if it doesn't work right
# Configure in env.sh or in this file

# Print ${1} only when the DEBUG environment variable is set.
# (The function name and the variable name coincide on purpose; inside the
# function ${DEBUG} is the variable, not the function.)
DEBUG () {
    if [ "${DEBUG}" ]
    then
        echo "${1}"
    fi
}

if [ -f "env.sh" ]
then
    # shellcheck source=/dev/null
    source env.sh
else
    # No env.sh present: fall back to self-contained defaults.
    WEB_PASS="tempRandom${RANDOM}"
    export WEB_PASS
    echo "Using random web password ${WEB_PASS}"
    PI_BASE="${PI_BASE:-$PWD}"
    export PI_BASE
    PI_LOG="${PI_BASE}/var-log-pihole:/var/log"
    export PI_LOG
    PI_ETC="${PI_BASE}/etc-pihole/:/etc/pihole"
    export PI_ETC
    PI_DNSM="${PI_BASE}/etc-dnsmasq.d:/etc/dnsmasq.d"
    export PI_DNSM
    PI_LIGHTTPD="${PI_BASE}/etc-lighttpd:/etc/lighttpd"
    export PI_LIGHTTPD
    IP_LOOKUP="$(ip route get 8.8.8.8 | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')"
    export IP_LOOKUP
    IP="${IP:-$IP_LOOKUP}"
    export IP
    VIRTUAL_HOST="${VIRTUAL_HOST:-pihole.local}"
    export VIRTUAL_HOST
    SERVER_PORT="${SERVER_PORT:-8080}"
    export SERVER_PORT
    M_DNS="${M_DNS:-127.0.0.1}"
    export M_DNS
    N_DNS="${N_DNS:-1.1.1.1}"
    export N_DNS
    TZ="${TZ:-US/Pacific}"
    export TZ
    NET_TYPE="${NET_TYPE:-bridge}" # can change to host if you want to use pihole DHCP
    export NET_TYPE
fi


FORCE_UPDATE=${FORCE_UPDATE:-0}
export FORCE_UPDATE
UPDATE=0
export UPDATE

# Refuse to run with an incomplete configuration.
if [ -z "${IP}" ] || [ -z "${IP_LOOKUP}" ] || [ -z "${N_DNS}" ] || [ -z "${PI_BASE}" ] || [ -z "${PI_DNSM}" ] || [ -z "${PI_ETC}" ] || [ -z "${PI_LIGHTTPD}" ] || [ -z "${PI_LOG}" ] || [ -z "${SERVER_PORT}" ] || [ -z "${TZ}" ] || [ -z "${VIRTUAL_HOST}" ] || [ -z "${WEB_PASS}" ] || [ -z "${NET_TYPE}" ]
then
    echo "Exiting, not all variables are set, check env.sh"
    DEBUG "Var IP = ${IP}"
    DEBUG "Var IP_LOOKUP = ${IP_LOOKUP}"
    DEBUG "Var N_DNS = ${N_DNS}"
    DEBUG "Var PI_BASE = ${PI_BASE}"
    DEBUG "Var PI_DNSM = ${PI_DNSM}"
    DEBUG "Var PI_ETC = ${PI_ETC}"
    DEBUG "Var PI_LIGHTTPD = ${PI_LIGHTTPD}"
    DEBUG "Var PI_LOG = ${PI_LOG}"
    DEBUG "Var SERVER_PORT = ${SERVER_PORT}"
    DEBUG "Var TZ = ${TZ}"
    DEBUG "Var VIRTUAL_HOST = ${VIRTUAL_HOST}"
    DEBUG "Var WEB_PASS = ${WEB_PASS}"
    DEBUG "Var NET_TYPE = ${NET_TYPE}"
    exit 1
else
    UPDATE_OUTPUT=$(docker pull pihole/pihole)
    RETCODE=$?
    DEBUG "UPDATE_OUTPUT:"
    DEBUG "${UPDATE_OUTPUT}"
    # "Image is up to date" in the pull output means no new image was fetched.
    NO_UPDATE=$(echo "${UPDATE_OUTPUT}" | grep 'Image is up to date')
    DEBUG "Var NO_UPDATE = ${NO_UPDATE}"
    if [ ${RETCODE} -eq 0 ] && [ -z "${NO_UPDATE}" ]
    then
        UPDATE=1
        export UPDATE
    fi
    if [ "${FORCE_UPDATE}" -eq 1 ]
    then
        UPDATE=1
        export UPDATE
    fi
    if [ ${UPDATE} -eq 1 ]
    then
        # Typo fix in the debug message: "Wipping" -> "Wiping".
        DEBUG "Wiping pihole: docker rm -f pihole"
        # Note that this is a docker rm -f, not a filesystem rm
        docker rm -f pihole
        DEBUG "Running Pihole in Docker:"
        DEBUG "docker run -d --name pihole --hostname pihole --net=${NET_TYPE} -e TZ=\"${TZ}\" -e WEBPASSWORD=\"${WEB_PASS}\" -e IPV4_ADDRESS=\"${IP}\" -e VIRTUAL_HOST=\"${VIRTUAL_HOST}\" -e VIRTUAL_PORT=${SERVER_PORT} -v ${PI_ETC} -v ${PI_DNSM} -p 53:53/tcp -p 53:53/udp -p ${SERVER_PORT}:80 --dns=${M_DNS} --dns=${N_DNS} --restart=unless-stopped --cap-add=NET_ADMIN pihole/pihole:latest"
        docker run -d --name pihole --hostname pihole --net="${NET_TYPE}" -e TZ="${TZ}" -e WEBPASSWORD="${WEB_PASS}" -e IPV4_ADDRESS="${IP}" -e VIRTUAL_HOST="${VIRTUAL_HOST}" -e VIRTUAL_PORT="${SERVER_PORT}" -p 53:53/tcp -p 53:53/udp -p "${SERVER_PORT}:80" -v "${PI_ETC}" -v "${PI_DNSM}" --dns="${M_DNS}" --dns="${N_DNS}" --restart=unless-stopped --cap-add=NET_ADMIN pihole/pihole:latest

    else
        echo "Docker images not updated, skipping restart at $(date)"
    fi
fi

# -------------------------------------------------- /reset-ssh-keys-list-of-hosts.sh:
#!/bin/bash
#
# Author : Jon Zobrist
# Homepage : http://www.jonzobrist.com
# License : BSD http://en.wikipedia.org/wiki/BSD_license
# Copyright (c) 2013, Jon Zobrist
# All rights reserved.
8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : This script takes a list of hostnames in a file 30 | # and deletes the local users key for those hosts and adds a new one from the server. 31 | # 32 | # WARNING : from man ssh-keyscan 33 | # If an ssh_known_hosts file is constructed using ssh-keyscan without verifying the keys, users will be 34 | # vulnerable to man in the middle attacks. On the other hand, if the security model allows such a risk, 35 | # ssh-keyscan can help in the detection of tampered keyfiles or man in the middle attacks which have 36 | # begun after the ssh_known_hosts file was created. 
37 | # 38 | # Usage : reset-ssh-keys-list-of-hosts.sh HOST_FILE 39 | 40 | if [ ! "${1}" ] 41 | then 42 | echo "Usage : ${0} HOST_FILE" 43 | exit 1 44 | fi 45 | 46 | HOST_FILE="${1}" 47 | if [ ! -f "${HOST_FILE}" ] 48 | then 49 | echo "${HOST_FILE} does not exist, exiting at $(date)" 50 | exit 1 51 | fi 52 | 53 | KNOWN_HOSTS_FILE="${HOME}/.ssh/known_hosts" 54 | 55 | if [ ! -f "${KNOWN_HOSTS_FILE}" ] 56 | then 57 | touch ${KNOWN_HOSTS_FILE} 58 | chmod og-rwx ${KNOWN_HOSTS_FILE} 59 | fi 60 | 61 | for S in $(cat ${HOST_FILE} | sort | uniq) 62 | do 63 | echo -n "${S} " 64 | ssh-keygen -R ${S}-${HS} 2>/dev/null 65 | echo "" 66 | done 67 | 68 | ssh-keyscan -f ${HOST_FILE} -H -t rsa 2>/dev/null >> ${KNOWN_HOSTS_FILE} 69 | -------------------------------------------------------------------------------- /reset-ssh-keys-prefix-suffix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2013, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : This script takes a list of prefixes, and suffixes, creates a list of hostnames 30 | # and deletes the local users key for those hosts and adds a new one. 31 | # I created this for use on Amazon Web Services EC2 since we are re-using similar name aliases 32 | # and need an easy way to update the ssh keys. 33 | # 34 | # WARNING : from man ssh-keyscan 35 | # If an ssh_known_hosts file is constructed using ssh-keyscan without verifying the keys, users will be 36 | # vulnerable to man in the middle attacks. On the other hand, if the security model allows such a risk, 37 | # ssh-keyscan can help in the detection of tampered keyfiles or man in the middle attacks which have 38 | # begun after the ssh_known_hosts file was created. 39 | # 40 | # Usage : reset-ssh-keys-prefix-suffix.sh 41 | 42 | HOST_PREFIXES="qa dev web prod" 43 | HOST_SUFFIXES="webserver.example.com dbserver.example.com" 44 | KNOWN_HOSTS_FILE="${HOME}/.ssh/known_hosts" 45 | if [ ! 
-f "${KNOWN_HOSTS_FILE}" ] 46 | then 47 | touch ${KNOWN_HOSTS_FILE} 48 | chmod og-rwx ${KNOWN_HOSTS_FILE} 49 | fi 50 | 51 | for S in ${HOST_PREFIXES} 52 | do 53 | echo -n "${S} " 54 | for HS in ${HOST_SUFFIXES} 55 | do 56 | echo -n "${S}-${HS} " 57 | ssh-keygen -R ${S}-${HS} 2>/dev/null 58 | ssh-keyscan -H -t rsa ${S}-${HS} 2>/dev/null >> ${KNOWN_HOSTS_FILE} 59 | done 60 | echo "" 61 | done 62 | 63 | -------------------------------------------------------------------------------- /retry: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | i=1 3 | 4 | while true 5 | do 6 | echo "trying $@ at `date` [attempt: ${i}]" 7 | $@ 8 | let "sleep_time = ${i} * ${i}" 9 | echo "sleeping ${sleep_time}" 10 | sleep ${sleep_time} 11 | ((i++)) 12 | done 13 | -------------------------------------------------------------------------------- /s3-du.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 
17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : This script uses the s3ls command to list all files in a bucket, root dir 30 | # Usage : gather-public-ssh-keys.sh [Directory] 31 | 32 | if [ "${1}" ] 33 | then 34 | NUM=0 35 | COUNT=0 36 | for N in `s3ls ${1} | awk '{print $11}' | grep [0-9]` 37 | do 38 | NUM=`expr $NUM + $N` 39 | ((COUNT++)) 40 | done 41 | KB=`expr ${NUM} / 1024` 42 | MB=`expr ${NUM} / 1048576` 43 | GB=`expr ${NUM} / 1073741824` 44 | echo "${COUNT} files in bucket ${1}" 45 | echo "${NUM} B" 46 | echo "${KB} KB" 47 | echo "${MB} MB" 48 | echo "${GB} GB" 49 | else 50 | echo "Usage : ${0} s3-bucket" 51 | exit 1 52 | fi 53 | 54 | -------------------------------------------------------------------------------- /set-sysctl-challenge-ack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2016, Jon Zobrist 7 | # All rights reserved. 
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Purpose : Script to set net.ipv4.tcp_challenge_ack_limit to something random
# This helps mitigate CVE-2016-5696
# THIS IS NOT A FIX FOR CVE-2016-5696
# https://www.usenix.org/system/files/conference/usenixsecurity16/sec16_paper_cao.pdf


unset TCAL
TCAL=$(grep net.ipv4.tcp_challenge_ack_limit /etc/sysctl.conf)
if [ ! "${TCAL}" ]
then
    if [[ $EUID -ne 0 ]]
    then
        echo "Setting does not exist in /etc/sysctl.conf, re-run as root to set"
        exit 0
    fi
    echo "Setting does not exist in /etc/sysctl.conf, adding:"
    # BUG FIX: ${RANDOM} goes up to 32767, so RANDOM*RANDOM can exceed
    # 999999999 (32767^2 ~= 1.07e9) and the original bc expression could
    # produce a NEGATIVE limit. Keep the value random but clamp it into
    # the range (499999999, 999999999].
    LIMIT=$(( 999999999 - (RANDOM * RANDOM) % 500000000 ))
    echo "net.ipv4.tcp_challenge_ack_limit = ${LIMIT}" >> /etc/sysctl.conf
    # Already running as root here (checked above), so no sudo needed.
    sysctl -p
else
    echo "Setting exists in /etc/sysctl.conf"
    grep -n net.ipv4.tcp_challenge_ack_limit /etc/sysctl.conf
fi

# -------------------------------------------------- /setup-ec2-raid-0.sh:
#!/bin/bash

##############################################################
#
# Program Information
# Name :
# Author : Jon Zobrist
# Copyright : Inthinc Technology Solutions, Inc. 2011
# License : GPL 2.0 or higher
# Purpose : Create EBS Volumes, attach and set them up as a RAID array on an EC2 instance
#
##############################################################

##############################################################
# Configuration section :
#
SSH_CMD="/usr/bin/ssh -o StrictHostKeyChecking=no" #The path and options for your ssh program
SSH_USER="ubuntu" #The remote user who can sudo and run commands
#EBS_IOPS=1200
#
#The following variables should be set correctly in $ENV
# EC2_REGION
# EC2_ZONE
# EC2_KEYS
# EC2_CERT
# EC2_PRIVATE_KEY
# In addition to having your environment setup, you should have the ec2-tools installed and in your $PATH
# Download and install EC2 API Tools from
# http://aws.amazon.com/developertools/351
#
#
#These are generally passed as args, but you can force set them here
# MOUNT_BASE="/usr/local/mysql"
# RAID_DEVICE="/dev/md/0"
# EBS_VOLUME_SIZE="250"
#
############################################################## 38 | 39 | 40 | ############################################################## 41 | # Functions 42 | 43 | function check_instance { 44 | echo "Verifying instance is running" 45 | while ! ec2-describe-instances ${INSTANCE_ID} | grep -q running; do echo -n "."; sleep 1; done 46 | echo "Instance running" 47 | INSTANCE_ADDRESS=$(ec2-describe-instances ${INSTANCE_ID} | egrep ^INSTANCE | awk '{ print $4 }') 48 | if [ ! -n "${INSTANCE_ADDRESS}" ] 49 | then 50 | echo "Missing instance address for instance ID ${INSTANCE_ID}, got ${INSTANCE_ADDRESS}" 51 | exit 1 52 | fi 53 | 54 | echo "Testing connectivity via SSH" 55 | while ! ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "/bin/ls /" | grep -q "root"; do echo "."; sleep 1; done 56 | 57 | for (( x = 0 ; x <= 30 ; x++ )) 58 | do 59 | MY_RAID_LIST=$( ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "/bin/ls /dev/md${x}* 2>/dev/null " ) 60 | if [ ! -n "${MY_RAID_LIST}" ] 61 | then 62 | RAID_DEVICE="/dev/md/${x}" 63 | echo "Using ${RAID_DEVICE}" 64 | break 65 | fi 66 | ## echo "not using ${DISK}" 67 | echo -n "." 68 | done 69 | 70 | 71 | #Check mount point doesn't exist and create it 72 | MY_MOUNT_BASE=$( ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "if [ -d ${MOUNT_BASE} ]; then echo "Directory exists"; else sudo mkdir -p ${MOUNT_BASE}; fi" ) 73 | if [ -n "${MY_MOUNT_BASE}" ] 74 | then 75 | echo "Mount base ${MOUNT_BASE} exists on ${INSTANCE_ADDRESS}, exiting at `date`" 76 | echo "Returned was ${MY_MOUNT_BASE}" 77 | exit 1 78 | fi 79 | 80 | #Make sure mdadm is installed! if not offer to install it? 81 | echo "Checking mdadm.." 
82 | MDADM_CMD=$(${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "which mdadm") 83 | if [ -z "${MDADM_CMD}" ] 84 | then 85 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo apt-get -y install mdadm" 86 | else 87 | echo "mdadm exists as ${MDADM_CMD}" 88 | fi 89 | 90 | #Make sure xfs is installed 91 | MKXFS_CMD=$(${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "which mkfs.xfs") 92 | if [ -z "${MKXFS_CMD}" ] 93 | then 94 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo apt-get -y install xfsprogs" 95 | else 96 | echo "mkfs.xfs exists as ${MKXFS_CMD}" 97 | fi 98 | } 99 | 100 | function setup_raid { 101 | #Create EBS Volumes 102 | echo "entered setup_raid" 103 | RAID_DEVICES="" 104 | DRIVES=15 105 | MAX_DRIVES=14 106 | for (( i = 1 ; i <= ${EBS_VOLUMES} ; i++ )) 107 | do 108 | if [ ${DRIVES} -gt ${MAX_DRIVES} ] 109 | then 110 | DRIVES=1 111 | #Connect and check for available disk locations /dev/sdX 112 | echo "Checking for available disks" 113 | #USED_DISKS=$(${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} '/bin/ls -1 /dev/sd* | sort | tail -n 1') 114 | CHECK_DISKS="/dev/sdg /dev/sdh /dev/sdi /dev/sdj /dev/sdk /dev/sdl /dev/sdm /dev/sdn /dev/sdo /dev/sdp /dev/sdq /dev/sdr /dev/sds /dev/sdt" 115 | for DISK in ${CHECK_DISKS} 116 | do 117 | MY_DISK_LIST=$( ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "/bin/ls ${DISK}* 2>/dev/null " ) 118 | if [ ! -n "${MY_DISK_LIST}" ] 119 | then 120 | DEVICE_BASE="${DISK}" 121 | echo "Using ${DEVICE_BASE}" 122 | break 123 | fi 124 | ## echo "not using ${DISK}" 125 | echo -n "." 
126 | done 127 | fi 128 | DISK_DEVICE="${DEVICE_BASE}${DRIVES}" 129 | ((DRIVES++)) 130 | if $( ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "/bin/ls ${DISK_DEVICE}" ) 131 | then 132 | echo "Volume ${DISK_DEVICE} exists, exiting at `date`" 133 | exit 1 134 | fi 135 | RAID_DEVICES="${RAID_DEVICES} ${DISK_DEVICE}" 136 | if [ "${EBS_IOPS}" ] 137 | then 138 | echo "Creating volume for ${INSTANCE_ID} as ${DISK_DEVICE} at `date` with ${EBS_IOPS} IOPS" 139 | EBS_VOLUME=$(ec2-create-volume -z ${EC2_ZONE} --region ${EC2_REGION} -t io1 -i ${EBS_IOPS} --size ${EBS_VOLUME_SIZE} | cut -f2) 140 | else 141 | echo "Creating volume for ${INSTANCE_ID} as ${DISK_DEVICE} at `date`" 142 | EBS_VOLUME=$(ec2-create-volume -z ${EC2_ZONE} --region ${EC2_REGION} --size ${EBS_VOLUME_SIZE} | cut -f2) 143 | fi 144 | echo "DEBUG: echo \"Attaching volume ${EBS_VOLUME} to ${INSTANCE_ID} as ${DISK_DEVICE} at `date`\"" 145 | #Attach volumes to instance 146 | ec2-attach-volume ${EBS_VOLUME} -i ${INSTANCE_ID} -d ${DISK_DEVICE} 147 | echo "DEBUG: ec2-attach-volume ${EBS_VOLUME} -i ${INSTANCE_ID} -d ${DISK_DEVICE}" 148 | echo "Waiting for volume to attach at `date`" 149 | while ! 
ec2-describe-volumes ${EBS_VOLUME} | grep -q attached; do sleep 1; done 150 | done 151 | sleep 60 152 | echo "Create and start RAID device" 153 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo mdadm --create ${RAID_DEVICE} --level 0 --chunk 256 --metadata 1.1 --raid-devices ${EBS_VOLUMES} ${RAID_DEVICES}" 154 | 155 | echo "Format RAID device" 156 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo mkfs.xfs -q ${RAID_DEVICE}" 157 | 158 | echo "Configure /etc/fstab and /etc/mdadm" 159 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo mkdir -p /etc/mdadm" 160 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo echo DEVICE partitions | sudo tee /etc/mdadm/mdadm.conf" 161 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo echo MAILADDR root@inthinc.com | sudo tee -a /etc/mdadm/mdadm.conf" 162 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo mdadm --examine --scan | sudo tee -a /etc/mdadm/mdadm.conf" 163 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo echo ${RAID_DEVICE} ${MOUNT_BASE} xfs noauto,noatime,logbsize=256k,nobarrier 0 0 | sudo tee -a /etc/fstab" 164 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo blockdev --setra 65536 ${RAID_DEVICE}" 165 | #Mount RAID device 166 | ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo mount ${MOUNT_BASE}" 167 | #Zero out disks - uncomment out these 2 lines to force zero'ing of all disks created 168 | #Note : This will take a very very very much longer time 169 | # It's safer to zero out your disks after you have the RAID done, locally 170 | # ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo time dd if=/dev/zero of=${MOUNT_BASE}/cleanup.img" 171 | # ${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS} "sudo time /bin/rm ${MOUNT_BASE}/cleanup.img" 172 | 173 | echo "connect with :" 174 | echo "${SSH_CMD} -i ${EC2_KEYS} ${SSH_USER}@${INSTANCE_ADDRESS}" 175 | 176 | } 177 | 
178 | ############################################################## 179 | 180 | ############################################################## 181 | # Startup checks 182 | 183 | if ( [ -n "${EC2_REGION}" ] && [ -n "${EC2_ZONE}" ] && [ -n "${EC2_KEYS}" ] && [ -n "${1}" ] && [ -n "${2}" ] && [ -n "${3}" ] && [ -n "${4}" ] ) 184 | then 185 | if [ ${2} -gt 15 ] 186 | then 187 | echo "Max Volumes per node is 14, ${2} is more, RAID will span devices" 188 | fi 189 | echo "Proceeding with :" 190 | echo "EBS volume size=${1}" 191 | EBS_VOLUME_SIZE="${1}" 192 | echo "Number EBS volumes=${2}" 193 | EBS_VOLUMES="${2}" 194 | echo "EC2 instance ID=${3}" 195 | INSTANCE_ID="${3}" 196 | echo "Mount point=${4}" 197 | MOUNT_BASE="${4}" 198 | if [ "${5}" ] 199 | then 200 | EBS_IOPS="${5}" 201 | fi 202 | if [ "${EBS_IOPS}" ] 203 | then 204 | echo "Using dedicated IOPS, there will be ${EBS_IOPS} IOPS _per_ device" 205 | else 206 | echo "Not using dedicated IOPS" 207 | fi 208 | 209 | check_instance 210 | setup_raid 211 | else 212 | echo "Usage ${0} [IOPS]" 213 | exit 1 214 | fi 215 | 216 | 217 | -------------------------------------------------------------------------------- /setup-new-zsh.sh: -------------------------------------------------------------------------------- 1 | if [ "${SHELL##*/}" = "zsh" ]; then ZSHRC="https://raw.githubusercontent.com/jonzobrist/Bash-Admin-Scripts/master/.zshrc"; pushd ${HOME}; curl -q -o ${HOME}/.zshrc ${ZSHRC}; git clone --recursive https://github.com/sorin-ionescu/prezto.git "${ZDOTDIR:-$HOME}/.zprezto"; setopt EXTENDED_GLOB; for rcfile in "${ZDOTDIR:-$HOME}"/.zprezto/runcoms/^README.md(.N); do ln -s "$rcfile" "${ZDOTDIR:-$HOME}/.${rcfile:t}"; done; popd; else echo "Need to be using ZSH"; fi; chsh -s $(which zsh) 2 | -------------------------------------------------------------------------------- /sshsetup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | ############### 3 | ## written by: 
Jon Zobrist 4 | ## updated on 6.18.03 5 | ############### 6 | # usage: run sshsetup.sh when you login, follow the instructions i gives 7 | # this will start ssh-agent, and copy your current agent info to a file ($myfile) 8 | # i put my myfile in ~/.ssh/myagent 9 | # then put a line in your startup script (.bashrc or .bash_profile) 10 | # like "source ~/.ssh/myagent" 11 | # now every time you login it will connect you to your current ssh-agent 12 | # I recommend highly using a password on your private key file 13 | ############### 14 | # 15 | # here's a quick how to run down of how to use ssh-agent and public/private keys 16 | # for ssh 17 | # first create your private/public keys, run "ssh-keygen -t dsa" as the user you 18 | # want to create keys for 19 | # store them in your home directory under .ssh 20 | # NEVER EVER EVER EVER give your private key (id_dsa) to anyone ever 21 | # ALWAYS ALWAYS ALWAYS use a password for any key that will connect you to a 22 | # box with root access, it's a good idea anyways 23 | # Now, ssh into the box you want to setup passwordless ssh on 24 | # edit ~/.ssh/authorized_keys 25 | # copy and paste your public key into that file (hint: your public key is 26 | # in ~/.ssh/id_dsa.pub if you followed ssh-keygen) 27 | # make sure it's all on one line, and no new spaces/changes 28 | # if it's the only key to be used you can just copy id_dsa.pub on your 29 | # box to authorized_keys on the server box 30 | # The one thing you may need to change is the hostname in the key, 31 | # especially if you're behind nat. This will need to be the IP/hostname of 32 | # the box that the server will think you're conneting from. 
33 | # if you go through a NAT box it will be the external IP of the NAT box 34 | # if you're a real host on the internet with working reverse dns then 35 | # you're all set 36 | # now you may need to edit the last 37 | # 38 | # Author : Jon Zobrist 39 | # Homepage : http://www.jonzobrist.com 40 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 41 | # Copyright (c) 2013, Jon Zobrist 42 | # All rights reserved. 43 | # 44 | # Redistribution and use in source and binary forms, with or without 45 | # modification, are permitted provided that the following conditions are met: 46 | # 47 | # 1. Redistributions of source code must retain the above copyright notice, this 48 | # list of conditions and the following disclaimer. 49 | # 2. Redistributions in binary form must reproduce the above copyright notice, 50 | # this list of conditions and the following disclaimer in the documentation 51 | # and/or other materials provided with the distribution. 52 | # 53 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 54 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 55 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 56 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 57 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 58 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 59 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 60 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 61 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 62 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
63 | # 64 | # Purpose : This script uses the s3ls command to list all files in a bucket, root dir 65 | # Usage : gather-public-ssh-keys.sh [Directory] 66 | 67 | 68 | @myagent = `ssh-agent`; 69 | $myfile = "$ENV{'HOME'}/.ssh/myagent"; 70 | $chmod_cmd = "/bin/chmod og-rwx"; 71 | $debug = 0; 72 | 73 | open (MYFILE,">$myfile") || die "could not ompen myfile $myfile: $!\n"; 74 | 75 | print "ssh-agent started\nCopy and paste these lines into your terminal to run them:\n\n"; 76 | 77 | foreach $myline (@myagent) { 78 | if ($myline =~ m/SSH_/) { 79 | print "$myline"; 80 | print MYFILE "$myline"; 81 | $hit = 1; 82 | } 83 | else { 84 | if ($debug ) { print "BIG FAST MISS ON $myline\n"; } 85 | } 86 | } 87 | 88 | if (!$hit) { 89 | print "\n\n\#\#\#\#\#\#\#\nsomething bad happened\n\#\#\#\#\#\#\#\n"; 90 | } 91 | else { 92 | print "\nthen run ssh-add to add your private key\n"; 93 | } 94 | $my_chmod_cmd = "$chmod_cmd $myfile"; 95 | $result = system($my_chmod_cmd); 96 | -------------------------------------------------------------------------------- /sysctl-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4: 3 | # 4 | # Author : Nicolas Brousse 5 | # From : https://www.shell-tips.com/2010/09/13/linux-sysctl-configuration-and-tuning-script/ 6 | # 7 | # Added kernel version < 2.6.33 set net.ipv4.tcp_congestion_control=htcp 8 | # Notes : 9 | # This script is a simple "helper" to configure your sysctl.conf on linux 10 | # There is no silver bullet. Don't expect the perfect setup, review comments 11 | # and adapt the parameters to your needs and application usage. 12 | # 13 | # Use this script at your OWN risk. There is no guarantee whatsoever. 14 | # 15 | # License : 16 | # This work is licenced under the CC-GNU LGPL version 2.1 or later. 
# To view a copy of this licence, visit http://creativecommons.org/licenses/LGPL/2.1/
# or send a letter to :
#
# Creative Commons
# 171 Second Street, Suite 300
# San Francisco, California 94105, USA
#
# May 2012, Jon Zobrist http://www.jonzobrist.com/
# Things added :
# Apache max file handlers update to /etc/security/limits.conf
# Check and add pam_limits.so is loaded by the su program (as many things run via su)
# Backing up of previous sysctl.conf file
# Merging of previous sysctl.conf settings if new settings don't override
# tcp_available_congestion_control detection and setting
# Updates hosted on github at https://github.com/jonzobrist/Bash-Admin-Scripts

host=$(hostname)

# Split the kernel release (e.g. "2.6.32-5-amd64") into three numeric fields.
ARCH=$(uname -m)
KERNEL_STRING=$(uname -r | sed -e 's/[^0-9]/ /g')
KERNEL_VERSION=$(echo "${KERNEL_STRING}" | awk '{ print $1 }')
MAJOR_VERSION=$(echo "${KERNEL_STRING}" | awk '{ print $2 }')
MINOR_VERSION=$(echo "${KERNEL_STRING}" | awk '{ print $3 }')
echo "${KERNEL_VERSION}.${MAJOR_VERSION}.${MINOR_VERSION}"
CURRENT_SYSCTL_FILE=/tmp/sysctl-existing-$(date +%F-%s)

# Snapshot the current settings (comments and blank lines stripped) so they
# can be merged back after the new file is written.
touch ${CURRENT_SYSCTL_FILE}
#chmod og-rwx ${CURRENT_SYSCTL_FILE}
# BUGFIX: read the real /etc/sysctl.conf (was /etc/sysctl-1.conf, which does
# not exist, so the merge step at the bottom never had anything to merge).
grep -v '^#' /etc/sysctl.conf | grep . >> ${CURRENT_SYSCTL_FILE}
BACKUP_SYSCTL="sysctl.conf-$(date +%F-%s)"
echo "moving sysctl.conf to /etc/${BACKUP_SYSCTL}"
mv /etc/sysctl.conf /etc/${BACKUP_SYSCTL}

# bc is required for the memory sizing math below.
# BUGFIX: abort when it is missing; the script used to warn and then carry on,
# producing empty values for shmmax/file-max/etc.
if ! which bc
then
    echo "This script require GNU bc, cf. http://www.gnu.org/software/bc/"
    echo "On Linux Debian/Ubuntu you can install it by doing : apt-get install bc"
    exit 1
fi

echo "Update sysctl for $host"

# Derive kernel/TCP limits from physical RAM.
mem_bytes=$(awk '/MemTotal:/ { printf "%0.f",$2 * 1024}' /proc/meminfo)
shmmax=$(echo "$mem_bytes * 0.90" | bc | cut -f 1 -d '.')
shmall=$(expr $mem_bytes / $(getconf PAGE_SIZE))
max_orphan=$(echo "$mem_bytes * 0.10 / 65536" | bc | cut -f 1 -d '.')
file_max=$(echo "$mem_bytes / 4194304 * 256" | bc | cut -f 1 -d '.')
max_tw=$(($file_max*2))
min_free=$(echo "($mem_bytes / 1024) * 0.01" | bc | cut -f 1 -d '.')

# htcp for kernels older than 2.6.33, otherwise reno when available, else cubic.
# BUGFIX: the old test ([ ver -lt 3 ] && [ major -lt 7 ] && [ minor -lt 33 ])
# mis-classified kernels such as 2.4.40; compare the fields positionally.
if [ "${KERNEL_VERSION}" -lt 2 ] \
   || { [ "${KERNEL_VERSION}" -eq 2 ] && [ "${MAJOR_VERSION}" -lt 6 ]; } \
   || { [ "${KERNEL_VERSION}" -eq 2 ] && [ "${MAJOR_VERSION}" -eq 6 ] && [ "${MINOR_VERSION}" -lt 33 ]; }
then
    CONGESTION_CONTROL="htcp"
else
    if [ "$(sysctl net.ipv4.tcp_available_congestion_control | grep reno)" ]
    then
        CONGESTION_CONTROL="reno"
    else
        CONGESTION_CONTROL="cubic"
    fi
fi

# Lower dirty-page ratios for ssd/highmem boxes to avoid long writeback stalls.
if [ "$1" != "ssd" ]; then
    vm_dirty_bg_ratio=5
    vm_dirty_ratio=15
else
    # This setup is generally ok for ssd and highmem servers
    vm_dirty_bg_ratio=3
    vm_dirty_ratio=5
fi

# Write the new /etc/sysctl.conf (heredoc content kept as the original policy).
>/etc/sysctl.conf cat << EOF

# Disable syncookies (syncookies are not RFC compliant and can use too muche resources)
net.ipv4.tcp_syncookies = 0

# Basic TCP tuning
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_synack_retries = 3
net.ipv4.tcp_syn_retries = 3

# RFC1337
net.ipv4.tcp_rfc1337 = 1

# Defines the local port range that is used by TCP and UDP
# to choose the local port
net.ipv4.ip_local_port_range = 1024 65535

# Log packets with impossible addresses to kernel log
net.ipv4.conf.all.log_martians = 1

# Minimum interval between garbage collection passes This interval is
# in effect under high memory pressure on the pool
net.ipv4.inet_peer_gc_mintime = 5

# Disable Explicit Congestion Notification in TCP
net.ipv4.tcp_ecn = 0

# Enable window scaling as defined in RFC1323
net.ipv4.tcp_window_scaling = 1

# Enable timestamps (RFC1323)
net.ipv4.tcp_timestamps = 1

# DISable select acknowledgments
net.ipv4.tcp_sack = 0

# Enable FACK congestion avoidance and fast restransmission
net.ipv4.tcp_fack = 1

# DISABLE Allows TCP to send "duplicate" SACKs
net.ipv4.tcp_dsack = 0

# Controls IP packet forwarding
net.ipv4.ip_forward = 0

# No controls source route verification (RFC1812)
net.ipv4.conf.default.rp_filter = 0

# Enable faster reuse for TIME-WAIT sockets
net.ipv4.tcp_tw_reuse = 1

# TODO : change TCP_SYNQ_HSIZE in include/net/tcp.h
# to keep TCP_SYNQ_HSIZE*16<=tcp_max_syn_backlog
net.ipv4.tcp_max_syn_backlog = 20000

# tells the kernel how many TCP sockets that are not attached
# to any user file handle to maintain
net.ipv4.tcp_max_orphans = $max_orphan

# How may times to retry before killing TCP connection, closed by our side
net.ipv4.tcp_orphan_retries = 1

# how long to keep sockets in the state FIN-WAIT-2
# if we were the one closing the socket
net.ipv4.tcp_fin_timeout = 20

# maximum number of sockets in TIME-WAIT to be held simultaneously
net.ipv4.tcp_max_tw_buckets = $max_tw

# don't cache ssthresh from previous connection
net.ipv4.tcp_no_metrics_save = 1
net.ipv4.tcp_moderate_rcvbuf = 1

# increase Linux autotuning TCP buffer limits
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216

# increase TCP max buffer size
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216

net.core.netdev_max_backlog = 2500
net.core.somaxconn = 65000

vm.swappiness = 0

# You can monitor the kernel behavior with regard to the dirty
# pages by using grep -A 1 dirty /proc/vmstat
vm.dirty_background_ratio = $vm_dirty_bg_ratio
vm.dirty_ratio = $vm_dirty_ratio

# required free memory (set to 1% of physical ram)
vm.min_free_kbytes = $min_free

# system open file limit
fs.file-max = $file_max

# Core dump suidsafe
fs.suid_dumpable = 2

kernel.printk = 4 4 1 7
kernel.core_uses_pid = 1
kernel.sysrq = 0
kernel.msgmax = 65536
kernel.msgmnb = 65536

# Maximum shared segment size in bytes
kernel.shmmax = $shmmax

# Maximum number of shared memory segments in pages
kernel.shmall = $shmall

net.ipv4.tcp_congestion_control=${CONGESTION_CONTROL}

EOF

# Raise nofile limits for the web server user if not already present.
LIMITS_SET=$(grep 'www-data' /etc/security/limits.conf | grep nofile | grep -v '^#')
if [ "${LIMITS_SET}" ]
then
    echo "www-data limits for nofiles already set to"
    # BUGFIX: 'nofil' -> 'nofile' (typo made this report line print nothing
    # it shouldn't, and would also match unrelated tokens).
    grep 'www-data' /etc/security/limits.conf | grep nofile | grep -v '^#'
else
    echo "Setting limits for user www-data in /etc/security/limits.conf"
    echo "www-data soft nofile 50000" >> /etc/security/limits.conf
    echo "www-data hard nofile 60000" >> /etc/security/limits.conf
fi

# Ensure pam_limits is active for su, otherwise the limits above are ignored.
PAM_SU=$(grep pam_limits.so /etc/pam.d/su | grep -v '^#')
if [ "${PAM_SU}" ]
then
    echo "pam_limits is already set in /etc/pam.d/su"
else
    echo "Adding pam limits to /etc/pam.d/su"
    echo "session required pam_limits.so" >> /etc/pam.d/su
fi

# Merge back any keys from the old sysctl.conf that the new file does not set.
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
for LINE in $(grep -v '^#' ${CURRENT_SYSCTL_FILE} | grep . )
do
    unset RESULT
    # BUGFIX: take the key up to '=' (awk '$1' alone kept the whole
    # "key=value" string when written without spaces), and anchor the lookup
    # so one key cannot substring-match another (e.g. rmem vs rmem_max).
    MY_VAR=$(echo ${LINE} | cut -d '=' -f 1 | awk '{ print $1 }')
    RESULT=$(grep "^${MY_VAR}[ =]" /etc/sysctl.conf)
    if [ "${RESULT}" ]
    then
        echo "${MY_VAR} exists in new sysctl.conf, skipping"
    else
        echo "Adding ${MY_VAR} from old sysctl.conf to new"
        echo "${LINE}"
        echo "${LINE}" >> /etc/sysctl.conf
    fi

done
IFS=$SAVEIFS

/sbin/sysctl -p /etc/sysctl.conf
exit $?

--------------------------------------------------------------------------------
/sysctl.conf:
--------------------------------------------------------------------------------
# Uncomment the next two lines to enable Spoof protection (reverse-path filter)
# Turn on Source Address Verification in all interfaces to
# prevent some spoofing attacks
#net.ipv4.conf.default.rp_filter=1
#net.ipv4.conf.all.rp_filter=1

# Uncomment the next line to enable TCP/IP SYN cookies
# See http://lwn.net/Articles/277146/
# Note: This may impact IPv6 TCP sessions too
#net.ipv4.tcp_syncookies=1

# Uncomment the next line to enable packet forwarding for IPv4
#net.ipv4.ip_forward=1

# Uncomment the next line to enable packet forwarding for IPv6
# Enabling this option disables Stateless Address Autoconfiguration
# based on Router Advertisements for this host
#net.ipv6.conf.all.forwarding=1


###################################################################
# Additional settings - these settings can improve the network
# security of the host and prevent against some network attacks
# including spoofing attacks and man in the middle attacks through
# redirection. Some network environments, however, require that these
# settings are disabled so review and enable them as needed.
#
# Do not accept ICMP redirects (prevent MITM attacks)
#net.ipv4.conf.all.accept_redirects = 0
#net.ipv6.conf.all.accept_redirects = 0
# _or_
# Accept ICMP redirects only for gateways listed in our default
# gateway list (enabled by default)
# net.ipv4.conf.all.secure_redirects = 1
#
# Do not send ICMP redirects (we are not a router)
#net.ipv4.conf.all.send_redirects = 0
#
# Do not accept IP source route packets (we are not a router)
#net.ipv4.conf.all.accept_source_route = 0
#net.ipv6.conf.all.accept_source_route = 0
#
# Log Martian Packets
#net.ipv4.conf.all.log_martians = 1
#
# https://paste.amazon.com/show/zob/1460156497

# Decrease the time default value for tcp_fin_timeout connection, this is key to get > 1k TCP conn/sec
# NOTE(review): overridden by tcp_fin_timeout=60 in the CentOS5 block below —
# sysctl applies the file top to bottom, so the last value wins.
net.ipv4.tcp_fin_timeout = 15
# Decrease the time default value for tcp_keepalive_time connection
# NOTE(review): overridden by tcp_keepalive_time=120 further down.
net.ipv4.tcp_keepalive_time = 1200
# Keep tcp_window_scaling ON (the old comment said "Turn off", contradicting
# the value below, which enables it)
net.ipv4.tcp_window_scaling = 1
# Turn on the tcp_sack
# NOTE(review): overridden by tcp_sack=0 in the CentOS5 block below.
net.ipv4.tcp_sack = 1
# Turn off the tcp_timestamps
net.ipv4.tcp_timestamps = 0
# Reset Max ports to ~ 64,511 like in the good ole days
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_tw_reuse = 1

# NOTE(review): the four 256960 values below are overridden by the 262141
# values in the CentOS5 block further down.
net.core.rmem_default = 256960
net.core.rmem_max = 256960
net.core.wmem_default = 256960
net.core.wmem_max = 256960

# Kernel tuning settings for CentOS5,
# busy webserver with lots of free memory.
# Big queue for the network device
net.core.netdev_max_backlog=30000
# Lots of local ports for connections
net.ipv4.tcp_max_tw_buckets=2000000
# Bump up send/receive buffer sizes
net.core.rmem_default=262141
net.core.wmem_default=262141
net.core.rmem_max=262141
net.core.wmem_max=262141
# Disable TCP selective acknowledgements
net.ipv4.tcp_sack=0
net.ipv4.tcp_dsack=0
# Decrease the amount of time we spend
# trying to maintain connections
net.ipv4.tcp_retries2=5
net.ipv4.tcp_fin_timeout=60
net.ipv4.tcp_keepalive_time=120
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=3
# Increase the number of incoming connections
# that can queue up before dropping
net.core.somaxconn=256
# Increase option memory buffers
net.core.optmem_max=20480

# Effectively disable the RFC 5961 challenge-ACK rate limit — presumably the
# off-path attack mitigation applied by set-sysctl-challenge-ack.sh in this
# repo; TODO confirm intent.
net.ipv4.tcp_challenge_ack_limit = 999999999

--------------------------------------------------------------------------------
/tags:
--------------------------------------------------------------------------------
!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/
!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/
!_TAG_OUTPUT_EXCMD mixed /number, pattern, mixed, or combineV2/
!_TAG_OUTPUT_FILESEP slash /slash or backslash/
!_TAG_OUTPUT_MODE u-ctags /u-ctags or e-ctags/
!_TAG_PATTERN_LENGTH_LIMIT 96 /0 for no limit/
!_TAG_PROC_CWD /home/zob/src/Bash-Admin-Scripts/ //
!_TAG_PROGRAM_AUTHOR Universal Ctags Team //
!_TAG_PROGRAM_NAME Universal Ctags /Derived from Exuberant Ctags/
!_TAG_PROGRAM_URL https://ctags.io/ /official site/
!_TAG_PROGRAM_VERSION 5.9.0 //
0 dynamic-dns-route53/external.json /^        "ResourceRecords": [{ "Value": "RECORD_VALUE"}]$/;" o array:Changes.0.ResourceRecordSet.ResourceRecords
0 dynamic-dns-route53/external.json /^    "Changes": [{$/;" o array:Changes
APEXD
dynamic-dns-route53/update-dns-external.conf /^APEXD=${FQDN#*.}$/;" k 15 | ARCHIVE bluesun-setup/server.conf /^ARCHIVE=1 #1 enables archiving every file that is put to S3, so there will be 1 file created, an/;" k 16 | AWS_PROFILE dynamic-dns-route53/update-dns-external.conf /^AWS_PROFILE="default"$/;" k 17 | Action dynamic-dns-route53/external.json /^ "Action": "UPSERT",$/;" s object:Changes.0 18 | Auto Remote Shell auto-remote-shell/README.md /^## Auto Remote Shell ##$/;" s 19 | CSV_HISTORY dynamic-dns-route53/update-dns-external.conf /^CSV_HISTORY="${HOME}\/etc\/ip-log-change-$(date +%Y).csv"$/;" k 20 | Changes dynamic-dns-route53/external.json /^ "Changes": [{$/;" a 21 | Collection of bash snippets, scripts, and utilized I've accumulated/written/altered/impaired over the years README.md /^## Collection of bash snippets, scripts, and utilized I've accumulated\/written\/altered\/impair/;" s 22 | Comment dynamic-dns-route53/external.json /^ "Comment": "UPSERT record",$/;" s 23 | Configure auto-remote-shell/README.md /^### Configure ###$/;" S section:Auto Remote Shell 24 | Confirm your setup is working auto-remote-shell/README.md /^### Confirm your setup is working ###$/;" S section:Auto Remote Shell 25 | DEBUG bluesun-setup/server.conf /^DEBUG=1 # 0 disabled (silent), 1 enabled (DEBUG)$/;" k 26 | DEBUG dynamic-dns-route53/update-dns-external.conf /^DEBUG=${DEBUG:-0}$/;" k 27 | DEBUG dynamic-dns-route53/update-dns-external.sh /^DEBUG () {$/;" f 28 | DEBUG pihole/update-pihole.sh /^DEBUG () {$/;" f 29 | DIRS bluesun-setup/server.conf /^DIRS="\/var\/www \/etc\/apache2"$/;" k 30 | Dynamic DNS with Amazon Route 53 in Bash/command line dynamic-dns-route53/README.md /^## Dynamic DNS with Amazon Route 53 in Bash\/command line ##$/;" s 31 | ENDFILENAME bluesun-setup/server.conf /^ENDFILENAME="latest.sql.gz" #Set this to the suffix you want for the end of the latest\/current /;" k 32 | EOF sysctl-config.sh /^>\/etc\/sysctl.conf cat << EOF $/;" h 33 | EXTERNAL_JSON_TEMPLATE 
dynamic-dns-route53/update-dns-external.conf /^EXTERNAL_JSON_TEMPLATE="${HOME}\/etc\/external.json"$/;" k 34 | FQDN dynamic-dns-route53/update-dns-external.conf /^FQDN="host.example.com."$/;" k 35 | Fast Setup on Raspberry Pi running Raspbian 11 (bullseye) auto-remote-shell/README.md /^### Fast Setup on Raspberry Pi running Raspbian 11 (bullseye) ###$/;" S section:Auto Remote Shell 36 | HOST dynamic-dns-route53/update-dns-external.conf /^HOST=${FQDN%%.*}$/;" k 37 | HOSTED_ZONE_ID dynamic-dns-route53/update-dns-external.conf /^HOSTED_ZONE_ID="AWSZONEID"$/;" k 38 | IP_HTTP_URL dynamic-dns-route53/update-dns-external.conf /^IP_HTTP_URL="https:\/\/www.dangfast.com\/ip"$/;" k 39 | LAST_IP_FILE dynamic-dns-route53/update-dns-external.conf /^LAST_IP_FILE="${HOME}\/etc\/lastpublicip"$/;" k 40 | MAX_WAIT_FOR_MYSQL bluesun-setup/server.conf /^MAX_WAIT_FOR_MYSQL=300 #Set this to the longest time you want to wait for MySQL before aborting$/;" k 41 | MYSQLDUMP_ARGS bluesun-setup/server.conf /^MYSQLDUMP_ARGS="--single-transaction --add-drop-table=TRUE --lock-tables=TRUE --replace=true --e/;" k 42 | MYSQL_FILENAME bluesun-setup/server.conf /^MYSQL_FILENAME="mysqldump-${ENDFILENAME}" #Filename of a FULL mysql database backup, is restored/;" k 43 | Manual Setup on Generic Linux w/dpkg### auto-remote-shell/README.md /^### Manual Setup on Generic Linux w\/dpkg###$/;" S section:Auto Remote Shell 44 | Name dynamic-dns-route53/external.json /^ "Name": "RECORD_NAME",$/;" s object:Changes.0.ResourceRecordSet 45 | Notes auto-remote-shell/README.md /^### Notes ###$/;" S section:Auto Remote Shell 46 | PATH bluesun-setup/server.conf /^PATH="${PATH}:\/usr\/local\/bin:\/usr\/local\/programs\/aws\/bin"$/;" k 47 | PINGFILE bluesun-setup/server.conf /^PINGFILE="\/var\/www\/example.com\/ping.html" #Set this to where the Elastic Load Balancer check/;" k 48 | PRINT_ERROR_HELP_AND_EXIT empty-bash-short.sh /^function PRINT_ERROR_HELP_AND_EXIT {$/;" f 49 | Purpose : This script aims to provide dynamic 
DNS names using Amazon Route 53 dynamic-dns-route53/README.md /^### Purpose : This script aims to provide dynamic DNS names using Amazon Route 53 ###$/;" S section:Dynamic DNS with Amazon Route 53 in Bash/command line 50 | Requires: dynamic-dns-route53/README.md /^### Requires: ###$/;" S section:Dynamic DNS with Amazon Route 53 in Bash/command line 51 | ResourceRecordSet dynamic-dns-route53/external.json /^ "ResourceRecordSet": {$/;" o object:Changes.0 52 | ResourceRecords dynamic-dns-route53/external.json /^ "ResourceRecords": [{ "Value": "RECORD_VALUE"}]$/;" a object:Changes.0.ResourceRecordSet 53 | S3_BUCKET bluesun-setup/server.conf /^S3_BUCKET="www-backups.example.com" #S3 bucket named where you will store all the files$/;" k 54 | Setup: dynamic-dns-route53/README.md /^### Setup: ###$/;" S section:Dynamic DNS with Amazon Route 53 in Bash/command line 55 | TIMESTAMP bluesun-setup/server.conf /^TIMESTAMP=`date +%F-%s` #change here if you want a format other than 2011-07-23-1311447264, avoi/;" k 56 | TMPDIR bluesun-setup/server.conf /^TMPDIR="\/tmp\/bluesun-update-`hostname`-`date +%F-%s`"$/;" k 57 | TTL dynamic-dns-route53/external.json /^ "TTL": 300,$/;" n object:Changes.0.ResourceRecordSet 58 | TrimWhiteSpace .vimrc /^fun! 
TrimWhiteSpace()$/;" f 59 | Troubleshooting dynamic-dns-route53/README.md /^### Troubleshooting ###$/;" S section:Dynamic DNS with Amazon Route 53 in Bash/command line 60 | Type dynamic-dns-route53/external.json /^ "Type": "A",$/;" s object:Changes.0.ResourceRecordSet 61 | USE_R53_API dynamic-dns-route53/update-dns-external.conf /^USE_R53_API=0$/;" k 62 | Value dynamic-dns-route53/external.json /^ "ResourceRecords": [{ "Value": "RECORD_VALUE"}]$/;" s object:Changes.0.ResourceRecordSet.ResourceRecords.0 63 | ZOB .vimrc /^augroup ZOB$/;" a 64 | args make-s3-psurl.py /^ args = parser.parse_args()$/;" v 65 | authenticate zabbix-setup.sh /^authenticate() {$/;" f 66 | check_instance setup-ec2-raid-0.sh /^function check_instance {$/;" f 67 | check_mysql_ready bluesun-setup/bluesun-setup.sh /^function check_mysql_ready {$/;" f 68 | connectTimes tcpcheck-bulk.py /^ connectTimes = int(sys.argv[3]) # use port from ARGV 3$/;" v 69 | count tcpcheck-bulk.py /^count = 0$/;" v 70 | create_host zabbix-setup.sh /^create_host() {$/;" f 71 | failurecount tcpcheck-bulk.py /^failurecount = 0$/;" v 72 | fromaddr page.py /^fromaddr = "noreply@example.com"$/;" v 73 | get_am2_ami zsh_aws_aliases.sh /^function get_am2_ami() {$/;" f 74 | get_ubuntu_ami zsh_aws_aliases.sh /^function get_ubuntu_ami() {$/;" f 75 | ip_local_port_range sysctl.conf /^net.ipv4.ip_local_port_range = 1024 65535$/;" k 76 | mapleader .vimrc /^let mapleader = " "$/;" v 77 | msg page.py /^msg = ("From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\n\\r\\n"$/;" v 78 | msg page.py /^msg = msg + sys.argv[1]$/;" v 79 | netdev_max_backlog sysctl.conf /^net.core.netdev_max_backlog=30000$/;" k 80 | optmem_max sysctl.conf /^net.core.optmem_max=20480$/;" k 81 | parser make-s3-psurl.py /^ parser = argparse.ArgumentParser(description='Make a S3 presigned URL for key in bucket.')$/;" v 82 | presign_url make-s3-psurl.py /^def presign_url(bucket, key, expiry, profile, region):$/;" f 83 | prompt page.py /^def prompt(prompt):$/;" f 84 | 
rmem_default sysctl.conf /^net.core.rmem_default = 256960$/;" k 85 | rmem_default sysctl.conf /^net.core.rmem_default=262141$/;" k 86 | rmem_max sysctl.conf /^net.core.rmem_max = 256960 $/;" k 87 | rmem_max sysctl.conf /^net.core.rmem_max=262141$/;" k 88 | s tcpcheck-bulk.py /^ s = socket(AF_INET, SOCK_STREAM) #create a TCP socket$/;" v 89 | s tcpcheck.py /^ s = socket(AF_INET, SOCK_STREAM) #create a TCP socket$/;" v 90 | s3_url make-s3-psurl.py /^ s3_url = presign_url(args.s3_bucket, args.s3_key, args.s3_expiry, args.profile, args.region)$/;" v 91 | server page.py /^server = smtplib.SMTP('server.ip.or.hostname')$/;" v 92 | serverHost tcpcheck-bulk.py /^ serverHost = sys.argv[1] # use port from ARGV 1$/;" v 93 | serverHost tcpcheck.py /^ serverHost = sys.argv[1] # use port from ARGV 1$/;" v 94 | serverPort tcpcheck-bulk.py /^ serverPort = int(sys.argv[2]) # use port from ARGV 2$/;" v 95 | serverPort tcpcheck.py /^ serverPort = int(sys.argv[2]) # use port from ARGV 2$/;" v 96 | setup_raid setup-ec2-raid-0.sh /^function setup_raid {$/;" f 97 | somaxconn sysctl.conf /^net.core.somaxconn=256$/;" k 98 | ss .vimrc /^cnoremap ss so $vim\/sessions\/*.vim$/;" m 99 | subject page.py /^subject = "[ALERT] Alert from localhost"$/;" v 100 | successcount tcpcheck-bulk.py /^successcount = 0$/;" v 101 | tcp_challenge_ack_limit sysctl.conf /^net.ipv4.tcp_challenge_ack_limit = 999999999$/;" k 102 | tcp_dsack sysctl.conf /^net.ipv4.tcp_dsack=0$/;" k 103 | tcp_fin_timeout sysctl.conf /^net.ipv4.tcp_fin_timeout = 15$/;" k 104 | tcp_fin_timeout sysctl.conf /^net.ipv4.tcp_fin_timeout=60$/;" k 105 | tcp_keepalive_intvl sysctl.conf /^net.ipv4.tcp_keepalive_intvl=30$/;" k 106 | tcp_keepalive_probes sysctl.conf /^net.ipv4.tcp_keepalive_probes=3$/;" k 107 | tcp_keepalive_time sysctl.conf /^net.ipv4.tcp_keepalive_time = 1200$/;" k 108 | tcp_keepalive_time sysctl.conf /^net.ipv4.tcp_keepalive_time=120$/;" k 109 | tcp_max_tw_buckets sysctl.conf /^net.ipv4.tcp_max_tw_buckets=2000000$/;" k 110 | 
tcp_retries2 sysctl.conf /^net.ipv4.tcp_retries2=5$/;" k 111 | tcp_sack sysctl.conf /^net.ipv4.tcp_sack = 1$/;" k 112 | tcp_sack sysctl.conf /^net.ipv4.tcp_sack=0$/;" k 113 | tcp_timestamps sysctl.conf /^net.ipv4.tcp_timestamps = 0$/;" k 114 | tcp_tw_reuse sysctl.conf /^net.ipv4.tcp_tw_reuse = 1$/;" k 115 | tcp_window_scaling sysctl.conf /^net.ipv4.tcp_window_scaling = 1$/;" k 116 | test_re test_regular_expression.py /^def test_re(my_pattern, test_line):$/;" f 117 | toaddrs page.py /^toaddrs = ['userA@example.com']$/;" v 118 | try_get download_file_with_exponential_backoff_function.sh /^function try_get {$/;" f 119 | try_get_old download_file_with_exponential_backoff_function.sh /^function try_get_old {$/;" f 120 | update_ec2_regions zsh_aws_aliases.sh /^update_ec2_regions() {$/;" f 121 | wmem_default sysctl.conf /^net.core.wmem_default = 256960$/;" k 122 | wmem_default sysctl.conf /^net.core.wmem_default=262141$/;" k 123 | wmem_max sysctl.conf /^net.core.wmem_max = 256960 $/;" k 124 | wmem_max sysctl.conf /^net.core.wmem_max=262141$/;" k 125 | -------------------------------------------------------------------------------- /tcpcheck-bulk.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. 
Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : This script connects to a given server port a BUNCH of times 30 | # Usage : tcpcheck.py HOST PORT CONNECTIONS 31 | 32 | import errno, sys 33 | from socket import * 34 | successcount = 0 35 | failurecount = 0 36 | count = 0 37 | 38 | if (len(sys.argv) > 1): 39 | 40 | serverHost = sys.argv[1] # use port from ARGV 1 41 | serverPort = int(sys.argv[2]) # use port from ARGV 2 42 | connectTimes = int(sys.argv[3]) # use port from ARGV 3 43 | 44 | while (count < connectTimes) : 45 | s = socket(AF_INET, SOCK_STREAM) #create a TCP socket 46 | s.settimeout(1) 47 | try: 48 | s.connect((serverHost, serverPort)) #connect to server on the port 49 | s.shutdown(2) #disconnect 50 | successcount += 1 51 | count += 1 52 | #print "Success. " + repr(successcount) + " Connected to " + serverHost + " on port: " + str(serverPort) 53 | except: 54 | failurecount += 1 55 | count += 1 56 | #print "Failure. 
" + repr(failurecount) + " Cannot connect to " + serverHost + " on port: " + str(serverPort) 57 | 58 | print "Done with " + serverHost + " on port: " + str(serverPort) 59 | print "Done. Failures : " + repr(failurecount) + " Successes : " + repr(successcount) 60 | if (failurecount > 0): 61 | sys.exit(errno.EPERM) 62 | else: 63 | sys.exit(0) 64 | else: 65 | print "Usage : tcpcheck.py HOST PORT CONNECTIONS" 66 | 67 | -------------------------------------------------------------------------------- /tcpcheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Author : Jon Zobrist 4 | # Homepage : http://www.jonzobrist.com 5 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | # 29 | # Purpose : This script connects to a given server port 30 | # Usage : tcpcheck.py HOST PORT 31 | 32 | import errno, sys 33 | from socket import * 34 | 35 | 36 | if (len(sys.argv) > 1): 37 | 38 | serverHost = sys.argv[1] # use port from ARGV 1 39 | serverPort = int(sys.argv[2]) # use port from ARGV 2 40 | 41 | s = socket(AF_INET, SOCK_STREAM) #create a TCP socket 42 | try: 43 | s.connect((serverHost, serverPort)) #connect to server on the port 44 | s.shutdown(2) #disconnect 45 | print "Success. Connected to " + serverHost + " on port: " + str(serverPort) 46 | except: 47 | print "Failure. 
Cannot connect to " + serverHost + " on port: " + str(serverPort) 48 | sys.exit(errno.EPERM) 49 | else: 50 | print "Usage : tcpcheck.py HOST PORT" 51 | 52 | -------------------------------------------------------------------------------- /test_regular_expression.py: -------------------------------------------------------------------------------- 1 | # This Python function helps you to test arbitrary regular expressions 2 | # 3 | # Usage: 4 | # Import into python/ipython/script 5 | # Pass in your regular expression as a string or regular expression r'string' 6 | # 7 | # Often times when writing regular expressions 8 | # I find myself manually iterating over the pattern 9 | # testing a string it failed to match to see where 10 | # the problem is 11 | # I wrote this to do that automatically 12 | # You give it a string or r'string' pattern and a line to test 13 | # it against. It slices the pattern, ignores patterns that fail 14 | # to compile into a regex as invalid patterns 15 | # any valid patterns are tested against the test line 16 | # Passes are reported via printing, failure causes the function 17 | # to return the current expression string 18 | # This is the regex that compiled but failed to match 19 | # 20 | import re 21 | 22 | 23 | def test_re(my_pattern, test_line): 24 | """ Given regular expression pattern 25 | slice it and find where it's breaking 26 | check test_line and find matches 27 | """ 28 | for cur_pos in range(1, len(my_pattern)): 29 | pat_slice = my_pattern[0:cur_pos] 30 | try: 31 | my_re = re.compile(pat_slice) 32 | print("{}".format(cur_pos)) 33 | matches = my_re.findall(test_line) 34 | if len(matches) == 0: 35 | return my_pattern[0:cur_pos] 36 | else: 37 | print("Pass {}".format(matches)) 38 | except: 39 | next 40 | -------------------------------------------------------------------------------- /test_tls_ciphers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 
###################################################### 4 | # Author : Jon Zobrist 5 | # Homepage : http://www.jonzobrist.com 6 | # License : BSD http://en.wikipedia.org/wiki/BSD_license 7 | # Copyright (c) 2018, Jon Zobrist 8 | # All rights reserved. 9 | 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are met: 12 | # 13 | # 1. Redistributions of source code must retain the above copyright notice, this 14 | # list of conditions and the following disclaimer. 15 | # 2. Redistributions in binary form must reproduce the above copyright notice, 16 | # this list of conditions and the following disclaimer in the documentation 17 | # and/or other materials provided with the distribution. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 23 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | # 30 | ###################################################### 31 | 32 | # OpenSSL requires the port number. 33 | if [ ! 
#!/bin/bash
#
# upload_ssh_keys_to_ec2.sh
# Uploads a public ssh key to EC2 in all regions.
# Usage: upload_ssh_keys_to_ec2.sh keyname keyfile
#

aws=$(which aws)

# Require a key name, a readable public key file, and an executable aws CLI.
if [ "${1}" ] && [ -f "${2}" ] && [ -x "${aws}" ]
then
    regions=$("${aws}" ec2 describe-regions --output text --query 'Regions[*].RegionName')
    if [ "${regions}" ]
    then
        echo "Uploading SSH key ${2} as keyname ${1} to all regions"
        FAILED=0
        # ${regions} intentionally unquoted: split the whitespace-separated list.
        for r in ${regions}
        do
            echo "${r}"
            # fileb:// makes the CLI read the key file as raw bytes.
            if ! "${aws}" ec2 import-key-pair --region "${r}" --key-name "${1}" --public-key-material "fileb://${2}"
            then
                # Keep going so one bad region does not block the rest,
                # but remember the failure for the exit status.
                echo "Failed to import key ${1} in region ${r}" >&2
                FAILED=1
            fi
        done
        # Previously the script always exited 0 even when imports failed.
        exit ${FAILED}
    else
        echo "Failed to get regions, exiting at $(date)"
        exit 1
    fi
else
    echo "Usage ${0} keyname keyfile"
    exit 1
fi
http://en.wikipedia.org/wiki/BSD_license 6 | # Copyright (c) 2012, Jon Zobrist 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions are met: 11 | # 12 | # 1. Redistributions of source code must retain the above copyright notice, this 13 | # list of conditions and the following disclaimer. 14 | # 2. Redistributions in binary form must reproduce the above copyright notice, 15 | # this list of conditions and the following disclaimer in the documentation 16 | # and/or other materials provided with the distribution. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 22 | # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# Purpose : Install and configure the Zabbix agent on this EC2 instance, then
#           authenticate against the Zabbix server's JSON-RPC API so the host
#           can be registered.  (Header previously copy-pasted the description
#           of gather-public-ssh-keys.sh.)
# Usage : zabbix-setup.sh

# VARIABLES
#HOST_NAME=''
# Public IP from the EC2 instance metadata service.
# NOTE(review): uses IMDSv1; instances enforcing IMDSv2 need a session token -- confirm.
IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4/)
# HOST_NAME is taken from a "HOST_NAME <value>" line in the instance user-data.
HOST_NAME="$(curl -s http://169.254.169.254/latest/user-data/ 2>&1 | grep HOST_NAME | awk '{ print $2 }').tiwipro.com"
echo "${HOST_NAME}"

# CONSTANT VARIABLES
ZABBIX_USER='administrator'        # Make user with API access and put name here
ZABBIX_PASS='password'             # Make user with API access and put password here
ZABBIX_SERVER='zabbix.example.com' # DNS or IP hostname of our Zabbix Server
API='https://zabbix.example.com/api_jsonrpc.php'
HOSTGROUPID=10                     # What host group to create the server in
TEMPLATEID=10001                   # Template ID to assign to new servers

# Install zabbix-agent in case it's not already installed
apt-get install -y zabbix-agent

# Write a minimal agent configuration pointing at our server.
{
    echo "Server=${ZABBIX_SERVER}"
    echo "Hostname=${HOST_NAME}"
    echo "StartAgents=5"
    echo "DebugLevel=3"
    echo "PidFile=/var/run/zabbix-agent/zabbix_agentd.pid"
    echo "LogFile=/var/log/zabbix-agent/zabbix_agentd.log"
    echo "Timeout=3"
} > /etc/zabbix/zabbix_agentd.conf

# Stop the agent until the host has been registered with the server.
service zabbix-agent stop

# Authenticate with the Zabbix API; prints the session id scraped from the
# Set-Cookie response header.
# NOTE(review): user.authenticate is deprecated in newer Zabbix releases in
# favour of user.login -- confirm against the target server version.
authenticate() {
    curl -s -i -X POST -H 'Content-Type: application/json-rpc' -d "{\"params\": {\"password\": \"$ZABBIX_PASS\", \"user\": \"$ZABBIX_USER\"}, \"jsonrpc\":\"2.0\", \"method\": \"user.authenticate\",\"auth\": \"\", \"id\": 0}" "$API" | grep -Eo 'Set-Cookie: zbx_sessionid=.+' | head -n 1 | cut -d '=' -f 2 | tr -d '\r'
}
AUTH_TOKEN=$(authenticate)

# Register this instance as a Zabbix host (agent on port 10050, monitored by
# IP) in HOSTGROUPID with TEMPLATEID attached.
create_host() {
    curl -s -i -X POST -H 'Content-Type: application/json-rpc' -d "{\"jsonrpc\":\"2.0\",\"method\":\"host.create\",\"params\":{\"host\":\"$HOST_NAME\",\"ip\":\"$IP\",\"dns\":\"$HOST_NAME\",\"port\":10050,\"useip\":1,\"groups\":[{\"groupid\":$HOSTGROUPID}],\"templates\":[{\"templateid\":$TEMPLATEID}]},\"auth\":\"$AUTH_TOKEN\",\"id\":0}" "$API"
}
output=$(create_host)

# A successful host.create response carries a "hostids" array.
if echo "${output}" | grep -q "hostids"
then
    echo -e "\nHost ${HOST_NAME} added successfully, starting Zabbix Agent\n"
    # Registration succeeded: the agent may now start reporting.
    service zabbix-agent start
    exit 0
else
    echo -e "Error in adding host ${HOST_NAME} at $(date):\n"
    # Surface the API's error message/detail fields from the JSON response.
    echo "${output}" | grep -Po '"message":.*?[^\\]",'
    echo "${output}" | grep -Po '"data":.*?[^\\]"'
    # Was a bare 'exit', which reported SUCCESS (status 0) on failure.
    exit 1
fi
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Purpose: Useful aliases for interacting with AWS from the CLI
#          Put into a .bashrc/aliases/.zshrc file, use from the command line
# Usage : varies
#   Get Amazon Linux 2 latest AMI:       get_am2_ami
#   Get Ubuntu 18.04 latest AMI:         get_ubuntu_ami
#   Cache the list of EC2 regions into ~/regions-ec2 (used as a temp data
#   store):                              update_ec2_regions
##################################

function get_am2_ami() {
    # Searches for the latest Amazon Linux 2 x86 64-bit (HVM, gp2) AMI.
    # Region comes from ${1}, or from an exported R if ${1} is absent.
    if [ "${1}" ] && [ ! "${R}" ]
    then
        R=${1}
    fi
    if [ "${R}" ]
    then
        # Sort the matching images by creation date and print the newest ImageId.
        aws ec2 describe-images --owners amazon --region "${R}" --filters 'Name=name,Values=amzn2-ami-hvm-2.0.????????-x86_64-gp2' 'Name=state,Values=available' --output json | jq -r '.Images | sort_by(.CreationDate) | last(.[]).ImageId'
    else
        echo "Usage: ${0} region; or export R=region; ${0}"
    fi
}

function get_ubuntu_ami() {
    # Searches for the latest Ubuntu 18.04 (bionic) x86 64-bit HVM/SSD AMI
    # published by Canonical (owner 099720109477).
    # (Comment fixed: previously a copy-paste claiming Amazon Linux 2.)
    if [ "${1}" ] && [ ! "${R}" ]
    then
        R=${1}
    fi
    if [ "${R}" ]
    then
        aws ec2 describe-images --owners 099720109477 --region "${R}" --filters 'Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-????????' 'Name=state,Values=available' --output json | jq -r '.Images | sort_by(.CreationDate) | last(.[]).ImageId'
    else
        echo "Usage: ${0} region; or export R=region; ${0}"
    fi
}

update_ec2_regions() {
    # Refresh the cached region list in ~/regions-ec2, but only if the new
    # list is at least as long (regions should only ever be added).
    MY_TMP_FILE=$(mktemp)
    aws ec2 describe-regions --output json | jq -r '.Regions[].RegionName' > "${MY_TMP_FILE}"
    # First run: no cache file yet -- treat as zero regions instead of letting
    # wc fail and the arithmetic test blow up on an empty OC.
    if [ -f ~/regions-ec2 ]
    then
        OC=$(wc -l < ~/regions-ec2)
    else
        OC=0
    fi
    NC=$(wc -l < "${MY_TMP_FILE}")
    if (( NC >= OC ))
    then
        /bin/mv "${MY_TMP_FILE}" ~/regions-ec2
    else
        echo "new file (${MY_TMP_FILE}) is not larger, did we lose regions?"
    fi
}