├── .alias-maximus
├── .gitignore
├── LICENSE
├── README.md
├── atrain.sh
├── backup.app.sh
├── cropduster.sh
├── filelist.txt.sample
├── filtlist.txt.sample
├── fureverso.sh
├── plex_futures.sh
├── plex_futures_n_stuckers.sh
├── plex_stuckers.sh
├── restore.app.sh
├── sarotund.sh
├── sarotup.sh
├── transfer.plex.sh
├── unpackplex.sh
├── wanchors.sh
├── wanchplus.sh
└── ziplist.txt.sample

/.alias-maximus:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# https://github.com/maximuskowalski/maxmisc/blob/master/.alias-maximus
# an assembly of aliaii

# Add the line below (uncommented) to your .bashrc or .zshrc
# [[ -f ~/.alias-maximus ]] && . ~/.alias-maximus


### SALTY ### and friends
alias croptail="tail -f /opt/crop/activity.log"
alias plowtail="tail -f /opt"


### SMOUNT
alias vfscycle='rclone rc vfs/refresh recursive=true --rc-addr=localhost:5575'
alias vfscycleseed='rclone rc vfs/refresh recursive=true --rc-addr=localhost:5580'

alias nseed="ncdu /mnt/local/seeding"

alias sysfailed="systemctl list-units --failed"
alias sysactive="systemctl --type=service --state=active"
alias sysall="systemctl list-unit-files"

# get the error messages from journalctl
alias jctl="journalctl -p 3 -xb"

# hardware info --short
alias hw="hwinfo --short"

# switch between bash and zsh
alias tobash="sudo chsh $USER -s /bin/bash && echo 'Now log out.'"
alias tozsh="sudo chsh $USER -s /bin/zsh && echo 'Now log out.'"

alias speedtest='curl -s https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py | python -'
alias reload=". ~/.zshrc && echo 'ZSH config reloaded from ~/.zshrc'"

# list
alias ls='ls --color=auto'
alias la='ls -a'
alias ll='ls -la'
alias l='ls'
alias l.="ls -A | egrep '^\.'"

# fix obvious typos
alias cd..='cd ..'
alias pdw="pwd"

## Colorize the grep command output for ease of use (good for log files) ##
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'

# readable output
alias df='df -h'


# ex = EXtractor for all kinds of archives
# usage: ex <file>
ex () {
  if [ -f "$1" ]; then
    case "$1" in
      *.tar.bz2) tar xjf "$1" ;;
      *.tar.gz)  tar xzf "$1" ;;
      *.bz2)     bunzip2 "$1" ;;
      *.rar)     unrar x "$1" ;;
      *.gz)      gunzip "$1" ;;
      *.tar)     tar xf "$1" ;;
      *.tbz2)    tar xjf "$1" ;;
      *.tgz)     tar xzf "$1" ;;
      *.zip)     unzip "$1" ;;
      *.Z)       uncompress "$1" ;;
      *.7z)      7z x "$1" ;;
      *.deb)     ar x "$1" ;;
      *.tar.xz)  tar xf "$1" ;;
      *.tar.zst) tar xf "$1" ;;
      *)         echo "'$1' cannot be extracted via ex()" ;;
    esac
  else
    echo "'$1' is not a valid file"
  fi
}

# youtube-dl
alias yta-aac="youtube-dl --extract-audio --audio-format aac "
alias yta-best="youtube-dl --extract-audio --audio-format best "
alias yta-flac="youtube-dl --extract-audio --audio-format flac "
alias yta-m4a="youtube-dl --extract-audio --audio-format m4a "
alias yta-mp3="youtube-dl --extract-audio --audio-format mp3 "
alias yta-opus="youtube-dl --extract-audio --audio-format opus "
alias yta-vorbis="youtube-dl --extract-audio --audio-format vorbis "
alias yta-wav="youtube-dl --extract-audio --audio-format wav "

alias ytv-best="youtube-dl -f bestvideo+bestaudio "


alias h="history -10"      # last 10 history commands
alias hc="history -c"      # clear history
alias hg="history | grep " # + command


## get top processes eating memory
alias mem5='ps auxf | sort -nr -k 4 | head -5'
alias mem10='ps auxf | sort -nr -k 4 | head -10'

## get top processes eating cpu ##
alias cpu5='ps auxf | sort -nr -k 3 | head -5'
alias cpu10='ps auxf | sort -nr -k 3 | head -10'

## list largest directories (aka "ducks")
alias dir5='du -cksh * | sort -hr | head -n 5'
alias dir10='du -cksh * | sort -hr | head -n 10'

# Safety nets
# do not delete / or prompt if deleting more than 3 files at a time
alias rm='rm -I --preserve-root'

# Prevent changing perms on /
alias chown='chown --preserve-root'
alias chmod='chmod --preserve-root'
alias chgrp='chgrp --preserve-root'

# Package management
if [ -f /usr/bin/apt ]; then
  alias update='sudo apt update'
  alias upgrade='sudo apt update && sudo apt dist-upgrade && sudo apt autoremove && sudo apt clean'
  alias install='sudo apt install'
fi
if [ -f /usr/bin/pacman ]; then
  alias update='sudo pacman -Syyy'
  alias upgrade='sudo pacman -Syu'
  alias install='sudo pacman -S'
fi


alias ..='cd ..'
alias ...='cd ../../../'
alias ....='cd ../../../../'
alias back='cd $OLDPWD'
alias c='clear'
alias cd..='cd ..'
alias cp='cp -iv'
alias chmod="chmod -c"
alias chown="chown -c"
alias df='df -h -x squashfs -x tmpfs -x devtmpfs'
alias e="vim -O "
alias E="vim -o "
alias egrep='egrep --colour=auto'
alias extip='curl icanhazip.com'
alias grep='grep --color=auto'
alias l.='ls -lhFa --time-style=long-iso --color=auto'
alias ll='ls'
alias ln='ln -iv'
alias ls='ls -lhF --color=auto --human-readable --time-style=long-iso --classify'
alias lsmount='mount | column -t'
alias mkdir='mkdir -pv'
alias mv='mv -iv'
alias ports='netstat -tulanp'
alias h='history -i 1'
alias history='history 1'
alias rm='rm -iv'
alias rmdir='rmdir -v'
alias speedtest='curl -s https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py | python -'
alias ssha='eval $(ssh-agent) && ssh-add'
alias tn='tmux new -s'
alias watch='watch -d'
alias weather='curl wttr.in'
alias wget='wget -c'

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.retry

.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets

# Local History for Visual Studio Code
.history/

# Built Visual Studio Code Extensions
*.vsix

# environment file
*.env
*.conf
*.notes
*.txt

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 Max Kowalski

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# maxmisc

Misc scripts and tools. Undocumented scripts probably do what I need them to but aren't finished yet.

## wanchors.sh | wanchplus.sh

Watches anchor files. If anchors are missing, it shuts down Docker apps such as Plex to prevent the library from being emptied.
Use cron to run the script at the desired interval, e.g. every minute. Use wanchplus.sh to also restart those containers and the merger. A rough sketch of the idea follows the cron example below.

`* * * * * /home/max/scripts/wanchors.sh`
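
Since wanchors.sh itself is not reproduced in this dump, the sketch below is illustrative only; the anchor path, container names, and log file are placeholder assumptions, not the script's actual values.

```bash
#!/usr/bin/env bash
# Illustrative sketch of an anchor-file check, not the real wanchors.sh.
# ANCHOR, APPS and WANCHLOG below are assumed placeholder values.

ANCHOR="/mnt/unionfs/anchor.bin"       # a file that should only exist while the mount is healthy
APPS=(plex sonarr radarr)              # containers to stop if the anchor disappears
WANCHLOG="/home/max/logs/wanchors.log" # where to note what happened

if [ ! -f "${ANCHOR}" ]; then
  echo "$(date '+%F %T') anchor missing, stopping: ${APPS[*]}" >>"${WANCHLOG}"
  docker stop "${APPS[@]}"
fi
```

Run from cron as shown above; wanchplus.sh layers the restart logic for the containers and the merger on top of the same check.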

## plex_futures.sh

Resets the date added in Plex to now if an item was added with a future date.
Set the DB path and docker name variables before execution.

## plex_stuckers.sh

Resets the date added in Plex to the air date or premiere date.
Set the DB path and docker name variables before execution.

--------------------------------------------------------------------------------
/atrain.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# https://github.com/maximuskowalski/maxmisc/blob/master/atrain.sh
# an A-Train installer

set -Eeuo pipefail
IFS=$'\n\t'

#________ VARS

APP=atrain
APPDIR=/opt
NETWORK=saltbox

#________ DONT CHANGE

DOCKTAG=latest
MNTPNT=${APPDIR}/${APP}
CONFIGUS="${MNTPNT}/a-train.toml"

#________ FUNCTIONS

checkoff() {
  ([ -d "${MNTPNT}" ] || dirmkr)
  ([ -e "${CONFIGUS}" ] || configo)
}

dirmkr() {
  sudo mkdir -p "${MNTPNT}" && sudo chown "${USER}":"${USER}" "${MNTPNT}"
}

# let user complete - no var replacing
# use heredoc instead of var

configo() {
  cat >"${CONFIGUS}" <"${TMPDIR}"/mycron
  uname -a >"${TMPDIR}"/system.txt
  neofetch >"${TMPDIR}"/neofetch.txt
  echo '' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  echo 'lshw info' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  sudo lshw -short >>"${TMPDIR}"/system.txt
  echo '' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  echo 'lsblk info' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  lsblk >>"${TMPDIR}"/system.txt
  echo '' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  echo 'df info' >>"${TMPDIR}"/system.txt
  echo '################' >>"${TMPDIR}"/system.txt
  df -h >>"${TMPDIR}"/system.txt
}

#________ FUNCTIONS

# check for root runners
rooter() {
  if [ "$(whoami)" = root ]; then
    echo "Running as root or with sudo is not supported. Exiting."
    exit
  fi
}

# check for existence
checkoff() {
  ([ -d "${BKUPDIR}" ] || dirmaker)
  ([ -e "${FIRSTRUN}" ] || setup)
  # check for zip / neofetch installs?
}

# make our destination dirs
dirmaker() {
  # mkdir -p "${BKUPDIR}" && sudo chown "${USER}":"${USER}" "${BKUPDIR}"
  mkdir -p {"${BKUPDIR}","${TMPDIR}","${ZIPSDIR}","${BAKDIR}","${INFDIR}"} && sudo chown "${USER}":"${USER}" {"${BKUPDIR}","${TMPDIR}","${ZIPSDIR}","${BAKDIR}","${INFDIR}"}

}

# make copies of files in "${FILESLIST}" or "${FILTLIST}"
# USE check for existence first

arrsinker() {
  ([ -e "${FILESLIST}" ] && filesinker)
  ([ -e "${FILTLIST}" ] && filtsinker)
  ([ -e "${COPYLIST}" ] && cpsinker)
  # --prune-empty-dirs ( -m )
  # -n ( for dry run )
  # --exclude="*" ( this should work from filelist )
}

filesinker() {
  rsync -a --prune-empty-dirs --files-from="${FILESLIST}" / "${TMPDIR}" >/dev/null 2>&1

}

filtsinker() {
  rsync -a --prune-empty-dirs --include-from="${FILTLIST}" / "${TMPDIR}" >/dev/null 2>&1
}

# test using `cp` instead of rsync, should only copy new / changed files
cpsinker() {
  while read -r file; do cp --parents --update "$file" "${BAKDIR}"; done <"${FILESLIST}" >/dev/null 2>&1
  # cp --parents --update -r test/1/.moo test2/
  # some loop that will do above
}

# make some zips
# config_backup.zip

# list of directories to be zipped instead of copied
dirzip() {
  ([ -e "${ZIPLIST}" ] || echo "Error: Input file does not exist.")

  # iterate over each line in the input file
  while IFS= read -r line; do
    # check if the directory exists
    if [ -d "$line" ]; then
      # navigate to the directory
      cd "$line" || return

      # zip all files in the directory
      zip -r "${ZIPSDIR}"/"${line//\//_}.zip" ./* >/dev/null 2>&1

      # navigate back to the previous directory
      cd - >/dev/null || return
    else
      echo "..."
    fi
  done <"${ZIPLIST}"

}

zipper() {
  cd "${BKUPDIR}" || return
  zip -r "${BAKFILE}" files/* zips/* >/dev/null 2>&1
}

# logrotator should be in place already?

setup() {
  # create the rotator
  # rotatormator # not currently used, file rotate happens every script run
  # create the firstrun.txt file
  firstfile
  installutils
}

# test in-script rotator instead of logrotate
alternator() {
  ls -1t "${BAKDIR}"/*.zip | tail -n +8 | xargs rm >/dev/null 2>&1
  # ls -1t "${BAKDIR}"/*.zip | tail -n +6 | xargs rm
  # find "${BAKDIR}"/*.zip -mtime +10 -delete # all files more than X (10) days old
}

# current file uses dates - change back if using logrotate
rotatormator() {

  sudo bash -c 'cat > /etc/logrotate.d/reverso' <"${FIRSTRUN}"
}

installutils() {
  if hash zip 2>/dev/null; then echo "zip already installed"; else sudo apt install zip -y; fi
  if hash lshw 2>/dev/null; then echo "lshw already installed"; else sudo apt install lshw -y; fi
  if hash neofetch 2>/dev/null; then echo "neofetch already installed"; else sudo apt install neofetch -y; fi
}

# uploader ( might want a switch for this )
uploader() {
  [ "${UPLOAD}" = TRUE ] && rclone copy -vP "${BAKDIR}"/ "${BKUPDRV}":/backups/${SRVR}/fureverso/ --drive-chunk-size=2048M --buffer-size 8192M || return
}

# finish notification
fin() {
  echo
  echo " **************************"
  echo " * ---------------------- *"
  echo " * -  backup complete!  - *"
  echo " * ---------------------- *"
  echo " **************************"
  echo
}

#______________ RUNLIST

rooter
checkoff # this will run dirmaker if needed
tempactions
arrsinker # rsync switches
dirzip
zipper
alternator # testing - may go back to logrotate
uploader
fin

--------------------------------------------------------------------------------
/plex_futures.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

PLEXDOCKER=plex
PLEXDBPATH="/opt/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases/"
PLEXDB="com.plexapp.plugins.library.db"
PLEXSQL="/opt/plexsql/Plex Media Server"

TODAY=$(date '+%Y_%d_%m__%H_%M_%S')

docker stop "${PLEXDOCKER}"
cd "${PLEXDBPATH}" || exit

cp "${PLEXDB}" "${PLEXDB}_${TODAY}.bak"

([ -e "com.plexapp.plugins.library.db-shm" ] && rm com.plexapp.plugins.library.db-shm)
([ -e "com.plexapp.plugins.library.db-wal" ] && rm com.plexapp.plugins.library.db-wal)

"${PLEXSQL}" --sqlite "${PLEXDB}" <<END_SQL
UPDATE metadata_items SET added_at = DATETIME('now') WHERE added_at > DATETIME('now');
END_SQL
docker start "${PLEXDOCKER}"
#eof

--------------------------------------------------------------------------------
/plex_futures_n_stuckers.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

PLEXDOCKER=plex
PLEXDBPATH="/opt/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases/"
PLEXDB="com.plexapp.plugins.library.db"
PLEXSQL="/opt/plexsql/Plex Media Server"

TODAY=$(date '+%Y_%d_%m__%H_%M_%S')

docker stop "${PLEXDOCKER}"
cd "${PLEXDBPATH}" || exit

cp "${PLEXDB}" "${PLEXDB}_${TODAY}.bak"

"${PLEXSQL}" --sqlite "${PLEXDB}" <<END_SQL
UPDATE metadata_items SET added_at = DATETIME('now') WHERE added_at > DATETIME('now');
UPDATE metadata_items SET added_at = originally_available_at WHERE added_at <> originally_available_at AND originally_available_at IS NOT NULL;
END_SQL
docker start "${PLEXDOCKER}"
#eof

--------------------------------------------------------------------------------
/plex_stuckers.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

PLEXDOCKER=plex
PLEXDBPATH="/opt/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases/"
PLEXDB="com.plexapp.plugins.library.db"
PLEXSQL="/opt/plexsql/Plex Media Server"

TODAY=$(date '+%Y_%d_%m__%H_%M_%S')

docker stop "${PLEXDOCKER}"
cd "${PLEXDBPATH}" || exit

cp "${PLEXDB}" "${PLEXDB}_${TODAY}.bak"

([ -e "com.plexapp.plugins.library.db-shm" ] && rm com.plexapp.plugins.library.db-shm)
([ -e "com.plexapp.plugins.library.db-wal" ] && rm com.plexapp.plugins.library.db-wal)

"${PLEXSQL}" --sqlite "${PLEXDB}" <<END_SQL
UPDATE metadata_items SET added_at = originally_available_at WHERE added_at <> originally_available_at AND originally_available_at IS NOT NULL;
END_SQL
docker start "${PLEXDOCKER}"
#eof

--------------------------------------------------------------------------------
/restore.app.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# https://github.com/maximuskowalski/maxmisc/blob/master/restore.app.sh

#________ VARS

PARENTDIR="/opt"          # Parent dir in which the app dir is nested (docker appdata)
APPDIR="plex"             # appdir to restore
RESTOREDIR=~/apprestore   # local dir to save the tar for restore
BKUPDRV=maxbackup         # rclone config name of destination share drive, eg 'google'
FILEPATH=/backups/maxical # Path on the rclone remote to the file
FILENAME=appname.tar.gz   # Name of file to restore
THEDOCKER=plex            # name of your app docker - to stop and start

TARGET="${PARENTDIR}/${APPDIR}"
RFLAGS="--drive-chunk-size=2048M --buffer-size 8192M"

#______________ FUNCTIONS

check_for_dirs() {
  ([ -d "${RESTOREDIR}" ] || make_restoredir)
  ([ -d "${TARGET}" ] || make_target)
}

make_restoredir() {
  mkdir -p "${RESTOREDIR}"
}

make_target() {
  sudo mkdir -p "${TARGET}"
  sudo chown "${USER}":"${USER}" "${TARGET}"
  chmod 775 "${TARGET}"
}

pull_files() {
  # RFLAGS is left unquoted on purpose so the flags are passed as separate arguments
  rclone copy -vP "${BKUPDRV}":"${FILEPATH}/${FILENAME}" "${RESTOREDIR}" ${RFLAGS}
}

dockerinstalled() {
  if [[ $(which docker) && $(docker --version) ]]; then
    DOCKERINST="true"
  else
    echo "docker not installed"
    DOCKERINST="false"
  fi
}

# -q = quiet, show ID only
# -f = filter output based on conditions provided
# TODO: The name filter matches on all or part of a container’s name.
# TODO: how to deal with no docker at all causing command failure
dockcheck() {
  if [ "${DOCKERINST}" = true ]; then
    if [ "$(docker ps -a -q -f name=${THEDOCKER})" ]; then
      DOCKEXIST="true"
      docker stop ${THEDOCKER}
    else
      DOCKEXIST="false"
      echo "no docker named ${THEDOCKER} running"
    fi
  fi
}

extractomate() {
  tar -xvzf "${RESTOREDIR}/${FILENAME}" -C "${PARENTDIR}"
}

dockstart() {
  if [ "${DOCKEXIST}" = true ]; then
    docker start ${THEDOCKER}
  else
    echo "no docker named ${THEDOCKER} to start"
  fi
}

exiting() {
  echo
  echo "restore complete"
  echo
}

#______________ SET LIST

check_for_dirs
pull_files
dockerinstalled
dockcheck
extractomate
dockstart
exiting

--------------------------------------------------------------------------------
/sarotund.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# https://github.com/maximuskowalski/maxmisc/blob/master/sarotund.sh
# a SARotate installer
# https://github.com/saltydk/SARotate

set -Eeuo pipefail
IFS=$'\n\t'

# Grab service files dir
# and semi config... maybe later
# systemd? maybe var

#________ VARS

APP=sarotate
APPDIR=/opt
SYSDINST=true # creates systemd file and enables but does not start

#________ DONT CHANGE

MNTPNT=${APPDIR}/${APP}
CRNTVERS=https://github.com/saltydk/SARotate/releases/download/v1.0.1/SARotate
CONFIGUS="${MNTPNT}/config.yaml.sample"

#________ FUNCTIONS

rooter() {
  if [ "$(whoami)" = root ]; then
    echo "Running as root or with sudo is not supported. Exiting."
    exit
  fi
}

checkoff() {
  ([ -d "${MNTPNT}" ] || dirmkr)
}

dirmkr() {
  sudo mkdir -p "${MNTPNT}" && sudo chown "${USER}":"${USER}" "${MNTPNT}"
}

fetching() {
  wget -c "${CRNTVERS}" -O "${MNTPNT}/SARotate"
  chmod +x "${MNTPNT}/SARotate"
}

configo() {
  cat >"${CONFIGUS}" <'
EOF
}

sysdcheck() {
  ([ "${SYSDINST}" = true ] && sysdmaker && enabler) || :
}

sysdmaker() {
  sudo bash -c 'cat > /etc/systemd/system/sarotate.service' < /etc/systemd/system/sarotate.timer' <> ${WANCHLOG}
}

checker

#

--------------------------------------------------------------------------------
/ziplist.txt.sample:
--------------------------------------------------------------------------------
# an explicit list of directories
# to be zipped before backing up
# no wildcards
#
/opt/scripts
/opt/sa
--------------------------------------------------------------------------------
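
For context on how a list like this is consumed: the dirzip() function in the backup script above zips each listed directory into ZIPSDIR, building the archive name from the path by replacing every slash with an underscore. A quick illustration of that parameter expansion, using the first sample entry above:

  line="/opt/scripts"
  echo "${line//\//_}.zip"   # prints: _opt_scripts.zip

So each directory in the list ends up as its own flattened-name zip inside the zips folder before zipper() rolls everything into the final archive.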