├── MIT-LICENSE ├── README ├── adjust_srt.pl ├── android_dn.sh ├── backup_md5db.sh ├── bluray_remux2hevc.sh ├── break_name.sh ├── browser_kill.sh ├── browser_ram.sh ├── browser_restore.sh ├── ch_case.sh ├── ch_perm.sh ├── compare_dirs.sh ├── corrupt_fn.sh ├── cue_time_add.sh ├── cue_time_subtract.sh ├── cuebin_extract.sh ├── cueflac_split.sh ├── dnf_reinstall.sh ├── dup_dir_ln.sh ├── echo2printf.sh ├── extract_subs.sh ├── fat32_copy.pl ├── filter_nicks.sh ├── find_movie_dups.sh ├── fix_beos_images.sh ├── fix_unicode_srt.pl ├── flac2lossy.pl ├── flac_tags.sh ├── flac_tree.sh ├── flash_usb.sh ├── flatten_ident.sh ├── format_lowlevel.sh ├── free_ram.sh ├── fuck_your_system_up.sh ├── get_pids.sh ├── hdd_dump.sh ├── imdb.sh ├── import.pl ├── kill_ram_hog.sh ├── loadavg.sh ├── lower_volume_pw.sh ├── md5db_fast.pl ├── merge_srt.pl ├── merge_subs.sh ├── mkv2srt.sh ├── no_comments.sh ├── nv_shield_dn.sh ├── old_drives.sh ├── packer.sh ├── parse_srt.pl ├── print_function.sh ├── rarbg_subs.sh ├── recursive_repack_7z.sh ├── reformat_script.sh ├── replaygain.pl ├── return_of_the_triad.sh ├── rm_dup_dirs.pl ├── rm_dup_lines.sh ├── rm_lines.sh ├── rm_md5sum.pl ├── rm_newlines.sh ├── rm_old_kernels.sh ├── rm_quotes.sh ├── round_srt.pl ├── show_undeclared.sh ├── sort_pix.pl ├── sort_roms.sh ├── start_handbrake.sh ├── stop_handbrake.sh ├── sub_lang.sh ├── to_utf8.sh ├── tracker_list.sh ├── tracklist.sh ├── vnstat.sh ├── web_title.sh ├── wolfendoom.sh └── yt_encode.sh /MIT-LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2022 linux4ever07 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to 
do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | A collection of scripts that I've written and refined over the years 2 | ==================================================================== 3 | 4 | I'm uploading my scripts here in the hope that they will be useful to 5 | other people, besides just myself. A brief description of the intended 6 | use of each script is provided in the top comments of each script. 7 | 8 | Live long and prosper 🖖 9 | 10 | /linux4ever07 11 | -------------------------------------------------------------------------------- /android_dn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to find Android settings, lost files, volume 4 | # information, and trash directories, list their size and content, and 5 | # give the user the option to remove them. 6 | 7 | set -eo pipefail 8 | 9 | # Creates a function, called 'usage', which will print usage 10 | # instructions and then quit. 11 | usage () { 12 | printf '\n%s\n\n' "Usage: $(basename "$0") [directory]" 13 | exit 14 | } 15 | 16 | # If the number of arguments given to the script is not 1, and that 17 | # argument is not a directory, quit. 
# Creates a function, called 'menu'. It displays 2 menus. First it
# displays the directories found, and once a directory is selected it
# displays options ('list' and 'remove').
#
# Globals read:    dirs (candidate directories), regex (patterns),
#                  pause_msg (confirmation prompt)
# Globals written: if[dn_tmp], if[fn]; 'dirs' shrinks on removal
# Side effects:    clears the screen, reads from the terminal, and can
#                  PERMANENTLY delete a directory tree with 'rm -rf'.
menu () {
	declare date n size

	# Directory menu.
	clear

	printf '\nChoose directory:\n\n'

	# Print each directory with its index and size ('du -BM' reports in
	# megabytes; the regex keeps only the leading "<digits>M" part).
	for (( i = 0; i < ${#dirs[@]}; i++ )); do
		if[dn_tmp]="${dirs[${i}]}"
		size=$(du -BM -s "${if[dn_tmp]}" | grep -Eo "${regex[size]}")

		printf '%s) %s (%s)\n' "$i" "${if[dn_tmp]}" "$size"
	done

	printf '\n'
	read -p '>'

	# Anything that is not a whole number is treated as "go back".
	if [[ ! $REPLY =~ ${regex[num]} ]]; then
		return
	fi

	if[dn_tmp]="${dirs[${REPLY}]}"
	n="$REPLY"

	# A number outside the menu range indexes an empty element; bail out.
	if [[ -z ${if[dn_tmp]} ]]; then
		return
	fi

	# Options menu.
	clear

	printf '\n%s\n\n' "${if[dn_tmp]}"
	printf 'Choose action:\n\n'
	printf '(l) list\n'
	printf '(r) remove\n\n'

	read -p '>'

	case "$REPLY" in
		'l')
			declare -a files

			# '2>&-' silences find's permission errors by closing stderr.
			mapfile -t files < <(find "${if[dn_tmp]}" -type f 2>&-)

			# Page the file list (with each file's modification date)
			# through 'less'.
			for (( i = 0; i < ${#files[@]}; i++ )); do
				if[fn]="${files[${i}]}"
				date=$(stat -c '%y' "${if[fn]}" | grep -Eo "${regex[date]}")

				printf '%s (%s)\n' "${if[fn]}" "$date"
			done | less

			unset -v files
		;;
		'r')
			printf '\n'
			read -p "$pause_msg"

			# Only an exact 'y' confirms the removal.
			if [[ $REPLY != 'y' ]]; then
				return
			fi

			# Drop the chosen element, then re-pack the array so the
			# indices shown in the menu stay contiguous.
			unset dirs["${n}"]
			dirs=("${dirs[@]}")

			# Destructive: recursively deletes the selected directory.
			rm -rf "${if[dn_tmp]}"
		;;
		*)
			return
		;;
	esac
}
# Creates a function, called 'get_files', which prints every 'md5.db'
# file found at exactly the given depth under the given directory.
#
# Arguments: $1 - directory to search
#            $2 - depth (used as both -mindepth and -maxdepth)
get_files () {
	declare dn_tmp depth_tmp

	dn_tmp="$1"
	depth_tmp="$2"

	find "$dn_tmp" -mindepth "$depth_tmp" -maxdepth "$depth_tmp" -type f -name 'md5.db'
}
# Creates a function, called 'break_name', which will break up the input
# file name into words. The words are stored in the global arrays
# 'bname_dots', 'bname_hyphens', 'bname_underscores' and 'bname_spaces',
# one array per candidate separator. The globals 'elements' and
# 'type_tmp' end up describing the separator that produced the most
# words, i.e. the most likely real word separator.
break_name () {
	declare fn_tmp sep
	declare -a sep_types
	declare -A word_counts

	# Squeeze runs of spaces into a single space.
	fn_tmp=$(sed -E 's/ +/ /g' <<<"$1")

	sep_types=('dots' 'hyphens' 'underscores' 'spaces')

	# Split the name on every candidate separator.
	mapfile -d'.' -t bname_dots <<<"$fn_tmp"
	mapfile -d'-' -t bname_hyphens <<<"$fn_tmp"
	mapfile -d'_' -t bname_underscores <<<"$fn_tmp"
	mapfile -d' ' -t bname_spaces <<<"$fn_tmp"

	# The here-string appends a newline; strip it from the last element
	# of each array.
	bname_dots[-1]="${bname_dots[-1]%$'\n'}"
	bname_hyphens[-1]="${bname_hyphens[-1]%$'\n'}"
	bname_underscores[-1]="${bname_underscores[-1]%$'\n'}"
	bname_spaces[-1]="${bname_spaces[-1]%$'\n'}"

	# Remember how many words each separator produced.
	word_counts[dots]="${#bname_dots[@]}"
	word_counts[hyphens]="${#bname_hyphens[@]}"
	word_counts[underscores]="${#bname_underscores[@]}"
	word_counts[spaces]="${#bname_spaces[@]}"

	elements=0

	# The separator that produced the most words wins; on a tie the
	# earlier entry in 'sep_types' is kept (strict greater-than).
	for sep in "${sep_types[@]}"; do
		if [[ ${word_counts[${sep}]} -gt $elements ]]; then
			elements="${word_counts[${sep}]}"
			type_tmp="$sep"
		fi
	done
}
# Creates a function, called 'break_name_find', which tries to match
# a file name against a list of known scene-release tags. It prints the
# number of words that contain at least one tag.
#
# Globals read: elements, type_tmp, and the matching bname_* array
#               (all set beforehand by 'break_name').
break_name_find () {
	declare n matches word tag_tmp
	declare -a scene_tags

	matches=0

	# All the different scene tags to look for in each file name.
	scene_tags=(720p 1080p 2160p screener hc dvb hdtv tvrip webrip webdl web-dl hddvd hd-dvd bluray blu-ray bdrip dvdrip divx xvid h264 x264 avc h265 x265 hevc dts ac3 pcm vorbis aac mp3)

	# Walk the chosen word list; a word counts at most once, no matter
	# how many tags it contains (hence the inner 'break').
	for (( n = 0; n < elements; n++ )); do
		array_ref="bname_${type_tmp}[${n}]"
		word="${!array_ref}"

		if [[ -z $word ]]; then
			continue
		fi

		for tag_tmp in "${scene_tags[@]}"; do
			# Case-insensitive, unanchored match.
			if [[ ${word,,} =~ $tag_tmp ]]; then
				(( matches += 1 ))
				break
			fi
		done
	done

	printf '%s' "$matches"
}
135 | exit 136 | fi 137 | 138 | session="${RANDOM}-${RANDOM}" 139 | ram_limit=1000000 140 | time_limit=3600 141 | 142 | pause_msg="Restart ${name}? [y/n]: " 143 | 144 | if[og_cfg]="${browsers_info[${browser},cfg]}" 145 | if[og_cache]="${browsers_info[${browser},cache]}" 146 | 147 | if[bak_cfg]="${if[og_cfg]}-${session}" 148 | if[bak_cache]="${if[og_cache]}-${session}" 149 | 150 | of[shm_dn]="/dev/shm/${browser}-${session}" 151 | of[shm_cfg]="${of[shm_dn]}/config" 152 | of[shm_cache]="${of[shm_dn]}/cache" 153 | 154 | of[restart_fn]="${of[shm_dn]}/kill" 155 | of[tar_fn]="${HOME}/${browser}-${session}.tar" 156 | of[tar_unfinished_fn]="${of[tar_fn]}.unfinished" 157 | 158 | cwd="$PWD" 159 | 160 | start_browser () { 161 | sync 162 | 163 | printf '\n%s\n\n' "Starting ${name}..." 164 | 165 | "$cmd" &>/dev/null & 166 | pid="$!" 167 | } 168 | 169 | restart_browser () { 170 | if [[ ! -f ${of[restart_fn]} ]]; then 171 | return 172 | fi 173 | 174 | rm "${of[restart_fn]}" || exit 175 | 176 | kill_browser 177 | start_browser 178 | } 179 | 180 | check_status () { 181 | declare cmd_stdout 182 | 183 | cmd_stdout=$(ps -p "$pid" -o pid= 2>&1) 184 | 185 | return "$?" 186 | } 187 | 188 | check_ram () { 189 | declare -a free_ram ram 190 | 191 | mapfile -t free_ram < <(free | sed -E 's/[[:blank:]]+/ /g') 192 | mapfile -d' ' -t ram <<<"${free_ram[1]}" 193 | ram[-1]="${ram[-1]%$'\n'}" 194 | 195 | if [[ ${ram[6]} -lt $ram_limit ]]; then 196 | printf '\n%s\n\n' 'Running out of RAM...' 
197 | 198 | kill_browser 199 | 200 | printf '\n' 201 | 202 | read -p "$pause_msg" -t 60 203 | 204 | if [[ $REPLY == 'y' ]]; then 205 | start_browser 206 | fi 207 | 208 | if [[ $REPLY != 'y' ]]; then 209 | restore_browser 210 | 211 | exit 212 | fi 213 | fi 214 | } 215 | 216 | check_time () { 217 | time_start=$(date '+%s') 218 | 219 | if [[ $time_start -ge $time_end ]]; then 220 | time_end=$(( time_start + time_limit )) 221 | 222 | return 0 223 | fi 224 | 225 | return 1 226 | } 227 | 228 | check_hdd () { 229 | declare cfg_size hdd_free 230 | 231 | cfg_size=$(du --summarize --total --block-size=1 "$@" | tail -n 1 | grep -Eo '^[0-9]+') 232 | hdd_free=$(df --output=avail --block-size=1 "$HOME" | tail -n +2 | tr -d '[:blank:]') 233 | 234 | if [[ $cfg_size -gt $hdd_free ]]; then 235 | cat <&-) 57 | 58 | for (( i = 0; i < ${#files[@]}; i++ )); do 59 | eval if[fn]="${files[${i}]}" 60 | 61 | mapfile -d'/' -t path_parts <<<"${if[fn]}" 62 | depth[tmp]=$(( ${#path_parts[@]} - 1 )) 63 | depth[diff]=$(( depth[tmp] - depth[min] )) 64 | 65 | if [[ ${depth[diff]} -gt ${depth[max]} ]]; then 66 | depth[max]="${depth[diff]}" 67 | fi 68 | done 69 | 70 | unset -v "${vars[@]}" 71 | 72 | for (( i = depth[max]; i > 0; i-- )); do 73 | mapfile -t files < <(find "${if[dn]}" -mindepth "$i" -maxdepth "$i" -exec printf '%q\n' {} + 2>&-) 74 | 75 | for (( j = 0; j < ${#files[@]}; j++ )); do 76 | eval if[fn]="${files[${j}]}" 77 | of[dn]=$(dirname "${if[fn]}") 78 | if[bn]=$(basename "${if[fn]}") 79 | 80 | of[upper]="${if[bn]^^}" 81 | of[lower]="${if[bn],,}" 82 | 83 | of[bn]="${of[${case}]}" 84 | 85 | of[fn]="${of[dn]}/${of[bn]}" 86 | 87 | if [[ ${of[bn]} != "${if[bn]}" ]]; then 88 | printf '%s\n' "${of[fn]}" 89 | mv -n "${if[fn]}" "${of[fn]}" 90 | fi 91 | done 92 | done 93 | -------------------------------------------------------------------------------- /ch_perm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script recursively 
# Creates a function, called 'ch_perm', which will figure out if the
# name is a file or a directory, and change the permissions accordingly.
#
# Globals read: owner (either "$USER" or 'root', set by the caller)
# Arguments:    $1 - absolute path to a file or directory
# Side effects: recursively chowns/chmods $1 via sudo; for 'root' the
#               tree is made write-protected for everyone.
ch_perm () {
	declare dn
	declare -a dirs

	# Recursively hand ownership (user and group) to the chosen owner.
	sudo chown -v -R "${owner}:${owner}" "$1"

	# Current user: make everything readable and writable.
	if [[ $owner == "$USER" ]]; then
		sudo chmod -v -R +rw "$1"
	fi

	# root: strip write permission from everyone (write protection).
	if [[ $owner == 'root' ]]; then
		sudo chmod -v -R ugo-w "$1"
	fi

	# Plain files need no execute bit handling; we are done.
	if [[ -f $1 ]]; then
		return
	fi

	# '2>&-' silences find's permission errors by closing stderr.
	mapfile -t dirs < <(sudo find "$1" -type d 2>&-)

	# Directories need the execute bit so they can still be traversed.
	for (( i = 0; i < ${#dirs[@]}; i++ )); do
		dn="${dirs[${i}]}"
		sudo chmod -v ugo+x "$dn"
	done
}
The script also checks the number of files and will list 6 | # potential missing files that exist in either directory, but not in the 7 | # other. 8 | 9 | # The script takes two arguments, both being directories. 10 | 11 | set -eo pipefail 12 | 13 | declare is_md5sum dir1 dir2 dir1_size dir2_size regex 14 | declare dir1_files_elements dir1_dirs_elements dir2_files_elements dir2_dirs_elements 15 | declare dir type key dir1_f dir2_f start bn bn_md5 16 | declare dir1_files_missing_elements dir1_dirs_missing_elements 17 | declare dir2_files_missing_elements dir2_dirs_missing_elements 18 | declare md5s_mismatch_elements identical 19 | declare dn_ref fn_ref elements_ref 20 | declare -a dir1_files dir1_dirs dir2_files dir2_dirs var_list1 var_list2 var_list3 dn_parts fn_parts 21 | 22 | # Checks if the user has 'md5sum' installed. This will probably not be 23 | # the case for macOS or FreeBSD, and that's why we're checking. If such 24 | # a user wants to run this script, he / she can just change the script 25 | # to use 'md5' instead, and parse the output accordingly. 26 | is_md5sum=$(command -v md5sum) 27 | 28 | if [[ -z $is_md5sum ]]; then 29 | printf '\n%s\n\n' "This script needs 'md5sum' installed to run!" 30 | exit 31 | fi 32 | 33 | usage () { 34 | printf '\n%s\n\n' "Usage: $(basename "$0") [dir1] [dir2]" 35 | exit 36 | } 37 | 38 | # Checks if arguments are directories, and quits if they aren't. 39 | if [[ ! -d $1 || ! -d $2 ]]; then 40 | usage 41 | fi 42 | 43 | # Gets absolute path of both directories. 44 | dir1=$(readlink -f "$1") 45 | dir2=$(readlink -f "$2") 46 | 47 | # Gets the total size of both directories. 48 | dir1_size=$(du -b -s "$dir1" | grep -Eo '^[0-9]+') 49 | dir2_size=$(du -b -s "$dir2" | grep -Eo '^[0-9]+') 50 | 51 | regex='([^ a-zA-Z0-9\.\-_ ])' 52 | 53 | # Lists all the files and directories in both directories. 
54 | mapfile -t dir1_files < <(find "$dir1" -type f 2>&- | sed -E "s/${regex}/\\1/g") 55 | mapfile -t dir1_dirs < <(find "$dir1" -mindepth 1 -type d -empty 2>&- | sed -E "s/${regex}/\\1/g") 56 | mapfile -t dir2_files < <(find "$dir2" -type f 2>&- | sed -E "s/${regex}/\\1/g") 57 | mapfile -t dir2_dirs < <(find "$dir2" -mindepth 1 -type d -empty 2>&- | sed -E "s/${regex}/\\1/g") 58 | 59 | dir1_files_elements="${#dir1_files[@]}" 60 | dir1_dirs_elements="${#dir1_dirs[@]}" 61 | dir2_files_elements="${#dir2_files[@]}" 62 | dir2_dirs_elements="${#dir2_dirs[@]}" 63 | 64 | # Declares some hashes that will be used to compare the two directories. 65 | var_list1=(dir1_files_hash dir1_dirs_hash dir2_files_hash dir2_dirs_hash dir1_md5s_hash dir2_md5s_hash) 66 | var_list2=(dn_parts fn_parts start bn bn_md5) 67 | 68 | declare -A "${var_list1[@]}" 69 | 70 | # Converts the basename of all the files (in both directories) into MD5 71 | # hashes, to be more easily processed later in the script. 72 | for dir in dir1 dir2; do 73 | dn_ref="$dir" 74 | 75 | mapfile -d'/' -t dn_parts <<<"${!dn_ref}" 76 | dn_parts[-1]="${dn_parts[-1]%$'\n'}" 77 | start="${#dn_parts[@]}" 78 | 79 | for type in files dirs; do 80 | elements_ref="${dir}_${type}_elements" 81 | 82 | for (( i = 0; i < ${!elements_ref}; i++ )); do 83 | fn_ref="${dir}_${type}[${i}]" 84 | 85 | # Removes the directory name from the beginning of the string. Creating 86 | # the basename this way because it's more safe than using regex:es, if 87 | # the string contains weird characters (that are interpreted as part of 88 | # the regex). 89 | mapfile -d'/' -t fn_parts <<<"${!fn_ref}" 90 | fn_parts[-1]="${fn_parts[-1]%$'\n'}" 91 | bn=$(printf '/%s' "${fn_parts[@]:${start}}") 92 | bn="${bn:1}" 93 | 94 | # Okay, we're done messing with the string now. Now to create the MD5 95 | # hash. 
96 | bn_md5=$(md5sum -b <<<"$bn") 97 | bn_md5="${bn_md5%% *}" 98 | eval "${dir}"_"${type}"_hash["${bn_md5}"]=\""${bn}"\" 99 | done 100 | 101 | unset -v "${dir}_type" 102 | done 103 | done 104 | 105 | unset -v "${var_list2[@]}" 106 | 107 | # Generates an MD5 hash of all the basenames that exist in both 108 | # directories. This is faster than checking the MD5 hash of *all* the 109 | # files. We only need to check the file names that exist in both 110 | # directories. 111 | for key in "${!dir1_files_hash[@]}"; do 112 | dir1_f="${dir1}/${dir1_files_hash[${key}]}" 113 | 114 | if [[ ${dir2_files_hash[${key}]} ]]; then 115 | dir2_f="${dir2}/${dir2_files_hash[${key}]}" 116 | 117 | dir1_md5s_hash["${key}"]=$(md5sum -b "$dir1_f") 118 | dir1_md5s_hash["${key}"]="${dir1_md5s_hash[${key}]%% *}" 119 | dir2_md5s_hash["${key}"]=$(md5sum -b "$dir2_f") 120 | dir2_md5s_hash["${key}"]="${dir2_md5s_hash[${key}]%% *}" 121 | fi 122 | done 123 | 124 | # Compares the two directories to see if files or directories are 125 | # missing. 
126 | var_list3=(dir1_files_missing dir1_dirs_missing dir2_files_missing dir2_dirs_missing md5s_mismatch) 127 | 128 | declare -a "${var_list3[@]}" 129 | 130 | # Files 131 | for key in "${!dir1_files_hash[@]}"; do 132 | if [[ -z ${dir2_files_hash[${key}]} ]]; then 133 | dir2_files_missing+=("${dir1_files_hash[${key}]}") 134 | elif [[ ${dir1_md5s_hash[${key}]} != "${dir2_md5s_hash[${key}]}" ]]; then 135 | md5s_mismatch+=("${dir1_files_hash[${key}]}") 136 | fi 137 | done 138 | 139 | for key in "${!dir2_files_hash[@]}"; do 140 | if [[ -z ${dir1_files_hash[${key}]} ]]; then 141 | dir1_files_missing+=("${dir2_files_hash[${key}]}") 142 | fi 143 | done 144 | 145 | # Directories 146 | for key in "${!dir1_dirs_hash[@]}"; do 147 | if [[ -z ${dir2_dirs_hash[${key}]} ]]; then 148 | dir2_dirs_missing+=("${dir1_dirs_hash[${key}]}") 149 | fi 150 | done 151 | 152 | for key in "${!dir2_dirs_hash[@]}"; do 153 | if [[ -z ${dir1_dirs_hash[${key}]} ]]; then 154 | dir1_dirs_missing+=("${dir2_dirs_hash[${key}]}") 155 | fi 156 | done 157 | 158 | unset -v "${var_list1[@]}" 159 | 160 | dir1_files_missing_elements="${#dir1_files_missing[@]}" 161 | dir1_dirs_missing_elements="${#dir1_dirs_missing[@]}" 162 | dir2_files_missing_elements="${#dir2_files_missing[@]}" 163 | dir2_dirs_missing_elements="${#dir2_dirs_missing[@]}" 164 | md5s_mismatch_elements="${#md5s_mismatch[@]}" 165 | 166 | identical='1' 167 | 168 | # Prints the result. 
# Creates a function, called 'print_list', which prints the sorted
# contents of the array whose *name* is held in the global 'type'
# variable, followed by a blank line. The array is unset afterwards,
# since each list only needs to be printed once.
print_list () {
	declare -a list_tmp

	# Indirectly expand every element of the array named by 'type'.
	fn_ref="${type}[@]"
	list_tmp=("${!fn_ref}")

	printf '%s\n' "${list_tmp[@]}" | sort

	# Free the array now that it has been printed.
	unset -v "$type"

	printf '\n'
}
# Creates a function, called 'time_convert', which converts track
# timestamps back and forth between the time (mm:ss:ff) format and
# frames / sectors. There are 75 frames in one second, according to the
# cue sheet specification.
#
# Arguments: $1 - a frame count, or a timestamp in mm:ss:ff format
# Outputs:   the converted value on stdout; input that matches neither
#            format is printed back unchanged
time_convert () {
	declare time m s f re_frames re_time

	time="$1"

	# These patterns mirror the global 'format' array, but are kept
	# locally so the function still works if the globals are unset (an
	# unset global would expand to an empty regex, which matches
	# anything and silently corrupts the result).
	re_frames='^[0-9]+$'
	re_time='^([0-9]{2,}):([0-9]{2}):([0-9]{2})$'

	# If argument is in the mm:ss:ff format...
	if [[ $time =~ $re_time ]]; then
		# Strip one leading zero so the fields are not read as octal.
		m="${BASH_REMATCH[1]#0}"
		s="${BASH_REMATCH[2]#0}"
		f="${BASH_REMATCH[3]#0}"

		# Converts minutes and seconds to frames, and adds all the
		# numbers together.
		m=$(( m * 60 * 75 ))
		s=$(( s * 75 ))

		time=$(( m + s + f ))

	# If argument is in the frame format...
	elif [[ $time =~ $re_frames ]]; then
		f="$time"

		# Converts frames to seconds and minutes.
		s=$(( f / 75 ))
		m=$(( s / 60 ))

		f=$(( f % 75 ))
		s=$(( s % 60 ))

		time=$(printf '%02d:%02d:%02d' "$m" "$s" "$f")
	fi

	printf '%s' "$time"
}
77 | in=$(time_convert "$in") 78 | (( frames += in )) 79 | out=$(time_convert "$frames") 80 | 81 | # Prints the current total time in the mm:ss:ff format. 82 | printf '\n*** Track %d start: %s ***\n' "$track_n" "$out" 83 | done 84 | -------------------------------------------------------------------------------- /cueflac_split.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script looks for CUE/FLAC files (single file FLAC albums) in the 4 | # directories passed to it as arguments. It then proceeds to split the 5 | # FLAC files it finds into separate tracks. Lastly, it copies the tags 6 | # (if available) from the CUE file to the newly split tracks. You need 7 | # 'cuetools' and 'shntool' to run this script. 8 | 9 | # The output files are put here: 10 | # ${HOME}/split-tracks/${album} 11 | 12 | declare line 13 | declare -a cmd dirs files_in files_out 14 | declare -a format 15 | declare -A if of regex 16 | 17 | format[0]='^[0-9]+$' 18 | format[1]='^([0-9]{2,}):([0-9]{2}):([0-9]{2})$' 19 | format[2]='[0-9]{2,}:[0-9]{2}:[0-9]{2}' 20 | format[3]='^(FILE) +(.*) +(.*)$' 21 | format[4]='^(TRACK) +([0-9]{2,}) +(.*)$' 22 | format[5]="^(PREGAP) +(${format[2]})$" 23 | format[6]="^(INDEX) +([0-9]{2,}) +(${format[2]})$" 24 | format[7]="^(POSTGAP) +(${format[2]})$" 25 | 26 | regex[blank]='^[[:blank:]]*(.*)[[:blank:]]*$' 27 | regex[quotes]='^\"(.*)\"$' 28 | regex[path]='^(.*[\\\/])(.*)$' 29 | regex[fn]='^(.*)\.([^.]*)$' 30 | 31 | # Creates an array of the list of commands needed by this script. 32 | cmd=('cuebreakpoints' 'shnsplit') 33 | 34 | of[dn]="${HOME}/split-tracks" 35 | 36 | # Creates a function, called 'usage', which will print usage 37 | # instructions and then quit. 
# Creates a function, called 'check_cmd', which will check if the
# necessary commands are installed. If any of the commands are missing,
# print the packages that provide them and quit.
#
# Arguments: the commands to check (the caller passes "${cmd[@]}")
check_cmd () {
	declare cmd_tmp
	declare -a missing_pkg
	declare -A cmd_pkg

	# Saves the package names of the commands that are needed by the
	# script.
	cmd_pkg["${cmd[0]}"]='cuetools'
	cmd_pkg["${cmd[1]}"]='shntool'

	# Check the arguments that were actually passed in, instead of
	# silently re-reading the global 'cmd' array. Discard stdout with
	# /dev/null rather than closing it (1>&-): writing to a closed
	# stdout makes the 'command' builtin fail with a write error, which
	# flagged every command as missing, even installed ones.
	for cmd_tmp in "$@"; do
		if ! command -v "$cmd_tmp" 1>/dev/null; then
			missing_pkg+=("$cmd_tmp")
		fi
	done

	if [[ ${#missing_pkg[@]} -gt 0 ]]; then
		printf '\n%s\n\n' 'You need to install the following through your package manager:'

		for cmd_tmp in "${missing_pkg[@]}"; do
			printf '%s\n' "${cmd_pkg[${cmd_tmp}]}"
		done

		printf '\n'

		exit
	fi
}
118 | for (( j = 0; j < ${#lines[@]}; j++ )); do 119 | line="${lines[${j}]}" 120 | 121 | if [[ $line =~ ${format[3]} ]]; then 122 | line="${BASH_REMATCH[2]}" 123 | 124 | if [[ $line =~ ${regex[quotes]} ]]; then 125 | line="${BASH_REMATCH[1]}" 126 | fi 127 | 128 | if [[ $line =~ ${regex[path]} ]]; then 129 | line="${BASH_REMATCH[2]}" 130 | fi 131 | 132 | files+=("$line") 133 | 134 | continue 135 | fi 136 | 137 | if [[ $line =~ ${format[4]} ]]; then 138 | line="${BASH_REMATCH[2]}" 139 | 140 | tracks+=("$line") 141 | 142 | continue 143 | fi 144 | done 145 | 146 | if [[ ${#files[@]} -gt 1 || ${#tracks[@]} -eq 1 ]]; then 147 | unset -v lines files tracks 148 | 149 | continue 150 | fi 151 | 152 | fn="${files[0]}" 153 | 154 | unset -v lines files tracks 155 | 156 | album="$fn" 157 | 158 | if [[ $fn =~ ${regex[fn]} ]]; then 159 | ext="${BASH_REMATCH[2],,}" 160 | 161 | if [[ $ext != 'flac' ]]; then 162 | unset -v album fn ext 163 | 164 | continue 165 | fi 166 | 167 | album="${BASH_REMATCH[1]}" 168 | fi 169 | 170 | of[album_dn]="${of[dn]}/${album}" 171 | 172 | if [[ -d ${of[album_dn]} ]]; then 173 | unset -v album fn ext 174 | 175 | continue 176 | fi 177 | 178 | mkdir -p "${of[album_dn]}" 179 | cd "${of[album_dn]}" 180 | 181 | cuebreakpoints "${if[cue]}" | shnsplit -O always -o flac -- "${if[cue_dn]}/${fn}" 182 | cuetag.sh "${if[cue]}" split-track*.flac 183 | 184 | unset -v album fn ext 185 | done 186 | -------------------------------------------------------------------------------- /dnf_reinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script either reinstalls every RPM package on the system, or 4 | # verifies all RPMs to see which ones are broken, and reinstalls those. 5 | # Run with either 'full' or 'verify' as an argument. 6 | 7 | declare mode 8 | 9 | # Creates a function, called 'usage', which will print usage 10 | # instructions and then quit. 
# Creates a function, called 'dnf_install', which will reinstall the
# packages listed in the global 'dnf_pkgs' array, one at a time. Each
# package that reinstalls successfully is logged to "$txt_fn".
dnf_install () {
	declare i rpm

	touch "$txt_fn"

	for (( i = 0; i < ${#dnf_pkgs[@]}; i++ )); do
		rpm="${dnf_pkgs[${i}]}"

# Tests the command directly instead of inspecting '$?' afterwards,
# which is fragile (any command inserted in between clobbers it).
		if dnf -y reinstall "$rpm"; then
			printf '%s\n' "$rpm" >> "$txt_fn"
		fi
	done
}
I removed some files 12 | # and added some files in the new version of the torrent, and the rest 13 | # of the files are symlinks. 14 | 15 | # The permissions of the output directory will be root for owner, and 16 | # group, with only read permissions for everyone else. Permissions only 17 | # affect the created sub-directories in the output directory, not the 18 | # symlinks. 19 | 20 | set -eo pipefail 21 | 22 | # Creates a function, called 'usage', which will print usage 23 | # instructions and then quit. 24 | usage () { 25 | printf '\n%s\n\n' "Usage: $(basename "$0") [in_dir] [out_dir]" 26 | exit 27 | } 28 | 29 | # If the script isn't run with sudo / root privileges, then quit. 30 | if [[ $EUID -ne 0 ]]; then 31 | printf '\n%s\n\n' 'You need to be root to run this script!' 32 | exit 33 | fi 34 | 35 | if [[ ! -d $1 || -z $2 ]]; then 36 | usage 37 | elif [[ -e $2 ]]; then 38 | printf '\n%s\n\n' "\"${2}\" already exists!" 39 | exit 40 | fi 41 | 42 | declare pause_msg start stop 43 | declare -a files dn_parts fn_parts 44 | declare -A if of 45 | 46 | if[dn]=$(readlink -f "$1") 47 | of[dn]=$(readlink -f "$2") 48 | 49 | pause_msg=" 50 | You're about to recursively symlink: 51 | \"${if[dn]}\" 52 | 53 | To: 54 | \"${of[dn]}\" 55 | 56 | Are you sure? [y/n]: " 57 | 58 | read -p "$pause_msg" 59 | 60 | if [[ $REPLY != 'y' ]]; then 61 | exit 62 | fi 63 | 64 | mapfile -d'/' -t dn_parts <<<"${if[dn]}" 65 | dn_parts[-1]="${dn_parts[-1]%$'\n'}" 66 | start="${#dn_parts[@]}" 67 | 68 | mapfile -t files < <(find "${if[dn]}" -type f 2>&-) 69 | 70 | for (( i = 0; i < ${#files[@]}; i++ )); do 71 | if[fn]="${files[${i}]}" 72 | 73 | # Removes the directory name from the beginning of the string. Creating 74 | # the basename this way because it's more safe than using regex:es, if 75 | # the string contains weird characters (that are interpreted as part of 76 | # the regex). 
77 | mapfile -d'/' -t fn_parts <<<"${if[fn]}" 78 | fn_parts[-1]="${fn_parts[-1]%$'\n'}" 79 | stop=$(( (${#fn_parts[@]} - ${#dn_parts[@]}) - 1 )) 80 | of[dn_tmp]=$(printf '/%s' "${fn_parts[@]:${start}:${stop}}") 81 | of[dn_tmp]="${of[dn_tmp]:1}" 82 | of[bn]="${fn_parts[-1]}" 83 | 84 | of[dn_tmp]="${of[dn]}/${of[dn_tmp]}" 85 | of[fn]="${of[dn_tmp]}/${of[bn]}" 86 | 87 | mkdir -p "${of[dn_tmp]}" 88 | ln -s "${if[fn]}" "${of[fn]}" 89 | done 90 | 91 | # Changes the owner and permissions of the output directory. 92 | chown -R root:root "${of[dn]}" 93 | chmod -R +r "${of[dn]}" 94 | -------------------------------------------------------------------------------- /echo2printf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to replace 'echo' commands in Bash scripts, with 4 | # 'printf'. Both echo and printf exist as Bash built-ins (as well as 5 | # external commands). When they are run, the built-in takes precedence. 6 | 7 | # I prefer to use printf over echo, as it's more flexible in my opinion. 8 | 9 | # There's still a need to go through the output script file manually 10 | # after having run it through this script. Because the string that was 11 | # passed to 'echo' in the input file might have newlines in it. Those 12 | # newlines need to be added to the '%s' part of the printf command, and 13 | # removed from the string. 14 | 15 | # Also, depending on the use case of the original echo commands, the 16 | # '\n', in the '%s' part of the printf command might not be necessary. 17 | # Such as when passing strings between functions. 18 | 19 | # The output script file replaces the input file. 20 | 21 | # Creates a function, called 'usage', which will print usage 22 | # instructions and then quit. 23 | usage () { 24 | printf '\n%s\n\n' "Usage: $(basename "$0") [file]" 25 | exit 26 | } 27 | 28 | if [[ ! 
# Creates a function, called 'usage', which will print one of the
# script's messages (selected by its numeric argument) and then quit.
# 0: missing dependency, 1: usage instructions, 2: no subtitles found.
usage () {
	declare -a msg

	msg[0]="You need mkvtoolnix installed to run this script."
	msg[1]="Usage: $(basename "$0") [mkv]"

# The basename is stored by the script in the 'if' hash as if[bn];
# the previous '${if_bn}' referenced a nonexistent scalar and always
# expanded to an empty string.
	msg[2]="There are no subtitles in: ${if[bn]}"
	printf '\n%s\n\n' "${msg[${1}]}"
	exit
}
-f ${if[fn]} || ${if[bn_lc]##*.} != 'mkv' ]]; then 32 | usage 1 33 | fi 34 | 35 | mapfile -t if_subs < <(mkvinfo "${if[fn]}" 2>&- | grep 'Track type: subtitles') 36 | 37 | if [[ ${#if_subs[@]} -eq 0 ]]; then 38 | usage 2 39 | fi 40 | 41 | mkvmerge --title "" -o "${of[fn]}" --no-video --no-audio --no-chapters "${if[fn]}" 42 | 43 | exit "$?" 44 | -------------------------------------------------------------------------------- /fat32_copy.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # This script copies files to FAT32 volumes, that are larger than the 4 | # filesystem allows, by splitting them up in multiple parts. Those files 5 | # can later be put back together like this: 6 | 7 | # cat file.part1 file.part2 file.part3 > file 8 | 9 | # The script is able to both copy individual files, and recursively copy 10 | # directories. It will split files that need to be split, and copy other 11 | # files normally. Files in the destination directory that have the same 12 | # name will be overwritten. 13 | 14 | use 5.34.0; 15 | use strict; 16 | use warnings; 17 | use diagnostics; 18 | use Cwd qw(abs_path); 19 | use File::Basename qw(basename); 20 | use File::Find qw(find); 21 | use File::Path qw(make_path); 22 | use File::Copy qw(copy); 23 | 24 | my($in, $out); 25 | 26 | if (scalar(@ARGV) != 2) { usage(); } 27 | if (-f $ARGV[0] or -d $ARGV[0]) { $in = abs_path($ARGV[0]); } 28 | if (-d $ARGV[1]) { $out = abs_path($ARGV[1]); } 29 | 30 | if (! length($in) or ! length($out)) { usage(); } 31 | 32 | my(%files, $start); 33 | 34 | my $size_limit = 2 ** 32; 35 | my $buffer_size = 64 * (2 ** 10); 36 | my $split = $size_limit / $buffer_size; 37 | 38 | # The 'usage' subroutine prints syntax, and then quits. 39 | sub usage { 40 | say "\n" . 'Usage: ' . basename($0) . ' [source]' . ' [destination]' . 
# The 'copy_split' subroutine copies the file $if to $of. Files larger
# than $size_limit (4 GiB, the FAT32 file-size ceiling) are written out
# as numbered parts ($of.part1, $of.part2, ...); smaller files are
# copied as-is with File::Copy.
sub copy_split {
	my $if = shift;
	my $of = shift;
	my $of_part = $of . '.part';

# Element 7 of stat() is the file size in bytes.
	my $size = (stat($if))[7];
	my($read_fn, $write_fn, $buffer);

# Number of $buffer_size chunks written to the current part so far.
# Once it reaches $split ($size_limit / $buffer_size), the part is full.
	my $read_write_n = 0;
	my $part_n = 1;

# Refuses to copy a file onto itself, which would truncate the source.
	if ($if eq $of) {
		say "
in: $if
out: $of

Can\'t copy file to itself!
";
		exit;
	}

# Small files take the fast path; large files fall through to the
# chunked read/write loop below, starting with part 1.
	if ($size > $size_limit) {
		$of = $of_part . $part_n;
	} else {
		copy($if, $of) or die "Can't copy '$if': $!";
		return;
	}

# ':raw' disables all I/O layers (newline translation, encoding) so the
# parts concatenate back to a byte-identical file.
	open($read_fn, '< :raw', $if) or die "Can't open '$if': $!";
	open($write_fn, '> :raw', $of) or die "Can't open '$of': $!";
	while (read($read_fn, $buffer, $buffer_size)) {

# Current part is full: close it and open the next numbered part.
		if ($read_write_n == $split) {
			$read_write_n = 0;
			$part_n++;
			close($write_fn) or die "Can't close '$of': $!";
			$of = $of_part . $part_n;
			open($write_fn, '> :raw', $of) or die "Can't open '$of': $!";
		}
		print $write_fn $buffer or die "Can't write to '$of': $!";
		$read_write_n++
	}
	close($read_fn) or die "Can't close '$if': $!";
	close($write_fn) or die "Can't close '$of': $!";
}
-f $1 ]]; then 17 | usage 18 | fi 19 | 20 | declare time line word nick nick_tmp nick_ref nick_utf8 nick_tmp_utf8 21 | declare -a times lines words clients 22 | declare -A if of regex nicks nicks_tmp 23 | 24 | if[fn]=$(readlink -f "$1") 25 | if[bn]=$(basename "${if[fn]}") 26 | of[fn]="${if[bn]%.*}-${RANDOM}-${RANDOM}.txt" 27 | 28 | regex[nick]='^<\+*(.*)>$' 29 | 30 | clients=('hexchat' 'irccloud' 'irssi' 'konversation') 31 | 32 | regex[hexchat]='^([[:alpha:]]+ [0-9]+ [0-9]+:[0-9]+:[0-9]+)(.*)$' 33 | regex[irccloud]='^(\[[0-9]+-[0-9]+-[0-9]+ [0-9]+:[0-9]+:[0-9]+\])(.*)$' 34 | regex[irssi]='^([0-9]+:[0-9]+)(.*)$' 35 | regex[konversation]='^(\[[[:alpha:]]+, [[:alpha:]]+ [0-9]+, [0-9]+\] \[[0-9]+:[0-9]+:[0-9]+ [[:alpha:]]+ [[:alpha:]]+\])(.*)$' 36 | 37 | # Creates a function, called 'get_client', which will figure out which 38 | # client was used to generate the IRC log in question, to be able to 39 | # parse it correctly. 40 | get_client () { 41 | declare client switch 42 | 43 | switch=0 44 | 45 | for (( z = 0; z < ${#lines[@]}; z++ )); do 46 | line="${lines[${z}]}" 47 | 48 | for client in "${clients[@]}"; do 49 | if [[ ! $line =~ ${regex[${client}]} ]]; then 50 | continue 51 | fi 52 | 53 | regex[client]="${regex[${client}]}" 54 | switch=1 55 | 56 | break 57 | done 58 | 59 | if [[ $switch -eq 1 ]]; then 60 | break 61 | fi 62 | done 63 | } 64 | 65 | # Creates a function, called 'get_nick', which will print the nick this 66 | # line belongs to. 67 | get_nick () { 68 | declare word 69 | 70 | word="${words[1]}" 71 | 72 | if [[ $word =~ ${regex[nick]} ]]; then 73 | printf '%s' "${BASH_REMATCH[1]}" 74 | fi 75 | } 76 | 77 | # Creates a function, called 'utf8_convert', which will convert all 78 | # characters in the nick to their UTF8 code. This is to be able to use 79 | # the nick as a hash element name, even if the nick contains special 80 | # characters. 
# Creates a function, called 'utf8_convert', which will convert all
# characters in the nick to their character code (e.g. 'ab' becomes
# '_61_62'). This is to be able to use the nick as a hash element name,
# even if the nick contains special characters.
utf8_convert () {
	declare z char_tmp string_in string_out

# "$*" joins all arguments into one string; "$@" is meant for list
# expansion, not scalar assignment (ShellCheck SC2124). 'z' is declared
# local so it cannot clobber the caller's loop counters.
	string_in="$*"

	for (( z = 0; z < ${#string_in}; z++ )); do
		char_tmp="${string_in:${z}:1}"

# A leading apostrophe makes printf print the character's code point.
# 'printf -v' assigns in-process, avoiding a subshell per character.
		printf -v char_tmp '_%X' "'${char_tmp}"
		string_out+="$char_tmp"
	done

	printf '%s' "$string_out"
}
$word =~ ${regex[nick_tmp]} ]]; then 158 | continue 159 | fi 160 | 161 | nick_tmp_utf8=$(utf8_convert "$nick_tmp") 162 | nicks["${nick_tmp_utf8}"]="${nick_tmp}" 163 | 164 | break 165 | done 166 | done 167 | done 168 | 169 | # This loop prints all the lines that match the nicks collected by 170 | # the previous loop. 171 | for (( i = 0; i < ${#lines[@]}; i++ )); do 172 | set_vars 173 | 174 | if [[ -z $nick_utf8 ]]; then 175 | continue 176 | fi 177 | 178 | nick_ref="nicks[${nick_utf8}]" 179 | 180 | if [[ -n ${!nick_ref} ]]; then 181 | printf '%s\n' "${time}${line}" 182 | fi 183 | done | tee "${of[fn]}" 184 | -------------------------------------------------------------------------------- /find_movie_dups.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to find duplicate movie rips on my drives. 4 | # It will only search for file names that match scene rules. 5 | 6 | # The script has 2 modes, 'name' (default) and 'imdb' (optional). 7 | 8 | # In 'name' mode, the file names will be lowercased and parsed to 9 | # extract the movie title and year. 10 | 11 | # In 'imdb' mode, the script will try to find the movie on IMDb to get a 12 | # better match, even if the file names contain slight differences. 13 | 14 | # To search all drives in 'name' mode: 15 | # find_movie_dups.sh "${HOME}" "/run/media/${USER}" 16 | 17 | # To do the same in 'imdb' mode: 18 | # find_movie_dups.sh -imdb "${HOME}" "/run/media/${USER}" 19 | 20 | # A recent version of 'break_name.sh' is required to be located in the 21 | # same directory as this script. 
22 | 23 | declare mode count key 24 | declare -a dirs files_in files_out 25 | declare -A if movie regex 26 | 27 | regex[prune]="^\/run\/media\/${USER}\/[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\/extracted_subs" 28 | regex[720p]='\(([0-9]{3,4})p_h\.264-aac\)' 29 | regex[1080p]='\(([0-9]{3,4})p_([0-9]{1,2})fps_(h264|av1)-([0-9]{2,3})kbit_(aac|opus)\)' 30 | 31 | mode='name' 32 | 33 | # Creates a function, called 'usage', which will print usage 34 | # instructions and then quit. 35 | usage () { 36 | cat <&- 139 | } 140 | 141 | # This function runs the JSON regexes and decides which JSON type is a 142 | # list and which isn't. 143 | get_list () { 144 | declare string 145 | declare -a list 146 | declare -A lists 147 | 148 | lists=(['genre']=1 ['actor']=1 ['director']=1) 149 | 150 | (( z += 1 )) 151 | 152 | # If current JSON type is not a list, match the regex and return from 153 | # this function. 154 | if [[ -z ${lists[${json_type}]} ]]; then 155 | if [[ ${tmp_array[${z}]} =~ ${regex[${json_type}2]} ]]; then 156 | imdb_info["${json_type}"]="${BASH_REMATCH[1]}" 157 | fi 158 | 159 | return 160 | fi 161 | 162 | # This loop parses JSON lists. 163 | while [[ ${tmp_array[${z}]} =~ ${regex[${json_type}2]} ]]; do 164 | list+=("${BASH_REMATCH[1]}") 165 | 166 | (( z += 1 )) 167 | 168 | if [[ ${tmp_array[${z}]} =~ ${regex[list]} ]]; then 169 | (( z += 1 )) 170 | else 171 | (( z -= 1 )) 172 | break 173 | fi 174 | done 175 | 176 | string=$(printf '%s, ' "${list[@]}") 177 | string="${string%, }" 178 | 179 | imdb_info["${json_type}"]="$string" 180 | } 181 | 182 | if [[ ${term[-1]} =~ ${regex[y]} ]]; then 183 | y="${BASH_REMATCH[1]}" 184 | unset -v term[-1] 185 | fi 186 | 187 | t=$(uriencode "${term[@]}") 188 | 189 | # Sets the type of IMDb search results to include. 
190 | 191 | # All currently available types: 192 | # feature,tv_series,short,tv_episode,tv_miniseries,tv_movie,tv_special, 193 | # tv_short,video_game,video,music_video,podcast_series,podcast_episode 194 | type='feature,tv_series,tv_miniseries,tv_movie,tv_special,video' 195 | 196 | # If the $y variable is empty, that means the year is unknown, hence we 197 | # will need to use slightly different URLs, when searching for the 198 | # movie. 199 | if [[ -z $y ]]; then 200 | url_tmp="https://www.imdb.com/search/title/?title=${t}&title_type=${type}" 201 | else 202 | url_tmp="https://www.imdb.com/search/title/?title=${t}&title_type=${type}&release_date=${y}-01-01,${y}-12-31" 203 | fi 204 | 205 | id=$(get_page "$url_tmp" | sed -nE "s/${regex[id]}.*$/\1/;s/^.*${regex[id]}/\1/p") 206 | 207 | if [[ -z $id ]]; then 208 | return 1 209 | fi 210 | 211 | url="https://www.imdb.com/${id}/" 212 | 213 | # Translate {} characters to newlines so we can parse the JSON data. 214 | # I came to the conclusion that this is the most simple, reliable and 215 | # future-proof way to get the movie information. It's possible to add 216 | # more regex:es to the for loop below, to get additional information. 217 | # Excluding lines that are longer than 500 characters, to make it 218 | # slightly faster. 219 | mapfile -t tmp_array < <(get_page "$url" | tr '{}' '\n' | grep -Ev -e '.{500}' -e '^[[:blank:]]*$') 220 | 221 | json_types=(['title']=1 ['year']=1 ['plot']=1 ['rating']=1 ['genre']=1 ['actor']=1 ['director']=1 ['runtime']=1) 222 | 223 | for (( z = 0; z < ${#tmp_array[@]}; z++ )); do 224 | if [[ ${#json_types[@]} -eq 0 ]]; then 225 | break 226 | fi 227 | 228 | for json_type in "${!json_types[@]}"; do 229 | if [[ ! 
${tmp_array[${z}]} =~ ${regex[${json_type}1]} ]]; then 230 | continue 231 | fi 232 | 233 | get_list 234 | 235 | unset -v json_types["${json_type}"] 236 | break 237 | done 238 | done 239 | 240 | cat <&- | grep -Ev "${regex[prune]}") 258 | 259 | # Look for files that match the scene naming rules. 260 | for (( i = 0; i < ${#files_in[@]}; i++ )); do 261 | set_names "${files_in[${i}]}" 262 | 263 | # If name pattern matches YouTube videos, ignore file, and continue 264 | # with the next iteration of the loop. 265 | # * (720p_H.264-AAC).mp4 266 | # * (1080p_30fps_H264-128kbit_AAC).mp4 267 | if [[ ${if[bn_lc]} =~ ${regex[720p]} || ${if[bn_lc]} =~ ${regex[1080p]} ]]; then 268 | continue 269 | fi 270 | 271 | # Try to find at least 2 scene tag matches for the current name. 272 | count=$(break_name_find "${if[bn_lc]}") 273 | 274 | # If name contains at least 2 scene tags, continue on. 275 | if [[ $count -ge 2 ]]; then 276 | files_out+=("${if[fn]}") 277 | fi 278 | done 279 | 280 | unset -v files_in 281 | 282 | for (( i = 0; i < ${#files_out[@]}; i++ )); do 283 | set_names "${files_out[${i}]}" 284 | 285 | unset -v name_tmp imdb_tmp info 286 | 287 | declare -a name_tmp imdb_tmp 288 | declare -A info 289 | 290 | mapfile -t name_tmp < <(break_name_parse "${if[bn_lc]}") 291 | 292 | if [[ $mode == 'name' ]]; then 293 | info[name]="${name_tmp[0]} (${name_tmp[1]})" 294 | 295 | info[id]=$(md5sum -b <<<"${info[name]}") 296 | info[id]="${info[id]%% *}" 297 | fi 298 | 299 | if [[ $mode == 'imdb' ]]; then 300 | if [[ ${name_tmp[1]} != '0000' ]]; then 301 | mapfile -t imdb_tmp < <(imdb "${name_tmp[0]} (${name_tmp[1]})") 302 | else 303 | mapfile -t imdb_tmp < <(imdb "${name_tmp[0]}") 304 | fi 305 | 306 | info[name]="${imdb_tmp[0]} (${imdb_tmp[1]}): (${imdb_tmp[2]})" 307 | 308 | info[id]="${imdb_tmp[3]}" 309 | fi 310 | 311 | if [[ -z ${info[id]} ]]; then 312 | continue 313 | fi 314 | 315 | if [[ -z ${movie[${info[id]}]} ]]; then 316 | movie["${info[id]}"]+="${info[name]}\n" 317 | fi 318 | 319 
| movie["${info[id]}"]+="${if[fn]}\n" 320 | done 321 | 322 | unset -v name_tmp imdb_tmp info 323 | 324 | for key in "${!movie[@]}"; do 325 | mapfile -t files_out < <(printf '%b' "${movie[${key}]}") 326 | 327 | if [[ ${#files_out[@]} -ge 3 ]]; then 328 | printf '*** ID: %s\n\n' "${files_out[0]}" 329 | 330 | unset -v files_out[0] 331 | 332 | printf '%s\n' "${files_out[@]}" | sort 333 | printf '\n' 334 | fi 335 | done 336 | -------------------------------------------------------------------------------- /fix_beos_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to separate the tracks of BeOS disc images 4 | # (CUE/BIN), rename the tracks, and backup the 1st track (the boot 5 | # floppy image) to a created sub-directory. 6 | 7 | # The BeOS disc image directory is recursively lowercased, before doing 8 | # anything else. This is to make sure there will be no name conflicts. 9 | 10 | # This script depends on: 11 | # * ch_case.sh 12 | # * cuebin_extract.sh 13 | 14 | set -eo pipefail 15 | 16 | # Creates a function, called 'usage', which will print usage 17 | # instructions and then quit. 18 | usage () { 19 | printf '\n%s\n\n' "Usage: $(basename "$0") [dir]" 20 | exit 21 | } 22 | 23 | if [[ ! 
-d $1 ]]; then 24 | usage 25 | fi 26 | 27 | declare session line name md5 size size_limit 28 | declare -a cue_files bin_files cue_dirs lines format match 29 | declare -A if of regex 30 | 31 | format[0]='^[0-9]+$' 32 | format[1]='^([0-9]{2,}):([0-9]{2}):([0-9]{2})$' 33 | format[2]='[0-9]{2,}:[0-9]{2}:[0-9]{2}' 34 | format[3]='^(FILE) +(.*) +(.*)$' 35 | format[4]='^(TRACK) +([0-9]{2,}) +(.*)$' 36 | format[5]="^(PREGAP) +(${format[2]})$" 37 | format[6]="^(INDEX) +([0-9]{2,}) +(${format[2]})$" 38 | format[7]="^(POSTGAP) +(${format[2]})$" 39 | 40 | regex[blank]='^[[:blank:]]*(.*)[[:blank:]]*$' 41 | regex[quotes]='^\"(.*)\"$' 42 | regex[path]='^(.*[\\\/])(.*)$' 43 | regex[dn]='^(.*)-[0-9]+-[0-9]+$' 44 | regex[cuebin]='^(.*)[0-9]{2}_cdr.cue$' 45 | 46 | session="${RANDOM}-${RANDOM}" 47 | 48 | size_limit=3145728 49 | 50 | if[dn]=$(readlink -f "$1") 51 | of[dn_cuebin]="${if[dn]}/cuebin" 52 | of[dn_floppy]="${if[dn]}/floppy" 53 | 54 | # Creates a function, called 'get_files', which will be used to generate 55 | # file lists to be used by other functions. 
# Creates a function, called 'get_files', which will be used to generate
# file lists to be used by other functions. Each argument is a glob
# pattern; all matching file names are printed in numerical order.
get_files () {
	declare glob

	for glob in "$@"; do
# 'compgen -G' returns non-zero when a pattern has no matches. Under
# the script's 'set -e' that would abort this loop subshell and
# silently drop every remaining pattern (e.g. "*.cdr" after an
# unmatched "*.bin"), so the failure is explicitly ignored.
		compgen -G "$glob" || :
	done | sort -n
}
104 | if [[ ${match[1]} =~ ${regex[path]} ]]; then 105 | match[1]="${BASH_REMATCH[2]}" 106 | fi 107 | 108 | match[1]="\"${match[1],,}\"" 109 | 110 | line="${match[@]}" 111 | fi 112 | 113 | printf '%s\r\n' "$line" >> "${of[cue]}" 114 | done 115 | 116 | cuebin_extract.sh "${of[cue]}" -cdr 117 | done 118 | 119 | mapfile -t cue_dirs < <(find "${of[dn_cuebin]}" -mindepth 1 -maxdepth 1 -type d) 120 | 121 | for (( i = 0; i < ${#cue_dirs[@]}; i++ )); do 122 | if[dn]="${cue_dirs[${i}]}" 123 | 124 | cd "${if[dn]}" || exit 125 | 126 | mapfile -t cue_files < <(get_files "*.cue") 127 | mapfile -t bin_files < <(get_files "*.bin" "*.cdr") 128 | 129 | if [[ ${#cue_files[@]} -eq 0 ]]; then 130 | continue 131 | fi 132 | 133 | if [[ ${#bin_files[@]} -lt 2 ]]; then 134 | continue 135 | fi 136 | 137 | if [[ ! ${cue_files[0]} =~ ${regex[cuebin]} ]]; then 138 | continue 139 | fi 140 | 141 | name="${BASH_REMATCH[1]}" 142 | 143 | mv "${cue_files[0]}" "${name}.cue" || exit 144 | cue_files[0]="${name}.cue" 145 | 146 | size=$(stat -c '%s' "${bin_files[0]}") 147 | 148 | if [[ $size -gt $size_limit ]]; then 149 | continue 150 | fi 151 | 152 | md5=$(md5sum -b "${bin_files[0]}") 153 | md5="${md5%% *}" 154 | 155 | of[floppy]="floppy_${md5}.bin" 156 | 157 | mapfile -t lines < <(tr -d '\r' <"${cue_files[0]}" | sed -E "s/${bin_files[0]}/${of[floppy]}/") 158 | printf '%s\r\n' "${lines[@]}" > "${cue_files[0]}" 159 | 160 | mv -n "${bin_files[0]}" "${of[floppy]}" || exit 161 | cp -p "${of[floppy]}" "${of[dn_floppy]}" || exit 162 | done 163 | 164 | cd "${of[dn_cuebin]}" || exit 165 | 166 | for (( i = 0; i < ${#cue_dirs[@]}; i++ )); do 167 | if[dn]="${cue_dirs[${i}]}" 168 | if[bn]=$(basename "${if[dn]}") 169 | 170 | if [[ ${if[bn]} =~ ${regex[dn]} ]]; then 171 | of[dn]="${BASH_REMATCH[1]}" 172 | fi 173 | 174 | if [[ -d ${of[dn]} ]]; then 175 | continue 176 | fi 177 | 178 | mv -n "${if[bn]}" "${of[dn]}" || exit 179 | done 180 | -------------------------------------------------------------------------------- 
/flac_tags.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# This script will list tags of a FLAC album. It allows the user to
# select individual tracks from a simple menu, and display the tags.

# Creates a function, called 'usage', which will print usage
# instructions and then quit.
usage () {
	printf '\n%s\n\n' "Usage: $(basename "$0") [dir]"
	exit
}

if [[ ! -d $1 ]]; then
	usage
fi

# If metaflac isn't installed, quit running the script.
command -v metaflac 1>&- || { printf '\n%s\n\n' 'This script requires metaflac.'; exit; }

declare dn
declare -a files

dn=$(readlink -f "$1")

mapfile -t files < <(find "$dn" -maxdepth 1 -type f -iname "*.flac" 2>&- | sort -n)

if [[ ${#files[@]} -eq 0 ]]; then
	usage
fi

declare track
declare -A regex

regex[num]='^[0-9]+$'

# Creates a function, called 'gettags', which gets all the tags present
# in a FLAC file and prints them sorted by field name. When a field
# appears more than once, only the first non-empty value is kept.
gettags () {
	declare if line field
	declare -a lines
	declare -A alltags

	if="$1"

	mapfile -t lines < <(metaflac --no-utf8-convert --export-tags-to=- "$if" 2>&-)

	for (( z = 0; z < ${#lines[@]}; z++ )); do
		line="${lines[${z}]}"

		unset -v mflac
		declare -a mflac

		# Split "FIELD=value" on the first '='.
		mflac[0]="${line%%=*}"
		mflac[1]="${line#*=}"

		if [[ -z ${mflac[1]} ]]; then
			continue
		fi

		# Field names are case-insensitive, normalize to lowercase.
		field="${mflac[0],,}"

		if [[ -n ${alltags[${field}]} ]]; then
			continue
		fi

		alltags["${field}"]="${mflac[1]}"
	done

	for field in "${!alltags[@]}"; do
		printf '%s\n' "${field}: ${alltags[${field}]}"
	done | sort
}

# Creates a function, called 'options', which reads a menu selection and
# sets the global $track variable if a valid track number was entered.
# 'b' goes back to the menu, 'q' quits the script.
options () {
	declare ref

	unset -v track

	printf '\n%s\n%s\n\n' '*** BACK (b)' '*** QUIT (q)'

# The '-r' stops 'read' from mangling backslashes in the input.
	read -rp '>'

	clear

	case "$REPLY" in
		'b')
			return
		;;
		'q')
			exit
		;;
	esac

	if [[ ! $REPLY =~ ${regex[num]} ]]; then
		return
	fi

	# Indirect reference, so out-of-range numbers are rejected.
	ref="files[${REPLY}]"

	if [[ -z ${!ref} ]]; then
		return
	fi

	track="${!ref}"
}

clear

while true; do
	printf '\n%s\n\n' '*** CHOOSE TRACK ***'

	for (( i = 0; i < ${#files[@]}; i++ )); do
		printf '%s) %s\n' "$i" "$(basename "${files[${i}]}")"
	done

	options

	if [[ -z $track ]]; then
		continue
	fi

	gettags "$track"

	options
done
--------------------------------------------------------------------------------
/flac_tree.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# This script makes an ASCII tree out of the directory structure in a
# FLAC music library, by reading tags.
5 | 6 | # The script expects this directory structure: 7 | # ${library}/${albumartist}/${album} 8 | 9 | usage () { 10 | printf '\n%s\n\n' "Usage: $(basename "$0") [FLAC library directory]" 11 | exit 12 | } 13 | 14 | if [[ ! -d $1 ]]; then 15 | usage 16 | fi 17 | 18 | # If metaflac isn't installed, quit running the script. 19 | command -v metaflac 1>&- || { printf '\n%s\n\n' 'This script requires metaflac.'; exit; } 20 | 21 | declare library artist_dn album_dn if albumartist album date tracks 22 | declare -a dirs1 dirs2 files 23 | declare -A alltags 24 | 25 | library=$(readlink -f "$1") 26 | 27 | gettags () { 28 | declare line field 29 | declare -a lines 30 | 31 | for field in "${!alltags[@]}"; do 32 | unset -v alltags["${field}"] 33 | done 34 | 35 | mapfile -t lines < <(metaflac --no-utf8-convert --export-tags-to=- "$if" 2>&-) 36 | 37 | for (( z = 0; z < ${#lines[@]}; z++ )); do 38 | line="${lines[${z}]}" 39 | 40 | unset -v mflac 41 | declare -a mflac 42 | 43 | mflac[0]="${line%%=*}" 44 | mflac[1]="${line#*=}" 45 | 46 | if [[ -z ${mflac[1]} ]]; then 47 | continue 48 | fi 49 | 50 | field="${mflac[0],,}" 51 | 52 | if [[ -n ${alltags[${field}]} ]]; then 53 | continue 54 | fi 55 | 56 | alltags["${field}"]="${mflac[1]}" 57 | done 58 | } 59 | 60 | # Enters the Songbird Music Library directory. 61 | cd "$library" 62 | 63 | mapfile -t dirs1 < <(find "$library" -mindepth 1 -maxdepth 1 -type d 2>&- | sort) 64 | 65 | for (( i = 0; i < ${#dirs1[@]}; i++ )); do 66 | artist_dn="${dirs1[${i}]}" 67 | 68 | if [[ ! -d $artist_dn ]]; then 69 | continue 70 | fi 71 | 72 | if=$(find "$artist_dn" -type f -iname "*.flac" | head -1) 73 | 74 | gettags 75 | 76 | albumartist="${alltags[albumartist]}" 77 | 78 | printf "+---%s\n" "$albumartist" 79 | printf "| %s\n" '\' 80 | 81 | mapfile -t dirs2 < <(find "$artist_dn" -mindepth 1 -maxdepth 1 -type d 2>&- | sort) 82 | 83 | for (( j = 0; j < ${#dirs2[@]}; j++ )); do 84 | album_dn="${dirs2[${j}]}" 85 | 86 | if [[ ! 
-d $album_dn ]]; then 87 | continue 88 | fi 89 | 90 | mapfile -t files < <(find "$album_dn" -maxdepth 1 -type f -iname "*.flac" 2>&-) 91 | 92 | if [[ ${#files[@]} -eq 0 ]]; then 93 | continue 94 | fi 95 | 96 | if="${files[0]}" 97 | 98 | gettags 99 | 100 | album="${alltags[album]}" 101 | date="${alltags[date]}" 102 | tracks="${#files[@]}" 103 | 104 | if [[ -z $date ]]; then 105 | date="???" 106 | fi 107 | 108 | printf "| %s (%s)\n" "$album" "$date" 109 | printf "| %s tracks.\n" "$tracks" 110 | printf "|\n" 111 | done 112 | done 113 | -------------------------------------------------------------------------------- /flash_usb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to flash USB thumbdrives with Linux ISOs. 4 | # Although, any filetype can be given as argument. The script will not 5 | # check if it's an ISO file. The script asks the user to select the 6 | # correct USB device from a menu. 7 | 8 | # Creates a function, called 'usage', which will print usage 9 | # instructions and then quit. 10 | usage () { 11 | printf '\n%s\n\n' "Usage: $(basename "$0") [image]" 12 | exit 13 | } 14 | 15 | if [[ ! -f $1 ]]; then 16 | usage 17 | fi 18 | 19 | # If the script isn't run with sudo / root privileges, then quit. 20 | if [[ $EUID -ne 0 ]]; then 21 | printf '\n%s\n\n' 'You need to be root to run this script!' 22 | exit 23 | fi 24 | 25 | declare image device pause_msg exit_status n 26 | declare -A regex 27 | 28 | image=$(readlink -f "$1") 29 | 30 | regex[part]='-part[0-9]+$' 31 | 32 | # Creates a function, called 'get_files', which will print file names in 33 | # the current directory, but only if the glob pattern matches actual 34 | # files. This is to prevent errors for when a pattern has no matches. 
35 | get_files () { 36 | declare glob 37 | 38 | for glob in "$@"; do 39 | compgen -G "$glob" 40 | done 41 | } 42 | 43 | # Creates a function, called 'device_menu', which will generate a list 44 | # of available USB devices and allow the user to select one of them in a 45 | # menu. 46 | device_menu () { 47 | declare device_link 48 | declare -a devices 49 | 50 | cd '/dev/disk/by-id' 51 | 52 | mapfile -t devices < <(get_files "usb-*" | grep -Ev -- "${regex[part]}") 53 | 54 | if [[ ${#devices[@]} -eq 0 ]]; then 55 | printf '\n%s\n\n' 'No USB storage devices found!' 56 | exit 57 | fi 58 | 59 | printf '\n%s\n\n' 'Choose destination device:' 60 | 61 | select device_link in "${devices[@]}"; do 62 | device=$(readlink -f "$device_link") 63 | 64 | if [[ -b $device ]]; then 65 | printf '\n%s\n\n' "$device" 66 | 67 | fdisk -l "$device" 68 | printf '\n' 69 | fi 70 | 71 | break 72 | done 73 | } 74 | 75 | while [[ $REPLY != 'y' ]]; do 76 | device_menu 77 | 78 | read -p 'Is this the correct device? [y/n]: ' 79 | printf '\n' 80 | done 81 | 82 | pause_msg=" 83 | You are about to flash: 84 | ${device} 85 | 86 | With: 87 | ${image} 88 | 89 | Are you sure? [y/n]: " 90 | 91 | read -p "$pause_msg" 92 | 93 | if [[ $REPLY != 'y' ]]; then 94 | exit 95 | fi 96 | 97 | printf '\n' 98 | 99 | for n in {1..10}; do 100 | printf "%s..." "$n" 101 | sleep 1 102 | done 103 | 104 | printf '\n\n%s: %s\n\n' "$device" 'flashing...' 105 | 106 | dd if="$image" of="$device" bs=1M 107 | 108 | exit_status="$?" 109 | 110 | # Synchronize cached writes. 111 | sync 112 | 113 | if [[ $exit_status -eq 0 ]]; then 114 | printf '\n%s: %s\n\n' "$device" 'flash succeeded!' 115 | else 116 | printf '\n%s: %s\n\n' "$device" 'flash failed!' 
117 | fi 118 | -------------------------------------------------------------------------------- /flatten_ident.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to find all sub-directories in the directory 4 | # given as argument, and flatten them, if they contain just 1 file that 5 | # has the same name as the directory. 6 | 7 | set -eo pipefail 8 | 9 | # Creates a function, called 'usage', which will print usage 10 | # instructions and then quit. 11 | usage () { 12 | printf '\n%s\n\n' "Usage: $(basename "$0") [dir]" 13 | exit 14 | } 15 | 16 | if [[ $# -ne 1 ]]; then 17 | usage 18 | fi 19 | 20 | if [[ ! -d $1 ]]; then 21 | usage 22 | fi 23 | 24 | declare session 25 | declare -a vars files dirs path_parts 26 | declare -A if of depth regex 27 | 28 | vars=('files' 'dirs' 'path_parts') 29 | 30 | session="${RANDOM}-${RANDOM}" 31 | 32 | if[dn]=$(readlink -f "$1") 33 | 34 | regex[fn]='^(.*)\.([^.]*)$' 35 | 36 | depth[max]=0 37 | 38 | mapfile -d'/' -t path_parts <<<"${if[dn]}" 39 | depth[min]=$(( ${#path_parts[@]} - 1 )) 40 | 41 | mapfile -t files < <(find "${if[dn]}" -exec printf '%q\n' {} + 2>&-) 42 | 43 | for (( i = 0; i < ${#files[@]}; i++ )); do 44 | eval if[fn]="${files[${i}]}" 45 | 46 | mapfile -d'/' -t path_parts <<<"${if[fn]}" 47 | depth[tmp]=$(( ${#path_parts[@]} - 1 )) 48 | depth[diff]=$(( depth[tmp] - depth[min] )) 49 | 50 | if [[ ${depth[diff]} -gt ${depth[max]} ]]; then 51 | depth[max]="${depth[diff]}" 52 | fi 53 | done 54 | 55 | unset -v "${vars[@]}" 56 | 57 | for (( i = depth[max]; i > 0; i-- )); do 58 | mapfile -t dirs < <(find "${if[dn]}" -type d -mindepth "$i" -maxdepth "$i" -exec printf '%q\n' {} + 2>&-) 59 | 60 | for (( j = 0; j < ${#dirs[@]}; j++ )); do 61 | eval if[fn]="${dirs[${j}]}" 62 | of[dn]=$(dirname "${if[fn]}") 63 | if[bn]=$(basename "${if[fn]}") 64 | 65 | unset -v if[ext] of[ext] 66 | 67 | mapfile -t files < <(compgen -G "${if[fn]}/*") 68 | 69 | if [[ 
${#files[@]} -ne 1 ]]; then 70 | continue 71 | fi 72 | 73 | of[fn]="${files[0]}" 74 | of[bn]=$(basename "${of[fn]}") 75 | 76 | if [[ ${if[bn]} =~ ${regex[fn]} ]]; then 77 | if[bn]="${BASH_REMATCH[1]}" 78 | if[ext]="${BASH_REMATCH[2]}" 79 | fi 80 | 81 | if [[ ${of[bn]} =~ ${regex[fn]} ]]; then 82 | of[bn]="${BASH_REMATCH[1]}" 83 | of[ext]="${BASH_REMATCH[2]}" 84 | fi 85 | 86 | if [[ ${if[bn]} != "${of[bn]}" ]]; then 87 | continue 88 | fi 89 | 90 | printf '%s\n' "${if[fn]}" 91 | 92 | if [[ -n ${if[ext]} ]]; then 93 | of[fn]="${if[bn]}-${session}.${if[ext]}" 94 | else 95 | of[fn]="${if[bn]}-${session}" 96 | fi 97 | 98 | of[fn]="${of[dn]}/${of[fn]}" 99 | 100 | mv -n "${if[fn]}" "${of[fn]}" 101 | mv -n "${of[fn]}"/* "${of[dn]}" 102 | rm -r "${of[fn]}" 103 | done 104 | done 105 | -------------------------------------------------------------------------------- /format_lowlevel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to do low-level formatting of devices, by writing 4 | # 0s to the entire device, reading from /dev/zero. 5 | 6 | # Creates a function, called 'usage', which will print usage 7 | # instructions and then quit. 8 | usage () { 9 | printf '\n%s\n\n' "Usage: $(basename "$0") [devices...]" 10 | exit 11 | } 12 | 13 | if [[ $# -eq 0 ]]; then 14 | usage 15 | fi 16 | 17 | # If the script isn't run with sudo / root privileges, then quit. 18 | if [[ $EUID -ne 0 ]]; then 19 | printf '\n%s\n\n' 'You need to be root to run this script!' 20 | exit 21 | fi 22 | 23 | declare device pause_msg exit_status n 24 | declare -a types args 25 | declare -A regex 26 | 27 | types=('quick' 'full') 28 | 29 | regex[part]='^(.*)[0-9]+$' 30 | 31 | while [[ $# -gt 0 ]]; do 32 | device=$(readlink -f "$1") 33 | 34 | if [[ ! 
-b $device ]]; then
		usage
	fi

	unset -v type
	declare type

	# If argument is a partition instead of the device itself, strip the
	# partition number from the path.
	if [[ $device =~ ${regex[part]} ]]; then
		device="${BASH_REMATCH[1]}"
	fi

	# List information about the device using 'fdisk'.
	printf '\n'
	fdisk -l "$device"
	printf '\n'

	pause_msg="
You are about to do a low-level format of:
${device}

Are you sure? [y/n]: "

	read -rp "$pause_msg"

	if [[ $REPLY != 'y' ]]; then
		exit
	fi

	# Ask the user whether they want to do a quick or full format.
	printf '\n%s\n\n' 'Do you want to do a quick or full format?'

	until [[ -n $type ]]; do
		select type in "${types[@]}"; do
			break
		done
	done

	printf '\n'

	# A 10-second countdown, giving the user a last chance to abort.
	for n in {1..10}; do
		printf '%s...' "$n"
		sleep 1
	done

	printf '\n\n%s: %s\n\n' "$device" 'formatting...'

	# Depending on whether we're doing a quick or full format, adjust the
	# arguments to 'dd'. The command is built as an array and executed
	# directly, which is equivalent to the old eval-based construction
	# but avoids the quoting pitfalls of 'eval'.
	args=(dd if=/dev/zero of="$device" bs=1M)

	# A quick format only zeroes the first 100 MiB (enough to wipe the
	# partition table); a full format writes the whole device.
	if [[ $type == 'quick' ]]; then
		args+=(count=100)
	fi

	# Run 'dd'.
	"${args[@]}"

	exit_status="$?"

	# Synchronize cached writes.
	sync

	if [[ $exit_status -eq 0 ]]; then
		printf '\n%s: %s\n\n' "$device" 'format succeeded!'
	else
		printf '\n%s: %s\n\n' "$device" 'format failed!'
	fi

	shift
done
--------------------------------------------------------------------------------
/free_ram.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# This script creates an infinite while loop, which checks the available
# RAM every 1 second, and kills Firefox, Chrome, Chromium and
# Tor Browser if less than 1 GB is available.
The script will only kill 6 | # the tabs, but not the main window itself, so the application keeps 7 | # running but RAM is still freed up. 8 | 9 | # The web browser is always the application that ends up using the most 10 | # RAM on my system. Once the RAM is nearly full, Linux starts swapping 11 | # and gradually slows down more and more, until grinding to a complete 12 | # halt when RAM is completely full. Then Linux calls the Out Of Memory 13 | # (OOM) manager to kill processes to free up RAM. It might kill a 14 | # critical process, a program that's been running for a very long time 15 | # (i.e. video encoding). To prevent that from happening, I created 16 | # this script. 17 | 18 | declare ram_limit log_killed 19 | declare -a free_ram ram swap 20 | declare -A regex pids 21 | 22 | regex[pid_args]='^[[:blank:]]*([0-9]+)([[:blank:]]*)([^ ]+)(.*)$' 23 | regex[rend]='--type=renderer' 24 | regex[ext]='--extension-process' 25 | regex[tab]='^.*-childID [0-9]+.* tab$' 26 | 27 | # Creates a limit for the amount of free RAM required. 28 | ram_limit=1000000 29 | 30 | # Creates a file name for the log. 31 | log_killed="${HOME}/browser_killed.log" 32 | 33 | # If $log_killed is not a file, create it. 34 | if [[ ! -f $log_killed ]]; then 35 | touch "$log_killed" 36 | fi 37 | 38 | # Creates a function, called 'now', which will print the date and time. 39 | now () { date '+%F %H:%M:%S'; } 40 | 41 | # Creates a function, called 'get_pids', which gets all child process 42 | # IDs of the command names given to it as arguments. 
43 | get_pids () { 44 | declare key pid args comm comm_path bn line 45 | declare -a session child match 46 | 47 | for key in "${!pids[@]}"; do 48 | unset -v pids["${key}"] 49 | done 50 | 51 | for comm in "$@"; do 52 | unset -v pid args comm_path 53 | 54 | mapfile -t session < <(ps -C "$comm" -o sid= | tr -d '[:blank:]' | sort -u) 55 | 56 | if [[ ${#session[@]} -eq 0 ]]; then 57 | continue 58 | fi 59 | 60 | mapfile -t child < <(ps -H -s "${session[0]}" -o pid=,args=) 61 | 62 | for (( i = 0; i < ${#child[@]}; i++ )); do 63 | line="${child[${i}]}" 64 | 65 | if [[ ! $line =~ ${regex[pid_args]} ]]; then 66 | continue 67 | fi 68 | 69 | match=("${BASH_REMATCH[@]:1}") 70 | pid="${match[0]}" 71 | args="${match[2]}" 72 | 73 | bn=$(basename "$args") 74 | 75 | if [[ $bn == "$comm" ]]; then 76 | comm_path="$args" 77 | break 78 | fi 79 | done 80 | 81 | if [[ -z $comm_path ]]; then 82 | continue 83 | fi 84 | 85 | for (( i = 0; i < ${#child[@]}; i++ )); do 86 | line="${child[${i}]}" 87 | 88 | if [[ ! $line =~ ${regex[pid_args]} ]]; then 89 | continue 90 | fi 91 | 92 | match=("${BASH_REMATCH[@]:1}") 93 | pid="${match[0]}" 94 | args="${match[2]}" 95 | 96 | if [[ $pid -eq ${session[0]} ]]; then 97 | continue 98 | fi 99 | 100 | if [[ $args != "$comm_path" ]]; then 101 | continue 102 | fi 103 | 104 | args+="${match[3]}" 105 | pids["${pid}"]="$args" 106 | done 107 | done 108 | } 109 | 110 | # Creates a function, called 'kill_firefox', which kills all child 111 | # processes belonging to either Firefox or Tor Browser. 112 | kill_firefox () { 113 | declare time pid args 114 | declare -a pids_tmp 115 | 116 | get_pids 'firefox' 'firefox.real' 117 | mapfile -t pids_tmp < <(printf '%s\n' "${!pids[@]}" | sort -n) 118 | 119 | if [[ ${#pids_tmp[@]} -eq 0 ]]; then 120 | return 121 | fi 122 | 123 | time=$(now) 124 | printf '%s\n\n' "${time}: Killing Firefox / Tor Browser..." 
| tee --append "$log_killed" 125 | 126 | for (( i = 0; i < ${#pids_tmp[@]}; i++ )); do 127 | pid="${pids_tmp[${i}]}" 128 | args="${pids[${pid}]}" 129 | 130 | # Checks if $pid is a renderer process. 131 | if [[ ! $args =~ ${regex[tab]} ]]; then 132 | continue 133 | fi 134 | 135 | printf '%s\n' "SIGKILL: ${pid}" 136 | kill -9 "$pid" 137 | done 138 | } 139 | 140 | # Creates a function, called 'kill_chrome', which kills all child 141 | # processes belonging to either Chrome or Chromium. 142 | kill_chrome () { 143 | declare time pid args 144 | declare -a pids_tmp 145 | 146 | get_pids 'chrome' 'chromium' 147 | mapfile -t pids_tmp < <(printf '%s\n' "${!pids[@]}" | sort -n) 148 | 149 | if [[ ${#pids_tmp[@]} -eq 0 ]]; then 150 | return 151 | fi 152 | 153 | time=$(now) 154 | printf '%s\n\n' "${time}: Killing Chrome / Chromium..." | tee --append "$log_killed" 155 | 156 | for (( i = 0; i < ${#pids_tmp[@]}; i++ )); do 157 | pid="${pids_tmp[${i}]}" 158 | args="${pids[${pid}]}" 159 | 160 | # Checks if $pid is a renderer / extension process. If it's NOT a 161 | # renderer, or is an extension process, skip it. This will keep 162 | # extensions and downloads running, even though the other Chrome child 163 | # processes are killed. Only renderer processes that are NOT extension 164 | # processes will get killed. 165 | if [[ ! $args =~ ${regex[rend]} ]]; then 166 | continue 167 | elif [[ $args =~ ${regex[ext]} ]]; then 168 | continue 169 | fi 170 | 171 | printf '%s\n' "SIGKILL: ${pid}" 172 | kill -9 "$pid" 173 | done 174 | } 175 | 176 | # Creates an infinite while loop. 177 | while [[ 1 ]]; do 178 | # Sleeps for 1 second. 179 | sleep 1 180 | 181 | # Runs 'free', stores output in the $free_ram array, and sets a couple 182 | # of variables based on that output. 
183 | mapfile -t free_ram < <(free | sed -E 's/[[:blank:]]+/ /g') 184 | mapfile -d' ' -t ram <<<"${free_ram[1]}" 185 | mapfile -d' ' -t swap <<<"${free_ram[2]}" 186 | ram[-1]="${ram[-1]%$'\n'}" 187 | swap[-1]="${swap[-1]%$'\n'}" 188 | 189 | # Prints the free and available RAM and SWAP. 190 | printf '%s\n' 'FREE (kibibytes)' 191 | printf 'RAM: %s, SWAP: %s\n' "${ram[3]}" "${swap[3]}" 192 | printf '%s\n' '***' 193 | printf '%s\n' 'AVAILABLE (kibibytes)' 194 | printf 'RAM: %s\n\n' "${ram[6]}" 195 | 196 | # If available RAM is less than 1GB... 197 | if [[ ${ram[6]} -lt $ram_limit ]]; then 198 | # If Firefox / Tor Browser is running, then kill it, print a message to 199 | # the screen, and append a message to the log. 200 | kill_firefox 201 | 202 | # If Chrome / Chromium is running, then kill it, print a message to the 203 | # screen, and append a message to the log. 204 | kill_chrome 205 | 206 | # Writes cached writes to disk. Hopefully this will also clear up a 207 | # little RAM. 208 | sync 209 | fi 210 | done 211 | -------------------------------------------------------------------------------- /fuck_your_system_up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to utterly fuck your system up by erasing the 4 | # partition table of every connected storage device. This will 5 | # permanently delete all your files on every device. Do NOT run this! 6 | 7 | # If the script isn't run with sudo / root privileges, quit. 8 | if [[ $EUID -ne 0 ]]; then 9 | printf '\n%s\n\n' 'You need to be root to run this script!' 
10 | exit 11 | fi 12 | 13 | declare device type 14 | declare -a types sources devices devices_tmp 15 | declare -A regex 16 | 17 | regex[hd]='^\/dev\/hd[[:alpha:]]+$' 18 | regex[sd]='^\/dev\/sd[[:alpha:]]+$' 19 | regex[nvme]='^\/dev\/nvme[0-9]+n[0-9]+$' 20 | 21 | types=('hd' 'sd' 'nvme') 22 | sources=('/dev/zero' '/dev/urandom') 23 | 24 | erase_devices () { 25 | declare n source 26 | 27 | for (( i = 0; i < ${#devices[@]}; i++ )); do 28 | device="${devices[${i}]}" 29 | 30 | printf '%s ' "$device" 31 | 32 | for n in {1..5}; do 33 | printf '...%s' "$n" 34 | 35 | for source in "${sources[@]}"; do 36 | dd if="$source" of="$device" bs=1M count=100 1>&- 2>&- 37 | done 38 | done 39 | 40 | printf '\n' 41 | done 42 | } 43 | 44 | mapfile -t devices_tmp < <(find /dev -maxdepth 1 -type b \( -iname "hd*" -o -iname "sd*" -o -iname "nvme*" \) 2>&- | sort -r) 45 | 46 | for (( i = 0; i < ${#devices_tmp[@]}; i++ )); do 47 | device="${devices_tmp[${i}]}" 48 | 49 | for type in "${types[@]}"; do 50 | if [[ $device =~ ${regex[${type}]} ]]; then 51 | devices+=("$device") 52 | break 53 | fi 54 | done 55 | done 56 | 57 | erase_devices 58 | sync 59 | -------------------------------------------------------------------------------- /get_pids.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script gets all the processes that share the same session ID as 4 | # the command names specified. Can be used to get all child processes 5 | # of a command, for example. Note that command names are case sensitive. 6 | # As an example, 'Xorg' will work, but 'xorg' will not. 7 | 8 | # Creates a function, called 'usage', which will print usage 9 | # instructions and then quit. 
10 | usage () { 11 | printf '\n%s\n\n' "Usage: $(basename "$0") [name]" 12 | exit 13 | } 14 | 15 | if [[ $# -eq 0 ]]; then 16 | usage 17 | fi 18 | 19 | declare comm_tmp pid 20 | declare -a session name 21 | declare -A regex pids 22 | 23 | regex[pid_args]='^[[:blank:]]*([0-9]+)([[:blank:]]*)([^ ]+)(.*)$' 24 | 25 | # Creates a function, called 'get_pids', which gets all child process 26 | # IDs of the command names given to it as arguments. 27 | get_pids () { 28 | declare key pid args comm comm_path line 29 | declare -a child match 30 | 31 | for key in "${!pids[@]}"; do 32 | unset -v pids["${key}"] 33 | done 34 | 35 | for comm in "$@"; do 36 | unset -v pid args 37 | 38 | mapfile -t session < <(ps -C "$comm" -o sid= | tr -d '[:blank:]' | sort -u) 39 | 40 | if [[ ${#session[@]} -eq 0 ]]; then 41 | continue 42 | fi 43 | 44 | mapfile -t name < <(ps -p "${session[0]}" -o args=) 45 | 46 | mapfile -t child < <(ps -H -s "${session[0]}" -o pid=,args=) 47 | 48 | for (( i = 0; i < ${#child[@]}; i++ )); do 49 | line="${child[${i}]}" 50 | 51 | if [[ ! 
$line =~ ${regex[pid_args]} ]]; then 52 | continue 53 | fi 54 | 55 | match=("${BASH_REMATCH[@]:1}") 56 | pid="${match[0]}" 57 | args="${match[2]}" 58 | 59 | if [[ $pid -eq ${session[0]} ]]; then 60 | continue 61 | fi 62 | 63 | args+="${match[3]}" 64 | pids["${pid}"]="$args" 65 | done 66 | done 67 | } 68 | 69 | for comm_tmp in "$@"; do 70 | get_pids "$comm_tmp" 71 | 72 | if [[ ${#pids[@]} -eq 0 ]]; then 73 | continue 74 | fi 75 | 76 | printf '\n***\n\n' 77 | 78 | printf 'SID: %s\n' "${session[0]}" 79 | printf 'ARGS: %s\n\n' "${name[0]}" 80 | 81 | for pid in "${!pids[@]}"; do 82 | printf 'PID: %s\n' "$pid" 83 | printf 'ARGS: %s\n\n' "${pids[${pid}]}" 84 | done 85 | 86 | printf '***\n\n' 87 | done 88 | -------------------------------------------------------------------------------- /hdd_dump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script will look for all files in directory given as first 4 | # argument, sort them by smallest > largest, and put that list in an 5 | # array. We will then go through that array and copy each file one by 6 | # one to the output directory. The script will check the MD5 hashes of 7 | # all the files to avoid copying duplicates (in order to save space in 8 | # the output directory). 9 | 10 | # This script can be useful when dumping the content of failing hard 11 | # drives or broken partitions. The script outputs a list of files that 12 | # were copied, and a list of files that couldn't be copied, in the 13 | # output directory. 14 | 15 | # Since the script copies the smallest files first, the highest possible 16 | # number of files will be copied (preferably all of the files). This is 17 | # because smaller files are faster to read / write, and there's 18 | # statistically a smaller chance of a bad block / sector hitting a small 19 | # file. 
By copying the smaller files first, if the hard drive really is 20 | # about to fail, the largest possible number of files will be copied. 21 | 22 | # If the script has problems reading a file, it will retry reading it a 23 | # maximum of 10 times, 5 times to check the MD5 hash, and 5 times to 24 | # copy the file. 25 | 26 | # Permissions and modification dates of the input files are preserved in 27 | # the output files by the script. 28 | 29 | # PS: It's probably a better idea to use 'ddrescue' than this script, to 30 | # make a complete file system image of the failing drive (using multiple 31 | # passes). But in case there's not enough free space on the destination 32 | # drive, maybe the script could still be useful. 33 | 34 | set -o pipefail 35 | 36 | # Creates a function, called 'usage', which will print usage 37 | # instructions and then quit. 38 | usage () { 39 | printf '\n%s\n\n' "Usage: $(basename "$0") [in_dir] [out_dir]" 40 | exit 41 | } 42 | 43 | # If the script isn't run with sudo / root privileges, then quit. 44 | if [[ $EUID -ne 0 ]]; then 45 | printf '\n%s\n\n' 'You need to be root to run this script!' 46 | exit 47 | fi 48 | 49 | if [[ ! -d $1 || -z $2 ]]; then 50 | usage 51 | elif [[ -f $2 ]]; then 52 | printf '\n%s\n\n' "\"${2}\" is a file!" 
53 | exit 54 | fi 55 | 56 | declare session cp_log error_log used free diff start stop 57 | declare -a files dn_parts fn_parts 58 | declare -A if of regex md5s 59 | 60 | if[dn]=$(readlink -f "$1") 61 | of[dn]=$(readlink -f "$2") 62 | 63 | session="${RANDOM}-${RANDOM}" 64 | 65 | cp_log="${of[dn]}/hdd_dump_copied-${session}.txt" 66 | error_log="${of[dn]}/hdd_dump_errors-${session}.txt" 67 | 68 | regex[du]='^([0-9]+)([[:blank:]]+)(.*)$' 69 | 70 | mkdir -p "${of[dn]}" || exit 71 | 72 | used=$(du --summarize --block-size=1 "${if[dn]}" | grep -Eo '^[0-9]+') 73 | free=$(df --output=avail --block-size=1 "${of[dn]}" | tail -n +2 | tr -d '[:blank:]') 74 | 75 | if [[ $used -gt $free ]]; then 76 | diff=$(( used - free )) 77 | 78 | cat <&-) 99 | 100 | exit_status="$?" 101 | 102 | md5_if="${md5_if%% *}" 103 | 104 | if [[ $exit_status -eq 0 ]]; then 105 | if [[ ${md5s[${md5_if}]} -eq 1 ]]; then 106 | return 107 | fi 108 | else 109 | if [[ $n -eq 5 ]]; then 110 | printf '%s\n' "${if[fn]}" >> "$error_log" 111 | 112 | return 113 | fi 114 | 115 | sleep 1 116 | fi 117 | done 118 | 119 | md5s["${md5_if}"]=1 120 | 121 | printf '%s' "copying: ${if[fn]}... " 122 | 123 | for n in {1..5}; do 124 | cp -p "${if[fn]}" "${of[fn]}" 2>&- 125 | 126 | exit_status="$?" 
127 | 128 | if [[ $exit_status -eq 0 ]]; then 129 | printf '%s\n' 'done' 130 | printf '%s\n' "${if[fn]}" >> "$cp_log" 131 | 132 | return 133 | else 134 | if [[ $n -eq 5 ]]; then 135 | printf '%s\n' 'error' 136 | printf '%s\n' "${if[fn]}" >> "$error_log" 137 | 138 | if [[ -f ${of[fn]} ]]; then 139 | rm -f "${of[fn]}" 2>&- 140 | fi 141 | 142 | return 143 | fi 144 | 145 | sleep 1 146 | fi 147 | done 148 | } 149 | 150 | touch "$cp_log" "$error_log" 151 | 152 | mapfile -d'/' -t dn_parts <<<"${if[dn]}" 153 | dn_parts[-1]="${dn_parts[-1]%$'\n'}" 154 | start="${#dn_parts[@]}" 155 | 156 | mapfile -t files < <(find "${if[dn]}" -type f -exec du -b {} + 2>&- | sort -n | sed -E "s/${regex[du]}/\3/") 157 | 158 | for (( i = 0; i < ${#files[@]}; i++ )); do 159 | if[fn]="${files[${i}]}" 160 | 161 | # Removes the directory name from the beginning of the string. Creating 162 | # the basename this way because it's more safe than using regex:es, if 163 | # the string contains weird characters (that are interpreted as part of 164 | # the regex). 165 | mapfile -d'/' -t fn_parts <<<"${if[fn]}" 166 | fn_parts[-1]="${fn_parts[-1]%$'\n'}" 167 | stop=$(( (${#fn_parts[@]} - ${#dn_parts[@]}) - 1 )) 168 | of[dn_tmp]=$(printf '/%s' "${fn_parts[@]:${start}:${stop}}") 169 | of[dn_tmp]="${of[dn_tmp]:1}" 170 | of[bn]="${fn_parts[-1]}" 171 | 172 | of[dn_tmp]="${of[dn]}/${of[dn_tmp]}" 173 | of[fn]="${of[dn_tmp]}/${of[bn]}" 174 | 175 | mkdir -p "${of[dn_tmp]}" || exit 176 | 177 | if [[ ! -f ${of[fn]} ]]; then 178 | md5copy 179 | fi 180 | done 181 | -------------------------------------------------------------------------------- /imdb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script looks up movies on IMDb, and displays information about 4 | # them. 5 | 6 | # Usage: imdb.sh "movie title (year)" 7 | 8 | # (The year is optional, and only recommended for more accurate search 9 | # results. 
The paranthesis around (year) are required for proper 10 | # parsing.) 11 | 12 | # Creates a function, called 'usage', which will print usage 13 | # instructions and then quit. 14 | usage () { 15 | printf '\n%s\n\n' "Usage: $(basename "$0") \"movie title (year)\"" 16 | exit 17 | } 18 | 19 | if [[ $# -eq 0 ]]; then 20 | usage 21 | fi 22 | 23 | declare -A regex 24 | 25 | # Creates a function, called 'uriencode', which will translate the 26 | # special characters in any string to be URL friendly. This will be 27 | # used in the 'imdb' function. 28 | uriencode () { 29 | declare url_string 30 | 31 | url_string="$@" 32 | 33 | curl -Gso /dev/null -w %{url_effective} --data-urlencode "$url_string" 'http://localhost' | sed -E 's/^.{18}(.*)$/\1/' 34 | } 35 | 36 | # Creates a function, called 'time_calc', which will translate seconds 37 | # into the hh:mm:ss format. 38 | time_calc () { 39 | declare s m h 40 | 41 | s="$1" 42 | 43 | m=$(( s / 60 )) 44 | h=$(( m / 60 )) 45 | 46 | s=$(( s % 60 )) 47 | m=$(( m % 60 )) 48 | 49 | printf '%02d:%02d:%02d' "$h" "$m" "$s" 50 | } 51 | 52 | # Creates a function, called 'imdb', which will look up the movie name 53 | # on IMDb. 
https://www.imdb.com/search/title/ 54 | # https://www.imdb.com/interfaces/ 55 | imdb () { 56 | if [[ $# -eq 0 ]]; then 57 | return 1 58 | fi 59 | 60 | declare agent y t type url_tmp url id json_type 61 | declare -a term tmp_array 62 | declare -A json_types imdb_info 63 | 64 | mapfile -t term < <(sed -E 's/[[:blank:]]+/\n/g' <<<"$@") 65 | 66 | regex[y]='^\(([0-9]{4})\)$' 67 | regex[id]='(title\/tt[0-9]+)' 68 | regex[list]='^,$' 69 | 70 | regex[title1]='\,\"originalTitleText\":' 71 | regex[title2]='\"text\":\"(.*)\"\,\"__typename\":\"TitleText\"' 72 | regex[year1]='\,\"releaseYear\":' 73 | regex[year2]='\"year\":([0-9]{4})\,\"endYear\":.*\,\"__typename\":\"YearRange\"' 74 | regex[plot1]='\"plotText\":' 75 | regex[plot2]='\"plainText\":\"(.*)\"\,\"__typename\":\"Markdown\"' 76 | regex[rating1]='\,\"ratingsSummary\":' 77 | regex[rating2]='\"aggregateRating\":(.*)\,\"voteCount\":.*\,\"__typename\":\"RatingsSummary\"' 78 | regex[genre1]='\"genres\":\[' 79 | regex[genre2]='\"text\":\"(.*)\"\,\"id\":\".*\"\,\"__typename\":\"Genre\"' 80 | regex[actor1]='\,\"actor\":\[' 81 | regex[actor2]='\"@type\":\"Person\",\"url\":\".*\"\,\"name\":\"(.*)\"' 82 | regex[director1]='\]\,\"director\":\[' 83 | regex[director2]='\"@type\":\"Person\",\"url\":\".*\"\,\"name\":\"(.*)\"' 84 | regex[runtime1]='\,\"runtime\":' 85 | regex[runtime2]='\"seconds\":(.*)\,\"displayableProperty\":' 86 | 87 | agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36' 88 | 89 | # This function gets a URL using cURL. 90 | get_page () { 91 | curl --location --user-agent "$agent" --retry 10 --retry-delay 10 --connect-timeout 10 --silent "$1" 2>&- 92 | } 93 | 94 | # This function runs the JSON regexes and decides which JSON type is a 95 | # list and which isn't. 
96 | get_list () { 97 | declare string 98 | declare -a list 99 | declare -A lists 100 | 101 | lists=(['genre']=1 ['actor']=1 ['director']=1) 102 | 103 | (( z += 1 )) 104 | 105 | # If current JSON type is not a list, match the regex and return from 106 | # this function. 107 | if [[ -z ${lists[${json_type}]} ]]; then 108 | if [[ ${tmp_array[${z}]} =~ ${regex[${json_type}2]} ]]; then 109 | imdb_info["${json_type}"]="${BASH_REMATCH[1]}" 110 | fi 111 | 112 | return 113 | fi 114 | 115 | # This loop parses JSON lists. 116 | while [[ ${tmp_array[${z}]} =~ ${regex[${json_type}2]} ]]; do 117 | list+=("${BASH_REMATCH[1]}") 118 | 119 | (( z += 1 )) 120 | 121 | if [[ ${tmp_array[${z}]} =~ ${regex[list]} ]]; then 122 | (( z += 1 )) 123 | else 124 | (( z -= 1 )) 125 | break 126 | fi 127 | done 128 | 129 | string=$(printf '%s, ' "${list[@]}") 130 | string="${string%, }" 131 | 132 | imdb_info["${json_type}"]="$string" 133 | } 134 | 135 | if [[ ${term[-1]} =~ ${regex[y]} ]]; then 136 | y="${BASH_REMATCH[1]}" 137 | unset -v term[-1] 138 | fi 139 | 140 | t=$(uriencode "${term[@]}") 141 | 142 | # Sets the type of IMDb search results to include. 143 | 144 | # All currently available types: 145 | # feature,tv_series,short,tv_episode,tv_miniseries,tv_movie,tv_special, 146 | # tv_short,video_game,video,music_video,podcast_series,podcast_episode 147 | type='feature,tv_series,tv_miniseries,tv_movie,tv_special,video' 148 | 149 | # If the $y variable is empty, that means the year is unknown, hence we 150 | # will need to use slightly different URLs, when searching for the 151 | # movie. 
152 | if [[ -z $y ]]; then 153 | url_tmp="https://www.imdb.com/search/title/?title=${t}&title_type=${type}" 154 | else 155 | url_tmp="https://www.imdb.com/search/title/?title=${t}&title_type=${type}&release_date=${y}-01-01,${y}-12-31" 156 | fi 157 | 158 | id=$(get_page "$url_tmp" | sed -nE "s/${regex[id]}.*$/\1/;s/^.*${regex[id]}/\1/p") 159 | 160 | if [[ -z $id ]]; then 161 | return 1 162 | fi 163 | 164 | url="https://www.imdb.com/${id}/" 165 | 166 | # Translate {} characters to newlines so we can parse the JSON data. 167 | # I came to the conclusion that this is the most simple, reliable and 168 | # future-proof way to get the movie information. It's possible to add 169 | # more regex:es to the for loop below, to get additional information. 170 | # Excluding lines that are longer than 500 characters, to make it 171 | # slightly faster. 172 | mapfile -t tmp_array < <(get_page "$url" | tr '{}' '\n' | grep -Ev -e '.{500}' -e '^[[:blank:]]*$') 173 | 174 | json_types=(['title']=1 ['year']=1 ['plot']=1 ['rating']=1 ['genre']=1 ['actor']=1 ['director']=1 ['runtime']=1) 175 | 176 | for (( z = 0; z < ${#tmp_array[@]}; z++ )); do 177 | if [[ ${#json_types[@]} -eq 0 ]]; then 178 | break 179 | fi 180 | 181 | for json_type in "${!json_types[@]}"; do 182 | if [[ ! ${tmp_array[${z}]} =~ ${regex[${json_type}1]} ]]; then 183 | continue 184 | fi 185 | 186 | get_list 187 | 188 | unset -v json_types["${json_type}"] 189 | break 190 | done 191 | done 192 | 193 | imdb_info[runtime]=$(time_calc "${imdb_info[runtime]}") 194 | 195 | cat < "$cfg_fn" 164 | printf '\n%s:\n%s\n\n' 'Wrote selected audio output to' "$cfg_fn" 165 | fi 166 | fi 167 | 168 | if [[ -z $pw_id ]]; then 169 | exit 170 | fi 171 | } 172 | 173 | # Creates a function, called 'get_volume', which gets the current 174 | # volume. 
# Creates a function, called 'set_volume', which sets the volume. The
# first argument is the mute state ('true' / 'false'). The raw value in
# volume[out] is converted to the decimal form PipeWire expects.
set_volume () {
	declare mute_state int_part frac_part

	mute_state="$1"

	# Splits the raw volume into integer and fractional parts.
	if [[ ! ${volume[out]} =~ ${regex[split]} ]]; then
		int_part=0
		frac_part="${volume[out]}"
	else
		int_part="${BASH_REMATCH[1]}"
		frac_part="${BASH_REMATCH[2]}"

		# Strips leading zeroes so printf won't treat it as octal.
		if [[ $frac_part =~ ${regex[zero]} ]]; then
			frac_part="${BASH_REMATCH[1]}"
		fi
	fi

	volume[dec]=$(printf '%d.%06d' "$int_part" "$frac_part")

	pw-cli s "$pw_id" Props "{ mute: ${mute_state}, channelVolumes: [ ${volume[dec]}, ${volume[dec]} ] }" 1>&- 2>&-
}
# Creates a function, called 'get_interval', which will get the exact
# number to decrease the volume by at each interval. Since Bash can't do
# floating-point arithmetic, this becomes slightly tricky. Keep in mind
# that Bash always rounds down, never up.
get_interval () {
	declare head tail step remain

	# Whole step to subtract per tick (integer division), and a running
	# volume level starting from the current volume.
	step=$(( (volume[out] - volume[target]) / unit ))
	remain="${volume[out]}"

	# Fills the array with the desired volume level at each point in
	# time.
	for (( i = 0; i < unit; i++ )); do
		(( remain -= step ))
		interval_out["${i}"]="$remain"
	done

	# If the last level already hit the target, we're done.
	if [[ $remain -eq ${volume[target]} ]]; then
		return
	fi

	# Otherwise, spread the leftover difference evenly across the tail
	# of the array, walking it in reverse.
	(( remain -= volume[target] ))

	head=$(( ${#interval_out[@]} - remain ))
	tail=$(( ${#interval_out[@]} - 1 ))

	for (( i = tail; i >= head; i-- )); do
		(( interval_out[${i}] -= remain ))
		(( remain -= 1 ))
	done
}
The volume set in the desktop environment 302 | # seems to be indpendent of the volume set in PipeWire, which might be 303 | # what's causing this. 304 | #reset_volume 305 | 306 | # If volume is greater than target volume, then... 307 | if [[ ${volume[out]} -gt ${volume[target]} ]]; then 308 | # Gets the amount to lower the volume by at each interval. 309 | get_interval 310 | 311 | # Lowers the volume. 312 | sleep_low 313 | 314 | # Prints newline. 315 | printf '\n\n' 316 | fi 317 | -------------------------------------------------------------------------------- /mkv2srt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to extract SubRip (SRT) subtitles from MKV 4 | # (Matroska) files, and remux them without the SRT tracks. 5 | 6 | # I use this script in combination with 'extract_subs.sh', to backup 7 | # the subtitles when I replace old movie rips (like XviD, H264 etc.) 8 | # with better rips (HEVC / x265). 9 | 10 | # If there's still other track types (subtitle or not) besides SRT left 11 | # in the Matroska file after extraction, then it's remuxed. If there's 12 | # no other tracks left at all, it's deleted. 13 | 14 | # It's a good idea to keep SRT subtitles as regular text files, because 15 | # then the checksum can be compared against other files. Hence, 16 | # duplicates can be found and deleted. It also means the SRT subtitles 17 | # are more accessible in general, if they need to be edited, synced to 18 | # a different movie release etc. 
# Creates a function, called 'clean_up', which will remove temporary
# files, if they exist.
clean_up () {
	declare fn_loop

	if [[ ${#files_tmp[@]} -eq 0 ]]; then
		return
	fi

	for fn_loop in "${files_tmp[@]}"; do
		if[fn_tmp]="$fn_loop"

		if [[ -f ${if[fn_tmp]} ]]; then
			rm "${if[fn_tmp]}"
		fi
	done
}
# Creates a function, called 'get_tracks', which will read the metadata
# of the Matroska file with 'mkvinfo', and store the properties of each
# track in the 'tracks' hash (keyed as "<n>,<property>").
get_tracks () {
	declare in_tracks line
	declare -a raw_lines track_lines

	mapfile -t raw_lines < <(mkvinfo "${if[fn]}" 2>&-)

	# Singles out the section of the output that lists the tracks, and
	# ignores everything else.
	in_tracks=0

	for line in "${raw_lines[@]}"; do
		if [[ $line =~ ${regex[start]} ]]; then
			in_tracks=1
			continue
		fi

		if [[ $in_tracks -eq 0 ]]; then
			continue
		fi

		if [[ $line =~ ${regex[stop]} ]]; then
			in_tracks=0
			break
		fi

		# Strips the leading '| + ' decoration.
		if [[ $line =~ ${regex[strip]} ]]; then
			line="${BASH_REMATCH[1]}"
		fi

		track_lines+=("$line")
	done

	unset -v raw_lines

	# Parses the properties of each track.
	for line in "${track_lines[@]}"; do
		if [[ $line =~ ${regex[track]} ]]; then
			(( tracks_n += 1 ))
			tracks["${tracks_n},sub"]=0
			tracks["${tracks_n},srt"]=0
		fi

		if [[ $line =~ ${regex[num]} ]]; then
			tracks["${tracks_n},num"]="${BASH_REMATCH[1]}"
		fi

		if [[ $line =~ ${regex[sub]} ]]; then
			tracks["${tracks_n},sub"]=1
		fi

		if [[ $line =~ ${regex[codec]} && ${BASH_REMATCH[1]} =~ ${regex[srt]} ]]; then
			tracks["${tracks_n},srt"]=1
		fi

		# For some tracks, the language can be listed twice. First with
		# a three-letter code, and then with a two-letter code. The
		# first code is preferred by this script.
		if [[ $line =~ ${regex[lang]} && -z ${tracks[${tracks_n},lang]} ]]; then
			tracks["${tracks_n},lang"]="${BASH_REMATCH[2],,}"
		fi

		if [[ $line =~ ${regex[name]} && -z ${tracks[${tracks_n},name]} ]]; then
			tracks["${tracks_n},name"]="${BASH_REMATCH[1]}"
		fi
	done

	(( tracks_n += 1 ))

	unset -v track_lines
}
221 | if [[ ${#args_srt[@]} -eq 0 ]]; then 222 | printf '\n%s\n\n' "There are no SRT subtitles in: ${if[bn]}" 223 | exit 224 | fi 225 | 226 | if [[ -d ${of[dn]} ]]; then 227 | return 228 | fi 229 | 230 | mkdir -p "${of[dn]}" 231 | 232 | full_args=(mkvextract \""${if[fn]}"\" tracks "${args_srt[@]}") 233 | 234 | # Runs mkvextract, and prints the command. 235 | eval "${full_args[@]}" 236 | 237 | if [[ $? -ne 0 ]]; then 238 | clean_up 239 | exit 240 | fi 241 | 242 | printf '\n' 243 | 244 | # Change line-endings to make the files compatible with DOS/Windows. 245 | unix2dos "${files_tmp[@]}" 246 | 247 | files_tmp+=("${of[mkv]}") 248 | 249 | string="${full_args[@]}" 250 | printf '\n%s\n\n' "$string" 251 | 252 | args_string=$(printf '%s,' "${args_not[@]}") 253 | args_string="${args_string%,}" 254 | 255 | full_args=(mkvmerge -o \""${of[mkv]}"\" '--subtitle-tracks' \""${args_string}"\" \""${if[fn]}"\") 256 | 257 | # Runs mkvmerge, and prints the command. 258 | if [[ $switch -eq 1 ]]; then 259 | eval "${full_args[@]}" 260 | 261 | if [[ $? -ne 0 ]]; then 262 | clean_up 263 | exit 264 | fi 265 | 266 | string="${full_args[@]}" 267 | printf '\n%s\n\n' "$string" 268 | fi 269 | 270 | # Removes original MKV file. 271 | rm "${if[fn]}" 272 | 273 | # Resets the tracks and files variables. 274 | tracks=() 275 | files_tmp=() 276 | tracks_n=0 277 | } 278 | 279 | # The loop below handles the arguments to the script. 280 | while [[ $# -gt 0 ]]; do 281 | if [[ -f $1 ]]; then 282 | set_names "$1" 283 | get_tracks 284 | extract_remux 285 | 286 | shift 287 | else 288 | usage 289 | fi 290 | done 291 | -------------------------------------------------------------------------------- /no_comments.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is just meant to read script files, but without the 4 | # comments. 5 | 6 | # Creates a function, called 'usage', which will print usage 7 | # instructions and then quit. 
# If no directories were given, print usage instructions and quit.
if [[ $# -eq 0 ]]; then
	usage
fi

# Resolves each argument to an absolute path. "$1" must be quoted
# inside the command substitution, or paths containing spaces (or glob
# characters) would be word-split and mangled.
while [[ $# -gt 0 ]]; do
	if [[ -d $1 ]]; then
		dirs+=("$(readlink -f "$1")")
	else
		usage
	fi

	shift
done
# The 'ignore' array contains the names of the special SHIELD
# directories.
ignore=('NVIDIA_SHIELD' 'LOST.DIR')

# Processes each directory given as argument to the script.
for (( i = 0; i < ${#dirs[@]}; i++ )); do
	if[dn]="${dirs[${i}]}"

	# A random session name for the destination sub-directory.
	session="${RANDOM}-${RANDOM}"
	of[dn]="${if[dn]}/${session}"

	cd "${if[dn]}"

	# Lists everything in the directory, hidden files included.
	mapfile -t files < <(ls -1A)

	# Creates the destination sub-directory.
	mkdir -p "${of[dn]}"

	# Moves each file / sub-directory, except the special SHIELD
	# directories, into the session sub-directory.
	for (( j = 0; j < ${#files[@]}; j++ )); do
		if[fn]="${files[${j}]}"
		switch=0

		for fn_ignore in "${ignore[@]}"; do
			if [[ ${if[fn]} != "$fn_ignore" ]]; then
				continue
			fi

			switch=1
			break
		done

		if [[ $switch -eq 1 ]]; then
			continue
		fi

		# Moves without overwriting.
		mv -n "${if[fn]}" "${of[dn]}"
	done
done
9 | 10 | declare owner drive_in drive_out 11 | declare -a drives_in drives_out 12 | 13 | owner="$USER" 14 | 15 | drives_in[0]='5c42d46c-30d6-4b43-a784-30a8328da5ae' 16 | drives_in[1]='f61840d6-9ba6-4cf8-ad6f-5c97c8c58b18' 17 | drives_in[2]='a73f90cd-c719-4093-92ac-f234920312f5' 18 | drives_in[3]='7b12e3a8-8802-4e3e-b782-fe94e5c57137' 19 | 20 | drives_out[0]="/home/${USER}/Downloads" 21 | drives_out[1]="/home/${USER}/Downloads" 22 | drives_out[2]="/run/media/${USER}/2c5518a5-5311-4a7d-8356-206fecd9f13f" 23 | drives_out[3]="/run/media/${USER}/2c5518a5-5311-4a7d-8356-206fecd9f13f" 24 | 25 | cd "/run/media/${USER}" 26 | 27 | for (( i = 0; i < ${#drives_in[@]}; i++ )); do 28 | drive_in="${drives_in[${i}]}" 29 | drive_out="${drives_out[${i}]}" 30 | 31 | if [[ ! -L $drive_in ]]; then 32 | sudo ln -s "$drive_out" "$drive_in" 33 | sudo chown "${owner}:${owner}" "$drive_in" 34 | fi 35 | done 36 | -------------------------------------------------------------------------------- /parse_srt.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # This script simply parses an SRT (SubRip) subtitle file, and prints 4 | # the content without the timestamps. 5 | 6 | # The charset of input files will be decoded and then encoded to UTF-8 7 | # in the output. 
# The 'usage' subroutine prints syntax, and then quits.
sub usage {
	print "\nUsage: " . basename($0) . " [srt]\n\n";
	exit;
}
# The 'time_convert' subroutine converts the 'time line' back and forth
# between the time (hh:mm:ss,mmm) format and milliseconds.
sub time_convert {
	my $time = shift;

	my($hours, $minutes, $seconds, $millis);

	if ($time =~ m/$format[1]/) {
# hh:mm:ss,mmm -> milliseconds.
		($hours, $minutes, $seconds, $millis) = ($1, $2, $3, $4);

# Strips leading zeroes from each component. The foreach aliases
# $part to the variables, so the substitution modifies them in place.
		foreach my $part ($hours, $minutes, $seconds, $millis) {
			$part =~ s/$regex{zero}/$1/;
		}

		$time = ($hours * 3600000) + ($minutes * 60000) + ($seconds * 1000) + $millis;
	} elsif ($time =~ m/$format[0]/) {
# milliseconds -> hh:mm:ss,mmm.
		$millis = $time;

		$seconds = floor($millis / 1000);
		$minutes = floor($seconds / 60);
		$hours = floor($minutes / 60);

		$millis = floor($millis % 1000);
		$seconds = floor($seconds % 60);
		$minutes = floor($minutes % 60);

		$time = sprintf('%02d:%02d:%02d,%03d', $hours, $minutes, $seconds, $millis);
	}

	return($time);
}
# The 'parse_srt_good' subroutine parses a subtitle in the correct SRT
# (SubRip) format. Cues are stored in %lines (start / stop / text), the
# cue counter in $n / $total_n. Returns 1 if at least one cue was
# parsed, 0 otherwise.
sub parse_srt_good {
	my($i, $this, $next);

	$i = 0;

# NOTE: the loop condition must be '<', not an equality test. When a
# counter + timestamp pair sits at the very end of the input, $i jumps
# by 2 past the last element, overshooting an '==' sentinel and looping
# forever on truncated input.
	while ($i < scalar(@lines_tmp)) {
		$this = $lines_tmp[$i];
		$next = $lines_tmp[$i + 1];

# A counter line followed by a timestamp line starts a new cue.
		if (length($this) and $this =~ m/$format[0]/) {
			if (length($next) and $next =~ m/$format[3]/) {
# Copy the captures before calling time_convert, whose own matches
# would otherwise be relied on not to clobber $1 / $2.
				my($start_tmp, $stop_tmp) = ($1, $2);

				$n += 1;

				$lines{$n}{start} = time_convert($start_tmp);
				$lines{$n}{stop} = time_convert($stop_tmp);

				$i += 2;

				$this = $lines_tmp[$i];
			}
		}

# Any remaining non-empty line is subtitle text for the current cue.
		if (length($this)) {
			push(@{$lines{$n}{text}}, $this);
		}

		$i += 1;
	}

	$total_n = $n;

	if ($n > 0) { return(1); }
	else { return(0); }
}
#!/bin/bash

# This script is meant to print a specific function in a Bash script
# given to it as argument.

# Creates a function, called 'usage', which will print usage
# instructions and then quit.
usage () {
	printf '\n%s\n\n' "Usage: $(basename "$0") [file] [function name]"
	exit
}

# The first argument must be a file, the second the function name.
if [[ ! -f $1 || -z $2 ]]; then
	usage
fi

declare if in_func line
declare -a lines
declare -A regex

if=$(readlink -f "$1")

# Matches the function declaration, capturing its indentation so the
# matching closing brace can be recognized later.
regex[start]="^([[:blank:]]*)${2}[[:blank:]]*\(\) \{"

in_func=0

mapfile -t lines < <(tr -d '\r' <"$if")

printf '\n'

for line in "${lines[@]}"; do
	if [[ $line =~ ${regex[start]} ]]; then
		in_func=1
		regex[stop]="^${BASH_REMATCH[1]}\}"
	fi

	if [[ $in_func -eq 0 ]]; then
		continue
	fi

	printf '%s\n' "$line"

	if [[ $line =~ ${regex[stop]} ]]; then
		in_func=0
		printf '\n'
	fi
done
The largest 6 | # English SRT file is automatically chosen. 7 | 8 | # The script only works with movies, as for right now, and TV series 9 | # are ignored. 10 | 11 | # Also: 12 | 13 | # magnet:?xt=urn:btih:ulfihylx35oldftn7qosmk6hkhsjq5af 14 | 15 | # https://sqlitebrowser.org/ 16 | 17 | set -eo pipefail 18 | 19 | declare -a dirs files vars1 20 | declare -A regex 21 | 22 | regex[srt]='\/([0-9]+_)*eng(lish)*.srt$' 23 | 24 | vars1=('size' 'dn' 'bn' 'if' 'of') 25 | dirs=("$HOME" "/run/media/${USER}") 26 | 27 | get_files () { 28 | declare srt_tmp size_tmp 29 | declare -a files_tmp movie_tmp sub_tmp 30 | 31 | for dn in "$@"; do 32 | mapfile -t files_tmp < <(find "$dn" -type d -name "*-RARBG" -o -name "*-VXT" 2>&-) 33 | 34 | if [[ ${#files_tmp[@]} -eq 0 ]]; then 35 | continue 36 | fi 37 | 38 | files+=("${files_tmp[@]}") 39 | done 40 | 41 | if [[ ${#files[@]} -eq 0 ]]; then 42 | return 43 | fi 44 | 45 | printf '\n' 46 | 47 | for (( i = 0; i < ${#files[@]}; i++ )); do 48 | declare "${vars1[@]}" 49 | 50 | dn="${files[${i}]}" 51 | 52 | mapfile -t movie_tmp < <(compgen -G "${dn}/*.mp4") 53 | 54 | if [[ ${#movie_tmp[@]} -ne 1 ]]; then 55 | continue 56 | fi 57 | 58 | mapfile -t sub_tmp < <(compgen -G "${dn}/Subs/*.srt") 59 | 60 | if [[ ${#sub_tmp[@]} -eq 0 ]]; then 61 | continue 62 | fi 63 | 64 | size=0 65 | 66 | for (( j = 0; j < ${#sub_tmp[@]}; j++ )); do 67 | srt_tmp="${sub_tmp[${j}]}" 68 | 69 | if [[ ! 
${srt_tmp,,} =~ ${regex[srt]} ]]; then 70 | continue 71 | fi 72 | 73 | size_tmp=$(stat -c '%s' "$srt_tmp") 74 | 75 | if [[ $size_tmp -gt $size ]]; then 76 | size="$size_tmp" 77 | 78 | bn=$(basename "$srt_tmp") 79 | if="Subs/${bn}" 80 | 81 | of="${movie_tmp[0]%.*}.srt" 82 | fi 83 | done 84 | 85 | if [[ -z $of ]]; then 86 | unset -v "${vars1[@]}" 87 | continue 88 | fi 89 | 90 | if [[ -e $of ]]; then 91 | unset -v "${vars1[@]}" 92 | continue 93 | fi 94 | 95 | printf '%s\n' "$if" 96 | printf '%s\n\n' "$of" 97 | 98 | ln -s "$if" "$of" 99 | 100 | unset -v "${vars1[@]}" 101 | done 102 | } 103 | 104 | get_files "${dirs[@]}" 105 | -------------------------------------------------------------------------------- /reformat_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This is just a simple script to reformat / clean up my old shell 4 | # scripts. My formatting style as well as choice of text editor have 5 | # changed over the years. I now use the Geany text editor, which has a 6 | # page width of 72 characters. 7 | 8 | # This script will: 9 | 10 | # * Replace individual spaces at the beginning of each line with tabs 11 | # (4 spaces / tab). 12 | # * Reduce the number of successive empty lines to a maximum of 1. 13 | # * Remove space at the end of lines. 14 | # * Remove space at the beginning of comments. 15 | # * Reduce multiple #s to just 1 # in comments. 16 | # * Replace multiple successive spaces in comments with just 1 space. 17 | # * Reduce the total length of comments to 72 characters. 18 | 19 | set -eo pipefail 20 | 21 | # Creates a function, called 'usage', which will print usage 22 | # instructions and then quit. 23 | usage () { 24 | printf '\n%s\n\n' "Usage: $(basename "$0") [file]" 25 | exit 26 | } 27 | 28 | # If the script isn't run with sudo / root privileges, quit. 29 | if [[ $EUID -ne 0 ]]; then 30 | printf '\n%s\n\n' 'You need to be root to run this script!' 
	exit
fi

# If argument is not a real file, print usage instructions and then
# quit.
if [[ ! -f $1 ]]; then
	usage
fi

declare if limit tab date line_this line_next
declare -a lines_in lines_out
declare -A regex

if=$(readlink -f "$1")

regex[comment]='^[[:blank:]]*#+[[:blank:]]*'
regex[blank1]='^[[:blank:]]+'
regex[blank2]='[[:blank:]]+$'
regex[blank3]='[[:blank:]]+'
regex[tab]='^ {4}'
regex[shebang]='^#!'

# Maximum line length for comments (the Geany page width).
limit=72
tab=$(printf '\t')

# Reads the input file.
mapfile -t lines_in < <(tr -d '\r' <"$if")

# Creates a function, called 'next_line', which will shift the line
# variables by 1 line.
# NOTE(review): past the end of the array both variables expand to the
# empty string, which the callers rely on to stop their loops.
next_line () {
	(( i += 1 ))
	(( j = (i + 1) ))

	line_this="${lines_in[${i}]}"
	line_next="${lines_in[${j}]}"
}

# Creates a function, called 'if_shebang', which will check if the
# current line is a shebang, and add an empty line after if needed.
if_shebang () {
	if [[ $line_this =~ ${regex[shebang]} ]]; then
		lines_out+=("$line_this")

		if [[ -n $line_next ]]; then
			lines_out+=('')
		fi

		next_line
	fi
}

# Creates a function, called 'reformat_comments', which will reformat
# comments if they're longer than the set limit.
reformat_comments () {
	declare start stop switch string chars word
	declare -a words buffer

# Remembers where this comment block started, so it can be copied
# through unchanged if no line is too long.
	start="$i"

	switch=0

	if [[ ! $line_this =~ ${regex[comment]} ]]; then
		lines_out+=("$line_this")

		return
	fi

# Collects the words of every consecutive comment line into 'buffer',
# and checks whether any of those lines exceeds the limit.
	while [[ $line_this =~ ${regex[comment]} ]]; do
		mapfile -t words < <(sed -E -e "s/${regex[comment]}//" -e "s/${regex[blank2]}//" -e "s/${regex[blank3]}/\n/g" <<<"$line_this")
		string="# ${words[@]}"
		chars="${#string}"

		if [[ $chars -gt $limit ]]; then
			switch=1
		fi

		buffer+=("${words[@]}")

		next_line
	done

# No line was too long: copy the original comment lines verbatim.
	if [[ $switch -eq 0 ]]; then
		(( stop = (i - start) ))

		lines_out+=("${lines_in[@]:${start}:${stop}}")
	fi

# At least 1 line was too long: re-wrap all the buffered words at the
# limit.
	if [[ $switch -eq 1 ]]; then
		string='#'
		chars=1

		for (( k = 0; k < ${#buffer[@]}; k++ )); do
			word="${buffer[${k}]}"

			(( chars += (${#word} + 1) ))

			if [[ $chars -le $limit ]]; then
				string+=" ${word}"
			else
				lines_out+=("$string")

				string="# ${word}"
				(( chars = (${#word} + 2) ))
			fi
		done

		lines_out+=("$string")
	fi

# Steps back 1 line, so the outer loop's increment lands on the first
# line after the comment block.
	(( i -= 1 ))
}

# Creates a function, called 'reformat_lines', which will fix
# indentation among other things.
reformat_lines () {
	declare indent

# Normalizes comments: exactly 1 '#', 1 space after it, and single
# spaces between words.
	if [[ $line_this =~ ${regex[comment]} ]]; then
		line_this=$(sed -E -e "s/${regex[comment]}/# /" -e "s/${regex[blank3]}/ /g" <<<"$line_this")
	fi

# Converts each leading run of 4 spaces into 1 tab.
	while [[ $line_this =~ ${regex[tab]} ]]; do
		line_this=$(sed -E "s/${regex[tab]}//" <<<"$line_this")
		indent+="$tab"
	done

	line_this="${indent}${line_this}"

# Removes trailing whitespace.
	if [[ $line_this =~ ${regex[blank2]} ]]; then
		line_this=$(sed -E "s/${regex[blank2]}//" <<<"$line_this")
	fi

	lines_out+=("$line_this")
}

# Creates a function, called 'reset_arrays', which will reset the line
# arrays in-between loops.
reset_arrays () {
	lines_in=("${lines_out[@]}")
	lines_out=()
}

# 1st pass: squeezes runs of empty lines and re-wraps over-long
# comments.
for (( i = 0; i < ${#lines_in[@]}; i++ )); do
	(( j = (i + 1) ))

	line_this="${lines_in[${i}]}"
	line_next="${lines_in[${j}]}"

	if_shebang

	if [[ -z $line_this && -z $line_next ]]; then
		continue
	fi

	reformat_comments
done

reset_arrays

# 2nd pass: fixes indentation, comment markers and trailing whitespace.
for (( i = 0; i < ${#lines_in[@]}; i++ )); do
	(( j = (i + 1) ))

	line_this="${lines_in[${i}]}"
	line_next="${lines_in[${j}]}"

	if_shebang

	reformat_lines
done

# If the last line is not empty, add an empty line.
#if [[ -n ${lines_out[-1]} ]]; then
#	lines_out+=('')
#fi

# Gets the modification time of the input file.
date=$(date -R -r "$if")

# Truncates the input file.
truncate -s 0 "$if"

# Prints the altered lines to the input file.
printf '%s\n' "${lines_out[@]}" > "$if"

# Copies the original modification time to the changed file.
touch -d "$date" "$if"
-------------------------------------------------------------------------------- /return_of_the_triad.sh: --------------------------------------------------------------------------------
#!/bin/bash

# This script is a launcher for Return of the Triad (DOOM mod), fan-made
# sequel to Rise of the Triad: Dark War, for GZDoom.
5 | 6 | # https://www.moddb.com/mods/return-of-the-triad 7 | 8 | declare gzdoom_cfg rott_dn 9 | declare doom_wad doom2_wad iwad wad 10 | 11 | # Change the path variables below, to point to the correct directories 12 | # for: 13 | # * GZDoom configuration directory 14 | # * Return of the Triad PK3 file directory 15 | gzdoom_cfg="${HOME}/.var/app/org.zdoom.GZDoom/.config/gzdoom" 16 | rott_dn="${gzdoom_cfg}/rott" 17 | 18 | # * The Ultimate DOOM WAD 19 | # * DOOM 2 WAD 20 | # * Fake IWAD 21 | # * Return of the Triad WAD 22 | doom_wad="${gzdoom_cfg}/doom.wad" 23 | doom2_wad="${gzdoom_cfg}/doom2.wad" 24 | iwad="${rott_dn}/fakeiwad.wad" 25 | wad="${rott_dn}/rott_tc_full.pk3" 26 | 27 | # Creates a function, called 'gzdoom', which will run the GZDoom 28 | # Flatpak (with arguments). 29 | gzdoom () { 30 | flatpak run org.zdoom.GZDoom "$@" 31 | } 32 | 33 | gzdoom -iwad "$iwad" -file "$wad" -noautoload 34 | -------------------------------------------------------------------------------- /rm_dup_dirs.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # This script recursively removes duplicate sub-directories in the 4 | # directories given as arguments. It starts at maximum depth and works 5 | # its way backwards to the root of the directories. 6 | 7 | # This can be useful if there are multiple different versions of the 8 | # same directory tree, but with slight modifications. The script will 9 | # remove all the sub-directories that are identical, making it easier to 10 | # merge the directory trees if needed. 

use 5.34.0;
use strict;
use warnings;
use diagnostics;
use Cwd qw(abs_path);
use Digest::MD5 qw(md5_hex);
use File::Basename qw(basename);
use File::Find qw(find);

my(@dirs, %dirs, @depths, %files, @files_in, @files_out);

while (my $arg = shift(@ARGV)) {
	if (-d $arg) {
		push(@dirs, abs_path($arg));
	} else { usage(); }
}

if (scalar(@dirs) == 0) { usage(); }

# The 'usage' subroutine prints syntax, and then quits.
sub usage {
	say "\n" . 'Usage: ' . basename($0) . ' [dirs...]' . "\n";
	exit;
}

# The 'md5sum' subroutine gets the MD5 hash of files.
sub md5sum {
	my $fn = shift;

	my($hash);

	open(my $read_fn, '< :raw', $fn) or die "Can't open '$fn': $!";
	$hash = Digest::MD5->new->addfile($read_fn)->hexdigest;
	close($read_fn) or die "Can't close '$fn': $!";

	return($hash);
}

# The 'get_dirs' subroutine gets all directories in the directory
# passed to it as argument.
sub get_dirs {
	my $dn = shift;

	find({ wanted => \&action, no_chdir => 1 }, $dn);

	sub action {
		if (! -d) { return; }

		my $fn = $File::Find::name;
		my $bn = basename($fn);

# The depth is the number of path components, so directories can be
# processed bottom-up later.
		my @path_parts = split('/', $fn);
		my $depth = scalar(@path_parts);

		$dirs{$depth}{$fn} = $bn;
	}
}

# The 'get_files' subroutine gets all files in the directory passed to
# it as argument.
sub get_files {
	my $dn = shift;

	my(@files, $fn, $hash);

	opendir(my $dh, $dn) or die "Can't open '$dn': $!";
	@files = readdir($dh);
	closedir($dh) or die "Can't close '$dn': $!";

	foreach my $bn (@files) {
		$fn = "$dn/$bn";

		if (! -f $fn) { next; }

		$hash = md5sum($fn);

		$files{$dn}{$fn}{$bn} = $hash;
	}
}

# Gets the depths and directories.
foreach my $dn (@dirs) {
	get_dirs($dn);
}

# Sorts the depths in descending order.
@depths = sort { $b <=> $a } keys(%dirs);

# Gets the list of files contained in each directory.
foreach my $depth (@depths) {
	foreach my $dn (keys(%{$dirs{$depth}})) {
		get_files($dn);
	}
}

# This loop goes through all the directories, and compares the number of
# files, as well as MD5 hashes. If multiple different directories have
# the same number of files (with the same names), as well as identical
# MD5 hashes, then remove the duplicate files. This is not recursive, so
# there's no risk of accidentally removing non-empty sub-directories.
foreach my $depth_in (@depths) {
	foreach my $dn_in (sort(keys(%{$dirs{$depth_in}}))) {
		my $dn_bn_in = $dirs{$depth_in}{$dn_in};

		@files_in = sort(keys(%{$files{$dn_in}}));
		if (scalar(@files_in) == 0) { next; }

		foreach my $depth_out (@depths) {
			foreach my $dn_out (sort(keys(%{$dirs{$depth_out}}))) {
				if ($dn_in eq $dn_out) { next; }

				my $dn_bn_out = $dirs{$depth_out}{$dn_out};

				if ($dn_bn_in ne $dn_bn_out) { next; }

				@files_out = sort(keys(%{$files{$dn_out}}));
				if (scalar(@files_out) == 0) { next; }

				if (scalar(@files_in) != scalar(@files_out)) { next; }

				my $switch = 0;

# Compares basenames and hashes pairwise; any mismatch means the
# directories are not duplicates.
				for (my $i = 0; $i < scalar(@files_in); $i++) {
					my $fn_in = $files_in[$i];
					my $fn_out = $files_out[$i];

					my @fn_bn_in = keys(%{$files{$dn_in}{$fn_in}});
					my @fn_bn_out = keys(%{$files{$dn_out}{$fn_out}});

					if ($fn_bn_in[0] ne $fn_bn_out[0]) {
						$switch = 1;

						last;
					}

					my $hash_in = $files{$dn_in}{$fn_in}{$fn_bn_in[0]};
					my $hash_out = $files{$dn_out}{$fn_out}{$fn_bn_out[0]};

					if ($hash_in ne $hash_out) {
						$switch = 1;

						last;
					}
				}

				if ($switch == 1) { next; }

# BUGFIX: the loop variable used to be named '$fn_in', even though it
# iterates over @files_out (the duplicates being removed). Renamed for
# clarity; behavior is unchanged.
				foreach my $fn_out (@files_out) {
					say 'Removing: ' . $fn_out;

					unlink($fn_out) or die "Can't remove '$fn_out': $!";
				}

# BUGFIX: delete() instead of assigning an empty list, which merely set
# the value to undef and left a stale key behind.
				delete($files{$dn_out});
			}
		}
	}
}

# Removes empty directories. Try to remove directories that are probably
# empty. If they're not empty, no harm done, as the 'rmdir' command can
# only remove directories that are actually empty.
foreach my $depth_in (@depths) {
	foreach my $dn_in (sort(keys(%{$dirs{$depth_in}}))) {
		@files_in = sort(keys(%{$files{$dn_in}}));

		if (scalar(@files_in) == 0) {
			say 'Removing: ' . $dn_in;

			rmdir($dn_in);
		}
	}
}
-------------------------------------------------------------------------------- /rm_dup_lines.sh: --------------------------------------------------------------------------------
#!/bin/bash

# This script removes duplicate lines from IRC logs in the current
# directory.

set -eo pipefail

declare session line line_tmp
declare -a clients files lines
declare -A if of regex

clients=('hexchat' 'irccloud' 'irssi' 'konversation')

regex[hexchat]='^[[:alpha:]]+ [0-9]+ [0-9]+:[0-9]+:[0-9]+(.*)$'
regex[irccloud]='^\[[0-9]+-[0-9]+-[0-9]+ [0-9]+:[0-9]+:[0-9]+\](.*)$'
regex[irssi]='^[0-9]+:[0-9]+(.*)$'
regex[konversation]='^\[[[:alpha:]]+, [[:alpha:]]+ [0-9]+, [0-9]+\] \[[0-9]+:[0-9]+:[0-9]+ [[:alpha:]]+ [[:alpha:]]+\](.*)$'

session="${RANDOM}-${RANDOM}"
of[dn]="/dev/shm/rm_dup_lines-${session}"

mkdir "${of[dn]}"

# Creates a function, called 'get_client', which will figure out which
# client was used to generate the IRC log in question, to be able to
# parse it correctly.
27 | get_client () { 28 | declare switch client 29 | 30 | switch=0 31 | 32 | for (( z = 0; z < ${#lines[@]}; z++ )); do 33 | line="${lines[${z}]}" 34 | 35 | for client in "${clients[@]}"; do 36 | if [[ ! $line =~ ${regex[${client}]} ]]; then 37 | continue 38 | fi 39 | 40 | regex[client]="${regex[${client}]}" 41 | switch=1 42 | 43 | break 44 | done 45 | 46 | if [[ $switch -eq 1 ]]; then 47 | break 48 | fi 49 | done 50 | } 51 | 52 | mapfile -t files < <(find "$PWD" -type f -iname "*.log" -o -iname "*.txt" 2>&-) 53 | 54 | for (( i = 0; i < ${#files[@]}; i++ )); do 55 | if[fn]="${files[${i}]}" 56 | if[bn]=$(basename "${if[fn]}") 57 | of[fn]="${of[dn]}/${if[bn]}" 58 | 59 | declare previous 60 | 61 | touch "${of[fn]}" 62 | 63 | mapfile -t lines < <(tr -d '\r' <"${if[fn]}") 64 | 65 | get_client 66 | 67 | for (( j = 0; j < ${#lines[@]}; j++ )); do 68 | line="${lines[${j}]}" 69 | line_tmp="$line" 70 | 71 | if [[ -n ${regex[client]} ]]; then 72 | if [[ $line =~ ${regex[client]} ]]; then 73 | line_tmp="${BASH_REMATCH[1]}" 74 | fi 75 | fi 76 | 77 | if [[ $j -ge 1 ]]; then 78 | if [[ $line_tmp == "$previous" ]]; then 79 | continue 80 | fi 81 | fi 82 | 83 | previous="$line_tmp" 84 | 85 | printf '%s\n' "$line" >> "${of[fn]}" 86 | done 87 | 88 | unset -v previous regex[client] 89 | 90 | touch -r "${if[fn]}" "${of[fn]}" 91 | mv "${of[fn]}" "${if[fn]}" 92 | done 93 | 94 | rm -rf "${of[dn]}" 95 | -------------------------------------------------------------------------------- /rm_lines.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script reads 2 text files. The first file is the input file, and 4 | # the second is the output. Every line present in both files is removed 5 | # from the output file. 6 | 7 | set -eo pipefail 8 | 9 | # Creates a function, called 'usage', which will print usage 10 | # instructions and then quit. 
11 | usage () { 12 | printf '\n%s\n\n' "Usage: $(basename "$0") [file 1] [file 2]" 13 | exit 14 | } 15 | 16 | if [[ ! -f $1 || ! -f $2 ]]; then 17 | usage 18 | fi 19 | 20 | declare session line_in line_out 21 | declare -a lines_in lines_out 22 | declare -A if of 23 | 24 | session="${RANDOM}-${RANDOM}" 25 | 26 | if[fn]=$(readlink -f "$1") 27 | of[fn]=$(readlink -f "$2") 28 | of[fn_tmp]="${of[fn]%.*}-${session}.txt" 29 | 30 | mapfile -t lines_in < <(tr -d '\r' <"${if[fn]}") 31 | mapfile -t lines_out < <(tr -d '\r' <"${of[fn]}") 32 | 33 | declare switch 34 | 35 | for (( i = 0; i < ${#lines_out[@]}; i++ )); do 36 | line_out="${lines_out[${i}]}" 37 | 38 | switch=0 39 | 40 | for (( j = 0; j < ${#lines_in[@]}; j++ )); do 41 | line_in="${lines_in[${j}]}" 42 | 43 | if [[ $line_out == "$line_in" ]]; then 44 | switch=1 45 | 46 | break 47 | fi 48 | done 49 | 50 | if [[ $switch -eq 0 ]]; then 51 | printf '%s\n' "$line_out" | tee -a "${of[fn_tmp]}" 52 | fi 53 | done 54 | 55 | touch -r "${of[fn]}" "${of[fn_tmp]}" 56 | mv "${of[fn_tmp]}" "${of[fn]}" 57 | -------------------------------------------------------------------------------- /rm_md5sum.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # This script removes duplicate files in the directories given as 4 | # arguments. The behavior differs depending on whether the script was 5 | # run with 1 directory or multiple directories as arguments. 6 | 7 | # If the script is run with only 1 directory as argument, the files with 8 | # the oldest modification date will be considered to be the originals, 9 | # when other files with the same MD5 hash are found. In this case, the 10 | # basename can be the same or different. It doesn't matter. 

# If the script is run with multiple directories as arguments, it will
# consider the 1st directory as the source, and delete files from the
# other directories that have both the same MD5 hash and the same
# basename.

use 5.34.0;
use strict;
use warnings;
use diagnostics;
use Cwd qw(abs_path);
use Digest::MD5 qw(md5_hex);
use File::Basename qw(basename);
use File::Find qw(find);

my(@dirs, %files, @files_in, @files_out);

while (my $arg = shift(@ARGV)) {
	if (-d $arg) {
		push(@dirs, abs_path($arg));
	} else { usage(); }
}

if (scalar(@dirs) == 0) { usage(); }

# The 'usage' subroutine prints syntax, and then quits.
sub usage {
	say "\n" . 'Usage: ' . basename($0) . ' [dirs...]' . "\n";
	exit;
}

# The 'get_files' subroutine gets all files in the directory passed to
# it as argument.
sub get_files {
	my $dn = shift;

	find({ wanted => \&action, no_chdir => 1 }, $dn);

	sub action {
		if (! -f) { return; }

		my $fn = $File::Find::name;
		my $bn = basename($fn);

# Groups full file names by basename.
		push(@{$files{$bn}}, $fn);
	}
}

# The 'md5sum' subroutine gets the MD5 hash of files.
sub md5sum {
	my $fn = shift;

	my($hash);

	open(my $read_fn, '< :raw', $fn) or die "Can't open '$fn': $!";
	$hash = Digest::MD5->new->addfile($read_fn)->hexdigest;
	close($read_fn) or die "Can't close '$fn': $!";

	return($hash);
}

# This loop gets the file names.
for (my $i = 0; $i < scalar(@dirs); $i++) {
	my $dn = $dirs[$i];

	get_files($dn);

# Snapshots %files per directory, then clears it for the next one.
	$files_in[$i] = {%files};
	%files = ();
}

# This loop gets the MD5 hash and modification date of the files.
for (my $i = 0; $i < scalar(@dirs); $i++) {
	foreach my $bn (keys(%{$files_in[$i]})) {
# Skips basenames that don't exist in the 1st (source) directory. For
# $i == 0 this tests the element against itself, so nothing is skipped.
# NOTE(review): length() is called on an array reference here — it
# works because a ref stringifies to a non-empty string, but exists()
# would express the intent more clearly.
		if (! length($files_in[0]{$bn})) { next; }

		foreach my $fn (@{$files_in[$i]{$bn}}) {
			my $hash = md5sum($fn);
			my $date = (stat($fn))[9];

			$files_out[$i]{$hash}{$fn} = $date;
		}
	}
}

@files_in = (@files_out);
@files_out = ();

# This loop is only run if the script was run with more than 1 directory
# as arguments (since it starts @ element 1 of the array).
for (my $i = 1; $i < scalar(@dirs); $i++) {
	foreach my $hash (keys(%{$files_in[$i]})) {
# Only deletes files whose hash also exists in the source directory.
		if (! length($files_in[0]{$hash})) { next; }

		foreach my $fn (sort(keys(%{$files_in[$i]{$hash}}))) {
			say $fn;

			unlink($fn) or die "Can't remove '$fn': $!";
		}
	}
}

# This loop is only run if the script was run with 1 directory as
# argument.
if (scalar(@dirs) == 1) {
	foreach my $hash (keys(%{$files_in[0]})) {
		if (keys(%{$files_in[0]{$hash}}) == 1) { next; }

		my($date_tmp, $fn_tmp);

# Finds the duplicate with the oldest modification date; that one is
# considered the original and is kept.
		foreach my $fn (sort(keys(%{$files_in[0]{$hash}}))) {
			my $date = $files_in[0]{$hash}{$fn};

			if (! length($date_tmp)) {
				$date_tmp = $date;
				$fn_tmp = $fn;
			}

			if ($date < $date_tmp) {
				$date_tmp = $date;
				$fn_tmp = $fn;
			}
		}

		foreach my $fn (sort(keys(%{$files_in[0]{$hash}}))) {
			if ($fn eq $fn_tmp) { next; }

			say $fn;

			unlink($fn) or die "Can't remove '$fn': $!";
		}
	}
}
-------------------------------------------------------------------------------- /rm_newlines.sh: --------------------------------------------------------------------------------
#!/bin/bash

# This script will recursively change the file / directory names under
# the directory specified, to remove newlines from file / directory
# names.
6 | 7 | set -eo pipefail 8 | 9 | # Creates a function, called 'usage', which will print usage 10 | # instructions and then quit. 11 | usage () { 12 | printf '\n%s\n\n' "Usage: $(basename "$0") [dir]" 13 | exit 14 | } 15 | 16 | if [[ ! -d $1 ]]; then 17 | usage 18 | fi 19 | 20 | declare -a vars files path_parts 21 | declare -A if of depth 22 | 23 | vars=('files' 'path_parts') 24 | 25 | if[dn]=$(readlink -f "$1") 26 | 27 | depth[max]=0 28 | 29 | mapfile -d'/' -t path_parts <<<"${if[dn]}" 30 | depth[min]=$(( ${#path_parts[@]} - 1 )) 31 | 32 | mapfile -t files < <(find "${if[dn]}" -exec printf '%q\n' {} + 2>&-) 33 | 34 | for (( i = 0; i < ${#files[@]}; i++ )); do 35 | eval if[fn]="${files[${i}]}" 36 | 37 | mapfile -d'/' -t path_parts <<<"${if[fn]}" 38 | depth[tmp]=$(( ${#path_parts[@]} - 1 )) 39 | depth[diff]=$(( depth[tmp] - depth[min] )) 40 | 41 | if [[ ${depth[diff]} -gt ${depth[max]} ]]; then 42 | depth[max]="${depth[diff]}" 43 | fi 44 | done 45 | 46 | unset -v "${vars[@]}" 47 | 48 | for (( i = depth[max]; i > 0; i-- )); do 49 | mapfile -t files < <(find "${if[dn]}" -mindepth "$i" -maxdepth "$i" -exec printf '%q\n' {} + 2>&-) 50 | 51 | for (( j = 0; j < ${#files[@]}; j++ )); do 52 | eval if[fn]="${files[${j}]}" 53 | of[dn]=$(dirname "${if[fn]}") 54 | if[bn]=$(basename "${if[fn]}") 55 | 56 | of[bn]=$(tr -d "\r\n" <<<"${if[bn]}") 57 | of[fn]="${of[dn]}/${of[bn]}" 58 | 59 | if [[ ${of[bn]} != "${if[bn]}" ]]; then 60 | printf '%s\n' "${of[fn]}" 61 | mv -n "${if[fn]}" "${of[fn]}" 62 | fi 63 | done 64 | done 65 | -------------------------------------------------------------------------------- /rm_old_kernels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to remove old kernel versions from Fedora. It 4 | # automatically figures out what the latest installed version is for 5 | # each kernel package, and removes all the older versions. 

# If the script isn't run with sudo / root privileges, quit.
if [[ $EUID -ne 0 ]]; then
	printf '\n%s\n\n' 'You need to be root to run this script!'
	exit
fi

declare dnf_pkgs_n dnf_pkg arch pause_msg line current latest_tmp type
declare -a match types lines versions_in versions_out keep remove
declare -A dnf_pkgs latest regex

dnf_pkgs_n=0

arch='x86_64'
pause_msg='Does this look OK? [y/n]: '

types=('core' 'devel' 'devel_matched' 'headers' 'kernel' 'modules' 'modules_extra')

# Parses 'dnf list' output columns: package name (with arch), version,
# repository.
regex[column]="^([^ ]+${arch}) ([^ ]+) ([^ ]+)"
regex[version]='^([0-9]+)\.([0-9]+)\.([0-9]+)-([0-9]+)\.fc[0-9]+'

regex[core]="^kernel-core\.${arch}$"
regex[devel]="^kernel-devel\.${arch}$"
regex[devel_matched]="^kernel-devel-matched\.${arch}$"
regex[headers]="^kernel-headers\.${arch}$"
regex[kernel]="^kernel\.${arch}$"
regex[modules]="^kernel-modules\.${arch}$"
regex[modules_extra]="^kernel-modules-extra\.${arch}$"

# Creates a function, called 'parse_version', which will parse a version
# number and print the result.
# Prints the 4 numeric components (major, minor, patch, release), 1 per
# line. Quits the whole script if the string doesn't match.
parse_version () {
	if [[ ! $1 =~ ${regex[version]} ]]; then
		exit
	fi

	printf '%s\n' "${BASH_REMATCH[@]:1}"
}

# Creates a function, called 'sort_versions', which will sort a list of
# version numbers from latest to oldest.
# This is a selection sort: each pass finds the highest remaining
# version in 'versions_in' and appends it to 'versions_out'.
sort_versions () {
	declare in out
	declare -a num_in num_out

	while [[ ${#versions_in[@]} -gt 0 ]]; do
		out=0

		mapfile -t num_out < <(parse_version "${versions_in[0]}")

		for (( y = 1; y < ${#versions_in[@]}; y++ )); do
			in="${versions_in[${y}]}"

			mapfile -t num_in < <(parse_version "$in")

# This loop goes through each number, and checks if the number is lower
# or higher than the previous version that was checked.
			for (( z = 0; z < ${#num_in[@]}; z++ )); do
				if [[ ${num_in[${z}]} -lt ${num_out[${z}]} ]]; then
					break
				fi

				if [[ ${num_in[${z}]} -gt ${num_out[${z}]} ]]; then
					out="$y"
					num_out=("${num_in[@]}")

					break
				fi
			done
		done

		versions_out+=("${versions_in[${out}]}")

# Removes the chosen element and re-indexes the array.
		unset -v versions_in["${out}"]
		versions_in=("${versions_in[@]}")
	done
}

mapfile -t lines < <(dnf list --installed | grep -E '^kernel' | sed -E 's/[[:blank:]]+/ /g')

# This loop gets the package name and version from each line, and saves
# that in the 'dnf_pkgs' hash.
for (( i = 0; i < ${#lines[@]}; i++ )); do
	line="${lines[${i}]}"

	if [[ ! $line =~ ${regex[column]} ]]; then
		continue
	fi

	match=("${BASH_REMATCH[@]:1}")

	dnf_pkgs["${dnf_pkgs_n},pkg"]="${match[0]}"
	dnf_pkgs["${dnf_pkgs_n},ver"]="${match[1]}"

	(( dnf_pkgs_n += 1 ))
done

unset -v lines

# This loop finds out what the latest version is for each kernel
# package.
for type in "${types[@]}"; do
	versions_in=()
	versions_out=()

	for (( i = 0; i < dnf_pkgs_n; i++ )); do
		match=("${dnf_pkgs[${i},pkg]}" "${dnf_pkgs[${i},ver]}")

		if [[ ! ${match[0]} =~ ${regex[${type}]} ]]; then
			continue
		fi

		versions_in+=("${match[1]}")
	done

	sort_versions

	latest["${type}"]="${versions_out[0]}"
done

# This loop decides which kernel packages will be kept, and which will
# be removed.
for type in "${types[@]}"; do
	for (( i = 0; i < dnf_pkgs_n; i++ )); do
		match=("${dnf_pkgs[${i},pkg]}" "${dnf_pkgs[${i},ver]}")

		dnf_pkg="${match[0]%.${arch}}-${match[1]}.${arch}"

		if [[ !
${match[0]} =~ ${regex[${type}]} ]]; then 135 | continue 136 | fi 137 | 138 | if [[ ${match[1]} == "${latest[${type}]}" ]]; then 139 | keep+=("$dnf_pkg") 140 | else 141 | remove+=("$dnf_pkg") 142 | fi 143 | done 144 | done 145 | 146 | # If there's no kernel packages older than the currently running 147 | # version, quit. 148 | if [[ ${#remove[@]} -eq 0 ]]; then 149 | printf '\n%s\n\n' 'Nothing to do!' 150 | exit 151 | fi 152 | 153 | current=$(uname -r) 154 | latest_tmp="${latest[kernel]}.${arch}" 155 | 156 | # If the user does not have the latest installed kernel loaded, ask them 157 | # to reboot before running the script. 158 | if [[ $current != "$latest_tmp" ]]; then 159 | cat <&-) 33 | 34 | for (( i = 0; i < ${#files[@]}; i++ )); do 35 | eval if[fn]="${files[${i}]}" 36 | 37 | mapfile -d'/' -t path_parts <<<"${if[fn]}" 38 | depth[tmp]=$(( ${#path_parts[@]} - 1 )) 39 | depth[diff]=$(( depth[tmp] - depth[min] )) 40 | 41 | if [[ ${depth[diff]} -gt ${depth[max]} ]]; then 42 | depth[max]="${depth[diff]}" 43 | fi 44 | done 45 | 46 | unset -v "${vars[@]}" 47 | 48 | for (( i = depth[max]; i > 0; i-- )); do 49 | mapfile -t files < <(find "${if[dn]}" -mindepth "$i" -maxdepth "$i" -exec printf '%q\n' {} + 2>&-) 50 | 51 | for (( j = 0; j < ${#files[@]}; j++ )); do 52 | eval if[fn]="${files[${j}]}" 53 | of[dn]=$(dirname "${if[fn]}") 54 | if[bn]=$(basename "${if[fn]}") 55 | 56 | of[bn]=$(printf '%s' "$bn" | tr -d "'" | tr -d '"') 57 | of[fn]="${of[dn]}/${of[bn]}" 58 | 59 | if [[ ${of[bn]} != "${if[bn]}" ]]; then 60 | printf '%s\n' "${of[fn]}" 61 | mv -n "${if[fn]}" "${of[fn]}" 62 | fi 63 | done 64 | done 65 | -------------------------------------------------------------------------------- /show_undeclared.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to read a Bash script, and show variables that 4 | # are used within functions, but haven't been formally declared within 5 | # that same 
function. However, the script only goes 1 level deep, hence 6 | # functions within functions will all be considered as 1 entity. 7 | 8 | # Creates a function, called 'usage', which will print usage 9 | # instructions and then quit. 10 | usage () { 11 | printf '\n%s\n\n' "Usage: $(basename "$0") [file]" 12 | exit 13 | } 14 | 15 | if [[ ! -f $1 ]]; then 16 | usage 17 | fi 18 | 19 | declare if switch_func switch_var func_name var var_tmp line line_tmp 20 | declare -a lines declared_tmp 21 | declare -A regex declared_global undeclared_global 22 | 23 | if=$(readlink -f "$1") 24 | 25 | regex[start]='^([[:blank:]]*)([^ ]+)[[:blank:]]*\(\) \{' 26 | regex[blank]='^[[:blank:]]*(.*)[[:blank:]]*$' 27 | regex[declare]='^(declare|local)( -[[:alpha:]]+){0,1} (.*)$' 28 | regex[var]='[a-zA-Z0-9_]+' 29 | regex[var_set]="^(${regex[var]})(\[.*\]){0,1}=.*$" 30 | regex[var_for]="^for (${regex[var]}) in .*; do$" 31 | regex[mapfile]="^mapfile( -d.{3}){0,1}( -t){0,1} (${regex[var]}).*$" 32 | 33 | switch_func=0 34 | 35 | func_name='main' 36 | 37 | mapfile -t lines < <(tr -d '\r' <"$if") 38 | 39 | printf '\n%s\n\n' "$if" 40 | 41 | # Handling global variables here. 
42 | for (( i = 0; i < ${#lines[@]}; i++ )); do 43 | line="${lines[${i}]}" 44 | line_tmp="$line" 45 | 46 | if [[ $line =~ ${regex[start]} && $switch_func -eq 0 ]]; then 47 | switch_func=1 48 | 49 | regex[stop]="^${BASH_REMATCH[1]}\}" 50 | fi 51 | 52 | if [[ $line =~ ${regex[stop]} && $switch_func -eq 1 ]]; then 53 | switch_func=0 54 | fi 55 | 56 | if [[ $switch_func -eq 1 ]]; then 57 | continue 58 | fi 59 | 60 | if [[ $line_tmp =~ ${regex[blank]} ]]; then 61 | line_tmp="${BASH_REMATCH[1]}" 62 | fi 63 | 64 | if [[ $line_tmp =~ ${regex[declare]} ]]; then 65 | mapfile -d' ' -t declared_tmp <<<"${BASH_REMATCH[3]}" 66 | declared_tmp[-1]="${declared_tmp[-1]%$'\n'}" 67 | 68 | for (( j = 0; j < ${#declared_tmp[@]}; j++ )); do 69 | var_tmp="${declared_tmp[${j}]}" 70 | declared_global["${var_tmp}"]=1 71 | done 72 | fi 73 | 74 | if [[ $line_tmp =~ ${regex[var_set]} || $line_tmp =~ ${regex[var_for]} ]]; then 75 | var="${BASH_REMATCH[1]}" 76 | 77 | switch_var=0 78 | 79 | for var_tmp in "${!declared_global[@]}"; do 80 | if [[ $var_tmp == "$var" ]]; then 81 | switch_var=1 82 | break 83 | fi 84 | done 85 | 86 | if [[ $switch_var -eq 0 ]]; then 87 | for var_tmp in "${!undeclared_global[@]}"; do 88 | if [[ $var_tmp == "$var" ]]; then 89 | switch_var=1 90 | break 91 | fi 92 | done 93 | 94 | if [[ $switch_var -eq 0 ]]; then 95 | undeclared_global["${var}"]=1 96 | fi 97 | fi 98 | fi 99 | 100 | if [[ $line_tmp =~ ${regex[mapfile]} ]]; then 101 | var="${BASH_REMATCH[3]}" 102 | 103 | switch_var=0 104 | 105 | for var_tmp in "${!declared_global[@]}"; do 106 | if [[ $var_tmp == "$var" ]]; then 107 | switch_var=1 108 | break 109 | fi 110 | done 111 | 112 | if [[ $switch_var -eq 0 ]]; then 113 | for var_tmp in "${!undeclared_global[@]}"; do 114 | if [[ $var_tmp == "$var" ]]; then 115 | switch_var=1 116 | break 117 | fi 118 | done 119 | 120 | if [[ $switch_var -eq 0 ]]; then 121 | undeclared_global["${var}"]=1 122 | fi 123 | fi 124 | fi 125 | done 126 | 127 | if [[ ${#undeclared_global[@]} -gt 
0 ]]; then 128 | printf '*** %s ***\n' "$func_name" 129 | printf '%s\n' "${!undeclared_global[@]}" | sort 130 | printf '\n' 131 | fi 132 | 133 | unset -v func_name declared_global undeclared_global 134 | 135 | # Handling local variables here. 136 | for (( i = 0; i < ${#lines[@]}; i++ )); do 137 | line="${lines[${i}]}" 138 | line_tmp="$line" 139 | 140 | if [[ $line =~ ${regex[start]} && $switch_func -eq 0 ]]; then 141 | switch_func=1 142 | 143 | declare func_name 144 | declare -A declared_local undeclared_local 145 | 146 | func_name="${BASH_REMATCH[2]}" 147 | regex[stop]="^${BASH_REMATCH[1]}\}" 148 | fi 149 | 150 | if [[ $switch_func -eq 0 ]]; then 151 | continue 152 | fi 153 | 154 | if [[ $line_tmp =~ ${regex[blank]} ]]; then 155 | line_tmp="${BASH_REMATCH[1]}" 156 | fi 157 | 158 | if [[ $line_tmp =~ ${regex[declare]} ]]; then 159 | mapfile -d' ' -t declared_tmp <<<"${BASH_REMATCH[3]}" 160 | declared_tmp[-1]="${declared_tmp[-1]%$'\n'}" 161 | 162 | for (( j = 0; j < ${#declared_tmp[@]}; j++ )); do 163 | var_tmp="${declared_tmp[${j}]}" 164 | declared_local["${var_tmp}"]=1 165 | done 166 | fi 167 | 168 | if [[ $line_tmp =~ ${regex[var_set]} || $line_tmp =~ ${regex[var_for]} ]]; then 169 | var="${BASH_REMATCH[1]}" 170 | 171 | switch_var=0 172 | 173 | for var_tmp in "${!declared_local[@]}"; do 174 | if [[ $var_tmp == "$var" ]]; then 175 | switch_var=1 176 | break 177 | fi 178 | done 179 | 180 | if [[ $switch_var -eq 0 ]]; then 181 | for var_tmp in "${!undeclared_local[@]}"; do 182 | if [[ $var_tmp == "$var" ]]; then 183 | switch_var=1 184 | break 185 | fi 186 | done 187 | 188 | if [[ $switch_var -eq 0 ]]; then 189 | undeclared_local["${var}"]=1 190 | fi 191 | fi 192 | fi 193 | 194 | if [[ $line_tmp =~ ${regex[mapfile]} ]]; then 195 | var="${BASH_REMATCH[3]}" 196 | 197 | switch_var=0 198 | 199 | for var_tmp in "${!declared_local[@]}"; do 200 | if [[ $var_tmp == "$var" ]]; then 201 | switch_var=1 202 | break 203 | fi 204 | done 205 | 206 | if [[ $switch_var -eq 0 ]]; then 
207 | for var_tmp in "${!undeclared_local[@]}"; do 208 | if [[ $var_tmp == "$var" ]]; then 209 | switch_var=1 210 | break 211 | fi 212 | done 213 | 214 | if [[ $switch_var -eq 0 ]]; then 215 | undeclared_local["${var}"]=1 216 | fi 217 | fi 218 | fi 219 | 220 | if [[ $line =~ ${regex[stop]} && $switch_func -eq 1 ]]; then 221 | switch_func=0 222 | 223 | if [[ ${#undeclared_local[@]} -gt 0 ]]; then 224 | printf '*** %s ***\n' "$func_name" 225 | printf '%s\n' "${!undeclared_local[@]}" | sort 226 | 227 | printf '\n' 228 | fi 229 | 230 | unset -v func_name declared_local undeclared_local 231 | fi 232 | done 233 | -------------------------------------------------------------------------------- /sort_pix.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # This script checks the file type, MD5 hash, resolution and aspect 4 | # ratio of each image file in the directories given as argumnets, and 5 | # acts accordingly. If it finds an image that has an identical MD5 hash 6 | # to another image, the second image will be deleted. Only the match 7 | # with the oldest modification date will be kept. For each aspect ratio 8 | # defined in the %accepted_ratios hash, the script will create a 9 | # directory and move matching images there. If the horizontal resolution 10 | # is less than 1024, the script will create a directory called 'low_res' 11 | # and move those files there. The remaining images matching neither of 12 | # these conditions are moved to a directory called 'other_res'. 13 | 14 | # The script is not recursive, and it only looks for files that are 1 15 | # level deep in the directory hierarchy. 
16 | 17 | use 5.34.0; 18 | use strict; 19 | use warnings; 20 | use diagnostics; 21 | use File::Basename qw(basename); 22 | use Digest::MD5; 23 | use File::Copy qw(move); 24 | use File::Path qw(make_path); 25 | use Cwd qw(abs_path); 26 | 27 | my(@dirs, %md5h, %files, %regex); 28 | 29 | my $limit = 1024; 30 | my %accepted_ratios = ('1:1' => 1, '4:3' => 1, '16:9' => 1, '16:10' => 1); 31 | 32 | $regex{fn} = qr/^(.*)\.([^.]*)$/; 33 | $regex{file} = qr/^([^\/]+).*$/; 34 | $regex{magick} = qr/^ +Geometry: ([0-9]+x[0-9]+).*$/; 35 | 36 | # The 'usage' subroutine prints syntax, and then quits. 37 | sub usage { 38 | say "\n" . 'Usage: ' . basename($0) . ' [directory] .. [directory N]' . "\n"; 39 | exit; 40 | } 41 | 42 | while (my $arg = shift(@ARGV)) { 43 | if (-d $arg) { 44 | push(@dirs, abs_path($arg)); 45 | } else { usage(); } 46 | } 47 | 48 | if (! scalar(@dirs)) { usage(); } 49 | 50 | # The 'get_type' subroutine gets the file type and the proper extension 51 | # for said file type. 52 | sub get_type { 53 | my $fn = shift; 54 | chomp(my $type = `file --brief --mime "$fn"`); 55 | chomp(my $ext = `file --brief --extension "$fn"`); 56 | 57 | $type =~ s/$regex{file}/$1/; 58 | 59 | if (! length($type)) { return; } 60 | 61 | $ext =~ s/$regex{file}/$1/; 62 | 63 | if (! length($ext)) { return; } 64 | 65 | if ($ext eq 'jpeg') { $ext = 'jpg'; } 66 | 67 | return($type, $ext); 68 | } 69 | 70 | # The 'md5sum' subroutine gets the MD5 hash, as well as last 71 | # modification date, of the image. 72 | sub md5sum { 73 | my $if = shift; 74 | 75 | my $date = (stat($if))[9]; 76 | 77 | my($hash); 78 | 79 | open(my $mf, '< :raw', $if) or die "Can't open '$if': $!"; 80 | $hash = Digest::MD5->new->addfile($mf)->hexdigest; 81 | close($mf) or die "Can't close '$if': $!"; 82 | 83 | $md5h{$hash}{$if} = $date; 84 | } 85 | 86 | # The 'get_res' subroutine gets the resolution of the image, using 87 | # ImageMagick. 
88 | sub get_res { 89 | my $fn = shift; 90 | 91 | my(@lines, $res, $ratio); 92 | 93 | open(my $output, '-|', 'identify', '-quiet', '-verbose', $fn) 94 | or die "Can't open 'identify': $!"; 95 | chomp(@lines = (<$output>)); 96 | close($output) or die "Can't close 'identify': $!"; 97 | 98 | foreach my $line (@lines) { 99 | if ($line =~ m/$regex{magick}/) { 100 | $res = $1; 101 | last; 102 | } 103 | } 104 | 105 | if (! length($res)) { return; } 106 | 107 | return(split('x', $res)); 108 | } 109 | 110 | # The 'get_ratio' subroutine gets the aspect ratio of a resolution, by 111 | # figuring out the 'greatest common factor' of the 2 numbers. 112 | sub get_ratio { 113 | my $x_res = shift; 114 | my $y_res = shift; 115 | 116 | my($x_rem, $y_rem, $ratio); 117 | 118 | my $gcf = $y_res; 119 | 120 | if ($y_res > $x_res) { 121 | $gcf = $x_res; 122 | } 123 | 124 | $x_rem = $x_res % $gcf; 125 | $y_rem = $y_res % $gcf; 126 | 127 | while ($x_rem > 0 or $y_rem > 0) { 128 | $gcf -= 1; 129 | 130 | $x_rem = $x_res % $gcf; 131 | $y_rem = $y_res % $gcf; 132 | } 133 | 134 | $ratio = $x_res / $gcf . ':' . $y_res / $gcf; 135 | 136 | return($ratio); 137 | } 138 | 139 | # The 'mv_res' subroutine moves the image to the proper directory, named 140 | # after resolution and aspect ratio. 141 | sub mv_res { 142 | my $if = shift; 143 | my $if_dn = shift; 144 | my $if_bn = shift; 145 | my $x_res = shift; 146 | my $y_res = shift; 147 | my $ratio = shift; 148 | 149 | my $of_ratio = $ratio; 150 | 151 | $of_ratio =~ tr/:/_/; 152 | 153 | my $res = join('x', $x_res, $y_res); 154 | 155 | my $of_dn = join('/', $if_dn, $of_ratio, $res); 156 | 157 | my($of); 158 | 159 | # If resolution is lower than defined in $limit, then create a directory 160 | # called 'low_res' and move the image there. 161 | if ($x_res < $limit) { 162 | $of_dn = join('/', $if_dn, 'low_res', $res); 163 | # If the resolution is not among the accepted aspect ratios, then create 164 | # a directory called 'other_res' and move the image there. 
165 | } elsif (! length($accepted_ratios{$ratio})) { 166 | $of_dn = join('/', $if_dn, 'other_res', $res); 167 | } 168 | 169 | make_path($of_dn); 170 | 171 | $of = join('/', $of_dn, $if_bn); 172 | 173 | unless (-f $of) { 174 | move($if, $of) or die "Can't move '$if': $!"; 175 | } 176 | 177 | say $if_bn . ': ' . $res . ' (' . $ratio . ')'; 178 | } 179 | 180 | foreach my $if_dn (@dirs) { 181 | chdir($if_dn) or die "Can't change to '$if_dn': $!"; 182 | 183 | my @files_in = (glob("*")); 184 | 185 | my(@files_out); 186 | 187 | # Check if the file is an image, and has the right extension. 188 | foreach my $if (@files_in) { 189 | if (! -f $if) { next; } 190 | 191 | $if =~ m/$regex{fn}/; 192 | 193 | my $if_bn = $1; 194 | my $if_ext = $2; 195 | 196 | $if = $if_dn . '/' . $if; 197 | 198 | my($of); 199 | 200 | my($type, $of_ext) = get_type($if); 201 | 202 | if (! length($type)) { next; } 203 | 204 | if ($type ne 'image') { next; } 205 | 206 | $of = $if_dn . '/' . $if_bn . '.' . $of_ext; 207 | 208 | if ($if ne $of and ! -f $of) { 209 | move($if, $of) or die "Can't move '$if': $!"; 210 | push(@files_out, $of); 211 | } else { push(@files_out, $if); } 212 | } 213 | 214 | @files_in = (@files_out); 215 | @files_out = (); 216 | 217 | foreach my $if (@files_in) { 218 | md5sum($if); 219 | } 220 | 221 | @files_in = (); 222 | 223 | # See if there's duplicate MD5 hashes among the files, and delete every 224 | # file except the oldest match. 225 | foreach my $hash (keys(%md5h)) { 226 | if (keys(%{$md5h{$hash}}) == 1) { next; } 227 | 228 | my($og_fn, $og_date); 229 | 230 | foreach my $fn (keys(%{$md5h{$hash}})) { 231 | my $date = $md5h{$hash}{$fn}; 232 | 233 | if (! length($og_fn) and ! 
length($og_date)) { 234 | $og_fn = $fn; 235 | $og_date = $date; 236 | 237 | next; 238 | } 239 | 240 | if ($date < $og_date) { 241 | $og_fn = $fn; 242 | $og_date = $date; 243 | } 244 | } 245 | 246 | foreach my $fn (keys(%{$md5h{$hash}})) { 247 | if ($fn ne $og_fn) { 248 | unlink($fn) or die "Can't remove '$fn': $!"; 249 | } 250 | } 251 | } 252 | 253 | foreach my $hash (keys(%md5h)) { 254 | foreach my $fn (keys(%{$md5h{$hash}})) { 255 | if (! -f $fn) { next; } 256 | 257 | $files{$fn} = $hash; 258 | } 259 | } 260 | 261 | %md5h = (); 262 | 263 | # Check the resolution and aspect ratio of the images, and move them to 264 | # their proper directories. 265 | foreach my $if (sort(keys(%files))) { 266 | my $if_bn = basename($if); 267 | 268 | my($x_res, $y_res, $ratio); 269 | 270 | ($x_res, $y_res) = get_res($if); 271 | $ratio = get_ratio($x_res, $y_res); 272 | 273 | if (! length($ratio)) { next; } 274 | 275 | mv_res($if, $if_dn, $if_bn, $x_res, $y_res, $ratio); 276 | } 277 | } 278 | -------------------------------------------------------------------------------- /sort_roms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to sort out good video game ROMs from full sets. 4 | # It will prefer US ROMs, but use another region if that's not 5 | # available. 6 | 7 | # According to GoodTools naming practices, good (verified) ROM dumps, 8 | # have the '[!]' tag. 9 | 10 | # For best results, run your ROM collection through GoodTools before 11 | # using this script, if it has not been already. That will properly 12 | # format all the ROM filenames, so the tags will be recognized. 13 | 14 | # The region priority order is: U, UK, A, W, E, J. 
15 | 16 | # U = US 17 | # UK = United Kingdom 18 | # A = Australia 19 | # W = World 20 | # E = Europe 21 | # J = Japan 22 | 23 | # Special tags for Genesis: 24 | 25 | # 4 = US 26 | # 8 = PAL 27 | # 1 = Japan 28 | # 5 = NTSC 29 | 30 | # Creates a function, called 'usage', which will print usage 31 | # instructions and then quit. 32 | usage () { 33 | printf '\n%s\n\n' "Usage: $(basename "$0") [ROM directory]" 34 | exit 35 | } 36 | 37 | if [[ ! -d $1 ]]; then 38 | usage 39 | fi 40 | 41 | declare if_dn of_dn session title target 42 | declare -a global_vars priority files 43 | declare -A regex titles 44 | 45 | session="${RANDOM}-${RANDOM}" 46 | 47 | if_dn=$(readlink -f "$1") 48 | of_dn="sorted-${session}" 49 | 50 | regex[blank]='^[[:blank:]]*(.*)[[:blank:]]*$' 51 | regex[ext]='\.([^.]*)$' 52 | regex[1]="\(([A-Z]{1,3}|[0-9]{1})\).*${regex[ext]}" 53 | regex[2]="^.*(\[\!\]).*${regex[ext]}" 54 | 55 | global_vars=('fn' 'bn' 'region' 'region_n') 56 | priority=('^U$' 'U' '^4$' '^UK$' '^A$' 'A' '^W$' '^E$' 'E' '^8$' '^J$' 'J' '^1$' '^5$') 57 | 58 | cd "$if_dn" 59 | mkdir "$of_dn" 60 | 61 | mapfile -t files < <(find "$if_dn" -maxdepth 1 -type f 2>&-) 62 | 63 | set_target () { 64 | set_vars () { 65 | titles["${title}"]="$region_n" 66 | } 67 | 68 | for (( j = 0; j < ${#priority[@]}; j++ )); do 69 | target="${priority[${j}]}" 70 | 71 | if [[ ! $region =~ $target ]]; then 72 | continue 73 | fi 74 | 75 | region_n="$j" 76 | 77 | if [[ ${titles[${title}]} != 'undef' ]]; then 78 | if [[ $region_n -lt ${titles[${title}]} ]]; then 79 | set_vars 80 | fi 81 | else 82 | set_vars 83 | fi 84 | 85 | break 86 | done 87 | } 88 | 89 | loop_intro () { 90 | fn="${files[${i}]}" 91 | bn=$(basename "$fn") 92 | 93 | if [[ ! $bn =~ ${regex[1]} ]]; then 94 | return 95 | fi 96 | 97 | region="${BASH_REMATCH[1]}" 98 | 99 | if [[ ! 
$bn =~ ${regex[2]} ]]; then 100 | unset -v region 101 | fi 102 | } 103 | 104 | get_games () { 105 | for (( i = 0; i < ${#files[@]}; i++ )); do 106 | fn="${files[${i}]}" 107 | bn=$(basename "$fn") 108 | 109 | if [[ ! $bn =~ ${regex[1]} ]]; then 110 | continue 111 | fi 112 | 113 | title=$(sed -E "s/${regex[1]}//" <<<"$bn") 114 | 115 | if [[ -n $title ]]; then 116 | titles["${title}"]='undef' 117 | fi 118 | done 119 | } 120 | 121 | get_games 122 | 123 | # Gets the verified ROMs. 124 | for title in "${!titles[@]}"; do 125 | mapfile -t files < <(find "$if_dn" -maxdepth 1 -type f -name "${title}*" 2>&-) 126 | 127 | for (( i = 0; i < ${#files[@]}; i++ )); do 128 | declare "${global_vars[@]}" 129 | 130 | loop_intro 131 | 132 | if [[ -z $region ]]; then 133 | unset -v "${global_vars[@]}" 134 | continue 135 | fi 136 | 137 | set_target 138 | 139 | unset -v "${global_vars[@]}" 140 | done 141 | 142 | for (( i = 0; i < ${#files[@]}; i++ )); do 143 | declare "${global_vars[@]}" 144 | 145 | loop_intro 146 | 147 | if [[ -z $region ]]; then 148 | unset -v "${global_vars[@]}" 149 | continue 150 | fi 151 | 152 | for (( j = 0; j < ${#priority[@]}; j++ )); do 153 | target="${priority[${j}]}" 154 | 155 | if [[ ! $region =~ $target ]]; then 156 | continue 157 | fi 158 | 159 | region_n="$j" 160 | break 161 | done 162 | 163 | if [[ $region_n == "${titles[${title}]}" ]]; then 164 | printf '%s\n' "$bn" 165 | mv -n "$bn" "$of_dn" || exit 166 | fi 167 | 168 | unset -v "${global_vars[@]}" 169 | done 170 | done 171 | -------------------------------------------------------------------------------- /start_handbrake.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script starts / resumes HandBrakeCLI again, after it has been 4 | # paused by 'stop_handbrake.sh'. 5 | 6 | # The script uses the SIGCONT (18) signal to resume the process. 
7 | # To get a list of available signals: kill -l 8 | 9 | declare comm pid args state 10 | declare -a hb_pids 11 | declare -A regex 12 | 13 | comm='HandBrakeCLI' 14 | 15 | regex[pid_comm]='^[[:blank:]]*([0-9]+)[[:blank:]]*(.*)$' 16 | 17 | mapfile -t hb_pids < <(ps -C "$comm" -o pid=,args=) 18 | 19 | for (( i = 0; i < ${#hb_pids[@]}; i++ )); do 20 | if [[ ! ${hb_pids[${i}]} =~ ${regex[pid_comm]} ]]; then 21 | continue 22 | fi 23 | 24 | pid="${BASH_REMATCH[1]}" 25 | args="${BASH_REMATCH[2]}" 26 | 27 | state=$(ps -p "$pid" -o state=) 28 | 29 | if [[ $state != 'T' ]]; then 30 | continue 31 | fi 32 | 33 | cat <&- 35 | 36 | if [[ $? -ne 0 ]]; then 37 | printf '\nThis script needs %s installed!\n\n' 'mkvtoolnix' 38 | exit 39 | fi 40 | 41 | mapfile -t mkvinfo_lines < <(mkvinfo "${if[fn]}" 2>&-) 42 | 43 | # Singles out the part that lists the tracks, and ignores the rest of 44 | # the output from 'mkvinfo'. 45 | switch=0 46 | 47 | for (( i = 0; i < ${#mkvinfo_lines[@]}; i++ )); do 48 | line="${mkvinfo_lines[${i}]}" 49 | 50 | if [[ $line =~ ${regex[start]} ]]; then 51 | switch=1 52 | continue 53 | fi 54 | 55 | if [[ $switch -eq 0 ]]; then 56 | continue 57 | fi 58 | 59 | if [[ $line =~ ${regex[stop]} ]]; then 60 | switch=0 61 | break 62 | fi 63 | 64 | if [[ $line =~ ${regex[strip]} ]]; then 65 | line="${BASH_REMATCH[1]}" 66 | fi 67 | 68 | mkvinfo_tracks+=("$line") 69 | done 70 | 71 | unset -v mkvinfo_lines 72 | 73 | # Gets all tracks from Matroska file. 74 | declare n 75 | 76 | for (( i = 0; i < ${#mkvinfo_tracks[@]}; i++ )); do 77 | line="${mkvinfo_tracks[${i}]}" 78 | 79 | if [[ $line =~ ${regex[track]} ]]; then 80 | if [[ -z $n ]]; then 81 | n=0 82 | else 83 | (( n += 1 )) 84 | fi 85 | 86 | tracks["${n},sub"]=0 87 | fi 88 | 89 | if [[ $line =~ ${regex[sub]} ]]; then 90 | tracks["${n},sub"]=1 91 | fi 92 | 93 | # For some tracks, the language can be listed twice. First with a 94 | # three-letter code, and then with a two-letter code. 
The first code is 95 | # preferred by this script. 96 | if [[ $line =~ ${regex[lang]} ]]; then 97 | if [[ -z ${tracks[${n},lang]} ]]; then 98 | tracks["${n},lang"]="${BASH_REMATCH[2],,}" 99 | fi 100 | fi 101 | 102 | if [[ $line =~ ${regex[name]} ]]; then 103 | if [[ -z ${tracks[${n},name]} ]]; then 104 | tracks["${n},name"]="${BASH_REMATCH[1]}" 105 | fi 106 | fi 107 | done 108 | 109 | (( n += 1 )) 110 | 111 | unset -v mkvinfo_tracks 112 | 113 | # Creates a function, called 'sort_list', which will sort any subtitle 114 | # tracks it finds in alphabetical order, and remove duplicates. 115 | sort_list () { 116 | for (( i = 0; i < n; i++ )); do 117 | if [[ ${tracks[${i},sub]} -ne 1 ]]; then 118 | continue 119 | fi 120 | 121 | if [[ -n ${tracks[${i},lang]} ]]; then 122 | printf '%s\n' "${tracks[${i},lang]}" 123 | elif [[ -n ${tracks[${i},name]} ]]; then 124 | printf '%s\n' "${tracks[${i},name]}" 125 | fi 126 | done | sort -u 127 | } 128 | 129 | mapfile -t lang_list < <(sort_list) 130 | 131 | unset -v n 132 | 133 | printf 'Subtitles: ' 134 | 135 | for (( i = 0; i < ${#lang_list[@]}; i++ )); do 136 | line="${lang_list[${i}]}" 137 | 138 | if [[ $i -ne 0 ]]; then 139 | printf '%s' ', ' 140 | fi 141 | 142 | if [[ -n $line ]]; then 143 | printf '%s' "${line^}" 144 | fi 145 | done 146 | 147 | printf '\n' 148 | -------------------------------------------------------------------------------- /to_utf8.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script converts text files to the UTF-8 charset. 4 | 5 | # To list available character set encodings: 6 | # iconv -l 7 | 8 | set -eo pipefail 9 | 10 | # Creates a function, called 'usage', which will print usage 11 | # instructions and then quit. 
12 | usage () { 13 | printf '\n%s\n\n' "Usage: $(basename "$0") [txt]" 14 | exit 15 | } 16 | 17 | if [[ $# -eq 0 ]]; then 18 | usage 19 | fi 20 | 21 | declare -a files 22 | 23 | while [[ $# -gt 0 ]]; do 24 | if [[ -f $1 ]]; then 25 | files+=("$(readlink -f "$1")") 26 | else 27 | usage 28 | fi 29 | 30 | shift 31 | done 32 | 33 | if [[ ${#files[@]} -eq 0 ]]; then 34 | usage 35 | fi 36 | 37 | declare if charset_of session 38 | 39 | charset_of='UTF-8' 40 | session="${RANDOM}-${RANDOM}" 41 | 42 | declare -A regex 43 | 44 | regex[fn]='^(.*)\.([^.]*)$' 45 | regex[charset1]='([^; ]+)$' 46 | regex[charset2]='^charset=(.*)$' 47 | 48 | # Creates a function, called 'read_decode_fn', which tries to figure out 49 | # the correct character set encoding of the input file. If it succeeds, 50 | # it will encode that file to UTF-8. 51 | read_decode_fn () { 52 | declare charset_if of 53 | 54 | charset_if=$(file -bi "$if") 55 | 56 | if [[ -z $charset_if ]]; then 57 | return 58 | fi 59 | 60 | if [[ ! $charset_if =~ ${regex[charset1]} ]]; then 61 | return 62 | fi 63 | 64 | charset_if="${BASH_REMATCH[1]}" 65 | 66 | if [[ ! 
$charset_if =~ ${regex[charset2]} ]]; then 67 | return 68 | fi 69 | 70 | charset_if="${BASH_REMATCH[1]^^}" 71 | 72 | if [[ $if =~ ${regex[fn]} ]]; then 73 | of="${BASH_REMATCH[1]}-${session}.${BASH_REMATCH[2]}" 74 | else 75 | of="${if}-${session}" 76 | fi 77 | 78 | iconv -f "$charset_if" -t "$charset_of" -o "$of" "$if" 79 | 80 | printf '\n(%s -> %s) %s %s\n\n' "$charset_if" "$charset_of" 'Wrote file:' "$of" 81 | 82 | unset -v charset_if of 83 | } 84 | 85 | for (( i = 0; i < ${#files[@]}; i++ )); do 86 | if="${files[${i}]}" 87 | 88 | read_decode_fn 89 | done 90 | -------------------------------------------------------------------------------- /tracker_list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script parses BitTorrent tracker list text files, sorts, removes 4 | # duplicates, checks online status of each URL, and prints the list to 5 | # STDOUT in the correct format. 6 | 7 | # Only the trackers that are still online will be printed. This is 8 | # useful to clean up old lists of public trackers that can be found 9 | # online, as an example. Though, it might be a good idea to run the 10 | # script a couple of times, waiting a few hours or days inbetween, since 11 | # a tracker could be only temporarily offline. 12 | 13 | # Any tracker URL using protocols besides HTTP, HTTPS and UDP will be 14 | # ignored / skipped by this script, when checking online status. 15 | 16 | # The second argument to the script (-nocheck), is optional. If used, 17 | # the online status of trackers will not be checked, but the list will 18 | # only get sorted and rid of duplicates. 
19 | 20 | # If you want to save the list in a text file, you can just do a 21 | # redirection like so: 22 | 23 | # tracker_list.sh 'trackers.txt' | tee 'trackers_checked.txt' 24 | 25 | declare nocheck 26 | declare -a files 27 | 28 | nocheck=0 29 | 30 | # Creates a function, called 'usage', which will print usage 31 | # instructions and then quit. 32 | usage () { 33 | printf '\n%s\n\n' "Usage: $(basename "$0") [tracker txt] [-nocheck]" 34 | exit 35 | } 36 | 37 | # The loop below handles the arguments to the script. 38 | while [[ $# -gt 0 ]]; do 39 | case "$1" in 40 | '-nocheck') 41 | nocheck=1 42 | 43 | shift 44 | ;; 45 | *) 46 | if [[ -f $1 ]]; then 47 | files+=("$(readlink -f "$1")") 48 | else 49 | usage 50 | fi 51 | 52 | shift 53 | ;; 54 | esac 55 | done 56 | 57 | if [[ ${#files[@]} -eq 0 ]]; then 58 | usage 59 | fi 60 | 61 | declare line end_l end_tmp_l switch n 62 | declare protocol address end port tracker 63 | declare protocol_tmp address_tmp end_tmp port_tmp 64 | declare -a lines_out protocols addresses ends ports 65 | declare -A regex 66 | 67 | regex[url]='^([[:alpha:]]+):\/\/([^:\/]+)(.*)$' 68 | regex[end]='^(.*):([0-9]+)(.*)$' 69 | 70 | # Creates a function, called 'get_lines', which reads the files given as 71 | # arguments to the script into memory. 72 | get_lines () { 73 | declare fn 74 | declare -a lines_in 75 | 76 | for (( z = 0; z < ${#files[@]}; z++ )); do 77 | fn="${files[${z}]}" 78 | 79 | declare -a lines 80 | 81 | mapfile -t lines < <(tr -d '\r' <"$fn" | tr '[:upper:]' '[:lower:]' | sed -E 's/[[:blank:]]+/\n/g') 82 | lines_in+=("${lines[@]}") 83 | 84 | unset -v lines 85 | done 86 | 87 | mapfile -t lines_out < <(printf '%s\n' "${lines_in[@]}" | sort -u) 88 | } 89 | 90 | get_lines 91 | 92 | for (( i = 0; i < ${#lines_out[@]}; i++ )); do 93 | line="${lines_out[${i}]}" 94 | 95 | switch=0 96 | 97 | # Deletes the line from memory, since we already have a temporary 98 | # duplicate. 
99 | lines_out["${i}"]='' 100 | 101 | # Checks if the current line matches the URL regex, and if not continue 102 | # the next iteration of the loop. 103 | if [[ ! $line =~ ${regex[url]} ]]; then 104 | continue 105 | fi 106 | 107 | protocol="${BASH_REMATCH[1]}" 108 | address="${BASH_REMATCH[2]}" 109 | end="${BASH_REMATCH[3]}" 110 | 111 | # If there's no port number in the URL, use port 80. Otherwise, just use 112 | # the one in the URL. 113 | port=80 114 | 115 | if [[ $end =~ ${regex[end]} ]]; then 116 | end="${BASH_REMATCH[1]}${BASH_REMATCH[3]}" 117 | port="${BASH_REMATCH[2]}" 118 | fi 119 | 120 | # Compares the tracker URL with ones that have already been added to the 121 | # list. 122 | for (( j = 0; j < ${#addresses[@]}; j++ )); do 123 | protocol_tmp="${protocols[${j}]}" 124 | address_tmp="${addresses[${j}]}" 125 | end_tmp="${ends[${j}]}" 126 | port_tmp="${ports[${j}]}" 127 | 128 | if [[ $protocol != "$protocol_tmp" ]]; then 129 | continue 130 | fi 131 | 132 | if [[ $port != "$port_tmp" ]]; then 133 | continue 134 | fi 135 | 136 | # If the address matches, then check which has the longest URL ending. 137 | # A new element will not be created in the list, but the longest match 138 | # is used. 139 | if [[ $address == "$address_tmp" ]]; then 140 | switch=1 141 | 142 | end_l="${#end}" 143 | end_tmp_l="${#end_tmp}" 144 | 145 | if [[ $end_l > $end_tmp_l ]]; then 146 | ends["${j}"]="$end" 147 | fi 148 | fi 149 | done 150 | 151 | # If this URL is unique, add it to the different lists. 152 | if [[ $switch -eq 0 ]]; then 153 | protocols+=("$protocol") 154 | addresses+=("$address") 155 | ends+=("$end") 156 | ports+=("$port") 157 | fi 158 | done 159 | 160 | # The loop below goes through each URL, and checks online status. If the 161 | # URL is online, print it. If '-nocheck' was used, just print the URL 162 | # and keep iterating the loop. 
163 | for (( i = 0; i < ${#addresses[@]}; i++ )); do 164 | protocol="${protocols[${i}]}" 165 | address="${addresses[${i}]}" 166 | end="${ends[${i}]}" 167 | port="${ports[${i}]}" 168 | 169 | tracker="${protocol}://${address}:${port}${end}" 170 | 171 | if [[ $nocheck -eq 1 ]]; then 172 | printf '%s\n\n' "$tracker" 173 | 174 | continue 175 | fi 176 | 177 | case $protocol in 178 | http*) 179 | curl --retry 10 --retry-delay 10 --connect-timeout 10 --silent --output /dev/null "$tracker" 180 | ;; 181 | udp) 182 | for n in {1..10}; do 183 | timeout 10 nc --udp -z "$address" "$port" 1>&- 2>&- 184 | done 185 | ;; 186 | *) 187 | continue 188 | ;; 189 | esac 190 | 191 | if [[ $? -ne 0 ]]; then 192 | ping -c 10 -W 10 "$address" &>/dev/null 193 | 194 | if [[ $? -eq 0 ]]; then 195 | printf '%s\n\n' "$tracker" 196 | fi 197 | elif [[ $? -eq 0 ]]; then 198 | printf '%s\n\n' "$tracker" 199 | fi 200 | done 201 | -------------------------------------------------------------------------------- /tracklist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script looks for FLAC files in the current directory and creates 4 | # a tracklist from the tags. 5 | 6 | # If metaflac isn't installed, quit running the script. 7 | command -v metaflac 1>&- || { printf '\n%s\n\n' 'This script requires metaflac.'; exit; } 8 | 9 | declare if albumartist artist album date tracks track title 10 | declare length total_length time total_time 11 | declare -a files 12 | declare -A alltags 13 | 14 | # Creates a function, called 'gettags', which gets all the tags present 15 | # in a FLAC file. 
16 | gettags () { 17 | declare line field 18 | declare -a lines 19 | 20 | for field in "${!alltags[@]}"; do 21 | unset -v alltags["${field}"] 22 | done 23 | 24 | mapfile -t lines < <(metaflac --no-utf8-convert --export-tags-to=- "$if" 2>&-) 25 | 26 | for (( z = 0; z < ${#lines[@]}; z++ )); do 27 | line="${lines[${z}]}" 28 | 29 | unset -v mflac 30 | declare -a mflac 31 | 32 | mflac[0]="${line%%=*}" 33 | mflac[1]="${line#*=}" 34 | 35 | if [[ -z ${mflac[1]} ]]; then 36 | continue 37 | fi 38 | 39 | field="${mflac[0],,}" 40 | 41 | if [[ -n ${alltags[${field}]} ]]; then 42 | continue 43 | fi 44 | 45 | alltags["${field}"]="${mflac[1]}" 46 | done 47 | } 48 | 49 | # Find FLAC files in the current directory. 50 | mapfile -t files < <(find "$PWD" -maxdepth 1 -type f -iname "*.flac" 2>&- | sort -n) 51 | 52 | # If there are no FLAC files in the dir, quit. 53 | if [[ ${#files[@]} -eq 0 ]]; then 54 | printf '\n%s\n\n' 'There are no FLAC files in this directory.' 55 | exit 56 | fi 57 | 58 | if="${files[0]}" 59 | 60 | gettags 61 | 62 | albumartist="${alltags[albumartist]}" 63 | album="${alltags[album]}" 64 | date="${alltags[date]}" 65 | tracks="${#files[@]}" 66 | 67 | # Function to calculate seconds for a track. 68 | # Usage: time_seconds 69 | time_seconds () { 70 | declare samples rate 71 | 72 | samples=$(metaflac --show-total-samples "$1") 73 | rate=$(metaflac --show-sample-rate "$1") 74 | printf '%d' "$(( samples / rate ))" 75 | } 76 | 77 | # Function to make the time a little more readable. 78 | # Usage: time_readable 79 | # Since the positional parameter is an integer we have to put a $ in 80 | # front of it so it doesn't get interpreted as a regular integer. 81 | time_readable () { 82 | declare minutes seconds 83 | 84 | minutes=$(( $1 / 60 )) 85 | seconds=$(( $1 % 60 )) 86 | printf '%d:%02d' "$minutes" "$seconds" 87 | } 88 | 89 | # Calculates the time of all tracks combined in seconds and stores the 90 | # value in the $total_length variable. 
91 | for (( i = 0; i < ${#files[@]}; i++ )); do 92 | (( total_length += $(time_seconds "${files[${i}]}") )) 93 | done 94 | 95 | # Makes the time readable. 96 | total_time=$(time_readable "$total_length") 97 | 98 | # Prints album information. 99 | cat <&- 21 | } 22 | 23 | for (( i = 0; i < ${#words[@]}; i++ )); do 24 | word="${words[${i}]}" 25 | 26 | if [[ ! $word =~ ${regex[url]} ]]; then 27 | continue 28 | fi 29 | 30 | get_page "$word" | sed -nE "s/${regex[title]}/\1/p" | head -n 1 31 | done 32 | -------------------------------------------------------------------------------- /wolfendoom.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is a launcher for Laz Rojas' WolfenDoom (DOOM 2 mod) for 4 | # GZDoom. 5 | 6 | # http://lazrojas.com/wolfendoom/index2.html 7 | # https://forum.zdoom.org/viewtopic.php?t=48518 8 | 9 | # File mirrors: 10 | 11 | # https://www.dropbox.com/s/ws5xmhnncot3950/wolf4zdoom_v132.zip?dl=1 12 | # https://mega.nz/#F!AFxnRCyZ!Ce648WN0JYI8jVWtZ_p89A 13 | # https://ufile.io/c53dwjdf 14 | 15 | declare gzdoom_cfg wolf_dn 16 | declare doom_wad doom2_wad wad 17 | declare -a wolf_wads 18 | declare -a wolf_info 19 | declare -A regex 20 | 21 | # Change the path variables below, to point to the correct directories 22 | # for: 23 | # * GZDoom configuration directory 24 | # * WolfenDoom PK3 files directory 25 | gzdoom_cfg="${HOME}/.var/app/org.zdoom.GZDoom/.config/gzdoom" 26 | wolf_dn="${gzdoom_cfg}/wolfendoom" 27 | 28 | # * The Ultimate DOOM WAD 29 | # * DOOM 2 WAD 30 | doom_wad="${gzdoom_cfg}/doom.wad" 31 | doom2_wad="${gzdoom_cfg}/doom2.wad" 32 | 33 | regex[num]='^[0-9]+$' 34 | 35 | cd "$wolf_dn" 36 | 37 | # Creates a function, called 'gzdoom', which will run the GZDoom 38 | # Flatpak (with arguments). 39 | gzdoom () { 40 | flatpak run org.zdoom.GZDoom "$@" 41 | } 42 | 43 | # Laz Rojas WADs. 
44 | wolf_wads[0]='astrostein.pk3' 45 | wolf_wads[1]='astrostein2.pk3' 46 | wolf_wads[2]='astrostein3.pk3' 47 | wolf_wads[3]='totenhaus.pk3' 48 | wolf_wads[4]='halten.pk3' 49 | wolf_wads[5]='arcticwolf1.pk3' 50 | wolf_wads[6]='arcticwolf2.pk3' 51 | wolf_wads[7]='eisenmann.pk3' 52 | wolf_wads[8]='rheingold1.pk3' 53 | wolf_wads[9]='rheingold2.pk3' 54 | wolf_wads[10]='portal.pk3' 55 | wolf_wads[11]='treasure.pk3' 56 | wolf_wads[12]='wolfen_2nd.pk3' 57 | wolf_wads[13]='wolfen_orig.pk3' 58 | wolf_wads[14]='wolfen_noct.pk3' 59 | wolf_wads[15]='wolfen_sod.pk3' 60 | 61 | # Caleb26 Spear of Destiny WADs. 62 | wolf_wads[16]='sod_revisited.pk3' 63 | wolf_wads[17]='sod_lost.pk3' 64 | 65 | # Laz Rojas WADs. 66 | wolf_info[0]='Astrostein Trilogy 1' 67 | wolf_info[1]='Astrostein Trilogy 2' 68 | wolf_info[2]='Astrostein Trilogy 3' 69 | wolf_info[3]='Escape from Totenhaus' 70 | wolf_info[4]='Halten Sie!' 71 | wolf_info[5]='Operation Arctic Wolf Pt. 1' 72 | wolf_info[6]='Operation Arctic Wolf Pt. 2' 73 | wolf_info[7]='Operation Eisenmann' 74 | wolf_info[8]='Operation Rheingold Ep. 1' 75 | wolf_info[9]='Operation Rheingold Ep. 2' 76 | wolf_info[10]='The Portal' 77 | wolf_info[11]='Treasure Hunt' 78 | wolf_info[12]='WolfenDoom: Second Encounter' 79 | wolf_info[13]='WolfenDoom: Original Missions' 80 | wolf_info[14]='WolfenDoom: Nocturnal Missions' 81 | wolf_info[15]='WolfenDoom: Spear of Destiny' 82 | 83 | # Caleb26 Spear of Destiny WADs. 84 | wolf_info[16]='Spear Revisited' 85 | wolf_info[17]='SoD: The Lost Episodes' 86 | 87 | while [[ 1 ]]; do 88 | printf '\n%s\n\n' '*** CHOOSE WAD ***' 89 | 90 | for (( i = 0; i < ${#wolf_wads[@]}; i++ )); do 91 | printf '%s) %s\n' "$i" "${wolf_info[${i}]}" 92 | done 93 | 94 | printf '\n' 95 | 96 | read -p '>' 97 | 98 | if [[ ! 
$REPLY =~ ${regex[num]} ]]; then 99 | continue 100 | fi 101 | 102 | wad="${wolf_wads[${REPLY}]}" 103 | 104 | if [[ -z $wad ]]; then 105 | continue 106 | fi 107 | 108 | gzdoom -iwad "$doom2_wad" -file "$wad" -noautoload 109 | 110 | break 111 | done 112 | -------------------------------------------------------------------------------- /yt_encode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This simple script is meant to encode my YouTube videos (desktop / 4 | # gameplay recordings) to AV1 (from FFV1 lossless video). YouTube has a 5 | # file size limit of 256 GB. The quality produced by this script is 6 | # virtually identical to lossless, but the files are a lot smaller, 7 | # hence much quicker to upload. 8 | 9 | # The source files were created with SimpleScreenRecorder and Audacity. 10 | 11 | # The output audio format is FLAC, as that's what I use now when 12 | # recording. Before that, I would use PCM (WAV). 13 | 14 | # The script needs to be run with root priviliges in order to be able to 15 | # run 'renice' to raise the priority of the ffmpeg process. 16 | 17 | # FPS is optional. If not specified, the output will use the same 18 | # framerate as the input. FPS can only be specified once, and it will 19 | # be used for all input files. 20 | 21 | # How to get information about the AV1 encoder: 22 | # ffmpeg --help encoder=libsvtav1 23 | 24 | # How to suspend and resume ffmpeg processes (hence pausing the script): 25 | # killall -20 ffmpeg 26 | # killall -18 ffmpeg 27 | 28 | # If the script isn't run with sudo / root privileges, quit. 29 | if [[ $EUID -ne 0 ]]; then 30 | printf '\n%s\n\n' 'You need to be root to run this script!' 31 | exit 32 | fi 33 | 34 | # Creates a function, called 'usage', which will print usage 35 | # instructions and then quit. 
36 | usage () { 37 | printf '\n%s\n\n' "Usage: $(basename "$0") [fps] [mkv]" 38 | exit 39 | } 40 | 41 | if [[ $# -eq 0 ]]; then 42 | usage 43 | fi 44 | 45 | declare if of pid exit_status 46 | declare -a files args1 args2 args3 args 47 | declare -A regex 48 | 49 | regex[fps]='^([0-9]+)(\.[0-9]+){0,1}$' 50 | 51 | if [[ $1 =~ ${regex[fps]} ]]; then 52 | args2=(-r \""${1}"\") 53 | 54 | shift 55 | fi 56 | 57 | while [[ $# -gt 0 ]]; do 58 | if [[ -f $1 ]]; then 59 | files+=("$(readlink -f "$1")") 60 | else 61 | usage 62 | fi 63 | 64 | shift 65 | done 66 | 67 | if [[ ${#files[@]} -eq 0 ]]; then 68 | usage 69 | fi 70 | 71 | for (( i = 0; i < ${#files[@]}; i++ )); do 72 | if="${files[${i}]}" 73 | of="${if%.*}_av1.mkv" 74 | 75 | # If there's any running ffmpeg processes, wait until they're finished 76 | # to avoid oversaturating the CPU. 77 | while ps -C ffmpeg -o pid= >/dev/null; do 78 | sleep 1 79 | done 80 | 81 | args1=(ffmpeg -y -i \""${if}"\" -pix_fmt yuv420p10le) 82 | args3=(-c:a flac -c:v libsvtav1 -crf 20 \""${of}"\") 83 | 84 | if [[ ${#args2[@]} -gt 0 ]]; then 85 | args=("${args1[@]}" "${args2[@]}" "${args3[@]}") 86 | else 87 | args=("${args1[@]}" "${args3[@]}") 88 | fi 89 | 90 | eval "${args[@]}" & 91 | 92 | pid="$!" 93 | 94 | renice -n -20 -p "$pid" 95 | 96 | wait "$pid" 1>&- 2>&- 97 | 98 | exit_status="$?" 99 | 100 | # If the encoding succeeded, copy file permissions and modification 101 | # time from input file to output file, and then delete the input file. 102 | if [[ $exit_status -eq 0 ]]; then 103 | chown --reference="$if" "$of" 104 | chmod --reference="$if" "$of" 105 | touch -r "$if" "$of" 106 | 107 | rm -f "$if" 108 | else 109 | exit 110 | fi 111 | done 112 | --------------------------------------------------------------------------------