├── .gitmodules ├── Colours-EyeCandy ├── colourbars ├── colours ├── colourtheme ├── hypnotoad.pl ├── pacman.sh ├── spacey.sh └── tanks.sh ├── README.md ├── aur-check ├── aur-release ├── aur-remotebuild ├── backup-system.sh ├── batmanpager ├── bsnap.sh ├── btrfs-diff ├── btrfs-sync ├── btrfs-sync-WIP ├── clean-aur-dir.py ├── convertToUtf8.py ├── cp-p ├── fatcp ├── fetchmail ├── ffparser.py ├── fmount.py ├── forcemp3convert.sh ├── hddtemp.sh ├── img2pdf.sh ├── initscreen.sh ├── lf-paste ├── maildir-strip-attachments.py ├── makeissue.sh ├── mp3convert.py ├── mv-p ├── nat-launch-subnet.sh ├── nat-launch.sh ├── notify-brightness.sh ├── notify-volume.sh ├── pacman-disowned.sh ├── pdf-extract.sh ├── perm.sh ├── pythonscripts ├── __init__.py ├── cpu.py ├── daemon.py ├── ffparser.py ├── logger.py ├── misc.py ├── tempfiles.py └── terminal.py ├── qemu-launcher.sh ├── qemu-mac-hasher.py ├── qemu-tap-helper.sh ├── remove-dead-symlinks.sh ├── replaygain.py ├── rexe ├── rmshit.py ├── run-pvserver ├── sway-sensible-terminal ├── teams-attendance-parser.py ├── toggle-touchpad.sh ├── touch-tree.py ├── waybar-khal.py └── x /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "submodules/cp-p"] 2 | path = submodules/cp-p 3 | url = ../cp-p.git 4 | -------------------------------------------------------------------------------- /Colours-EyeCandy/colourbars: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # by Him on the Arch boards 3 | # ANSI Color -- use these variables to easily have different color 4 | # and format output. Make sure to output the reset sequence after 5 | # colors (f = foreground, b = background), and use the 'off' 6 | # feature for anything you turn on. 
7 | 8 | initializeANSI() 9 | { 10 | esc="" 11 | 12 | blackf="${esc}[30m"; redf="${esc}[31m"; greenf="${esc}[32m" 13 | yellowf="${esc}[33m" bluef="${esc}[34m"; purplef="${esc}[35m" 14 | cyanf="${esc}[36m"; whitef="${esc}[37m" 15 | 16 | blackb="${esc}[40m"; redb="${esc}[41m"; greenb="${esc}[42m" 17 | yellowb="${esc}[43m" blueb="${esc}[44m"; purpleb="${esc}[45m" 18 | cyanb="${esc}[46m"; whiteb="${esc}[47m" 19 | 20 | boldon="${esc}[1m"; boldoff="${esc}[22m" 21 | italicson="${esc}[3m"; italicsoff="${esc}[23m" 22 | ulon="${esc}[4m"; uloff="${esc}[24m" 23 | invon="${esc}[7m"; invoff="${esc}[27m" 24 | 25 | reset="${esc}[0m" 26 | } 27 | 28 | # note in this first use that switching colors doesn't require a reset 29 | # first - the new color overrides the old one. 30 | 31 | initializeANSI 32 | 33 | cat << EOF 34 | 35 | ${redf}▆▆▆▆▆▆▆▆▆▆${reset} ${greenf}▆▆▆▆▆▆▆▆▆▆${reset} ${yellowf}▆▆▆▆▆▆▆▆▆▆${reset} ${bluef}▆▆▆▆▆▆▆▆▆▆${reset} ${purplef}▆▆▆▆▆▆▆▆▆▆${reset} ${cyanf}▆▆▆▆▆▆▆▆▆▆${reset} ${whitef}▆▆▆▆▆▆▆▆▆▆${reset} 36 | ${boldon}${blackf} ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::${reset} 37 | ${boldon}${redf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${greenf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${yellowf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${bluef}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${purplef}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${cyanf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${whitef}▆▆▆▆▆▆▆▆▆▆${reset} 38 | 39 | 40 | EOF 41 | -------------------------------------------------------------------------------- /Colours-EyeCandy/colours: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Original: http://frexx.de/xterm-256-notes/ 3 | # http://frexx.de/xterm-256-notes/data/colortable16.sh 4 | # Modified by Aaron Griffin 5 | # and further by Kazuo Teramoto 6 | 7 | 8 | FGNAMES=(' black ' ' red ' ' green ' ' yellow' ' blue ' 'magenta' ' cyan ' ' white ') 9 | BGNAMES=('DFT' 'BLK' 'RED' 'GRN' 'YEL' 'BLU' 'MAG' 'CYN' 'WHT') 10 | echo " 
┌──────────────────────────────────────────────────────────────────────────┐" 11 | for b in $(seq 0 8); do 12 | if [ "$b" -gt 0 ]; then 13 | bg=$(($b+39)) 14 | fi 15 | 16 | echo -en "\033[0m ${BGNAMES[$b]} │ " 17 | for f in $(seq 0 7); do 18 | echo -en "\033[${bg}m\033[$(($f+30))m ${FGNAMES[$f]} " 19 | done 20 | echo -en "\033[0m │" 21 | 22 | echo -en "\033[0m\n\033[0m │ " 23 | for f in $(seq 0 7); do 24 | echo -en "\033[${bg}m\033[1;$(($f+30))m ${FGNAMES[$f]} " 25 | done 26 | echo -en "\033[0m │" 27 | echo -e "\033[0m" 28 | 29 | if [ "$b" -lt 8 ]; then 30 | echo " ├──────────────────────────────────────────────────────────────────────────┤" 31 | fi 32 | done 33 | echo " └──────────────────────────────────────────────────────────────────────────┘" 34 | 35 | -------------------------------------------------------------------------------- /Colours-EyeCandy/colourtheme: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file echoes a bunch of color codes to the 4 | # terminal to demonstrate what's available. Each 5 | # line is the color code of one forground color, 6 | # out of 17 (default + 16 escapes), followed by a 7 | # test use of that color on all nine background 8 | # colors (default + 8 escapes). 
9 | # 10 | 11 | T='▆ ▆' # The test text 12 | 13 | echo -e "\n 40m 41m 42m 43m\ 14 | 44m 45m 46m 47m"; 15 | 16 | for FGs in ' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' ' 32m' \ 17 | '1;32m' ' 33m' '1;33m' ' 34m' '1;34m' ' 35m' '1;35m' \ 18 | ' 36m' '1;36m' ' 37m' '1;37m'; 19 | do FG=${FGs// /} 20 | echo -en " $FGs \033[$FG $T " 21 | for BG in 40m 41m 42m 43m 44m 45m 46m 47m; 22 | do echo -en "$EINS \033[$FG\033[$BG $T \033[0m"; 23 | done 24 | echo; 25 | done 26 | echo 27 | -------------------------------------------------------------------------------- /Colours-EyeCandy/hypnotoad.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # script by karabaja4 4 | # mail: karabaja4@archlinux.us 5 | 6 | my $blackFG_yellowBG = "\e[30;43m"; 7 | my $blackFG_redBG = "\e[30;41m"; 8 | my $blackFG_purpleBG = "\e[30;45m"; 9 | 10 | my $yellowFG_blackBG = "\e[1;33;40m"; 11 | my $yellowFG_redBG = "\e[1;33;41m"; 12 | 13 | my $redFG_yellowBG = "\e[31;43m"; 14 | 15 | my $purpleFG_yellowBG = "\e[35;43m"; 16 | my $purpleFG_blueBG = "\e[1;35;44m"; 17 | 18 | my $end = "\e[0m"; 19 | 20 | system("clear"); 21 | 22 | print " 23 | 24 | ${blackFG_yellowBG},'${blackFG_redBG}`${blackFG_yellowBG}`.._${end} ${blackFG_yellowBG},'${blackFG_redBG}`${end}${blackFG_yellowBG}`.${end} 25 | ${blackFG_yellowBG}:${blackFG_redBG},${yellowFG_blackBG}--.${end}${blackFG_redBG}_${blackFG_yellowBG}:)\\,:${blackFG_redBG},${yellowFG_blackBG}._,${end}${yellowFG_redBG}.${end}${blackFG_yellowBG}:${end} 26 | ${blackFG_yellowBG}:`-${yellowFG_blackBG}-${end}${blackFG_yellowBG},${blackFG_yellowBG}''${end}${redFG_yellowBG}@@\@${end}${blackFG_yellowBG}:`.${yellowFG_redBG}.${end}${blackFG_yellowBG}.';\\${end} All Glory to 27 | ${blackFG_yellowBG}`,'${end}${redFG_yellowBG}@@@@@@\@${end}${blackFG_yellowBG}`---'${redFG_yellowBG}@\@${end}${blackFG_yellowBG}`.${end} the HYPNOTOAD! 
28 | ${blackFG_yellowBG}/${redFG_yellowBG}@@@@@@@@@@@@@@@@\@${end}${blackFG_yellowBG}:${end} 29 | ${blackFG_yellowBG}/${redFG_yellowBG}@@@@@@@@@@@@@@@@@@\@${end}${blackFG_yellowBG}\\${end} 30 | ${blackFG_yellowBG},'${redFG_yellowBG}@@@@@@@@@@@@@@@@@@@@\@${end}${purpleFG_yellowBG}:\\${end}${blackFG_yellowBG}.___,-.${end} 31 | ${blackFG_yellowBG}`...,---'``````-..._${redFG_yellowBG}@@@\@${end}${blackFG_purpleBG}|:${end}${redFG_yellowBG}@@@@@@\@${end}${blackFG_yellowBG}\\${end} 32 | ${blackFG_yellowBG}( )${end}${redFG_yellowBG}@@\@${end}${blackFG_purpleBG};:${end}${redFG_yellowBG}@@@\@)@@\@${end}${blackFG_yellowBG}\\${end} ${blackFG_yellowBG}_,-.${end} 33 | ${blackFG_yellowBG}`. (${end}${redFG_yellowBG}@@\@${end}${blackFG_purpleBG}//${end}${redFG_yellowBG}@@@@@@@@@\@${end}${blackFG_yellowBG}`'${end}${redFG_yellowBG}@@@\@${end}${blackFG_yellowBG}\\${end} 34 | ${blackFG_yellowBG}: `.${end}${blackFG_purpleBG}//${end}${redFG_yellowBG}@@)@@@@@@)@@@@@,\@${end}${blackFG_yellowBG};${end} 35 | ${blackFG_purpleBG}|`${purpleFG_yellowBG}.${blackFG_yellowBG} ${end}${purpleFG_yellowBG}_${end}${purpleFG_yellowBG},${blackFG_purpleBG}'/${end}${redFG_yellowBG}@@@@@@@)@@@@)@,'\@${end}${blackFG_yellowBG},'${end} 36 | ${blackFG_yellowBG}:${end}${blackFG_purpleBG}`.`${end}${purpleFG_yellowBG}-..____..=${end}${blackFG_purpleBG}:.-${end}${blackFG_yellowBG}':${end}${redFG_yellowBG}@@@@@.@@@@\@_,@@,'${end} 37 | ${redFG_yellowBG},'${end}${blackFG_yellowBG}\\ ${end}${blackFG_purpleBG}``--....${end}${purpleFG_blueBG}-)='${end}${blackFG_yellowBG} `.${end}${redFG_yellowBG}_,@\@${end}${blackFG_yellowBG}\\${end} ${redFG_yellowBG})@@\@'``._${end} 38 | ${redFG_yellowBG}/\@${end}${redFG_yellowBG}_${end}${redFG_yellowBG}\@${end}${blackFG_yellowBG}`.${end}${blackFG_yellowBG} ${end}${blackFG_redBG}(@)${end}${blackFG_yellowBG} /${end}${redFG_yellowBG}@@@@\@${end}${blackFG_yellowBG})${end} ${redFG_yellowBG}; / \\ \\`-.'${end} 39 | ${redFG_yellowBG}(@@\@${end}${redFG_yellowBG}`-:${end}${blackFG_yellowBG}`. 
${end}${blackFG_yellowBG}`' ___..'${end}${redFG_yellowBG}@\@${end}${blackFG_yellowBG}_,-'${end} ${redFG_yellowBG}|/${end} ${redFG_yellowBG}`.)${end} 40 | ${redFG_yellowBG}`-. `.`.${end}${blackFG_yellowBG}``-----``--${end}${redFG_yellowBG},@\@.'${end} 41 | ${redFG_yellowBG}|/`.\\`'${end} ${redFG_yellowBG},',');${end} 42 | ${redFG_yellowBG}`${end} ${redFG_yellowBG}(/${end} ${redFG_yellowBG}(/${end} 43 | 44 | 45 | "; 46 | 47 | 48 | -------------------------------------------------------------------------------- /Colours-EyeCandy/pacman.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # ANSI Color -- use these variables to easily have different color 4 | # and format output. Make sure to output the reset sequence after 5 | # colors (f = foreground, b = background), and use the 'off' 6 | # feature for anything you turn on. 7 | 8 | initializeANSI() 9 | { 10 | esc="" 11 | 12 | blackf="${esc}[30m"; redf="${esc}[31m"; greenf="${esc}[32m" 13 | yellowf="${esc}[33m" bluef="${esc}[34m"; purplef="${esc}[35m" 14 | cyanf="${esc}[36m"; whitef="${esc}[37m" 15 | 16 | blackb="${esc}[40m"; redb="${esc}[41m"; greenb="${esc}[42m" 17 | yellowb="${esc}[43m" blueb="${esc}[44m"; purpleb="${esc}[45m" 18 | cyanb="${esc}[46m"; whiteb="${esc}[47m" 19 | 20 | boldon="${esc}[1m"; boldoff="${esc}[22m" 21 | italicson="${esc}[3m"; italicsoff="${esc}[23m" 22 | ulon="${esc}[4m"; uloff="${esc}[24m" 23 | invon="${esc}[7m"; invoff="${esc}[27m" 24 | 25 | reset="${esc}[0m" 26 | } 27 | 28 | # note in this first use that switching colors doesn't require a reset 29 | # first - the new color overrides the old one. 
30 | 31 | clear 32 | 33 | initializeANSI 34 | 35 | cat << EOF 36 | 37 | ${yellowf} ▄███████▄${reset} ${redf} ▄██████▄${reset} ${greenf} ▄██████▄${reset} ${bluef} ▄██████▄${reset} ${purplef} ▄██████▄${reset} ${cyanf} ▄██████▄${reset} 38 | ${yellowf}▄█████████▀▀${reset} ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄${reset} ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄${reset} ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄${reset} ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄${reset} ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset} 39 | ${yellowf}███████▀${reset} ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███${reset} ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███${reset} ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███${reset} ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███${reset} ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset} 40 | ${yellowf}███████▄${reset} ${redf}████████████${reset} ${greenf}████████████${reset} ${bluef}████████████${reset} ${purplef}████████████${reset} ${cyanf}████████████${reset} 41 | ${yellowf}▀█████████▄▄${reset} ${redf}██▀██▀▀██▀██${reset} ${greenf}██▀██▀▀██▀██${reset} ${bluef}██▀██▀▀██▀██${reset} ${purplef}██▀██▀▀██▀██${reset} ${cyanf}██▀██▀▀██▀██${reset} 42 | ${yellowf} ▀███████▀${reset} ${redf}▀ ▀ ▀ ▀${reset} ${greenf}▀ ▀ ▀ ▀${reset} ${bluef}▀ ▀ ▀ ▀${reset} ${purplef}▀ ▀ ▀ ▀${reset} ${cyanf}▀ ▀ ▀ ▀${reset} 43 | 44 | ${boldon}${yellowf} ▄███████▄ ${redf} ▄██████▄ ${greenf} ▄██████▄ ${bluef} ▄██████▄ ${purplef} ▄██████▄ ${cyanf} ▄██████▄${reset} 45 | ${boldon}${yellowf}▄█████████▀▀ ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄ ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄ ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄ ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄ ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset} 46 | ${boldon}${yellowf}███████▀ 
${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███ ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███ ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███ ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███ ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset} 47 | ${boldon}${yellowf}███████▄ ${redf}████████████ ${greenf}████████████ ${bluef}████████████ ${purplef}████████████ ${cyanf}████████████${reset} 48 | ${boldon}${yellowf}▀█████████▄▄ ${redf}██▀██▀▀██▀██ ${greenf}██▀██▀▀██▀██ ${bluef}██▀██▀▀██▀██ ${purplef}██▀██▀▀██▀██ ${cyanf}██▀██▀▀██▀██${reset} 49 | ${boldon}${yellowf} ▀███████▀ ${redf}▀ ▀ ▀ ▀ ${greenf}▀ ▀ ▀ ▀ ${bluef}▀ ▀ ▀ ▀ ${purplef}▀ ▀ ▀ ▀ ${cyanf}▀ ▀ ▀ ▀${reset} 50 | 51 | EOF 52 | -------------------------------------------------------------------------------- /Colours-EyeCandy/spacey.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #ANSI color scheme script featuring Space Invaders 3 | # 4 | # Original: http://crunchbanglinux.org/forums/post/126921/#p126921 5 | # Modified by lolilolicon 6 | 7 | 8 | f=3 b=4 9 | for j in f b; do 10 | for i in {0..7}; do 11 | eval ${j}${i}=\$\'\\e\[${!j}${i}m\' 12 | done 13 | done 14 | bld=$'\e[1m' 15 | rst=$'\e[0m' 16 | 17 | cat << EOF 18 | 19 | $f0 ▄██▄ $f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst 20 | $f0▄█▀██▀█▄ $f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄ $rst 21 | $f0▀▀█▀▀█▀▀ $f1█▀███████▀█ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4█▀███████▀█ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀ $rst 22 | $f0▄▀▄▀▀▄▀▄ $f1▀ ▀▄▄ ▄▄▀ ▀ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄ $rst 23 | 24 | $bld $f0 ▄██▄ $f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst 25 | $bld $f0▄█▀██▀█▄ $f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst 26 | $bld $f0▀▀█▀▀█▀▀ $f1█▀███████▀█ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4█▀███████▀█ 
$f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst 27 | $bld $f0▄▀▄▀▀▄▀▄ $f1▀ ▀▄▄ ▄▄▀ ▀ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst 28 | 29 | 30 | $f7▌$rst 31 | 32 | $f7▌$rst 33 | 34 | $f7 ▄█▄ $rst 35 | $f7▄█████████▄$rst 36 | $f7▀▀▀▀▀▀▀▀▀▀▀$rst 37 | 38 | EOF 39 | -------------------------------------------------------------------------------- /Colours-EyeCandy/tanks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # ANSI Color -- use these variables to easily have different color 4 | # and format output. Make sure to output the reset sequence after 5 | # colors (f = foreground, b = background), and use the 'off' 6 | # feature for anything you turn on. 7 | 8 | initializeANSI() 9 | { 10 | esc="" 11 | 12 | blackf="${esc}[30m"; redf="${esc}[31m"; greenf="${esc}[32m" 13 | yellowf="${esc}[33m" bluef="${esc}[34m"; purplef="${esc}[35m" 14 | cyanf="${esc}[36m"; whitef="${esc}[37m" 15 | 16 | blackb="${esc}[40m"; redb="${esc}[41m"; greenb="${esc}[42m" 17 | yellowb="${esc}[43m" blueb="${esc}[44m"; purpleb="${esc}[45m" 18 | cyanb="${esc}[46m"; whiteb="${esc}[47m" 19 | 20 | boldon="${esc}[1m"; boldoff="${esc}[22m" 21 | italicson="${esc}[3m"; italicsoff="${esc}[23m" 22 | ulon="${esc}[4m"; uloff="${esc}[24m" 23 | invon="${esc}[7m"; invoff="${esc}[27m" 24 | 25 | reset="${esc}[0m" 26 | } 27 | 28 | # note in this first use that switching colors doesn't require a reset 29 | # first - the new color overrides the old one. 
30 | 31 | initializeANSI 32 | 33 | cat << EOF 34 | 35 | ${boldon}${redf} █ ${reset} ${boldon}${greenf} █ ${reset} ${boldon}${yellowf} █ ${reset} ${boldon}${bluef} █ ${reset} ${boldon}${purplef} █ ${reset} ${boldon}${cyanf} █ ${reset} 36 | ${boldon}${redf}▄▄ █ ▄▄${reset} ${boldon}${greenf}▄▄ █ ▄▄${reset} ${boldon}${yellowf}▄▄ █ ▄▄${reset} ${boldon}${bluef}▄▄ █ ▄▄${reset} ${boldon}${purplef}▄▄ █ ▄▄${reset} ${boldon}${cyanf}▄▄ █ ▄▄${reset} 37 | ${boldon}${redf}███▀▀▀███${reset} ${boldon}${greenf}███▀▀▀███${reset} ${boldon}${yellowf}███▀▀▀███${reset} ${boldon}${bluef}███▀▀▀███${reset} ${boldon}${purplef}███▀▀▀███${reset} ${boldon}${cyanf}███▀▀▀███${reset} 38 | ${boldon}${redf}███ █ ███${reset} ${boldon}${greenf}███ █ ███${reset} ${boldon}${yellowf}███ █ ███${reset} ${boldon}${bluef}███ █ ███${reset} ${boldon}${purplef}███ █ ███${reset} ${boldon}${cyanf}███ █ ███${reset} 39 | ${boldon}${redf}██ ▀▀▀ ██${reset} ${boldon}${greenf}██ ▀▀▀ ██${reset} ${boldon}${yellowf}██ ▀▀▀ ██${reset} ${boldon}${bluef}██ ▀▀▀ ██${reset} ${boldon}${purplef}██ ▀▀▀ ██${reset} ${boldon}${cyanf}██ ▀▀▀ ██${reset} 40 | 41 | ${redf} █ ${reset} ${greenf} █ ${reset} ${yellowf} █ ${reset} ${bluef} █ ${reset} ${purplef} █ ${reset} ${cyanf} █ ${reset} 42 | ${redf}▄▄ █ ▄▄${reset} ${greenf}▄▄ █ ▄▄${reset} ${yellowf}▄▄ █ ▄▄${reset} ${bluef}▄▄ █ ▄▄${reset} ${purplef}▄▄ █ ▄▄${reset} ${cyanf}▄▄ █ ▄▄${reset} 43 | ${redf}███▀▀▀███${reset} ${greenf}███▀▀▀███${reset} ${yellowf}███▀▀▀███${reset} ${bluef}███▀▀▀███${reset} ${purplef}███▀▀▀███${reset} ${cyanf}███▀▀▀███${reset} 44 | ${redf}███ █ ███${reset} ${greenf}███ █ ███${reset} ${yellowf}███ █ ███${reset} ${bluef}███ █ ███${reset} ${purplef}███ █ ███${reset} ${cyanf}███ █ ███${reset} 45 | ${redf}██ ▀▀▀ ██${reset} ${greenf}██ ▀▀▀ ██${reset} ${yellowf}██ ▀▀▀ ██${reset} ${bluef}██ ▀▀▀ ██${reset} ${purplef}██ ▀▀▀ ██${reset} ${cyanf}██ ▀▀▀ ██${reset} 46 | EOF 47 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | A bunch of scripts I keep in `~/Scripts`, which is included in `$PATH`. 2 | -------------------------------------------------------------------------------- /aur-check: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | """ 4 | Check the repo for problems and new package versions 5 | """ 6 | 7 | import subprocess 8 | from pathlib import Path 9 | 10 | import tomlkit.toml_file 11 | 12 | SOURCE_DIRS = [ 13 | { 14 | "path": Path("~/Arch/packaging/aur/").expanduser(), 15 | "nvchecker_source": "aur", 16 | }, 17 | ] 18 | NVCHECKER_CONFIG_FILE = Path("~/Arch/packaging/aur/nvchecker.toml").expanduser() 19 | 20 | 21 | def get_from_SRCINFO(path, key): 22 | with open(path, "r") as f: 23 | for line in f.readlines(): 24 | line = line.strip() 25 | if not line or line.startswith("#"): 26 | continue 27 | k, v = line.split("=", 1) 28 | if k.strip() == key: 29 | return v.strip() 30 | 31 | 32 | def get_from_PKGBUILD(path, key): 33 | with open(path, "r") as f: 34 | for line in f.readlines(): 35 | if line.startswith(f"{key}="): 36 | value = line.split("=", 1)[1].strip() 37 | if value.startswith("'") and value.endswith("'"): 38 | value = value[1:-1] 39 | if value.startswith('"') and value.endswith('"'): 40 | value = value[1:-1] 41 | return value 42 | 43 | 44 | def nvchecker(): 45 | """Updates ``nvchecker`` config file with the sources defined in ``SOURCE_DIRS`` 46 | and then runs ``nvchecker``. 
47 | """ 48 | for src in SOURCE_DIRS: 49 | root_path = src["path"] 50 | 51 | # read the config file 52 | config_file = tomlkit.toml_file.TOMLFile(NVCHECKER_CONFIG_FILE) 53 | config = config_file.read() 54 | 55 | # iterate over package directories in the source root 56 | for pkg in root_path.iterdir(): 57 | if not pkg.is_dir(): 58 | continue 59 | elif not (pkg / "PKGBUILD").is_file(): 60 | print(f"WARNING: PKGBUILD not found in {pkg}") 61 | continue 62 | 63 | # extract from .SRCINFO if it exists 64 | if (pkg / ".SRCINFO").is_file(): 65 | pkgname = get_from_SRCINFO(pkg / ".SRCINFO", "pkgname") 66 | # pkgver = get_from_SRCINFO(pkg / ".SRCINFO", "pkgver") 67 | else: 68 | # extract pkgname and pkgver from PKGBUILD in the most hackish way 69 | pkgname = pkg.name 70 | # pkgname = get_from_PKGBUILD(pkg / "PKGBUILD", "pkgname") 71 | # pkgver = get_from_PKGBUILD(pkg / "PKGBUILD", "pkgver") 72 | 73 | # ensure that a TOML table for the pkgname exists 74 | if pkgname not in config: 75 | config.add(pkgname, tomlkit.table()) 76 | update_config = True 77 | else: 78 | update_config = src.get("nvchecker_overwrite", True) 79 | 80 | # update the config file 81 | if update_config: 82 | source = src["nvchecker_source"] 83 | config[pkgname]["source"] = source 84 | if source in {"aur", "archpkg"}: 85 | config[pkgname][source] = pkgname 86 | elif source == "gitlab": 87 | config[pkgname]["host"] = src["nvchecker_host"] 88 | config[pkgname]["gitlab"] = src["nvchecker_gitlab_format"].format( 89 | remote_pkgname=pkgname 90 | ) 91 | 92 | # write the config file 93 | config_file.write(config) 94 | 95 | # run nvchecker 96 | subprocess.run(["nvchecker", "-c", NVCHECKER_CONFIG_FILE], check=True) 97 | 98 | 99 | def check(): 100 | nvchecker() 101 | 102 | # TODO: check if rebuild-detector is installed 103 | print("Checking packages that need to be rebuilt...") 104 | subprocess.run(["checkrebuild", "-i", "lahwaacz"], check=True) 105 | 106 | # TODO: list packages that are in the database, but package 
file is deleted or source is missing 107 | 108 | 109 | if __name__ == "__main__": 110 | check() 111 | -------------------------------------------------------------------------------- /aur-release: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # SPDX-License-Identifier: GPL-3.0-or-later 4 | 5 | # aur-remotebuild - build packages remotely using aur-chroot 6 | # (based on commitpkg from devtools) 7 | set -o errexit 8 | shopt -s nullglob 9 | readonly argv0=release 10 | 11 | source /usr/share/devtools/lib/common.sh 12 | source /usr/share/devtools/lib/util/srcinfo.sh 13 | 14 | source /usr/share/makepkg/util/parseopts.sh 15 | source /usr/share/makepkg/util/util.sh 16 | 17 | set -eo pipefail 18 | 19 | if [[ ! -f PKGBUILD ]]; then 20 | echo "No PKGBUILD in the current directory!" >&2 21 | exit 1 22 | fi 23 | 24 | # Check if releasing from a branch 25 | if ! branchname=$(git symbolic-ref --short HEAD); then 26 | die 'not on any branch' 27 | fi 28 | if [[ "$branchname" != master ]]; then 29 | die 'must be run from the master branch' 30 | fi 31 | 32 | # default arguments 33 | server=pkgbuild.com 34 | remote_path=public_html/repo 35 | rsyncopts=("${RSYNC_OPTS[@]}" --perms --chmod='u=rw,go=r') 36 | release_commit=1 37 | release_push=0 38 | release_upload=0 39 | 40 | usage() { 41 | echo >&2 "Usage: $argv0 [OPTIONS]" 42 | echo >&2 "" 43 | echo >&2 "Run this script in a PKGBUILD dir to release an already built package." 
44 | echo >&2 "" 45 | echo >&2 "The script comprises the following operations:" 46 | echo >&2 "" 47 | echo >&2 "- (default) modified version-controlled files are staged for commit" 48 | echo >&2 "- (default) all build artifacts are signed with gpg" 49 | echo >&2 "- (optional) commits are pushed to the remote git repository" 50 | echo >&2 "- (optional) build artifacts are uploaded to the binary repository" 51 | echo >&2 "" 52 | echo >&2 "OPTIONS" 53 | echo >&2 " --no-commit Do not stage version-controlled files for commit" 54 | echo >&2 " --push Automatically push commits to the remote git repository" 55 | echo >&2 " --upload Automatically upload all build artifacts to the binary" 56 | echo >&2 " repository hosted at $server:$remote_path" 57 | exit 1 58 | } 59 | 60 | ## option parsing 61 | opt_short='' 62 | opt_long=('no-commit' 'push' 'upload') 63 | opt_hidden=() 64 | 65 | if ! parseopts "$opt_short" "${opt_long[@]}" "${opt_hidden[@]}" -- "$@"; then 66 | usage 67 | fi 68 | set -- "${OPTRET[@]}" 69 | 70 | while true; do 71 | case "$1" in 72 | --no-commit) 73 | release_commit=0 74 | ;; 75 | --push) 76 | release_push=1 77 | ;; 78 | --upload) 79 | release_upload=1 80 | ;; 81 | --) 82 | shift; 83 | break 84 | ;; 85 | esac 86 | shift 87 | done 88 | 89 | 90 | check_pkgbuild_validity() { 91 | # shellcheck source=/usr/share/pacman/PKGBUILD.proto 92 | . ./PKGBUILD 93 | 94 | # skip when there are no sources available 95 | if (( ! ${#source[@]} )); then 96 | return 97 | fi 98 | 99 | # validate sources hash algo is at least > sha1 100 | local bad_algos=("cksums" "md5sums" "sha1sums") 101 | local good_hash_algo=false 102 | 103 | # from makepkg libmakepkg/util/schema.sh 104 | for integ in "${known_hash_algos[@]}"; do 105 | local sumname="${integ}sums" 106 | if [[ -n ${!sumname} ]] && ! in_array "${sumname}" "${bad_algos[@]}"; then 107 | good_hash_algo=true 108 | break 109 | fi 110 | done 111 | 112 | if ! 
$good_hash_algo; then 113 | die "PKGBUILD lacks a secure cryptographic checksum, insecure algorithms: ${bad_algos[*]}" 114 | fi 115 | } 116 | 117 | # Source makepkg.conf; fail if it is not found 118 | if [[ -r '/etc/makepkg.conf' ]]; then 119 | source '/etc/makepkg.conf' 120 | else 121 | die '/etc/makepkg.conf not found!' 122 | fi 123 | 124 | # Source user-specific makepkg.conf overrides 125 | if [[ -r "${XDG_CONFIG_HOME:-$HOME/.config}/pacman/makepkg.conf" ]]; then 126 | # shellcheck source=/dev/null 127 | source "${XDG_CONFIG_HOME:-$HOME/.config}/pacman/makepkg.conf" 128 | elif [[ -r "$HOME/.makepkg.conf" ]]; then 129 | # shellcheck source=/dev/null 130 | source "$HOME/.makepkg.conf" 131 | fi 132 | 133 | source=() 134 | # shellcheck source=/usr/share/pacman/PKGBUILD.proto 135 | . ./PKGBUILD 136 | pkgbase=${pkgbase:-$pkgname} 137 | 138 | if (( ${#validpgpkeys[@]} != 0 )); then 139 | if [[ -d keys ]]; then 140 | for key in "${validpgpkeys[@]}"; do 141 | if [[ ! -f keys/pgp/$key.asc ]]; then 142 | export-pkgbuild-keys || die 'Failed to export valid PGP keys for source files' 143 | fi 144 | done 145 | else 146 | export-pkgbuild-keys || die 'Failed to export valid PGP keys for source files' 147 | fi 148 | 149 | git add --force -- keys/pgp/* 150 | fi 151 | 152 | # find files which should be under source control 153 | needsversioning=(PKGBUILD) 154 | for s in "${source[@]}"; do 155 | [[ $s != *://* ]] && needsversioning+=("$s") 156 | done 157 | for i in 'changelog' 'install'; do 158 | while read -r file; do 159 | # evaluate any bash variables used 160 | # shellcheck disable=SC2001 161 | eval "file=\"$(sed "s/^\(['\"]\)\(.*\)\1\$/\2/" <<< "$file")\"" 162 | needsversioning+=("$file") 163 | done < <(sed -n "s/^[[:space:]]*$i=//p" PKGBUILD) 164 | done 165 | for key in "${validpgpkeys[@]}"; do 166 | needsversioning+=("keys/pgp/$key.asc") 167 | done 168 | 169 | # assert that they really are controlled by git 170 | if (( ${#needsversioning[*]} )); then 171 | for file in 
"${needsversioning[@]}"; do 172 | # skip none existing files 173 | if [[ ! -f "${file}" ]]; then 174 | continue 175 | fi 176 | if ! git ls-files --error-unmatch "$file"; then 177 | die "%s is not under version control" "$file" 178 | fi 179 | done 180 | fi 181 | 182 | 183 | # check packages for validity 184 | for _arch in "${arch[@]}"; do 185 | for _pkgname in "${pkgname[@]}"; do 186 | fullver=$(get_full_version "$_pkgname") 187 | 188 | if pkgfile=$(find_cached_package "$_pkgname" "$fullver" "$_arch"); then 189 | check_package_validity "$pkgfile" 190 | fi 191 | done 192 | 193 | fullver=$(get_full_version "$pkgbase") 194 | if pkgfile=$(find_cached_package "$pkgbase-debug" "$fullver" "$_arch"); then 195 | check_package_validity "$pkgfile" 196 | fi 197 | done 198 | 199 | # NOTE: not a reality on the AUR... 200 | # check for PKGBUILD standards 201 | #check_pkgbuild_validity 202 | 203 | # auto generate .SRCINFO 204 | # shellcheck disable=SC2119 205 | write_srcinfo_file 206 | 207 | 208 | if (( release_commit )); then 209 | git add --force .SRCINFO 210 | 211 | if [[ -n $(git status --porcelain --untracked-files=no) ]]; then 212 | stat_busy 'Staging files' 213 | for f in $(git ls-files --modified); do 214 | git add "$f" 215 | done 216 | for f in $(git ls-files --deleted); do 217 | git rm "$f" 218 | done 219 | stat_done 220 | 221 | msgtemplate="upgpkg: $(get_full_version)" 222 | if [[ -n $1 ]]; then 223 | stat_busy 'Committing changes' 224 | git commit -q -m "${msgtemplate}: ${1}" || die 225 | stat_done 226 | else 227 | [[ -z ${WORKDIR:-} ]] && setup_workdir 228 | msgfile=$(mktemp --tmpdir="${WORKDIR}" commitpkg.XXXXXXXXXX) 229 | echo "$msgtemplate" > "$msgfile" 230 | if [[ -n $GIT_EDITOR ]]; then 231 | $GIT_EDITOR "$msgfile" || die 232 | elif giteditor=$(git config --get core.editor); then 233 | $giteditor "$msgfile" || die 234 | elif [[ -n $VISUAL ]]; then 235 | $VISUAL "$msgfile" || die 236 | elif [[ -n $EDITOR ]]; then 237 | $EDITOR "$msgfile" || die 238 | else 239 | die 
"No usable editor found (tried \$GIT_EDITOR, git config [core.editor], \$VISUAL, \$EDITOR)." 240 | fi 241 | [[ -s $msgfile ]] || die 242 | stat_busy 'Committing changes' 243 | git commit -v -q -F "$msgfile" || die 244 | unlink "$msgfile" 245 | stat_done 246 | fi 247 | fi 248 | 249 | if (( release_push )); then 250 | git_remote_branch=$(git rev-parse --abbrev-ref --symbolic-full-name "@{u}") 251 | git_remote=${git_remote_branch%/*} 252 | git_remote_url=$(git remote get-url "$git_remote") 253 | 254 | msg 'Fetching remote changes' 255 | git fetch --prune --prune-tags origin || die 'failed to fetch remote changes' 256 | 257 | # Check if local branch is up to date and contains the latest origin commit 258 | if remoteref=$(git rev-parse "$git_remote_branch" 2>/dev/null); then 259 | if [[ $(git branch "$branchname" --contains "$remoteref" --format '%(refname:short)') != "$branchname" ]]; then 260 | die "local branch is out of date, run 'git pull --rebase'" 261 | fi 262 | fi 263 | 264 | msg "Pushing commits to $git_remote_branch where $git_remote is $git_remote_url" 265 | git push --tags --set-upstream "$git_remote" "$branchname" || abort 266 | else 267 | warning "Not pushing commits because --push was not given." 268 | fi 269 | elif (( release_push )); then 270 | warning "Not pushing commits because --no-commit was given." 271 | fi 272 | 273 | 274 | declare -a uploads 275 | 276 | for _arch in "${arch[@]}"; do 277 | for _pkgname in "${pkgname[@]}"; do 278 | fullver=$(get_full_version "$_pkgname") 279 | if ! pkgfile=$(find_cached_package "$_pkgname" "$fullver" "${_arch}"); then 280 | warning "Skipping %s: failed to locate package file" "$_pkgname-$fullver-$_arch" 281 | continue 2 282 | fi 283 | uploads+=("$pkgfile") 284 | done 285 | 286 | fullver=$(get_full_version "$pkgbase") 287 | if ! pkgfile=$(find_cached_package "$pkgbase-debug" "$fullver" "$_arch"); then 288 | continue 289 | fi 290 | if ! 
is_debug_package "$pkgfile"; then 291 | continue 292 | fi 293 | uploads+=("$pkgfile") 294 | done 295 | 296 | for pkgfile in "${uploads[@]}"; do 297 | sigfile="${pkgfile}.sig" 298 | if [[ ! -f $sigfile ]]; then 299 | msg "Signing package %s..." "${pkgfile}" 300 | if [[ -n $GPGKEY ]]; then 301 | SIGNWITHKEY=(-u "${GPGKEY}") 302 | fi 303 | gpg --detach-sign --use-agent --no-armor "${SIGNWITHKEY[@]}" "${pkgfile}" || die 304 | fi 305 | if ! gpg --verify "$sigfile" "$pkgfile" >/dev/null 2>&1; then 306 | die "Signature %s is incorrect!" "$sigfile" 307 | fi 308 | uploads+=("$sigfile") 309 | done 310 | 311 | 312 | if (( release_upload )) && [[ ${#uploads[*]} -gt 0 ]]; then 313 | new_uploads=() 314 | 315 | # convert to absolute paths so rsync can work with colons (epoch) 316 | while read -r -d '' upload; do 317 | new_uploads+=("$upload") 318 | done < <(realpath -z "${uploads[@]}") 319 | 320 | uploads=("${new_uploads[@]}") 321 | unset new_uploads 322 | 323 | msg 'Uploading all package and signature files' 324 | rsync "${rsyncopts[@]}" "${uploads[@]}" "$server:$remote_path/" || die 325 | 326 | # convert to remote paths 327 | declare -a remote_pkgfiles 328 | for pkgfile in "${uploads[@]}"; do 329 | if ! [[ "$pkgfile" = *.sig ]]; then 330 | remote_pkgfiles+=("$remote_path"/"$(basename "$pkgfile")") 331 | fi 332 | done 333 | 334 | msg 'Updating remote pacman database' 335 | ssh -t "${SSH_OPTS[@]}" -- "$server" "./repo add ${remote_pkgfiles[*]@Q} && ./repo update" 336 | fi 337 | -------------------------------------------------------------------------------- /aur-remotebuild: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # SPDX-License-Identifier: GPL-3.0-or-later 4 | 5 | # aur-remotebuild - build packages remotely using aur-chroot 6 | # (partly based on offload-build from devtools) 7 | set -o errexit 8 | shopt -s nullglob 9 | readonly argv0=remotebuild 10 | 11 | if [[ ! 
-f PKGBUILD ]]; then 12 | echo "No PKGBUILD in the current directory!" >&2 13 | exit 1 14 | fi 15 | 16 | # default arguments 17 | repo_name=lahwaacz 18 | chroot_args=( 19 | --create 20 | --update 21 | --build 22 | # makechrootpkg options 23 | --checkpkg 24 | --namcap 25 | ) 26 | 27 | usage() { 28 | printf >&2 'Usage: %s HOSTNAME [--repo NAME] [--inspect never|always|failure] [--] \n' "$argv0" 29 | exit 1 30 | } 31 | 32 | server="$1" 33 | shift 34 | if [[ "$server" == "" ]]; then 35 | usage 36 | fi 37 | 38 | source /usr/share/makepkg/util/parseopts.sh 39 | 40 | ## option parsing 41 | opt_short='d:' 42 | opt_long=('inspect:') 43 | opt_hidden=() 44 | 45 | if ! parseopts "$opt_short" "${opt_long[@]}" "${opt_hidden[@]}" -- "$@"; then 46 | usage 47 | fi 48 | set -- "${OPTRET[@]}" 49 | 50 | while true; do 51 | case "$1" in 52 | --repo) 53 | shift; 54 | repo_name=$1 55 | ;; 56 | --inspect) 57 | shift; 58 | chroot_args+=(--makechrootpkg-args="-x,$1") 59 | ;; 60 | --) 61 | shift; 62 | break 63 | ;; 64 | esac 65 | shift 66 | done 67 | 68 | # pass db name to aur-chroot 69 | chroot_args+=( 70 | --pacman-conf "/etc/aurutils/pacman-$repo_name.conf" 71 | --makepkg-conf "/etc/aurutils/makepkg-$repo_name.conf" 72 | ) 73 | 74 | # pass remaining arguments to aur-chroot 75 | if (($#)); then 76 | chroot_args+=("$@") 77 | fi 78 | 79 | 80 | # aur chroot command to run remotely 81 | aur_chroot_cmd=( 82 | env 83 | # aur-chroot does not preserve SRCDEST and LOGDEST by default https://github.com/aurutils/aurutils/issues/1196 84 | AUR_PACMAN_AUTH="sudo --preserve-env=GNUPGHOME,SSH_AUTH_SOCK,SRCDEST,PKGDEST,LOGDEST" 85 | # NOTE: do not clear SRCDEST to use cached directory set in the user's makepkg.conf on the remote host 86 | #SRCDEST="" 87 | PKGDEST="" 88 | LOGDEST="" 89 | aur 90 | chroot 91 | "${chroot_args[@]}" 92 | ) 93 | 94 | 95 | ### offload-build-like part 96 | source /usr/share/devtools/lib/common.sh 97 | source /usr/share/devtools/lib/util/makepkg.sh 98 | source 
/usr/share/devtools/lib/util/srcinfo.sh 99 | source /usr/share/makepkg/util/config.sh 100 | 101 | [[ -z ${WORKDIR:-} ]] && setup_workdir 102 | TEMPDIR=$(mktemp --tmpdir="${WORKDIR}" --directory aur-remotebuild.XXXXXXXXXX) 103 | export TEMPDIR 104 | 105 | # Load makepkg.conf variables to be available 106 | # shellcheck disable=SC2119 107 | load_makepkg_config 108 | 109 | # Use a source-only tarball as an intermediate to transfer files. This 110 | # guarantees the checksums are okay, and guarantees that all needed files are 111 | # transferred, including local sources, install scripts, and changelogs. 112 | export SRCPKGDEST="${TEMPDIR}" 113 | makepkg_source_package || die "unable to make source package" 114 | 115 | # Temporary cosmetic workaround makepkg if SRCDEST is set somewhere else 116 | # but an empty src dir is created in PWD. Remove once fixed in makepkg. 117 | rmdir --ignore-fail-on-non-empty src 2>/dev/null || true 118 | 119 | # Create a temporary directory on the server 120 | remote_temp=$( 121 | ssh "${SSH_OPTS[@]}" -- "$server" ' 122 | temp="${XDG_CACHE_HOME:-$HOME/.cache}/aur-remotebuild" && 123 | mkdir -p "$temp" && 124 | mktemp --directory --tmpdir="$temp" 125 | ') 126 | 127 | # Transfer the srcpkg to the server 128 | msg "Transferring source package to the server..." 
# There is exactly one source package in SRCPKGDEST (a fresh tempdir),
# so the glob expands to a single element.
_srcpkg=("$SRCPKGDEST"/*"$SRCEXT")
srcpkg="${_srcpkg[0]}"
rsync "${RSYNC_OPTS[@]}" -- "$srcpkg" "$server":"$remote_temp" || die

# Prepare the srcpkg on the server
msg "Extracting srcpkg"
# ${var@Q} quotes the value for safe re-evaluation by the remote shell.
ssh "${SSH_OPTS[@]}" -- "$server" "cd ${remote_temp@Q} && bsdtar --strip-components 1 -xvf $(basename "$srcpkg")" || die

# Run the build command on the server
msg "Running aur chroot ${chroot_args[*]}"
# shellcheck disable=SC2145
if ssh "${SSH_OPTS[@]}" -t -- "$server" "cd ${remote_temp@Q} && ${aur_chroot_cmd[@]@Q}"; then
    msg "Build complete"

    # Get an array of files that should be downloaded from the server.
    # NOTE: the command string mixes double-quoted parts (expanded locally,
    # e.g. ${remote_temp@Q}) with single-quoted parts (expanded remotely,
    # e.g. ${file}); makepkg --packagelist is filtered down to files that
    # actually exist on the remote side.
    mapfile -t files < <(
        ssh "${SSH_OPTS[@]}" -- "$server" "
            cd ${remote_temp@Q}"' &&
            while read -r file; do
                [[ -f "${file}" ]] && printf "%s\n" "${file}" ||:
            done < <(SRCDEST="" PKGDEST="" LOGDEST="" makepkg --packagelist) &&
            printf "%s\n" '"${remote_temp@Q}/PKGBUILD"'

            find '"${remote_temp@Q}"' -name "*.log"
        ')
else
    # Build failed, only the logs should be downloaded from the server
    mapfile -t files < <(
        ssh "${SSH_OPTS[@]}" -- "$server" '
            find '"${remote_temp@Q}"' -name "*.log"
        ')
fi


if (( ${#files[@]} )); then
    msg 'Downloading files...'
    rsync "${RSYNC_OPTS[@]}" -- "${files[@]/#/$server:}" "${TEMPDIR}/" || die

    # Build logs always go to LOGDEST (falling back to the current directory).
    if is_globfile "${TEMPDIR}"/*.log; then
        # shellcheck disable=SC2031
        mv "${TEMPDIR}"/*.log "${LOGDEST:-${PWD}}/"
    fi
    # The presence of downloaded package files is what distinguishes a
    # successful build from a failed one.
    if is_globfile "${TEMPDIR}"/*.pkg.tar*; then
        # Building a package may change the PKGBUILD during update_pkgver
        # shellcheck disable=SC2031
        mv "${TEMPDIR}/PKGBUILD" "${PWD}/"
        # shellcheck disable=SC2031
        mv "${TEMPDIR}"/*.pkg.tar* "${PKGDEST:-${PWD}}/"
    else
        # shellcheck disable=SC2031
        error "Build failed, check logs in ${LOGDEST:-${PWD}}"
        exit 1
    fi

    # auto generate .SRCINFO
    # shellcheck disable=SC2119
    write_srcinfo_file

    # Clean up only on success; on failure the remote directory is kept
    # (the failure branch above exits before reaching this point).
    msg "Removing remote temporary directory $remote_temp"
    ssh "${SSH_OPTS[@]}" -- "$server" "rm -rf -- ${remote_temp@Q}"
else
    exit 1
fi
-------------------------------------------------------------------------------- /backup-system.sh: --------------------------------------------------------------------------------
#!/bin/bash

# exit on first error
set -e

# Destination for all backups: an external drive that must already be mounted.
backupdir="/media/WD-black/backups"

# check if destination dir exists
if [[ ! -d "$backupdir" ]]; then
    echo "Backup directory $backupdir does not exist. Is the drive mounted?"
11 | exit 1 12 | fi 13 | 14 | #homedir="$backupdir/home_rsync_copy" 15 | #rootdir="$backupdir/root_rsync_copy" 16 | 17 | #echo "Syncing / to $rootdir (root permissions required)" 18 | #sudo rsync / "$rootdir" -aPhAHX --info=progress2,name0,stats2 --delete --exclude={"/dev/*","/proc/*","/sys/*","/tmp/*","/run/*","/mnt/*","/media/*","/lost+found","/home","/swapfile","/.snapshots"} 19 | 20 | #echo "Syncing ~/ to $homedir" 21 | #rsync ~/ $homedir -aPhAHX --one-file-system --info=progress2,name0,stats2 --delete 22 | 23 | 24 | # TODO: 25 | # - make snapshot with snapper just before btrfs-sync 26 | # - run `sync` before btrfs-sync to make sure that the snapshot is fully written to the disk 27 | # - copy the snapper metadata files (info.xml) 28 | # - make snapshots of the remaining subvolumes: @postgres @nspawn_containers @var_log 29 | 30 | echo "Syncing /.snapshots to $backupdir/root (root permissions required)" 31 | sudo btrfs-sync --verbose --delete /.snapshots "$backupdir/root" 32 | 33 | echo "Syncing /home/.snapshots to $backupdir/home (root permissions required)" 34 | sudo btrfs-sync --verbose --delete /home/.snapshots "$backupdir/home" 35 | -------------------------------------------------------------------------------- /batmanpager: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # mandoc passes a file name, other tools write to stdout 4 | # using `cat "$@"` we take care of both reading from file and stdin 5 | # https://github.com/sharkdp/bat/issues/1145#issuecomment-1743518097 6 | exec cat "$@" | col -bx | bat --language man --style plain --pager "$PAGER" 7 | -------------------------------------------------------------------------------- /bsnap.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/bash 2 | 3 | # exit on first error 4 | set -e 5 | 6 | backupdir="$HOME/_backup_snapshots" 7 | 8 | usage() { 9 | echo $@ >&2 10 | echo "Usage: $0 {snapshot|transfer} ... 
11 | 12 | snapshot Create snapshots for every subvolume configured in '\$backupdir/*'. 13 | The subvolume is specified by a symlink '\$backupdir/*/cur' 14 | pointing to a Btrfs subvolume. 15 | 16 | transfer Transfer all snapshots from '\$backupdir/*/' to '/', which 17 | should be other Btrfs partition. The tree structure is kept 18 | intact. 19 | 20 | \$backupdir is set to '$backupdir' 21 | " >&2 22 | } 23 | 24 | transfer() { 25 | src="$1" # e.g. ~/_backup_snapshots/Bbox/ 26 | dst="$2" # e.g. /media/WD1T/backup-lahwaacz/Bbox/ 27 | 28 | [[ ! -d "$dst" ]] && mkdir "$dst" 29 | 30 | # get list of snapshots to transfer 31 | src_snapshots=($(find "$src" -mindepth 1 -maxdepth 1 -type d | sort)) 32 | 33 | _len=${#src_snapshots[@]} 34 | for ((i=0; i<$_len; i++)); do 35 | if [[ -e "$dst/$(basename ${src_snapshots[$i]})" ]]; then 36 | # nothing to transfer 37 | echo "Snapshot '$dst/$(basename ${src_snapshots[$i]})' already exists" 38 | continue 39 | fi 40 | 41 | # There is currently an issue that the snapshots to be used with "btrfs send" 42 | # must be physically on the disk, or you may receive a "stale NFS file handle" 43 | # error. 
This is accomplished by "sync" after the snapshot 44 | # 45 | # ref: http://marc.merlins.org/perso/btrfs/post_2014-03-22_Btrfs-Tips_-Doing-Fast-Incremental-Backups-With-Btrfs-Send-and-Receive.html 46 | sync 47 | 48 | dst_snapshots=($(find "$dst" -mindepth 1 -maxdepth 1 -type d | sort)) 49 | 50 | if [[ $i -eq 0 ]]; then 51 | # no parent, make initial transfer 52 | sudo sh -c "btrfs send ${src_snapshots[$i]} | btrfs receive $dst" 53 | else 54 | sudo sh -c "btrfs send -p ${src_snapshots[(($i-1))]} ${src_snapshots[$i]} | btrfs receive $dst" 55 | fi 56 | 57 | done 58 | 59 | 60 | 61 | } 62 | 63 | case $1 in 64 | snapshot) 65 | for dir in "$backupdir"/*; do 66 | if [[ -L "$dir/cur" ]]; then 67 | btrfs subvolume snapshot -r $(realpath "$dir/cur") "$dir/$(date +%F-%T)" 68 | else 69 | echo "$dir/cur does not exist or is not a symlink" 70 | fi 71 | done 72 | ;; 73 | transfer) 74 | [ -n "$2" -a -d "$2" ] || usage "Invalid destination path" 75 | 76 | for dir in "$backupdir"/*; do 77 | transfer "$dir" "$2"/$(basename "$dir") 78 | done 79 | ;; 80 | *) 81 | usage "Incorrect invocation" 82 | esac 83 | -------------------------------------------------------------------------------- /btrfs-diff: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Author: http://serverfault.com/users/96883/artfulrobot 4 | # License: Unknown 5 | # 6 | # This script will show most files that got modified or added. 7 | # Renames and deletions will not be shown. 8 | # Read limitations on: 9 | # http://serverfault.com/questions/399894/does-btrfs-have-an-efficient-way-to-compare-snapshots 10 | # 11 | # btrfs send is the best way to do this long term, but as of kernel 12 | # 3.14, btrfs send cannot just send a list of changed files without 13 | # scanning and sending all the changed data blocks along. 
14 | 15 | usage() { echo $@ >&2; echo "Usage: $0 " >&2; exit 1; } 16 | 17 | [ $# -eq 2 ] || usage "Incorrect invocation"; 18 | SNAPSHOT_OLD=$1; 19 | SNAPSHOT_NEW=$2; 20 | 21 | [ -d $SNAPSHOT_OLD ] || usage "$SNAPSHOT_OLD does not exist"; 22 | [ -d $SNAPSHOT_NEW ] || usage "$SNAPSHOT_NEW does not exist"; 23 | 24 | OLD_TRANSID=`btrfs subvolume find-new "$SNAPSHOT_OLD" 9999999` 25 | OLD_TRANSID=${OLD_TRANSID#transid marker was } 26 | [ -n "$OLD_TRANSID" -a "$OLD_TRANSID" -gt 0 ] || usage "Failed to find generation for $SNAPSHOT_NEW" 27 | 28 | btrfs subvolume find-new "$SNAPSHOT_NEW" $OLD_TRANSID | sed '$d' | cut -f17- -d' ' | sort | uniq 29 | -------------------------------------------------------------------------------- /btrfs-sync: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Simple script that synchronizes BTRFS snapshots locally. 5 | # Features compression, retention policy and automatic incremental sync 6 | # 7 | 8 | set -e 9 | set -o pipefail 10 | set -o errtrace 11 | 12 | print_usage() { 13 | echo "Usage: 14 | $BIN [options] [...] 15 | 16 | -k|--keep NUM keep only last sync'ed snapshots 17 | -d|--delete delete snapshots in that don't exist in 18 | -q|--quiet don't display progress 19 | -v|--verbose display more information 20 | -h|--help show usage 21 | 22 | can either be a single snapshot, or a folder containing snapshots 23 | " 24 | } 25 | 26 | echov() { if [[ "$VERBOSE" == 1 ]]; then echo "$@"; fi } 27 | 28 | #---------------------------------------------------------------------------------------------------------- 29 | 30 | # preliminary checks 31 | BIN="${0##*/}" 32 | [[ $# -lt 2 ]] && { print_usage ; exit 1; } 33 | [[ ${EUID} -ne 0 ]] && { echo "Must be run as root. Try 'sudo $BIN'"; exit 1; } 34 | 35 | # parse arguments 36 | KEEP=0 37 | 38 | OPTS=$( getopt -o hqzZk:p:dv -l quiet -l help -l keep: -l delete -l verbose -- "$@" 2>/dev/null ) 39 | [[ $? 
-ne 0 ]] && { echo "error parsing arguments"; exit 1; } 40 | eval set -- "$OPTS" 41 | 42 | while true; do 43 | case "$1" in 44 | -h|--help ) print_usage; exit 0 ;; 45 | -q|--quiet ) QUIET=1 ; shift 1 ;; 46 | -d|--delete ) DELETE=1 ; shift 1 ;; 47 | -k|--keep ) KEEP=$2 ; shift 2 ;; 48 | -v|--verbose) VERBOSE=1 ; shift 1 ;; 49 | --) shift; break ;; 50 | esac 51 | done 52 | 53 | # detect src and dst arguments 54 | SRC=( "${@:1:$#-1}" ) 55 | DST="${@: -1}" 56 | 57 | test -x "$SRC" &>/dev/null || { 58 | echo "Access error. Do you have adequate permissions for $SRC?" 59 | exit 1 60 | } 61 | 62 | test -x "$DST" &>/dev/null || { 63 | echo "Access error. Do you have adequate permissions for $DST?" 64 | exit 1 65 | } 66 | 67 | #---------------------------------------------------------------------------------------------------------- 68 | 69 | # more checks 70 | 71 | ## don't overlap 72 | if pgrep -F /run/btrfs-sync.pid &>/dev/null; then 73 | echo "$BIN is already running" 74 | exit 1 75 | fi 76 | echo $$ > /run/btrfs-sync.pid 77 | 78 | ## src checks 79 | echov "* Check source" 80 | SRCS=() 81 | SRCS_BASE=() 82 | for s in "${SRC[@]}"; do 83 | src="$(realpath "$s")" 84 | if ! 
test -e "$src"; then 85 | echo "$s not found" 86 | exit 1 87 | fi 88 | # check if the src is a read-only subvolume 89 | if btrfs subvolume show "$src" &>/dev/null && [[ "$(btrfs property get -ts "$src")" == "ro=true" ]]; then 90 | SRCS+=("$src") 91 | SRCS_BASE+=("$src") 92 | else 93 | for dir in $( find "$src" -maxdepth 2 -type d ); do 94 | # check if the src is a read-only subvolume 95 | if btrfs subvolume show "$dir" &>/dev/null && [[ "$(btrfs property get -ts "$dir")" == "ro=true" ]]; then 96 | SRCS+=("$dir") 97 | SRCS_BASE+=("$src") 98 | fi 99 | done 100 | fi 101 | done 102 | if [[ ${#SRCS[@]} -eq 0 ]]; then 103 | echo "no BTRFS subvolumes found" 104 | exit 1 105 | fi 106 | 107 | ## use 'pv' command if available 108 | PV=( pv -F"time elapsed [%t] | rate %r | total size [%b]" ) 109 | if [[ "$QUIET" == "1" ]]; then 110 | PV=( cat ) 111 | else 112 | if ! type pv &>/dev/null; then 113 | echo "INFO: install the 'pv' package in order to get a progress indicator" 114 | PV=( cat ) 115 | fi 116 | fi 117 | 118 | #---------------------------------------------------------------------------------------------------------- 119 | 120 | # sync snapshots 121 | 122 | get_dst_snapshots() { # sets DSTS DST_UUIDS 123 | local DST="$1" 124 | DSTS=() 125 | DST_UUIDS=() 126 | for dir in $( find "$DST" -maxdepth 2 -type d ); do 127 | if btrfs subvolume show "$dir" &>/dev/null; then 128 | local UUID=$( btrfs subvolume show "$dir" 2>/dev/null | grep 'Received UUID' | awk '{ print $3 }' ) 129 | if [[ "$UUID" != "-" ]] && [[ "$UUID" != "" ]]; then 130 | DSTS+=("$dir") 131 | DST_UUIDS+=("$UUID") 132 | fi 133 | fi 134 | done 135 | } 136 | 137 | choose_seed() { # sets SEED 138 | local SRC="$1" 139 | local SRC_BASE="$2" 140 | 141 | SEED="$SEED_NEXT" 142 | if [[ "$SEED" == "" ]]; then 143 | # try to get most recent src snapshot that exists in dst to use as a seed 144 | local RXID_CALCULATED=0 145 | declare -A PATH_RXID DATE_RXID SHOWP RXIDP DATEP 146 | local LIST="$( btrfs subvolume list -su 
"$SRC" )" 147 | local SEED_CANDIDATES=() 148 | for id in "${DST_UUIDS[@]}"; do 149 | # try to match by UUID 150 | local PATH_=$( awk "{ if ( \$14 == \"$id\" ) print \$16 }" <<<"$LIST" ) 151 | local DATE=$( awk "{ if ( \$14 == \"$id\" ) print \$11, \$12 }" <<<"$LIST" ) 152 | 153 | # try to match by received UUID, only if necessary 154 | if [[ "$PATH_" == "" ]]; then 155 | if [[ "$RXID_CALCULATED" == "0" ]]; then # create table during the first iteration if needed 156 | local PATHS=( $( btrfs subvolume list -u "$SRC" | awk '{ print $11 }' ) ) 157 | for p in "${PATHS[@]}"; do 158 | SHOWP="$( btrfs subvolume show "$( dirname "$SRC" )/$( basename "$p" )" 2>/dev/null )" 159 | RXIDP="$( grep 'Received UUID' <<<"$SHOWP" | awk '{ print $3 }' )" 160 | DATEP="$( grep 'Creation time' <<<"$SHOWP" | awk '{ print $3, $4 }' )" 161 | [[ "$RXIDP" == "" ]] && continue 162 | PATH_RXID["$RXIDP"]="$p" 163 | DATE_RXID["$RXIDP"]="$DATEP" 164 | done 165 | RXID_CALCULATED=1 166 | fi 167 | PATH_="${PATH_RXID["$id"]}" 168 | DATE="${DATE_RXID["$id"]}" 169 | fi 170 | 171 | if [[ "$PATH_" == "" ]] || [[ "$PATH_" == "$( basename "$SRC" )" ]]; then 172 | continue 173 | fi 174 | 175 | # if the path does not exist, it is likely relative to the root subvolume 176 | # rather than the mounted subvolume 177 | if ! 
test -d "$PATH_" && mountpoint -q "$SRC_BASE"; then 178 | local SRC_BASE_SUBVOL=$(findmnt -n -o OPTIONS "$SRC_BASE" | tr "," "\n" | grep "subvol=" | awk -F '=' '{ print $2 }') 179 | # drop the leading slash 180 | SRC_BASE_SUBVOL="${SRC_BASE_SUBVOL#/}" 181 | # replace the prefix in $PATH_ 182 | if [[ "$PATH_" =~ "$SRC_BASE_SUBVOL"* ]]; then 183 | PATH_="${PATH_#${SRC_BASE_SUBVOL}}" 184 | PATH_="$SRC_BASE/$PATH_" 185 | fi 186 | fi 187 | 188 | local SECS=$( date -d "$DATE" +"%s" ) 189 | SEED_CANDIDATES+=("$SECS|$PATH_") 190 | done 191 | SEED=$(IFS=$'\n' echo "${SEED_CANDIDATES[@]}" | sort -V | tail -1 | cut -f2 -d'|') 192 | fi 193 | } 194 | 195 | exists_at_dst() { 196 | local SHOW="$( btrfs subvolume show "$SRC" )" 197 | 198 | local SRC_UUID="$( grep 'UUID:' <<< "$SHOW" | head -1 | awk '{ print $2 }' )" 199 | grep -q "$SRC_UUID" <<<"${DST_UUIDS[@]}" && return 0; 200 | 201 | local SRC_RXID="$( grep 'Received UUID' <<< "$SHOW" | awk '{ print $3 }' )" 202 | grep -q "^-$" <<<"$SRC_RXID" && return 1; 203 | grep -q "$SRC_RXID" <<<"${DST_UUIDS[@]}" && return 0; 204 | 205 | return 1 206 | } 207 | 208 | ## sync incrementally 209 | sync_snapshot() { 210 | local SRC="$1" 211 | local SRC_BASE="$2" 212 | if ! test -d "$SRC" || ! test -d "$SRC_BASE"; then 213 | return 214 | fi 215 | 216 | if exists_at_dst "$SRC"; then 217 | echov "* Skip existing '$SRC'" 218 | return 0 219 | fi 220 | 221 | choose_seed "$SRC" "$SRC_BASE" # sets SEED 222 | echo "SEED=$SEED" 223 | 224 | # incremental sync argument 225 | if [[ "$SEED" != "" ]]; then 226 | if test -d "$SEED"; then 227 | # Sends the difference between the new snapshot and old snapshot to the 228 | # backup location. Using the -c flag instead of -p tells it that there 229 | # is an identical subvolume to the old snapshot at the receiving 230 | # location where it can get its data. This helps speed up the transfer. 231 | local SEED_ARG=( -c "$SEED" ) 232 | else 233 | echo "INFO: couldn't find $SEED. 
Non-incremental mode" 234 | fi 235 | fi 236 | 237 | # destination path where the subvolume will be sent 238 | local DST_SUBVOL="$DST/$( realpath --relative-to "$SRC_BASE" "$SRC" )" 239 | if test -d "$DST_SUBVOL"; then 240 | echo "ERROR: destination directory $DST_SUBVOL already exists, but was not detected as a Btrfs subvolume." >&2 241 | return 1 242 | fi 243 | 244 | # create the parent directory at destination 245 | mkdir -p "$(dirname "$DST_SUBVOL")" 246 | 247 | # print info 248 | echo -n "* Synchronizing '$SRC' to '$DST_SUBVOL'" 249 | if [[ "$SEED" != "" ]]; then 250 | echov -n " using seed '$SEED'" 251 | fi 252 | echo "..." 253 | 254 | # do it 255 | btrfs send -q "${SEED_ARG[@]}" "$SRC" \ 256 | | "${PV[@]}" \ 257 | | btrfs receive "$(dirname "$DST_SUBVOL")" 2>&1 \ 258 | | (grep -v -e'^At subvol ' -e'^At snapshot ' || true) \ 259 | || { 260 | btrfs subvolume delete "$DST_SUBVOL" 2>/dev/null 261 | return 1; 262 | } 263 | 264 | # update DST list 265 | DSTS+=("$DST_SUBVOL") 266 | DST_UUIDS+=("$SRC_UUID") 267 | SEED_NEXT="$SRC" 268 | } 269 | 270 | #---------------------------------------------------------------------------------------------------------- 271 | 272 | # sync all snapshots found in src 273 | echov "* Check destination" 274 | get_dst_snapshots "$DST" # sets DSTS DST_UUIDS 275 | for (( i=0; i<"${#SRCS[@]}"; i++ )); do 276 | src="${SRCS[$i]}" 277 | src_base="${SRCS_BASE[$i]}" 278 | sync_snapshot "$src" "$src_base" && RET=0 || RET=1 279 | # for i in 1 2; do 280 | # [[ "$RET" != "1" ]] && break 281 | # echo "* Retrying '$src'..." 282 | # sync_snapshot "$src" && RET=0 || RET=1 283 | # done 284 | if [[ "$RET" == "1" ]]; then 285 | echo "Abort" 286 | exit 1 287 | fi 288 | done 289 | 290 | #---------------------------------------------------------------------------------------------------------- 291 | 292 | # retention policy 293 | if [[ "$KEEP" != 0 ]] && [[ ${#DSTS[@]} -gt $KEEP ]]; then 294 | echo "* Pruning old snapshots..." 
295 | for (( i=0; i < $(( ${#DSTS[@]} - KEEP )); i++ )); do 296 | PRUNE_LIST+=( "${DSTS[$i]}" ) 297 | done 298 | btrfs subvolume delete "${PRUNE_LIST[@]}" 299 | fi 300 | 301 | # delete flag 302 | if [[ "$DELETE" == 1 ]]; then 303 | for dst in "${DSTS[@]}"; do 304 | FOUND=0 305 | # for src in "${SRCS[@]}"; do 306 | for (( i=0; i<"${#SRCS[@]}"; i++ )); do 307 | src="${SRCS[$i]}" 308 | echo "checking $src" 309 | if [[ "$( basename $src )" == "$( basename $dst )" ]]; then 310 | FOUND=1 311 | break 312 | fi 313 | done 314 | if [[ "$FOUND" == 0 ]]; then 315 | DEL_LIST+=( "$dst" ) 316 | fi 317 | done 318 | if [[ "$DEL_LIST" != "" ]]; then 319 | echo "* Deleting non existent snapshots..." 320 | btrfs subvolume delete "${DEL_LIST[@]}" 321 | fi 322 | fi 323 | -------------------------------------------------------------------------------- /btrfs-sync-WIP: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errtrace 4 | 5 | version="0.0" 6 | name="btrfs-sync" 7 | 8 | SNAPPER_CONFIG=/etc/conf.d/snapper 9 | 10 | TMPDIR=$(mktemp -d) 11 | PIPE=$TMPDIR/$name.out 12 | mkfifo $PIPE 13 | systemd-cat -t "$name" < $PIPE & 14 | exec 3>$PIPE 15 | 16 | donotify=0 17 | which notify-send &> /dev/null 18 | if [[ $? -ne 0 ]]; then 19 | donotify=1 20 | fi 21 | 22 | error() { 23 | printf "==> ERROR: %s\n" "$@" 24 | notify_error 'Error' 'Check journal for more information.' 25 | } >&2 26 | 27 | die() { 28 | error "$@" 29 | exit 1 30 | } 31 | 32 | traperror() { 33 | printf "Exited due to error on line %s.\n" $1 34 | printf "exit status: %s\n" "$2" 35 | printf "command: %s\n" "$3" 36 | printf "bash line: %s\n" "$4" 37 | printf "function name: %s\n" "$5" 38 | exit 1 39 | } 40 | 41 | trapkill() { 42 | die "Exited due to user intervention." 43 | } 44 | 45 | trap 'traperror ${LINENO} $? 
"$BASH_COMMAND" $BASH_LINENO "${FUNCNAME[@]}"' ERR 46 | trap trapkill SIGTERM SIGINT 47 | 48 | usage() { 49 | cat < snapper configuration to backup 55 | -d, --description snapper description 56 | -h, --help print this message 57 | -n, --noconfirm do not ask for confirmation 58 | -q, --quiet do not send notifications; instead print them. 59 | -s, --subvolid subvolume id of the mounted BTRFS subvolume to back up to 60 | -u, --UUID UUID of the mounted BTRFS subvolume to back up to 61 | 62 | See 'man snap-sync' for more details. 63 | EOF 64 | } 65 | 66 | while [[ $# -gt 0 ]]; do 67 | key="$1" 68 | case $key in 69 | -d|--description) 70 | description="$2" 71 | shift 2 72 | ;; 73 | -c|--config) 74 | selected_configs="$2" 75 | shift 2 76 | ;; 77 | -u|--UUID) 78 | uuid_cmdline="$2" 79 | shift 2 80 | ;; 81 | -s|--subvolid) 82 | subvolid_cmdline="$2" 83 | shift 2 84 | ;; 85 | -n|--noconfirm) 86 | noconfirm="yes" 87 | shift 88 | ;; 89 | -h|--help) 90 | usage 91 | exit 1 92 | ;; 93 | -q|--quiet) 94 | donotify=1 95 | shift 96 | ;; 97 | *) 98 | die "Unknown option: '$key'. Run '$name -h' for valid options." 99 | ;; 100 | esac 101 | done 102 | 103 | notify() { 104 | for u in $(users | tr ' ' '\n' | sort -u); do 105 | sudo -u $u DISPLAY=:0 \ 106 | DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/$(sudo -u $u id -u)/bus \ 107 | notify-send -a $name "$1" "$2" --icon="dialog-$3" 108 | done 109 | } 110 | 111 | notify_info() { 112 | if [[ $donotify -eq 0 ]]; then 113 | notify "$1" "$2" "information" 114 | else 115 | printf "$1: $2\n" 116 | fi 117 | } 118 | 119 | notify_error() { 120 | if [[ $donotify -eq 0 ]]; then 121 | notify "$1" "$2" "error" 122 | else 123 | printf "$1: $2\n" 124 | fi 125 | } 126 | 127 | [[ $EUID -ne 0 ]] && die "Script must be run as root. See '$name -h' for a description of options" 128 | ! [[ -f $SNAPPER_CONFIG ]] && die "$SNAPPER_CONFIG does not exist." 
129 | 130 | description=${description:-"latest incremental backup"} 131 | uuid_cmdline=${uuid_cmdline:-"none"} 132 | subvolid_cmdline=${subvolid_cmdline:-"5"} 133 | noconfirm=${noconfirm:-"no"} 134 | 135 | if [[ "$uuid_cmdline" != "none" ]]; then 136 | notify_info "Backup started" "Starting backups to $uuid_cmdline subvolid=$subvolid_cmdline..." 137 | else 138 | notify_info "Backup started" "Starting backups. Use command line menu to select disk." 139 | fi 140 | 141 | if [[ "$(findmnt -n -v --target / -o FSTYPE)" == "btrfs" ]]; then 142 | EXCLUDE_UUID=$(findmnt -n -v -t btrfs --target / -o UUID) 143 | TARGETS=$(findmnt -n -v -t btrfs -o UUID,TARGET --list | grep -v $EXCLUDE_UUID | awk '{print $2}') 144 | UUIDS=$(findmnt -n -v -t btrfs -o UUID,TARGET --list | grep -v $EXCLUDE_UUID | awk '{print $1}') 145 | else 146 | TARGETS=$(findmnt -n -v -t btrfs -o TARGET --list) 147 | UUIDS=$(findmnt -n -v -t btrfs -o UUID --list) 148 | fi 149 | 150 | declare -a TARGETS_ARRAY 151 | declare -a UUIDS_ARRAY 152 | declare -a SUBVOLIDS_ARRAY 153 | 154 | i=0 155 | for x in $TARGETS; do 156 | SUBVOLIDS_ARRAY[$i]=$(btrfs subvolume show $x | awk '/Subvolume ID:/ { print $3 }') 157 | TARGETS_ARRAY[$i]=$x 158 | i=$((i+1)) 159 | done 160 | 161 | i=0 162 | disk=-1 163 | disk_count=0 164 | for x in $UUIDS; do 165 | UUIDS_ARRAY[$i]=$x 166 | if [[ "$x" == "$uuid_cmdline" && ${SUBVOLIDS_ARRAY[$((i))]} == "$subvolid_cmdline" ]]; then 167 | disk=$i 168 | disk_count=$(($disk_count+1)) 169 | fi 170 | i=$((i+1)) 171 | done 172 | 173 | if [[ "${#UUIDS_ARRAY[$@]}" -eq 0 ]]; then 174 | die "No external btrfs subvolumes found to backup to. Run '$name -h' for more options." 
fi

# Bail out to the interactive menu when the UUID+subvolid pair matched more
# than one mount point.
# FIX: inside [[ ]] the '>' operator is a *lexicographic* string comparison
# ("2" > "10" is true), so multi-digit counts compared incorrectly; use the
# numeric -gt operator instead.
if [[ "$disk_count" -gt 1 ]]; then
    printf "Multiple mount points were found with UUID %s and subvolid %s.\n" "$uuid_cmdline" "$subvolid_cmdline"
    disk="-1"
fi

# Interactive fallback: no (unambiguous) match on the command line, so let
# the user pick a target from the enumerated btrfs mounts.
if [[ "$disk" == -1 ]]; then
    if [[ "$disk_count" == 0 && "$uuid_cmdline" != "none" ]]; then
        error "A device with UUID $uuid_cmdline and subvolid $subvolid_cmdline was not found to be mounted, or it is not a BTRFS device."
    fi
    printf "Select a mounted BTRFS device on your local machine to backup to.\nFor more options, exit and run '$name -h'.\n"
    # Loop until a valid 1-based menu index (or 0 = exit) is entered.
    while [[ $disk -lt 0 || $disk -gt $i ]]; do
        for x in "${!TARGETS_ARRAY[@]}"; do
            printf "%4s) %s (uuid=%s, subvolid=%s)\n" "$((x+1))" "${TARGETS_ARRAY[$x]}" "${UUIDS_ARRAY[$x]}" "${SUBVOLIDS_ARRAY[$x]}"
        done
        printf "%4s) Exit\n" "0"
        read -e -r -p "Enter a number: " disk
        # ?(-)+([0-9]) is an extglob pattern: optional minus, then digits.
        if ! [[ $disk == ?(-)+([0-9]) ]] || [[ $disk -lt 0 || $disk -gt $i ]]; then
            printf "\nNo disk selected. Select a disk to continue.\n"
            disk=-1
        fi
    done
    if [[ $disk == 0 ]]; then
        exit 0
    fi
    # Convert the 1-based menu choice back to a 0-based array index.
    disk=$(($disk-1))
fi

selected_subvolid="${SUBVOLIDS_ARRAY[$((disk))]}"
selected_uuid="${UUIDS_ARRAY[$((disk))]}"
selected_mnt="${TARGETS_ARRAY[$((disk))]}"
printf "\nYou selected the disk with uuid=%s, subvolid=%s.\n" "$selected_uuid" "$selected_subvolid" | tee $PIPE
printf "The disk is mounted at '%s'.\n" "$selected_mnt" | tee $PIPE

# Pulls in SNAPPER_CONFIGS (the list of configured snapper configs).
source $SNAPPER_CONFIG

if [[ -z $selected_configs ]]; then
    printf "\nInteractively cycling through all snapper configurations...\n"
fi
selected_configs=${selected_configs:-$SNAPPER_CONFIGS}

# Per-config state gathered in the preparation pass and consumed by the
# actual backup pass; all arrays are indexed by config position.
declare -a BACKUPDIRS_ARRAY
declare -a MYBACKUPDIR_ARRAY
declare -a OLD_NUM_ARRAY
declare -a OLD_SNAP_ARRAY
declare -a NEW_NUM_ARRAY
declare -a NEW_SNAP_ARRAY
declare -a NEW_INFO_ARRAY
declare -a BACKUPLOC_ARRAY
declare -a CONT_BACKUP_ARRAY

# Initial configuration of where backup directories are
i=0
for x in $selected_configs; do

    # A config should map to at most one snapper entry tagged with this
    # disk's subvolid+uuid; more than one means inconsistent metadata.
    if [[ "$(snapper -c $x list -t single | awk '/'"subvolid=$selected_subvolid, uuid=$selected_uuid"'/ {cnt++} END {print cnt}')" -gt 1 ]]; then
        error "More than one snapper entry found with UUID $selected_uuid subvolid $selected_subvolid for configuration $x. Skipping configuration $x."
        continue
    fi

    # Leftover "<name> backup in progress" snapshots indicate a previously
    # failed run; offer to delete them (unless --noconfirm was given).
    if [[ "$(snapper -c $x list -t single | awk '/'$name' backup in progress/ {cnt++} END {print cnt}')" -gt 0 ]]; then
        printf "\nNOTE: Previous failed %s backup snapshots found for '%s'.\n" "$name" "$x" | tee $PIPE
        if [[ $noconfirm == "yes" ]]; then
            printf "'noconfirm' option passed. Failed backups will not be deleted.\n" | tee $PIPE
        else
            read -e -r -p "Delete failed backup snapshot(s)? (These local snapshots from failed backups are not used.) [y/N]? " delete_failed
            # Re-prompt until the answer is empty (default No) or a y/n form.
            while [[ -n "$delete_failed" && "$delete_failed" != [Yy]"es" &&
                     "$delete_failed" != [Yy] && "$delete_failed" != [Nn]"o" &&
                     "$delete_failed" != [Nn] ]]; do
                read -e -r -p "Delete failed backup snapshot(s)? (These local snapshots from failed backups are not used.) [y/N] " delete_failed
                if [[ -n "$delete_failed" && "$delete_failed" != [Yy]"es" &&
                      "$delete_failed" != [Yy] && "$delete_failed" != [Nn]"o" &&
                      "$delete_failed" != [Nn] ]]; then
                    printf "Select 'y' or 'N'.\n"
                fi
            done
            if [[ "$delete_failed" == [Yy]"es" || "$delete_failed" == [Yy] ]]; then
                snapper -c $x delete $(snapper -c $x list | awk '/'$name' backup in progress/ {print $1}')
            fi
        fi
    fi

    # Reset before sourcing: the config file may set SNAP_SYNC_EXCLUDE=yes
    # to opt this configuration out of syncing.
    SNAP_SYNC_EXCLUDE=no

    # Sourcing the snapper config provides SUBVOLUME (used below for
    # snapshot paths) among other settings.
    if [[ -f "/etc/snapper/configs/$x" ]]; then
        source /etc/snapper/configs/$x
        # TODO: snapper -c "$x" --jsonout get-config
    else
        die "Selected snapper configuration $x does not exist."
265 | fi 266 | 267 | if [[ $SNAP_SYNC_EXCLUDE == "yes" ]]; then 268 | continue 269 | fi 270 | 271 | printf "\n" 272 | 273 | old_num=$(snapper -c "$x" list -t single | awk '/'"subvolid=$selected_subvolid, uuid=$selected_uuid"'/ {print $1}') 274 | old_snap=$SUBVOLUME/.snapshots/$old_num/snapshot 275 | 276 | OLD_NUM_ARRAY[$i]=$old_num 277 | OLD_SNAP_ARRAY[$i]=$old_snap 278 | 279 | if [[ -z "$old_num" ]]; then 280 | printf "No backups have been performed for '%s' on this disk.\n" "$x" 281 | read -e -r -p "Enter name of subvolume to store backups, relative to $selected_mnt (to be created if not existing): " mybackupdir 282 | printf "This will be the initial backup for snapper configuration '%s' to this disk. This could take awhile.\n" "$x" 283 | BACKUPDIR="$selected_mnt/$mybackupdir" 284 | test -d "$BACKUPDIR" || btrfs subvolume create "$BACKUPDIR" 285 | else 286 | mybackupdir=$(snapper -c "$x" list -t single | awk -F"|" '/'"subvolid=$selected_subvolid, uuid=$selected_uuid"'/ {print $5}' | awk -F "," '/backupdir/ {print $1}' | awk -F"=" '{print $2}') 287 | BACKUPDIR="$selected_mnt/$mybackupdir" 288 | test -d $BACKUPDIR || die "%s is not a directory on %s.\n" "$BACKUPDIR" "$selected_uuid" 289 | fi 290 | BACKUPDIRS_ARRAY[$i]="$BACKUPDIR" 291 | MYBACKUPDIR_ARRAY[$i]="$mybackupdir" 292 | 293 | printf "Creating new local snapshot for '%s' configuration...\n" "$x" | tee $PIPE 294 | # new_num=$(snapper -c "$x" create --print-number -d "$name backup in progress") 295 | new_num=TODO 296 | new_snap=$SUBVOLUME/.snapshots/$new_num/snapshot 297 | new_info=$SUBVOLUME/.snapshots/$new_num/info.xml 298 | sync 299 | backup_location=$BACKUPDIR/$x/$new_num/ 300 | printf "Will backup %s to %s\n" "$new_snap" "$backup_location/snapshot" | tee $PIPE 301 | 302 | if (test -d "$backup_location/snapshot") ; then 303 | printf "WARNING: Backup directory '%s' already exists. 
This configuration will be skipped!\n" "$backup_location/snapshot" | tee $PIPE 304 | printf "Move or delete destination directory and try backup again.\n" | tee $PIPE 305 | fi 306 | 307 | NEW_NUM_ARRAY[$i]="$new_num" 308 | NEW_SNAP_ARRAY[$i]="$new_snap" 309 | NEW_INFO_ARRAY[$i]="$new_info" 310 | BACKUPLOC_ARRAY[$i]="$backup_location" 311 | 312 | cont_backup="K" 313 | CONT_BACKUP_ARRAY[$i]="yes" 314 | if [[ $noconfirm == "yes" ]]; then 315 | cont_backup="yes" 316 | else 317 | while [[ -n "$cont_backup" && "$cont_backup" != [Yy]"es" && 318 | "$cont_backup" != [Yy] && "$cont_backup" != [Nn]"o" && 319 | "$cont_backup" != [Nn] ]]; do 320 | read -e -r -p "Proceed with backup of '$x' configuration [Y/n]? " cont_backup 321 | if [[ -n "$cont_backup" && "$cont_backup" != [Yy]"es" && 322 | "$cont_backup" != [Yy] && "$cont_backup" != [Nn]"o" && 323 | "$cont_backup" != [Nn] ]]; then 324 | printf "Select 'Y' or 'n'.\n" 325 | fi 326 | done 327 | fi 328 | 329 | if [[ "$cont_backup" != [Yy]"es" && "$cont_backup" != [Yy] && -n "$cont_backup" ]]; then 330 | CONT_BACKUP_ARRAY[$i]="no" 331 | printf "Not backing up '%s' configuration.\n" $x 332 | # snapper -c $x delete $new_num 333 | fi 334 | 335 | i=$(($i+1)) 336 | 337 | done 338 | 339 | # Actual backing up 340 | printf "\nPerforming backups...\n" | tee $PIPE 341 | i=-1 342 | for x in $selected_configs; do 343 | 344 | i=$(($i+1)) 345 | 346 | SNAP_SYNC_EXCLUDE=no 347 | 348 | if [[ -f "/etc/snapper/configs/$x" ]]; then 349 | source /etc/snapper/configs/$x 350 | else 351 | die "Selected snapper configuration $x does not exist." 352 | fi 353 | 354 | cont_backup=${CONT_BACKUP_ARRAY[$i]} 355 | if [[ $cont_backup == "no" || $SNAP_SYNC_EXCLUDE == "yes" ]]; then 356 | notify_info "Backup in progress" "NOTE: Skipping $x configuration." 357 | continue 358 | fi 359 | 360 | notify_info "Backup in progress" "Backing up $x configuration." 
361 | 362 | printf "\n" 363 | 364 | old_num="${OLD_NUM_ARRAY[$i]}" 365 | old_snap="${OLD_SNAP_ARRAY[$i]}" 366 | BACKUPDIR="${BACKUPDIRS_ARRAY[$i]}" 367 | mybackupdir="${MYBACKUPDIR_ARRAY[$i]}" 368 | new_num="${NEW_NUM_ARRAY[$i]}" 369 | new_snap="${NEW_SNAP_ARRAY[$i]}" 370 | new_info="${NEW_INFO_ARRAY[$i]}" 371 | backup_location="${BACKUPLOC_ARRAY[$i]}" 372 | 373 | if (test -d "$backup_location/snapshot") ; then 374 | printf "ERROR: Backup directory '%s' already exists. Skipping backup of this configuration!\n" "$backup_location/snapshot" | tee $PIPE 375 | continue 376 | fi 377 | 378 | mkdir -p $backup_location 379 | 380 | if [[ -z "$old_num" ]]; then 381 | printf "Sending first snapshot for '%s' configuration...\n" "$x" | tee $PIPE 382 | # btrfs send "$new_snap" | btrfs receive "$backup_location" &>/dev/null 383 | else 384 | 385 | printf "Sending incremental snapshot for '%s' configuration...\n" "$x" | tee $PIPE 386 | # Sends the difference between the new snapshot and old snapshot to the 387 | # backup location. Using the -c flag instead of -p tells it that there 388 | # is an identical subvolume to the old snapshot at the receiving 389 | # location where it can get its data. This helps speed up the transfer. 390 | 391 | # btrfs send -c "$old_snap" "$new_snap" | btrfs receive "$backup_location" 392 | 393 | # printf "Modifying data for old local snapshot for '%s' configuration...\n" "$x" | tee $PIPE 394 | # snapper -v -c "$x" modify -d "old snap-sync snapshot (you may remove)" -u "backupdir=,subvolid=,uuid=" -c "number" "$old_num" 395 | 396 | fi 397 | 398 | cp "$new_info" "$backup_location" 399 | 400 | # It's important not to change this userdata in the snapshots, since that's how 401 | # we find the previous one. 
402 | 403 | # userdata="backupdir=$mybackupdir, subvolid=$selected_subvolid, uuid=$selected_uuid" 404 | 405 | # Tag new snapshot as the latest 406 | # printf "Tagging local snapshot as latest backup for '%s' configuration...\n" "$x" | tee $PIPE 407 | # snapper -v -c "$x" modify -d "$description" -u "$userdata" "$new_num" 408 | 409 | printf "Backup complete for '%s' configuration.\n" "$x" > $PIPE 410 | 411 | done 412 | 413 | printf "\nDone!\n" | tee $PIPE 414 | exec 3>&- 415 | 416 | if [[ "$uuid_cmdline" != "none" ]]; then 417 | notify_info "Finished" "Backups to $uuid_cmdline complete!" 418 | else 419 | notify_info "Finished" "Backups complete!" 420 | fi 421 | -------------------------------------------------------------------------------- /clean-aur-dir.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import os 4 | import sys 5 | import re 6 | import subprocess 7 | 8 | pkgname_regex = re.compile("^(?P[a-z0-9@._+-]+)-(?P[a-z0-9._:-]+)-(?Pany|x86_64|i686)\.pkg\.tar(\.xz)?(\.sig)?$", re.IGNORECASE) 9 | 10 | def usage(): 11 | print("Simple utility to clean directories from old Arch's package files, keeping only those currently installed") 12 | print("usage: %s PATH" % sys.argv[0]) 13 | sys.exit(1) 14 | 15 | if __name__ == "__main__": 16 | if len(sys.argv) != 2: 17 | usage() 18 | 19 | path = sys.argv[1] 20 | if not os.path.isdir(path): 21 | usage() 22 | os.chdir(path) 23 | 24 | files = {} 25 | 26 | # remove files that don't match pkgname_reges from further processing!! 
27 | for f in os.listdir(): 28 | if not os.path.isfile(f): 29 | continue 30 | match = re.match(pkgname_regex, f) 31 | if match: 32 | # strip extension for future comparison with expac's output 33 | files[f] = "{pkgname}-{pkgver}-{arch}".format(**match.groupdict()) 34 | 35 | # get list of installed packages 36 | installed = subprocess.check_output("expac -Qs '%n-%v-%a'", shell=True, universal_newlines=True).splitlines() 37 | 38 | for f in sorted(files): 39 | # compare with the key instead of the whole filename 40 | # (drops file extensions like .pkg.tar.{xz,gz}{,.sig} ) 41 | ff = files[f] 42 | 43 | if ff in installed: 44 | print("Kept: %s" % f) 45 | else: 46 | print("Deleted: %s" % f) 47 | os.remove(f) 48 | -------------------------------------------------------------------------------- /convertToUtf8.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import traceback 6 | 7 | CHARSETS = ("ascii", "cp1250", "cp1252", "iso-8859-9", "iso-8859-15") 8 | 9 | def is_utf8(filepath): 10 | try: 11 | file = open(filepath, "rb") 12 | file.read().decode('utf-8') 13 | file.close() 14 | return True 15 | except: 16 | return False 17 | 18 | def to_utf8(path): 19 | for charset in CHARSETS: 20 | try: 21 | f = open(path, 'rb') 22 | content = f.read().decode(charset) 23 | f.close() 24 | f = open(path, 'wb') 25 | f.write(content.encode('utf-8')) 26 | f.close() 27 | return "Converting to utf-8: " + os.path.split(path)[1] 28 | except: 29 | pass 30 | return "Unable to open " + os.path.split(path)[1] + " - unknown charset or binary file." 
31 | 32 | def run(): 33 | message = "" 34 | for filename in sys.argv[1:]: 35 | if os.path.isfile(filename): 36 | if is_utf8(filename): 37 | message += os.path.split(filename)[1] + " is already in utf-8.\n" 38 | else: 39 | message += to_utf8(filename) + "\n" 40 | return message.strip() 41 | 42 | if __name__ == "__main__": 43 | if len(sys.argv) < 2: 44 | print("Usage: " + sys.argv[0] + " file1 [file2 ...]") 45 | sys.exit(1) 46 | 47 | try: 48 | message = run() 49 | except: 50 | message = traceback.format_exc() 51 | if message != "": 52 | print(message) 53 | -------------------------------------------------------------------------------- /cp-p: -------------------------------------------------------------------------------- 1 | submodules/cp-p/cp-p -------------------------------------------------------------------------------- /fatcp: -------------------------------------------------------------------------------- 1 | #! /usr/bin/bash 2 | 3 | # Script for safe copying to FAT32 filesystems. 4 | # All bad characters are replaced by '_' (underscore) when copying. 5 | # File conflicts (e.g. 'foo?' and 'foo:' are both mapped to 'foo_') are not checked - using 'cp -i' is recommended. 6 | 7 | # Some resources: 8 | # http://askubuntu.com/questions/11634/how-can-i-substitute-colons-when-i-rsync-on-a-usb-key 9 | # 10 | # Simple (stupid) alternative: 11 | # find -type f -name '*.pat' -print0 | tar -c -f - --null --files-from - | tar -C /path/to/dst -v -x -f - --show-transformed --transform 's/?/_/g' 12 | # 13 | 14 | # two arguments are accepted 15 | if [[ $# -ne 2 ]]; then 16 | echo "Usage: $0 " 17 | exit 1 18 | fi 19 | 20 | base=$(realpath "$1") 21 | basedir=$(dirname "$base") 22 | dst=$(realpath "$2") 23 | 24 | # $dst must be existing dir 25 | if [[ ! -d "$dst" ]]; then 26 | echo "Target directory '$dst' does not exist." 
27 | exit 1 28 | fi 29 | 30 | # 'cp' alias 31 | CP="cp -i --preserve=all" 32 | # characters that will be replaced with '_' 33 | BADCHARS='<>|;:!?"*\+' 34 | 35 | # enhance globbing 36 | shopt -s dotglob globstar 37 | 38 | # function creating target file/dir name 39 | mk_target() { 40 | local target=${1#"$basedir"} 41 | echo "$dst/${target//[$BADCHARS]/_}" 42 | } 43 | 44 | # dirs and files are handled differently 45 | if [[ -d "$base" ]]; then 46 | target=$(mk_target "$base") 47 | mkdir "$target" 48 | for src in "$base"/**/*; do 49 | target=$(mk_target "$src") 50 | if [[ -d "$src" ]]; then 51 | mkdir -p -- "$target" 52 | elif [[ "$src" != "$target" ]]; then 53 | $CP -- "$src" "$target" 54 | fi 55 | done 56 | elif [[ -f "$base" ]]; then 57 | target=$(mk_target "$base") 58 | if [[ "$src" != "$target" ]]; then 59 | $CP -- "$base" "$target" 60 | fi 61 | fi 62 | -------------------------------------------------------------------------------- /fetchmail: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MBOX_TMPFILE="$HOME/Maildir/.geraldine_mbox.tmp" 4 | MAILDIR="$HOME/Maildir" 5 | 6 | set -e 7 | 8 | function fetchmail() 9 | { 10 | if [[ -s "$MBOX_TMPFILE" ]]; then 11 | echo "Running fdm early to process leftover files in $MBOX_TMPFILE" 12 | fdm -f "$MAILDIR/fdm.conf" -a "geraldine" fetch 13 | fi 14 | if [[ -s "$MBOX_TMPFILE" ]]; then 15 | echo "ERROR: fdm did not process all messages in $MBOX_TMPFILE" >&2 16 | exit 1 17 | fi 18 | 19 | echo "Transferring mbox from geraldine.fjfi.cvut.cz" 20 | messages=$(ssh geraldine '$HOME/.local/bin/messages -q') 21 | if [[ "$messages" != "0" ]]; then 22 | # NOTE: movemail supports locking 23 | ssh geraldine '$HOME/.local/bin/movemail /var/mail/klinkovsky $HOME/mbox' 24 | scp -Cq geraldine:mbox "$MBOX_TMPFILE" 25 | ssh geraldine 'rm $HOME/mbox' 26 | fi 27 | 28 | fdm -f "$MAILDIR/fdm.conf" fetch 29 | 30 | # synchronize with jlk.fjfi.cvut.cz 31 | if [[ $(uname -n) != *".fjfi.cvut.cz" 
]]; then 32 | echo "Synchronizing maildir with jlk.fjfi.cvut.cz" 33 | unison maildir -batch -silent -log=false 34 | fi 35 | 36 | # synchronize calendars, tasks, contacts 37 | vdirsyncer sync 38 | } 39 | 40 | # check online 41 | check_domain=ping.archlinux.org 42 | if ! ping -c 1 -w 1 "$check_domain" > /dev/null; then 43 | echo "$check_domain is not reachable, check your network status." 44 | exit 1 45 | fi 46 | check_host=10.13.0.1 47 | if ! ping -c 1 -w 1 "$check_host" > /dev/null; then 48 | echo "$check_host is not reachable, check the host status or WireGuard network." 49 | exit 1 50 | fi 51 | 52 | # synchronize metadata (displayname, color) 53 | vdirsyncer metasync 54 | 55 | if [[ "$1" == "loop" ]]; then 56 | # ring a bell when the loop breaks 57 | trap 'echo $(tput bel)' EXIT 58 | 59 | while true; do 60 | fetchmail 61 | sleep 2m 62 | done 63 | else 64 | fetchmail 65 | fi 66 | -------------------------------------------------------------------------------- /ffparser.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import argparse 4 | 5 | from pythonscripts.ffparser import FFprobeParser 6 | 7 | 8 | if __name__ == "__main__": 9 | parser = argparse.ArgumentParser(description="parse ffprobe's json output") 10 | 11 | option = parser.add_mutually_exclusive_group(required=True) 12 | option.add_argument("-a", "--audio", action="store_const", const="audio", dest="option", help="get audio attribute") 13 | option.add_argument("-v", "--video", action="store_const", const="video", dest="option", help="get video attribute") 14 | option.add_argument("-f", "--format", action="store_const", const="format", dest="option", help="get format attribute") 15 | 16 | action = parser.add_mutually_exclusive_group(required=True) 17 | action.add_argument("-g", "--get", action="store", nargs=1, dest="attribute", help="attribute name to get") 18 | action.add_argument("-p", "--print", action="store_true", dest="pprint", help="print all attributes and exit") 19 | 20 | parser.add_argument("path", action="store", nargs=1, help="path to file to parse") 21 | 22 | args = parser.parse_args() 23 | ffparser = FFprobeParser(args.path[0]) 24 | if args.pprint: 25 | ffparser.pprint(args.option) 26 | else: 27 | print(ffparser.get(args.option, args.attribute[0])) 28 | -------------------------------------------------------------------------------- /fmount.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python3 2 | 3 | import argparse 4 | import configparser 5 | import os 6 | import subprocess 7 | import sys 8 | from pathlib import Path 9 | 10 | CONFIG = Path.home() / ".config" / "fmount.conf" 11 | DEFAULT_MOUNTPATH = Path.home() / "mnt" 12 | 13 | 14 | # we just strip spaces in the mntopts string 15 | def reformat_mntopts(mntopts): 16 | mntopts = mntopts.split(",") 17 | options = [] 18 | for opt in mntopts: 19 | options.append("=".join(tk.strip() for tk in opt.split("="))) 20 | return ",".join(set(options)) 21 | 22 | 23 | def mount_gio(*, scheme: str, host: str, path: str, user: str, port: str, mountpoint: Path): 24 | if mountpoint.exists() and not mountpoint.is_symlink(): 25 | print(f"Error: path {mountpoint} exists but is not a symlink", file=sys.stderr) 26 | return 27 | 28 | location = f"{scheme}://" 29 | if user: 30 | location += user + "@" 31 | location += host 32 | if port: 33 | location += ":" + port 34 | location += "/" + path 35 | 36 | # get path to thet gvfs directory 37 | XDG_RUNTIME_DIR = os.environ.get("XDG_RUNTIME_DIR") 38 | if XDG_RUNTIME_DIR is None: 39 | XDG_RUNTIME_DIR = f"/run/user/{os.getuid()}" 40 | gvfs = Path(XDG_RUNTIME_DIR) / "gvfs" 41 | 42 | # save current gvfs mounts 43 | if gvfs.is_dir(): 44 | mounts_before = set(gvfs.glob(f"{scheme}-share:*")) 45 | else: 46 | mounts_before = set() 47 | 48 | print(f"Mounting {location}") 49 | cmd = ["gio", "mount", location] 50 | subprocess.run(cmd, check=True) 51 | 52 | if not gvfs.is_dir(): 53 | print(f"Error: gvfs directory {gvfs} does not exist", file=sys.stderr) 54 | return 55 | 56 | # detect the new gvfs mount symlink it to mountpoint 57 | mounts_after = set(gvfs.glob(f"{scheme}-share:*")) 58 | target = list(mounts_after - mounts_before)[0] 59 | 60 | # hack for inaccessible parents of the path on smb servers 61 | if scheme == "smb": 62 | _path = Path(path.lstrip("/")) 63 | # the first part is the remote share, the rest is the location we want 64 | target /= 
_path.relative_to(_path.parts[0]) 65 | 66 | # create a symlink from mountpoint to gvfs target 67 | mountpoint.symlink_to(target) 68 | 69 | 70 | def mount_sshfs(*, host: str, path: str, user: str, port: str, mountpoint: Path, mntopts: str): 71 | uhd = host + ":" + path 72 | if user: 73 | uhd = user + "@" + uhd 74 | 75 | cmd = ["sshfs", uhd, str(mountpoint)] 76 | if mntopts: 77 | cmd += ["-o", mntopts] 78 | if port: 79 | cmd += ["-p", port] 80 | 81 | print(f"Mounting at '{mountpoint}'...") 82 | # the mountpoint might exist after an error or automatic unmount 83 | mountpoint.mkdir(parents=True, exist_ok=True) 84 | subprocess.run(cmd, check=True) 85 | 86 | 87 | def mount(name, mountpath: Path, config): 88 | mountpoint = mountpath / name 89 | scheme = config.get(name, "scheme", fallback="sshfs") 90 | host = config.get(name, "host", fallback=name) 91 | path = config.get(name, "path", fallback="") 92 | user = config.get(name, "user", fallback=None) 93 | port = config.get(name, "port", fallback=None) 94 | mntopts = config.get(name, "mntopts", fallback="") 95 | mntopts = reformat_mntopts(mntopts) 96 | 97 | if scheme == "sshfs": 98 | # sshfs is *much* faster than gvfs 99 | return mount_sshfs( 100 | host=host, 101 | path=path, 102 | user=user, 103 | port=port, 104 | mountpoint=mountpoint, 105 | mntopts=mntopts, 106 | ) 107 | else: 108 | return mount_gio( 109 | scheme=scheme, 110 | host=host, 111 | path=path, 112 | user=user, 113 | port=port, 114 | mountpoint=mountpoint, 115 | ) 116 | 117 | 118 | def umount(mntpoint: Path): 119 | if path.is_mount(): 120 | cmd = ["fusermount3", "-u", str(mntpoint)] 121 | subprocess.run(cmd, check=True) 122 | clean(mntpoint) 123 | elif path.is_symlink(): 124 | if path.readlink().exists(): 125 | cmd = ["gio", "mount", "--unmount", str(mntpoint.resolve())] 126 | subprocess.run(cmd, check=True) 127 | # do not call clean(path), gio takes a while to remove the target 128 | path.unlink() 129 | elif path.is_dir(): 130 | print(f"Note: directory '{path}' 
is not a mount point.", file=sys.stderr) 131 | return 132 | 133 | 134 | def clean(path: Path): 135 | if path.is_symlink() and not path.readlink().exists(): 136 | print(f"Removing broken symlink '{path}'...") 137 | path.unlink() 138 | else: 139 | if not path.is_mount() and not any(path.iterdir()): 140 | print(f"Removing empty mountpoint '{path}'...") 141 | path.rmdir() 142 | 143 | 144 | def cleanAll(mountpath): 145 | for file in mountpath.iterdir(): 146 | path = mountpath / file 147 | if path.is_dir(): 148 | clean(path) 149 | 150 | 151 | def writeDefaultConfig(): 152 | with open(CONFIG, mode="w", encoding="utf-8") as cfile: 153 | print( 154 | f"""\ 155 | # globals live in the DEFAULT section 156 | [DEFAULT] 157 | mountpath = {DEFAULT_MOUNTPATH} 158 | #mntopts = opt1=val1, opt2=val2, ... # optional 159 | 160 | #[remote_name] 161 | #scheme = ... # optional, either sshfs (default) or anything else supported by gvfs 162 | #host = ... # optional, equal to remote_name by default 163 | #path = ... # optional, sshfs defaults to remote $HOME 164 | #user = ... # optional, .ssh/config is honoured 165 | #port = ... # optional, .ssh/config is honoured 166 | #mntopts = opt1=val1, opt2=val2, ... 
# optional 167 | """, 168 | file=cfile, 169 | ) 170 | 171 | 172 | if __name__ == "__main__": 173 | config = configparser.ConfigParser() 174 | if not CONFIG.exists(): 175 | writeDefaultConfig() 176 | config.read(CONFIG) 177 | 178 | parser = argparse.ArgumentParser( 179 | description="wrapper for sshfs and gio with a config file" 180 | ) 181 | parser.add_argument( 182 | "--list-available", 183 | action="store_true", 184 | help="list the hosts defined in the configuration file and exit", 185 | ) 186 | parser.add_argument( 187 | "--list-mounted", 188 | action="store_true", 189 | help="list the currently mounted hosts and exit", 190 | ) 191 | parser.add_argument( 192 | "-u", "--unmount", action="store_true", help="unmount given host or path" 193 | ) 194 | parser.add_argument( 195 | "host", nargs="*", help="remote name(s) specified in the config file" 196 | ) 197 | args = parser.parse_args() 198 | 199 | mountpath = Path( 200 | os.path.expanduser( 201 | config.get("DEFAULT", "mountpath", fallback=DEFAULT_MOUNTPATH) 202 | ) 203 | ) 204 | 205 | if args.list_available: 206 | hosts = set(key for key in config.keys() if key != "DEFAULT") 207 | for host in sorted(hosts): 208 | print(host) 209 | 210 | elif args.list_mounted: 211 | for file in sorted(mountpath.iterdir()): 212 | print(file.name) 213 | 214 | else: 215 | if args.host: 216 | for host in args.host: 217 | if args.unmount: 218 | if Path(host).is_dir(): 219 | # not a host, but a path 220 | path = Path(host) 221 | else: 222 | path = mountpath / host 223 | if not path.exists(): 224 | print( 225 | f"Note: path '{path}' does not exist.", file=sys.stderr 226 | ) 227 | umount(path) 228 | else: 229 | if config.has_section(host): 230 | if (mountpath / host).is_mount(): 231 | parser.error(f"Host '{host}' is already mounted.") 232 | mount(host, mountpath, config) 233 | else: 234 | parser.error( 235 | f"Section '{host}' does not exist in the config file." 
236 | ) 237 | else: 238 | parser.error("No hosts were given.") 239 | cleanAll(mountpath) 240 | -------------------------------------------------------------------------------- /forcemp3convert.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # forcefully convert any file to mp3 (with fixed bitrate), preserving metadata (if possible) 4 | 5 | set -e 6 | 7 | for file in "$@"; do 8 | tmpfile="$(mktemp -u)-forcemp3convert.mp3" 9 | ffmpeg -i "$file" -acodec libmp3lame -ar 44100 -ab 128k -ac 2 -f mp3 -map_metadata 0 -y "$tmpfile" 10 | mv "$tmpfile" "${file%\.*}.mp3" 11 | done 12 | -------------------------------------------------------------------------------- /hddtemp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | devices="$@" 4 | devices=${devices:-/dev/sda} 5 | 6 | for device in $devices; do 7 | cmd="smartctl -d ata -a $device | grep \"Temperature_Celsius\" | awk '{print \$10}'" 8 | 9 | if [[ $UID != 0 ]]; then 10 | echo "Running \`sudo $cmd\`" 11 | temp=$(eval "sudo $cmd") 12 | else 13 | echo "Running \`$cmd\`" 14 | temp=$(eval "$cmd") 15 | fi 16 | 17 | echo "Temperature of $device: $temp°C" 18 | done 19 | -------------------------------------------------------------------------------- /img2pdf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | outfile=$1 6 | ext=tif 7 | 8 | echo "Converting images to pdf..." 9 | declare -a pages 10 | # take input pattern "anything_number.ext", sort numerically by "number" 11 | for file in $(ls ./*.$ext | sort -t_ -k2,2n); do 12 | echo " $file" 13 | pdf=$(basename "$file" .$ext).pdf 14 | # convert "$file" "$pdf" 15 | tiff2pdf -z -F -x 300 -y 300 -o "$pdf" "$file" 16 | pages+=("$pdf") 17 | done 18 | echo "Merging into one pdf..." 
19 | stapler sel "${pages[@]}" "$outfile" 20 | -------------------------------------------------------------------------------- /initscreen.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # exit on error 4 | set -e 5 | 6 | #hdmi=$(cat /sys/class/drm/card0-HDMI-A-1/status) 7 | #vga=$(cat /sys/class/drm/card0-VGA-1/status) 8 | if [[ "$WAYLAND_DISPLAY" == "" ]]; then 9 | connected=$(xrandr | grep " connected" | sed -e "s/\([A-Z0-9]\+\) connected.*/\1/") 10 | else 11 | connected=$(swaymsg -pt get_outputs | grep -E "^Output" | awk '{print $2}') 12 | fi 13 | 14 | #echo "initscreen.sh: hdmi $hdmi; vga $vga" 15 | 16 | if [[ $connected =~ "LVDS-0" ]]; then 17 | if [[ $connected =~ "HDMI-0" ]]; then 18 | # hdmi only 19 | # NOTE: i3 fails if no active output is detected, so we have to first enable second output and then disable the first 20 | # xrandr --nograb --output HDMI-0 --auto --primary 21 | # xrandr --nograb --output LVDS-0 --off 22 | # both 23 | # HDMI-0 is primary, LVDS-0 is panned to be vertically aligned to the bottom 24 | # xrandr --nograb --output HDMI-0 --auto --primary --output LVDS-0 --auto --left-of HDMI-0 --panning 1366x768+0+312 25 | xrandr --output HDMI-0 --auto --primary --output LVDS-0 --auto --left-of HDMI-0 --panning 1366x768+0+312 26 | # xrandr --output HDMI-0 --auto --primary --output LVDS-0 --auto --right-of HDMI-0 27 | elif [[ $connected =~ "VGA-0" ]]; then 28 | # xrandr --nograb --output VGA-0 --auto --output LVDS-0 --mode 1024x768 --primary 29 | # TODO: look at --scale argument 30 | xrandr --output VGA-0 --auto --primary --output LVDS-0 --auto --below VGA-0 31 | else 32 | # xrandr --nograb --output LVDS-0 --auto --primary --output HDMI-0 --off 33 | # xrandr --output LVDS-0 --auto --primary --output HDMI-0 --off 34 | xrandr --output LVDS-0 --auto --primary --output HDMI-0 --off --output VGA-0 --off 35 | fi 36 | elif [[ $connected =~ "eDP-1" ]]; then 37 | if [[ -f 
/proc/acpi/button/lid/LID/state ]]; then 38 | lid=$(cat /proc/acpi/button/lid/LID/state | awk '{print $2}') 39 | else 40 | lid="open" 41 | fi 42 | if [[ "$WAYLAND_DISPLAY" == "" ]]; then 43 | if [[ $connected =~ "HDMI-1" ]] && [[ "$lid" == "closed" ]]; then 44 | xrandr --output HDMI-1 --auto --primary --output eDP-1 --off 45 | echo "Xft.dpi: 96" | xrdb -merge 46 | elif [[ $connected =~ "HDMI-1" ]]; then 47 | xrandr --output HDMI-1 --auto --primary --output eDP-1 --auto --left-of HDMI-1 48 | else 49 | xrandr --output eDP-1 --auto --primary --output HDMI-1 --off 50 | echo "Xft.dpi: 168" | xrdb -merge # scale=1.75 51 | fi 52 | else 53 | if [[ $connected =~ "HDMI-A-1" ]] && [[ "$lid" == "closed" ]]; then 54 | swaymsg output HDMI-A-1 enable 55 | swaymsg output eDP-1 disable 56 | elif [[ $connected =~ "HDMI-A-1" ]]; then 57 | swaymsg output HDMI-A-1 enable 58 | swaymsg output eDP-1 enable 59 | else 60 | swaymsg output eDP-1 enable 61 | swaymsg output HDMI-A-1 disable 62 | fi 63 | fi 64 | else 65 | first=$(echo $connected | cut -f1 -d' ') 66 | xrandr --output ${first} --auto --primary 67 | fi 68 | -------------------------------------------------------------------------------- /lf-paste: -------------------------------------------------------------------------------- 1 | submodules/cp-p/lf-paste -------------------------------------------------------------------------------- /maildir-strip-attachments.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Documentation: 4 | # - https://docs.python.org/3/library/mailbox.html#mailbox.Maildir 5 | # - https://docs.python.org/3/library/mailbox.html#mailbox.MaildirMessage 6 | 7 | import os 8 | import argparse 9 | import mailbox 10 | 11 | DROP_MIN_SIZE = 256 # KiB 12 | DROP_CONTENT_TYPES = [ 13 | "image/", 14 | "video/", 15 | "application/pdf", 16 | "application/x-extension-pdf", 17 | "application/zip", 18 | "application/gzip", 19 | "application/x-gzip", 20 | 
"application/x-xz", 21 | "application/x-7z-compressed", 22 | "application/x-zip-compressed", 23 | "application/x-rar-compressed", 24 | "application/x-msdownload", 25 | "application/msword", 26 | "application/vnd.ms-excel", 27 | "application/vnd.ms-powerpoint", 28 | "application/vnd.ms-xpsdocument", 29 | "application/octet-stream", 30 | ] 31 | 32 | def process_maildir(maildir): 33 | dropped_items = 0 34 | dropped_size = 0 35 | 36 | mb = mailbox.Maildir(maildir, create=False) 37 | for key, message in mb.iteritems(): 38 | for part in message.walk(): 39 | if part.is_multipart(): 40 | continue 41 | size = len(part.as_bytes()) / 1024 42 | if size > DROP_MIN_SIZE: 43 | print("{}\tsize: {:g} KiB".format(part.get_content_type(), size)) 44 | for ct in DROP_CONTENT_TYPES: 45 | if part.get_content_type().startswith(ct): 46 | part.set_payload("") 47 | dropped_items += 1 48 | dropped_size += size 49 | 50 | # update the message on disk 51 | mb.update({key: message}) 52 | 53 | print("Dropped {} attachements ({:g} MiB).".format(dropped_items, dropped_size / 1024)) 54 | 55 | def argtype_dir_path(string): 56 | if os.path.isdir(string): 57 | return string 58 | raise NotADirectoryError(string) 59 | 60 | def argtype_maildir(string): 61 | string = argtype_dir_path(string) 62 | for sub in ["cur", "new", "tmp"]: 63 | subdir = os.path.join(string, sub) 64 | if not os.path.isdir(subdir): 65 | raise NotADirectoryError(subdir) 66 | return string 67 | 68 | if __name__ == "__main__": 69 | ap = argparse.ArgumentParser(description="Strip attachments from messages in a maildir.") 70 | ap.add_argument("maildir", metavar="PATH", type=argtype_maildir, 71 | help="path to the maildir") 72 | 73 | args = ap.parse_args() 74 | process_maildir(args.maildir) 75 | -------------------------------------------------------------------------------- /makeissue.sh: -------------------------------------------------------------------------------- 1 | echo -e '\e[H\e[2J' > issue 2 | echo -e ' \e[1;30m| \e[34m\\s \\r' >> 
issue 3 | echo -e ' \e[36;1m/\\\\ \e[37m|| \e[36m| | \e[30m|' >> issue 4 | echo -e ' \e[36m/ \\\\ \e[37m|| \e[36m| _ \e[30m| \e[32m\\t' >> issue 5 | echo -e ' \e[1;36m/ \e[0;36m.. \e[1m\\\\ \e[37m//==\\\\\\\\ ||/= /==\\\\ ||/=\\\\ \e[36m| | |/ \\\\ | | \\\\ / \e[30m| \e[32m\\d' >> issue 6 | echo -e ' \e[0;36m/ . . \\\\ \e[37m|| || || | || || \e[36m| | | | | | X \e[1;30m|' >> issue 7 | echo -e ' \e[0;36m/ . . \\\\ \e[37m\\\\\\\\==/| || \\\\==/ || || \e[36m| | | | \\\\_/| / \\\\ \e[1;30m| \e[31m\\U' >> issue 8 | echo -e ' \e[0;36m/ .. .. \\\\ \e[0;37mA simple, lightweight linux distribution. \e[1;30m|' >> issue 9 | echo -e ' \e[0;36m/_\x27 `_\\\\ \e[1;30m| \e[35m\\l \e[0mon \e[1;33m\\n' >> issue 10 | echo -e ' \e[0m' >> issue 11 | echo -e '' >> issue 12 | -------------------------------------------------------------------------------- /mp3convert.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import asyncio 7 | from concurrent.futures import ThreadPoolExecutor 8 | import re 9 | import shutil 10 | import subprocess 11 | import shlex 12 | 13 | from pythonscripts.cpu import cores_count 14 | from pythonscripts.tempfiles import TempFiles 15 | from pythonscripts.ffparser import FFprobeParser 16 | 17 | 18 | audio_types = ("mp3", "aac", "ac3", "mp2", "wma", "wav", "mka", "m4a", "ogg", "oga", "flac") 19 | audio_file_regex = re.compile("^(?P/(.*/)*)(?P.*(?P\.(" + "|".join(audio_types) + ")))$") 20 | ffmpeg_command = "ffmpeg -i {input} -acodec libmp3lame -ar 44100 -ab {bitrate:d}k -ac 2 -f mp3 -map_metadata 0 -y {output}" 21 | 22 | 23 | class GettingBitrateError(Exception): 24 | def __init__(self, fname): 25 | self.message = "Couldn't get bitrate from file " + fname 26 | 27 | 28 | class ConversionError(Exception): 29 | def __init__(self, fname, status, output): 30 | self.message = "Error while converting file " + fname + "\nffmpeg exited with status " + 
def get_bitrate(filename):
    """Return the audio bitrate of *filename* in kb/s.

    Raises GettingBitrateError when ffprobe cannot report a bitrate.
    """
    parser = FFprobeParser(filename)
    bitrate = parser.get("audio", "bit_rate")
    del parser
    if bitrate is None:
        raise GettingBitrateError(filename)
    return bitrate // 1000


def convert(filename, output_extension, bitrate, delete_after=False):
    """Convert *filename* into *output_extension* at *bitrate* kb/s using ffmpeg.

    The conversion goes through a temporary file which is always cleaned up.
    Raises ConversionError when ffmpeg exits with a non-zero status.
    """
    tmpfile = tmp.getTempFileName()
    command = ffmpeg_command.format(input=shlex.quote(filename), bitrate=bitrate, output=shlex.quote(tmpfile))
    try:
        subprocess.run(command, shell=True, check=True, capture_output=True)
        if delete_after:
            os.remove(filename)
        shutil.move(tmpfile, os.path.splitext(filename)[0] + output_extension)
    except subprocess.CalledProcessError as e:
        # NOTE(review): with capture_output=True ffmpeg's diagnostics land in
        # e.stderr, not e.output (stdout) — confirm which one ConversionError
        # is meant to carry.
        raise ConversionError(filename, e.returncode, e.output)
    finally:
        # no-op when the tempfile was already moved into place
        tmp.remove(tmpfile)


class Main():
    """Collects input files, converts them concurrently and tracks statistics."""

    def __init__(self, args):
        # statistics counters, incremented from worker threads (plain int
        # increments, serialized in practice by the GIL)
        self.countAudioFiles = 0
        self.countHigherBitrate = 0
        self.countDifferentFormat = 0
        self.countErrors = 0
        self.countNonAudioFiles = 0

        self.dry_run = args.dry_run
        self.bitrate = args.bitrate
        self.verbose = args.verbose
        self.recursive = args.recursive
        self.deleteAfter = args.delete_after
        self.outputExtension = "." + args.output_extension
        self.paths = args.path

    def print_stats(self):
        """Print the collected statistics table."""
        print()
        print("-----------collected statistics-----------")
        print("All audio files (without errors): % 6d" % self.countAudioFiles)
        print("Converted files: % 6d" % (self.countDifferentFormat + self.countHigherBitrate))
        print(" - different format: % 6d" % self.countDifferentFormat)
        print(" - %3s but higher bitrate: % 6d" % (self.outputExtension[1:], self.countHigherBitrate))
        print("Errors: % 6d" % self.countErrors)
        print("Non-audio files: % 6d" % self.countNonAudioFiles)
        print("------------------------------------------")

    def check(self, path):
        """Decide whether *path* needs conversion; updates the counters.

        Returns True when the file should be converted (different format or
        higher bitrate than the target), False otherwise.
        """
        match = re.match(audio_file_regex, path)

        if not match:
            self.countNonAudioFiles += 1
            return False

        filename = match.group("filename")
        ext = match.group("extension")

        self.countAudioFiles += 1
        # NOTE(review): assumes the regex "extension" group includes the
        # leading dot (outputExtension does) — confirm against the regex.
        if ext != self.outputExtension:
            self.countDifferentFormat += 1
            return True

        bitrate = get_bitrate(path)
        if self.verbose > 0:
            sys.stdout.write("% 3s kb/s: %s\n" % (bitrate, filename))
        if bitrate > self.bitrate:
            self.countHigherBitrate += 1
            return True
        return False

    async def run(self):
        """Convert all queued files concurrently, then print statistics."""
        # The ThreadPoolExecutor is used not for CPU parallelism (the GIL
        # prevents that) but to cap the number of concurrent ffmpeg processes.
        with ThreadPoolExecutor(max_workers=cores_count()) as executor:
            # fix: get_event_loop() is deprecated inside a coroutine;
            # get_running_loop() is the supported call since Python 3.7
            loop = asyncio.get_running_loop()
            tasks = [
                loop.run_in_executor(executor, self.worker, path)
                for path in self.queue_generator()
            ]
            # workers return nothing useful — just wait for all of them
            await asyncio.gather(*tasks)

        self.print_stats()

    def worker(self, path):
        """Check and (if needed) convert a single file; runs in a thread."""
        path = os.path.abspath(path)

        try:
            # check bitrate/filetype etc., skip if conversion not necessary
            if not self.check(path) or self.dry_run:
                return
            print("Converting: {}".format(path))
            convert(path, self.outputExtension, self.bitrate, self.deleteAfter)
        except ConversionError as e:
            msg = "ERROR: failed to convert file '{}'".format(path)
            if self.verbose > 0:
                msg += "\n" + e.message
            print(msg, file=sys.stderr)
            self.countErrors += 1
        except GettingBitrateError as e:
            msg = "ERROR: failed to get bitrate from file '{}'".format(path)
            if self.verbose > 0:
                msg += "\n" + e.message
            print(msg, file=sys.stderr)
            self.countErrors += 1
        else:
            # reached only when convert() actually ran without an exception
            print("Done: {}".format(path))

    def queue_generator(self):
        """Yield paths to process: plain files as-is, directories expanded
        (recursively when self.recursive is set)."""

        def walk(root):
            dirs = []
            files = []
            for entry in os.scandir(root):
                if entry.is_dir():
                    dirs.append(entry.name)
                elif entry.is_file():
                    files.append(entry.name)

            # first yield found files, then recurse into subdirs
            for name in files:
                yield os.path.join(root, name)
            if self.recursive:
                for name in dirs:
                    yield from walk(os.path.join(root, name))

        for path in self.paths:
            if os.path.isdir(path):
                yield from walk(path)
            else:
                yield path
-------------------------------------------------------------------------------- /nat-launch-subnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | function print_launch_subnet_usage() 5 | { 6 | echo "USAGE" 7 | echo " $0 " 8 | cat <<'CONFIG' 9 | 10 | REQUIRED VARIABLES 11 | # The network interface card (NIC) that is connected to the internet or other 12 | # wide area network. 13 | wan_nic="wlan0" 14 | 15 | # The network interface card connected to the subnet. 16 | subnet_nic="eth0" 17 | 18 | # The subnet IP mask. 19 | mask=/24 20 | 21 | # The subnet IP range. 22 | subnet_ip=10.0.0.0$mask 23 | 24 | # The IP of the subnet NIC on the subnet. 25 | server_ip=10.0.0.100$mask 26 | 27 | # The IP tables binary to use. 28 | iptables=/usr/bin/idemptables 29 | 30 | # The dnsmasq arguments - PID and lease files to use. 31 | dnsmasq_pid=/tmp/dhcpd.pid 32 | dnsmasq_lease=/tmp/dhcpd.lease 33 | 34 | # The port of DNS service, see dnsmasq(8) for details. Specify "0" to disable DNS server. 35 | dnsmasq_port=53 36 | 37 | # The DHCP range, see dnsmasq(8) for details. 38 | dnsmasq_dhcp_range="192.168.1.100,192.168.1.200,12h" 39 | 40 | OPTIONAL VARIABLES 41 | # Function or external scripts to run before before and after bringing the 42 | # subnet NIC up or down: pre_up, post_up, pre_down, post_down 43 | 44 | # pre_up as a function: 45 | # function pre_up() 46 | # { 47 | # } 48 | 49 | # pre_up as a script: 50 | # pre_up=/path/to/script 51 | 52 | # ip_forward=0 53 | # The value of /proc/sys/net/ipv4/ip_forward to restore when shutting down 54 | # the subnet. 
function launch_subnet()
{
    set -e

    # first argument selects the action (up/down)
    if [[ -z $1 ]]
    then
        print_launch_subnet_usage
        exit 1
    else
        action="$1"
    fi

    # fix: the original repeated ten identical if-blocks; validate all
    # required configuration variables in one loop using bash indirect
    # expansion (same error messages, same exit behavior)
    local _var
    for _var in wan_nic subnet_nic mask subnet_ip server_ip iptables \
                dnsmasq_pid dnsmasq_lease dnsmasq_port dnsmasq_dhcp_range
    do
        if [[ -z ${!_var} ]]
        then
            echo "$_var is undefined"
            exit 1
        fi
    done

    case "$action" in
        up)
            # Enable IP forwarding.
            echo 1 > /proc/sys/net/ipv4/ip_forward

            ## iptables rules are changed to fit my firewall config
            ## see http://xyne.archlinux.ca/notes/network/dhcp_with_dns.html for original rules

            # Open up DNS (53) and DHCP (67) ports on subnet_nic.
            "$iptables" -A nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p tcp --dport 53 -j ACCEPT
            "$iptables" -A nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p udp --dport 53 -j ACCEPT
            "$iptables" -A nat-subnet -i "$subnet_nic" -p udp --dport 67 -j ACCEPT

            # Reply to ICMP (ping) packets so clients can check their connections.
            "$iptables" -A nat-subnet -i "$subnet_nic" -p icmp --icmp-type echo-request -j ACCEPT
            #"$iptables" -A OUTPUT -i "$subnet_nic" -p icmp --icmp-type echo-reply -j ACCEPT

            # Allow postrouting to wan_nic (for e.g. internet access on the subnet).
            "$iptables" -t nat -A POSTROUTING -s "$subnet_ip" -o "$wan_nic" -j MASQUERADE

            # Enable forwarding from subnet_nic to wan_nic (and back via related and established connections).
            "$iptables" -A FORWARD -i "$subnet_nic" -s "$subnet_ip" -o "$wan_nic" -j ACCEPT
            "$iptables" -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT

            # Bring down subnet_nic, configure it and bring it up again.
            if [[ ! -z $pre_up ]]
            then
                ip link set dev "$subnet_nic" down
                "$pre_up"
            fi
            ip link set dev "$subnet_nic" up
            if [[ ! -z $post_up ]]
            then
                "$post_up"
            fi

            # Set the static IP for subnet_nic.
            ip addr add "$server_ip" dev "$subnet_nic"

            # Ensure the lease file exists.
            mkdir -p -- "${dnsmasq_lease%/*}"
            [[ -f $dnsmasq_lease ]] || touch "$dnsmasq_lease"

            # Launch the DHCP server
            dnsmasq \
                --pid-file="$dnsmasq_pid" \
                --dhcp-leasefile="$dnsmasq_lease" \
                --port="$dnsmasq_port" \
                --interface="$subnet_nic" \
                --except-interface=lo \
                --bind-interfaces \
                --dhcp-range="$dnsmasq_dhcp_range" \
                --dhcp-authoritative \
                --dhcp-option=6,"${server_ip%/*}"
            ;;

        down)
            # Kill the DHCP server. (fix: quote the command substitution)
            if [[ -f $dnsmasq_pid ]]
            then
                kill "$(cat "$dnsmasq_pid")" && rm "$dnsmasq_pid" && echo "killed server"
            fi

            if [[ ! -z $pre_down ]]
            then
                "$pre_down"
            fi
            ip addr delete "$server_ip" dev "$subnet_nic"
            ip link set dev "$subnet_nic" down
            if [[ ! -z $post_down ]]
            then
                "$post_down"
            fi

            # Undo all of the changes above in reverse order.
            "$iptables" -D FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
            "$iptables" -D FORWARD -i "$subnet_nic" -s "$subnet_ip" -o "$wan_nic" -j ACCEPT
            "$iptables" -t nat -D POSTROUTING -s "$subnet_ip" -o "$wan_nic" -j MASQUERADE
            #"$iptables" -D OUTPUT -i "$subnet_nic" -p icmp --icmp-type echo-reply -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -p icmp --icmp-type echo-request -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -p udp --dport 67 -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p udp --dport 53 -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p tcp --dport 53 -j ACCEPT

            # Restore the previous ip_forward setting (0 when unconfigured).
            if [[ ! -z $ip_forward ]]
            then
                if [[ $ip_forward != $(cat /proc/sys/net/ipv4/ip_forward) ]]
                then
                    echo $ip_forward > /proc/sys/net/ipv4/ip_forward
                fi
            else
                echo 0 > /proc/sys/net/ipv4/ip_forward
            fi
            ;;

        *)
            print_launch_subnet_usage
            exit 1
            ;;
    esac
}
>&2 12 | exit 1 13 | fi 14 | 15 | if [[ -z $3 ]]; then 16 | print_usage 17 | exit 1 18 | else 19 | wan_nic="$1" 20 | subnet_nic="$2" 21 | action="$3" 22 | fi 23 | 24 | 25 | mask=/24 26 | subnet_ip=192.168.1.0$mask 27 | server_ip=192.168.1.23$mask 28 | iptables=/usr/bin/idemptables 29 | dnsmasq_pid=/run/dnsmasq_$subnet_nic.pid 30 | dnsmasq_lease=/run/dnsmasq_$subnet_nic.lease 31 | dnsmasq_port=0 32 | dnsmasq_dhcp_range="192.168.1.100,192.168.1.150,6h" 33 | 34 | source nat-launch-subnet.sh 35 | 36 | launch_subnet "$action" 37 | -------------------------------------------------------------------------------- /notify-brightness.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # LCD brightness notification (level changed by ACPI, no action required) 4 | 5 | # duration in ms 6 | duration=1500 7 | 8 | # get brightness level, set title 9 | level=$(cat "/sys/class/backlight/intel_backlight/brightness") 10 | max=$(cat "/sys/class/backlight/intel_backlight/max_brightness") 11 | percent=$(( $level * 100 / $max )) 12 | title="LCD brightness" 13 | 14 | # create fancy bar 15 | f=$((percent/10)) 16 | e=$((10-f)) 17 | fchars='◼◼◼◼◼◼◼◼◼◼' 18 | echars='◻◻◻◻◻◻◻◻◻◻' 19 | bar="${fchars:0:f}${echars:0:e} $percent%" 20 | 21 | notify-send --app-name=VolumeNotification --expire-time="$duration" --urgency=low "$title" "$bar" 22 | -------------------------------------------------------------------------------- /notify-volume.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # volume control (up/down/mute/unmute/toggle) + notification 4 | 5 | # duration in ms 6 | duration=1500 7 | 8 | notify () { 9 | # get volume level 10 | percent=$(pactl get-sink-volume @DEFAULT_SINK@ | grep -Po '\d+(?=%)' | head -n 1) 11 | 12 | # check if muted, set title 13 | if [[ $(pactl get-sink-mute @DEFAULT_SINK@) == "Mute: yes" ]]; then 14 | title="Volume muted" 15 | else 16 | title="Volume" 17 | fi 
18 | 19 | # create fancy bar 20 | f=$((percent/10)) 21 | e=$((10-f)) 22 | fchars='◼◼◼◼◼◼◼◼◼◼' 23 | echars='◻◻◻◻◻◻◻◻◻◻' 24 | bar="${fchars:0:f}${echars:0:e} $percent%" 25 | 26 | notify-send --app-name=VolumeNotification --expire-time="$duration" --urgency=low "$title" "$bar" 27 | } 28 | 29 | # redirect stdout of this script to /dev/null 30 | exec > /dev/null 31 | 32 | case "$1" in 33 | up) 34 | pactl set-sink-volume @DEFAULT_SINK@ +5% 35 | pactl set-sink-mute @DEFAULT_SINK@ 0 36 | ;; 37 | down) 38 | pactl set-sink-volume @DEFAULT_SINK@ -5% 39 | pactl set-sink-mute @DEFAULT_SINK@ 0 40 | ;; 41 | mute) 42 | pactl set-sink-mute @DEFAULT_SINK@ 1 43 | ;; 44 | unmute) 45 | pactl set-sink-mute @DEFAULT_SINK@ 0 46 | ;; 47 | toggle) 48 | pactl set-sink-mute @DEFAULT_SINK@ toggle 49 | ;; 50 | esac 51 | 52 | notify 53 | -------------------------------------------------------------------------------- /pacman-disowned.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | tmp=${TMPDIR-/tmp}/pacman-disowned-$UID-$$ 4 | db=$tmp/db 5 | fs=$tmp/fs 6 | 7 | mkdir "$tmp" 8 | trap 'rm -rf "$tmp"' EXIT 9 | 10 | pacman -Qlq | sort -u > "$db" 11 | 12 | find /etc /opt /usr ! 
#!/bin/bash

# Normalize permissions under a directory: 0644 for files, 0755 for directories.

opt=${1:-'-h'}
dir=${2:-'.'}

fmode=0644
dmode=0755

# fix: the original tested "$1"/"$2" directly, so the defaults assigned to
# $opt/$dir above never took effect (e.g. `perm.sh -a` passed an empty
# directory argument to find)
case "$opt" in
    -a) # dirs and files
        find "$dir" -type d -exec chmod $dmode "{}" +
        find "$dir" -type f -exec chmod $fmode "{}" +
        ;;
    -d)
        find "$dir" -type d -exec chmod $dmode "{}" +
        ;;
    -f)
        find "$dir" -type f -exec chmod $fmode "{}" +
        ;;
    *)
        printf "Usage: $(basename $0) option [directory]
-a \t set permissions of files and directories to $fmode, resp. $dmode.
-d \t set permissions of directories to $dmode.
-f \t set permissions of files to $fmode.
-h \t print this help.
"
        ;;
esac
def spawnDaemon(*args, detach_fds=True):
    """Spawn a completely detached subprocess (i.e., a daemon).

    args: the program to execute and its arguments, passed to os.execvp().
    detach_fds: when True, close all inherited file descriptors and
        redirect stdin/stdout/stderr to /dev/null in the daemon.

    E.g. for mark:
        spawnDaemon("../bin/producenotify.py", "producenotify.py", "xx")

    Raises ValueError when no arguments are supplied, RuntimeError when
    either fork fails. Returns None in the calling (parent) process.
    """
    if len(args) == 0:
        raise ValueError("no arguments supplied")

    # fork the first time (to make a non-session-leader child process)
    try:
        pid = os.fork()
    except OSError as e:
        raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno))
    if pid != 0:
        # parent (calling) process is all done
        return

    # detach from controlling terminal (to make child a session-leader)
    os.setsid()
    try:
        pid = os.fork()
    except OSError as e:
        # fix: the original had an unreachable duplicate raise after this
        # line that referenced `e` outside the except scope
        raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno))
    if pid != 0:
        # child process is all done
        os._exit(0)

    if detach_fds:
        # grandchild process now non-session-leader, detached from parent;
        # it must now close all open files
        try:
            maxfd = os.sysconf("SC_OPEN_MAX")
        except (AttributeError, ValueError):
            maxfd = 1024

        for fd in range(maxfd):
            try:
                os.close(fd)
            except OSError:
                # fd wasn't open to begin with (ignored)
                pass

        # redirect stdin, stdout and stderr to /dev/null
        # (fix: the original referenced an undefined name REDIRECT_TO)
        os.open(os.devnull, os.O_RDWR)  # standard input (0)
        os.dup2(0, 1)
        os.dup2(0, 2)

    # and finally let's execute the executable for the daemon!
    try:
        os.execvp(args[0], args)
    except Exception:
        # oops, we're cut off from the world, let's just give up
        os._exit(255)
class Logger:
    """Minimal logger whose level is an integer for easy comparison.

    Levels: 1=error, 2=warning, 3=info, 4=debug. When the ``filename``
    attribute is set, it is prepended to stdout messages.
    """

    def __init__(self, log_level, prog_name):
        self.log_level = log_level
        self.prog_name = prog_name
        self.filename = None

    def prefix(self, msg):
        """Prepend the current filename to *msg* when one is set."""
        return msg if self.filename is None else "%s: %s" % (self.filename, msg)

    def _emit(self, threshold, msg):
        # print to stdout only when the configured level reaches the threshold
        if self.log_level >= threshold:
            print(self.prefix(msg))

    def debug(self, msg):
        self._emit(4, msg)

    def info(self, msg):
        self._emit(3, msg)

    def warning(self, msg):
        self._emit(2, "WARNING: %s" % msg)

    def error(self, msg):
        # errors go to stderr, prefixed by the program name instead
        if self.log_level >= 1:
            sys.stderr.write("%s: %s\n" % (self.prog_name, msg))

    def critical(self, msg, retval=1):
        """Log *msg* as an error and terminate with exit code *retval*."""
        self.error(msg)
        sys.exit(retval)
from math import log

# (unit name, decimal places) pairs, indexed by the 1024-exponent
unit_list = {
    "long": list(zip(['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'], [0, 0, 1, 2, 2, 2])),
    "short": list(zip(['B', 'K', 'M', 'G', 'T', 'P'], [0, 0, 1, 2, 2, 2])),
}

def format_sizeof(num, unit_format="long"):
    """Human-readable file size in O(1) — no loop over the units.

    Original source code from:
    http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    if num <= 1:
        return str(int(num)) + " B"
    exponent = min(int(log(num, 1024)), len(unit_list[unit_format]) - 1)
    quotient = float(num) / 1024 ** exponent
    unit, num_decimals = unit_list[unit_format][exponent]
    return ('{:.%sf} {}' % num_decimals).format(quotient, unit)


def format_time(seconds):
    """Nice time format, useful for ETA etc. Never longer than 6 characters."""
    weeks, rest = divmod(seconds, 3600 * 24 * 7)
    days, rest = divmod(rest, 3600 * 24)
    hours, rest = divmod(rest, 3600)
    minutes, secs = divmod(rest, 60)
    if weeks > 0:
        return "%dw" % weeks
    if days > 0:
        return "%dd%02dh" % (days, hours)
    if hours > 0:
        return "%02dh%02dm" % (hours, minutes)
    if minutes > 0:
        return "%02dm%02ds" % (minutes, secs)
    return str(secs)


def cat(fname):
    """Get the stripped content of any readable text file, or None on error."""
    try:
        with open(fname, "r") as f:
            return f.read().strip()
    except:
        return None


import re

def smart_truncate(text, max_length=100, suffix='...'):
    """Return at most *max_length* characters of *text*, cutting only at
    word boundaries; *suffix* is appended when truncation happened."""
    if len(text) <= max_length:
        return text
    pattern = r'^(.{0,%d}\S)\s.*' % (max_length - len(suffix) - 1)
    return re.sub(pattern, r'\1' + suffix, text)


import os

def mkdir(path):
    """Recursive directory creation (like 'mkdir -p'); EEXIST is ignored."""
    try:
        os.makedirs(path)
    except OSError as e:
        # errno 17 == EEXIST: the directory is already there
        if e.errno != 17:
            raise e
"""
Linux terminal colors.
"""

COLORS = {"black": 30, "red": 31, "green": 32, "yellow": 33,
          "blue": 34, "magenta": 35, "cyan": 36, "white": 37}

def colorize(color, text):
    """Wrap *text* in ANSI bold escape sequences for the named *color*."""
    return "\033[1;%im%s\033[0m" % (COLORS[color], text)

# download status -> color name (the "active" status is handled separately
# because it depends on the download speed)
_STATUS_COLORS = {
    "error": "red",
    "complete": "green",
    "paused": "cyan",
    "waiting": "magenta",
}

def getColor(status, download_speed=0):
    """Map a download status string to a color name ('' when unknown)."""
    if status == "active":
        return "blue" if download_speed > 0 else "yellow"
    return _STATUS_COLORS.get(status, "")



def getTerminalSize():
    """Get size of the unix terminal as a tuple (width, height).

    When all probes fail, the default value is (80, 25).

    Original source code from:
    http://stackoverflow.com/a/566752
    """
    import os
    env = os.environ

    def probe(fd):
        # (rows, cols) via the TIOCGWINSZ ioctl; None when fd is not a tty
        try:
            import fcntl, termios, struct
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except:
            return None

    size = probe(0) or probe(1) or probe(2)
    if not size:
        # fall back to the controlling terminal device
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            size = probe(fd)
            os.close(fd)
        except:
            pass
    if not size:
        # last resort: environment variables, then the hard-coded default
        try:
            size = (env['LINES'], env['COLUMNS'])
        except:
            size = (25, 80)
    return int(size[1]), int(size[0])
/usr/bin/bash 2 | 3 | # Author: Jakub Klinkovský (Lahwaacz) 4 | # https://github.com/lahwaacz 5 | 6 | function print_usage() { 7 | echo "usage: $0 " 8 | } 9 | 10 | ## Generate name of TAP interface to create 11 | function get_tap_name() { 12 | for (( i=0; i<$tap_limit; i++ )); do 13 | local name="tap$i" 14 | if [[ ! -d "/sys/class/net/$name" ]]; then 15 | echo "$name" 16 | break 17 | fi 18 | done 19 | } 20 | 21 | # do not run as root 22 | if [[ $EUID -eq 0 ]]; then 23 | echo "This script is not supposed to be run as root." >&2 24 | exit 1 25 | fi 26 | 27 | # parse command line arguments 28 | if [[ -z $1 ]]; then 29 | print_usage 30 | exit 1 31 | else 32 | vm_name="$1" 33 | fi 34 | 35 | 36 | sudo_args=("-Ap" "Enter your root password (QEMU launcher script)") 37 | username=$(whoami) 38 | tap_limit=10 # maximum number of TAP interfaces created by this script 39 | tap_nic=$(get_tap_name) 40 | br_nic="br0-qemu" # bridge interface name (will be created) 41 | wan_nic="wlan0" # WAN interface name (for NAT) 42 | 43 | 44 | case "$vm_name" in 45 | btrfs) 46 | sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" up 47 | 48 | qemu-system-x86_64 \ 49 | -name "$vm_name" \ 50 | -monitor stdio \ 51 | -enable-kvm -smp 2 -cpu host -m 1024 \ 52 | -vga qxl -spice port=5931,disable-ticketing \ 53 | -drive file="/home/lahwaacz/virtual_machines/archlinux-btrfs.raw",if=virtio,cache=none -boot once=c \ 54 | -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py "$vm_name") -net tap,ifname="$tap_nic",script=no,downscript=no,vhost=on \ 55 | -usbdevice tablet 56 | 57 | sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" down 58 | ;; 59 | virtarch) 60 | sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" up 61 | 62 | qemu-system-x86_64 \ 63 | -name "$vm_name" \ 64 | -monitor stdio \ 65 | -enable-kvm -smp 2 -cpu host -m 1024 \ 66 | -vga qxl -spice port=5931,disable-ticketing \ 67 | -drive 
file="/home/lahwaacz/virtual_machines/archlinux.raw",if=virtio,cache=none -boot once=c \ 68 | -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py "$vm_name") -net tap,ifname="$tap_nic",script=no,downscript=no,vhost=on \ 69 | -usbdevice tablet 70 | 71 | sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" down 72 | ;; 73 | winxp) 74 | sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" up 75 | 76 | qemu-system-i386 \ 77 | -name "$vm_name" \ 78 | -monitor stdio \ 79 | -enable-kvm -smp 2 -cpu host -m 1024 \ 80 | -vga qxl -spice port=5930,disable-ticketing \ 81 | -drive file="/home/lahwaacz/virtual_machines/winxp.raw",if=virtio,cache=none -boot order=c \ 82 | -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py "$vm_name") -net tap,ifname="$tap_nic",script=no,downscript=no,vhost=on \ 83 | -usbdevice tablet \ 84 | -soundhw ac97 \ 85 | -localtime 86 | 87 | sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" down 88 | ;; 89 | liveiso) 90 | if [[ -z "$2" ]]; then 91 | echo "You must specify the ISO file as a second argument." >&2 92 | exit 1 93 | fi 94 | 95 | qemu-system-x86_64 \ 96 | -name "$vm_name" \ 97 | -monitor stdio \ 98 | -enable-kvm -smp 2 -cpu host -m 1024 \ 99 | -vga virtio \ 100 | -display gtk,gl=on \ 101 | -drive file="$2",if=virtio,media=cdrom -boot once=d \ 102 | -net nic -net user \ 103 | -usbdevice tablet 104 | ;; 105 | liveiso-efi) 106 | if [[ -z "$2" ]]; then 107 | echo "You must specify the ISO file as a second argument." >&2 108 | exit 1 109 | fi 110 | if [[ ! -e "/usr/share/ovmf/x64/OVMF_CODE.fd" ]]; then 111 | echo "File /usr/share/ovmf/x64/OVMF_CODE.fd does not exist. Is the package ovmf installed?" 
#!/usr/bin/env python

# Author: Jakub Klinkovský (Lahwaacz)
# https://github.com/lahwaacz

import sys
import zlib

def mac_from_name(name):
    """Deterministically derive a QEMU MAC address (52:54:xx:xx:xx:xx) from *name*.

    fix: the CRC32 is zero-padded to exactly 8 hex digits — the original
    str(hex(crc))[2:] dropped leading zeros, so any name whose CRC32 is
    below 0x10000000 produced fewer than 8 characters and the tuple-based
    format string raised TypeError.
    """
    crc = zlib.crc32(name.encode("utf-8")) & 0xffffffff
    return "52:54:%s%s:%s%s:%s%s:%s%s" % tuple("%08x" % crc)

if __name__ == "__main__":
    if len(sys.argv) != 2:
        # NOTE(review): the argument placeholder appears to have been lost
        # from this usage string in transit — confirm against upstream
        print("usage: %s " % sys.argv[0])
        sys.exit(1)
    print(mac_from_name(sys.argv[1]))
is_interface "$1"; then 18 | echo "Creating TAP interface '$1'" 19 | ip tuntap add "$1" mode tap user "$username" 20 | ip link set dev "$1" up 21 | fi 22 | } 23 | 24 | ## Delete TAP interface 25 | # $1: name of the interface to delete 26 | function del_tap() { 27 | echo "Deleting TAP interface '$1'" 28 | ip link set dev "$1" down 29 | ip tuntap del "$1" mode tap 30 | } 31 | 32 | ## Check if the bridge has any interface 33 | # $1: bridge interface name 34 | function bridge_is_empty() { 35 | [[ $(ls "/sys/class/net/$1/brif" | wc -w) == "0" ]] 36 | } 37 | 38 | ## Create bridge interface if it does not exist 39 | # $1: bridge interface name 40 | function create_br() { 41 | if is_interface "$1"; then 42 | if [[ ! -d "/sys/class/net/$1/brif" ]]; then 43 | echo "Interface '$1' already exists and is not a bridge" 44 | exit 1 45 | fi 46 | else 47 | echo "Creating bridge interface '$1'" 48 | ip link add name "$1" type bridge 49 | ip link set dev "$1" up 50 | 51 | # Xyne's excellent script to launch NAT 52 | echo "Starting NAT" 53 | nat-launch.sh "$wan_nic" "$1" up 54 | fi 55 | } 56 | 57 | ## Delete bridge interface if it exists and has no interface 58 | # $1: bridge interface name 59 | function del_br() { 60 | if bridge_is_empty "$1"; then 61 | # Xyne's excellent script to launch NAT 62 | echo "Stopping NAT" 63 | nat-launch.sh "$wan_nic" "$1" down 64 | 65 | echo "Deleting bridge interface '$1'" 66 | ip link set dev "$1" down 67 | ip link delete "$1" type bridge 68 | fi 69 | } 70 | 71 | ## Add interface to the bridge 72 | # $1: bridge interface name 73 | # $2: name of the interface to add 74 | function br_add_iface() { 75 | echo "Adding interface '$2' to bridge '$1'" 76 | ip link set dev "$2" promisc on up 77 | ip addr flush dev "$2" scope host &>/dev/null 78 | ip addr flush dev "$2" scope site &>/dev/null 79 | ip addr flush dev "$2" scope global &>/dev/null 80 | ip link set dev "$2" master "$1" 81 | # skip forwarding delay 82 | bridge link set dev "$2" state 3 83 | } 84 | 85 
#! /bin/bash

# Recursively remove dead symlinks below the current directory.

shopt -s globstar

# non-recursive variant: iterate over '*' instead of '**/*'
for entry in **/*
do
    # only symlinks are candidates for removal
    [ -h "$entry" ] || continue

    # resolve the link; if the resolved target no longer exists,
    # the link is dead: report it and delete it
    resolved=$(readlink -fn "$entry")
    if [ ! -e "$resolved" ]
    then
        echo "$entry"
        rm "$entry"
    fi
done
/usr/bin/env python3 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import subprocess 7 | import asyncio 8 | from concurrent.futures import ThreadPoolExecutor 9 | 10 | import taglib 11 | 12 | from pythonscripts.cpu import cores_count 13 | from pythonscripts.logger import Logger 14 | 15 | class ReplayGain: 16 | """ Will consider all files to belong to one album. 17 | """ 18 | 19 | def __init__(self, logger, options, files): 20 | # logger 21 | self.log = logger 22 | self.log.filename = None 23 | 24 | # internals 25 | self.raw_lines = [] 26 | self.data_files = [] 27 | self.data_album = {} 28 | 29 | # options 30 | self.force = options.force 31 | self.force_album = options.force_album 32 | self.force_track = options.force_track 33 | self.files = files 34 | 35 | def run(self): 36 | # check if all files have ReplayGain tags; mp3gain runs very long 37 | if not (self.force or self.force_album or self.force_track) and self.all_files_have_replaygain_tags(): 38 | self.log.error("All files already have ReplayGain tags, no action taken.") 39 | return 40 | if self.run_mp3gain(): 41 | self.update_tags() 42 | 43 | def all_files_have_replaygain_tags(self): 44 | """ Quick analysis to determine if input files contain replaygain_* tags. 45 | """ 46 | for fname in self.files: 47 | # open id3 tag 48 | f = taglib.File(fname) 49 | 50 | tags = set([tag.lower() for tag in f.tags.keys() if tag.lower().startswith("replaygain_")]) 51 | return tags == set(["replaygain_track_gain", "replaygain_album_gain", "replaygain_track_peak", "replaygain_album_peak"]) 52 | 53 | def run_mp3gain(self): 54 | """ Compute values for replaygain_* tags. 
55 | """ 56 | self.log.debug("running mp3gain on specified files") 57 | cmd = ["mp3gain", "-q", "-o", "-s", "s"] + self.files 58 | ret = True 59 | try: 60 | raw_data = subprocess.check_output(cmd, universal_newlines=True) 61 | self.raw_lines = raw_data.splitlines() 62 | except subprocess.CalledProcessError as exc: 63 | code = exc.returncode 64 | msg = "mp3gain returned error status: " + str(code) + "\n" 65 | msg += "-----------mp3gain output dump-----------\n" 66 | msg += exc.output 67 | msg += "\n-----------------------------------------\n" 68 | self.log.error(msg) 69 | ret = False 70 | except Exception as e: 71 | print(e) 72 | ret = False 73 | raise 74 | finally: 75 | return ret 76 | 77 | def update_tags(self): 78 | """ Add computed replaygain_* tags into all files. 79 | """ 80 | self.log.debug("parsing mp3gain output") 81 | album_parts = self.raw_lines[-1].strip().split("\t") 82 | 83 | # just in case 84 | if album_parts[0] != '"Album"': 85 | self.log.error("unable to parse mp3gain output") 86 | return 87 | 88 | a_gain = float(album_parts[2]) # album gain 89 | a_peak = float(album_parts[3]) / 32768.0 # album peak 90 | 91 | del self.raw_lines[0] # header 92 | del self.raw_lines[-1] # album summary 93 | for line in self.raw_lines: 94 | parts = line.strip().split("\t") 95 | fname = parts[0] # filename 96 | 97 | self.log.filename = fname 98 | self.log.debug("begin processing file") 99 | 100 | t_gain = float(parts[2]) # track gain 101 | t_peak = float(parts[3]) / 32768.0 # track peak 102 | 103 | # set t_gain, t_peak, a_gain, a_peak depending on options 104 | if self.force_album: 105 | t_gain = a_gain 106 | t_peak = a_peak 107 | elif self.force_track: 108 | a_gain = t_gain 109 | a_peak = t_peak 110 | 111 | # open id3 tag 112 | f = taglib.File(fname) 113 | 114 | # update tag 115 | f.tags["REPLAYGAIN_TRACK_GAIN"] = "%.2f dB" % t_gain 116 | f.tags["REPLAYGAIN_ALBUM_GAIN"] = "%.2f dB" % a_gain 117 | f.tags["REPLAYGAIN_TRACK_PEAK"] = "%.6f" % t_peak 118 | 
f.tags["REPLAYGAIN_ALBUM_PEAK"] = "%.6f" % a_peak 119 | 120 | # save tag 121 | self.log.debug("saving modified ID3 tag") 122 | f.save() 123 | 124 | self.log.debug("done processing file") 125 | self.log.filename = None 126 | 127 | 128 | class Main: 129 | """ Will parse input pattern and create ReplayGain object on every directory found. 130 | """ 131 | 132 | def __init__(self, logger, options): 133 | self.logger = logger 134 | self.options = options 135 | self.recursive = options.recursive 136 | self.paths = options.files 137 | del options.recursive # don't want to pass it to ReplayGain object 138 | del options.files # don't want to pass it to ReplayGain object 139 | 140 | async def run(self): 141 | # We could use the default single-threaded executor with basically the same performance 142 | # (because of Python's GIL), but the ThreadPoolExecutor allows to limit the maximum number 143 | # of workers and thus the maximum number of concurrent subprocesses. 144 | with ThreadPoolExecutor(max_workers=cores_count()) as executor: 145 | loop = asyncio.get_event_loop() 146 | tasks = [ 147 | loop.run_in_executor(executor, self.worker, path) 148 | for path in self.queue_generator() 149 | ] 150 | for result in await asyncio.gather(*tasks): 151 | pass 152 | 153 | def worker(self, paths): 154 | paths = sorted(list(paths)) 155 | 156 | # skip dirs not containing any mp3 file 157 | if len(paths) == 0: 158 | return 159 | 160 | # write info 161 | print("Procesing:") 162 | for path in paths: 163 | print(" " + path) 164 | 165 | try: 166 | # create ReplayGain object, pass files and run 167 | rg = ReplayGain(self.logger, self.options, paths) 168 | rg.run() 169 | except Exception as e: 170 | print(e, file=sys.stderr) 171 | raise 172 | 173 | def queue_generator(self): 174 | """ For each directory in self.files returns generator returning full paths to mp3 files in that folder. 175 | If self.files contains file paths instead of directory, it's returned as [file]. 
176 | """ 177 | 178 | def walk(root): 179 | dirs = [] 180 | files = [] 181 | for entry in os.scandir(root): 182 | if entry.is_dir(): 183 | dirs.append(entry.name) 184 | elif entry.is_file() and entry.name.endswith(".mp3"): 185 | files.append(entry.name) 186 | 187 | # first yield found files, then recurse into subdirs 188 | if files: 189 | yield (os.path.join(root, x) for x in files) 190 | if self.recursive: 191 | for d in dirs: # recurse into subdir 192 | for x in walk(os.path.join(root, d)): 193 | yield x 194 | 195 | for path in self.paths: 196 | if os.path.isdir(path): 197 | for x in walk(path): 198 | yield x 199 | else: 200 | yield [path] 201 | 202 | def main(prog_name, options): 203 | logger = Logger(options.log_level, prog_name) 204 | logger.debug("Selected mp3 files:") 205 | logger.debug("\n".join(sorted(options.files))) 206 | main = Main(logger, options) 207 | asyncio.run(main.run()) 208 | 209 | def argparse_path_handler(path): 210 | if not os.path.exists(path): 211 | raise argparse.ArgumentTypeError("invalid path: '%s'" % path) 212 | if os.path.isfile(path) and not path.endswith(".mp3"): 213 | raise argparse.ArgumentTypeError("not a mp3 file: '%s'" % path) 214 | return os.path.abspath(path) 215 | 216 | 217 | if __name__ == "__main__": 218 | parser = argparse.ArgumentParser(description="Write correct ReplayGain tags into mp3 files; uses mp3gain internally") 219 | 220 | # log level options 221 | log = parser.add_mutually_exclusive_group() 222 | log.add_argument("-q", "--quiet", dest="log_level", action="store_const", const=0, default=1, help="do not output error messages") 223 | log.add_argument("-v", "--verbose", dest="log_level", action="store_const", const=3, help="output warnings and informational messages") 224 | log.add_argument("-d", "--debug", dest="log_level", action="store_const", const=4, help="output debug messages") 225 | 226 | parser.add_argument("-r", "--recursive", action="store_true", help="when path to directory is specified, browse it 
recursively (albums still respected)") 227 | parser.add_argument("--force", action="store_true", help="force overwriting of existing ID3v2 ReplayGain tags") 228 | group = parser.add_mutually_exclusive_group() 229 | group.add_argument("--force-album", action="store_true", help="write replaygain_album_{gain,peak} values into replaygain_track_{gain,peak} tags") 230 | group.add_argument("--force-track", action="store_true", help="write replaygain_track_{gain,peak} values into replaygain_album_{gain,peak} tags") 231 | 232 | parser.add_argument("files", nargs="+", metavar="FILE | FOLDER", type=argparse_path_handler, help="path to mp3 file(s) or directory(ies)") 233 | 234 | args = parser.parse_args() 235 | main(sys.argv[0], args) 236 | -------------------------------------------------------------------------------- /rexe: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | HOST="" 6 | LOCAL_PATH="" 7 | REMOTE_PATH="" 8 | REXE_DIR="rexe" 9 | CMD="" 10 | DOWNLOAD="true" 11 | EXCLUDE=() 12 | EXCLUDE_DOWNLOAD=() 13 | 14 | function handle_argument() 15 | { 16 | if [[ "$HOST" == "" ]]; then 17 | case "$1" in 18 | *:*) 19 | HOST="${1%:*}" 20 | REMOTE_PATH="${1#*:}" 21 | ;; 22 | *) 23 | HOST="$1" 24 | ;; 25 | esac 26 | return 27 | fi 28 | if [[ "$CMD" == "" ]]; then 29 | CMD="$1" 30 | else 31 | CMD="$CMD $1" 32 | fi 33 | } 34 | 35 | while [ "$#" -gt 0 ]; do 36 | if [[ "$CMD" == "" ]]; then 37 | case "$1" in 38 | --path=*) LOCAL_PATH="${1#*=}"; shift 1;; 39 | -p|--path) LOCAL_PATH="$2"; shift 2;; 40 | --no-download) DOWNLOAD="false"; shift 1;; 41 | --exclude=*) EXCLUDE+=("--exclude" "${1#*=}"); shift 1;; 42 | --exclude) EXCLUDE+=("--exclude" "$2"); shift 2;; 43 | --exclude-download=*) EXCLUDE_DOWNLOAD+=("--exclude" "${1#*=}"); shift 1;; 44 | --exclude-download) EXCLUDE_DOWNLOAD+=("--exclude" "$2"); shift 2;; 45 | 46 | -*) echo "unknown option: $1" >&2; exit 1;; 47 | *) handle_argument "$1"; shift 1;; 48 | esac 49 
| else 50 | handle_argument "$1" 51 | shift 1 52 | fi 53 | done 54 | 55 | if [[ "$HOST" == "" ]]; then 56 | echo "error: remote host was not specified." >&2 57 | exit 1 58 | fi 59 | if [[ "$CMD" == "" ]]; then 60 | echo "error: remote command was not sepcified." >&2 61 | exit 1 62 | fi 63 | 64 | # fill in defaults 65 | if [[ "$LOCAL_PATH" == "" ]]; then 66 | LOCAL_PATH=$(pwd) 67 | fi 68 | # NOTE: the tmpfs for $XDG_RUNTIME_DIR may be too small (e.g. only 10% of the available RAM) 69 | #if [[ "$REMOTE_PATH" == "" ]]; then 70 | # REMOTE_PATH=$(ssh "$HOST" echo '$XDG_RUNTIME_DIR') 71 | #fi 72 | if [[ "$REMOTE_PATH" == "" ]]; then 73 | REMOTE_PATH="/tmp" 74 | REXE_DIR="rexe_$(ssh "$HOST" whoami)" 75 | fi 76 | 77 | if [[ ! -d "$LOCAL_PATH" ]]; then 78 | echo "error: local path '$LOCAL_PATH' is does not exist or is not a directory." >&2 79 | exit 1 80 | fi 81 | 82 | # create remote main directory for rexe with restricted permissions 83 | echo "Creating remote directory '$REMOTE_PATH/$REXE_DIR'..." 84 | ssh "$HOST" mkdir -m 0700 -p "$REMOTE_PATH/$REXE_DIR" 85 | 86 | # change remote path into full path 87 | _basename=$(basename "$LOCAL_PATH") 88 | REMOTE_PATH="$REMOTE_PATH/$REXE_DIR/$_basename" 89 | 90 | echo "Uploading local directory '$LOCAL_PATH' to remote directory '$REMOTE_PATH'..." 91 | rsync -rlptD "$LOCAL_PATH/" "$HOST:$REMOTE_PATH/" -e ssh -zz --info=progress2 --delete ${EXCLUDE[@]} 92 | 93 | echo "Executing remote command '$CMD'..." 94 | # ignore errors of the ssh command to always run rsync afterwards (even on keyboard interrupt) 95 | set +e 96 | ssh -t "$HOST" "cd ${REMOTE_PATH@Q}; bash --login -c -- ${CMD@Q}" 97 | set -e 98 | 99 | if [[ "$DOWNLOAD" != "false" ]]; then 100 | echo "Synchronizing remote directory '$REMOTE_PATH' into the local directory..." 
101 | # FIXME: EXCLUDE_DOWNLOAD does not work correctly for wildcards 102 | echo rsync -rlptD "$HOST:$REMOTE_PATH/" "$LOCAL_PATH/" -e ssh -zz --info=progress2 --delete ${EXCLUDE[@]} ${EXCLUDE_DOWNLOAD[@]} -v 103 | rsync -rlptD "$HOST:$REMOTE_PATH/" "$LOCAL_PATH/" -e ssh -zz --info=progress2 --delete ${EXCLUDE[@]} ${EXCLUDE_DOWNLOAD[@]} -v 104 | fi 105 | -------------------------------------------------------------------------------- /rmshit.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | import os 4 | import shutil 5 | from pathlib import Path 6 | 7 | import yaml 8 | 9 | DEFAULT_CONFIG = """ 10 | - ~/.adobe # Flash crap 11 | - ~/.macromedia # Flash crap 12 | - ~/.recently-used 13 | - ~/.local/share/recently-used.xbel 14 | - ~/.thumbnails 15 | - ~/.gconfd 16 | - ~/.gconf 17 | - ~/.local/share/gegl-0.2 18 | - ~/.FRD/log/app.log # FRD 19 | - ~/.FRD/links.txt # FRD 20 | - ~/.objectdb # FRD 21 | - ~/.gstreamer-0.10 22 | - ~/.pulse 23 | - ~/.esd_auth 24 | - ~/.config/enchant 25 | - ~/.spicec # contains only log file; unconfigurable 26 | - ~/.dropbox-dist 27 | - ~/.parallel 28 | - ~/.dbus 29 | - ~/ca2 # WTF? 30 | - ~/ca2~ # WTF? 31 | - ~/.distlib/ # contains another empty dir, don't know which software creates it 32 | - ~/.bazaar/ # bzr insists on creating files holding default values 33 | - ~/.bzr.log 34 | - ~/.nv/ 35 | - ~/.viminfo # configured to be moved to ~/.cache/vim/viminfo, but it is still sometimes created... 
36 | - ~/.npm/ # npm cache 37 | - ~/.java/ 38 | - ~/.swt/ 39 | - ~/.oracle_jre_usage/ 40 | - ~/.openjfx/ 41 | - ~/.org.jabref.gui.JabRefMain/ 42 | - ~/.org.jabref.gui.MainApplication/ 43 | - ~/.jssc/ 44 | - ~/.tox/ # cache directory for tox 45 | - ~/.pylint.d/ 46 | - ~/.qute_test/ 47 | - ~/.QtWebEngineProcess/ 48 | - ~/.qutebrowser/ # created empty, only with webengine backend 49 | - ~/.asy/ 50 | - ~/.cmake/ 51 | - ~/.gnome/ 52 | - ~/unison.log 53 | - ~/.texlive/ 54 | - ~/.w3m/ 55 | - ~/.subversion/ 56 | - ~/nvvp_workspace/ # created empty even when the path is set differently in nvvp 57 | - ~/.ansible/ 58 | - ~/.fltk/ 59 | - ~/.vnc/ 60 | - ~/.local/share/Trash/ # VSCode puts deleted files here 61 | """ 62 | 63 | 64 | def get_size(path): 65 | if Path(path).is_dir(): 66 | return sum(p.stat().st_size for p in Path(path).rglob("*")) 67 | return Path(path).stat().st_size 68 | 69 | 70 | def read_config(): 71 | """ 72 | Reads the list of shitty files from a YAML config. 73 | """ 74 | config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config/")) 75 | config_path = Path(config_dir) / "rmshit.yaml" 76 | 77 | # write default config if it does not exist 78 | if not config_path.exists(): 79 | with open(config_path, "w") as f: 80 | print(DEFAULT_CONFIG.strip(), file=f) 81 | 82 | with open(config_path, "r") as f: 83 | return yaml.safe_load(f) 84 | 85 | 86 | def yesno(question, default="n"): 87 | """ 88 | Asks the user for YES or NO, always case insensitive. 89 | Returns True for YES and False for NO. 
90 | """ 91 | prompt = f"{question} (y/[n]) " 92 | 93 | ans = input(prompt).strip().lower() 94 | 95 | if not ans: 96 | ans = default 97 | 98 | if ans == "y": 99 | return True 100 | return False 101 | 102 | 103 | def format_size(size_in_bytes): 104 | """Format file size in bytes to a human-readable string.""" 105 | if size_in_bytes <= 0: 106 | return "0 bytes" 107 | 108 | units = ["bytes", "KiB", "MiB", "GiB"] 109 | size = float(size_in_bytes) 110 | unit_index = min(int((size_in_bytes.bit_length() - 1) // 10), len(units) - 1) 111 | size /= 1024**unit_index 112 | 113 | return f"{size:.4g} {units[unit_index]}" 114 | 115 | 116 | def rmshit(): 117 | shittyfiles = read_config() 118 | 119 | print("Found shittyfiles:") 120 | found = [] 121 | total_size = 0 122 | for f in shittyfiles: 123 | absf = os.path.expanduser(f) 124 | if os.path.exists(absf): 125 | found.append(absf) 126 | size = get_size(absf) 127 | total_size += size 128 | print(f" {f} ({format_size(size)})") 129 | 130 | if len(found) == 0: 131 | print("No shitty files found :)") 132 | return 133 | 134 | if yesno("Remove all?", default="n"): 135 | for f in found: 136 | if os.path.isfile(f): 137 | os.remove(f) 138 | else: 139 | shutil.rmtree(f) 140 | print(f"All cleaned, {format_size(total_size)} freed.") 141 | else: 142 | print("No file removed") 143 | 144 | 145 | if __name__ == "__main__": 146 | rmshit() 147 | -------------------------------------------------------------------------------- /run-pvserver: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="$1" 4 | 5 | if [[ "$host" == "" ]]; then 6 | echo "usage: $0 [user@]hostname" 7 | exit 1 8 | fi 9 | 10 | hostname=$(ssh "$host" "uname -n") 11 | port=11111 12 | display=42 13 | 14 | # NOTE: bash -lc is needed to get full $PATH by sourcing /etc/profile.d/*.sh 15 | ssh -C -t -L "localhost:$port:$hostname:$port" "$host" "bash -lc 'xvfb-run --server-num=$display mpirun -np 2 pvserver --displays=:$display 
--server-port=$port'" 16 | -------------------------------------------------------------------------------- /sway-sensible-terminal: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import json 6 | import subprocess 7 | 8 | PATH = os.environ.get("PATH", "/usr/bin") 9 | TERMINAL = os.environ.get("TERMINAL", "alacritty") 10 | ARGS = sys.argv[1:] 11 | 12 | def get_cwd(tree): 13 | for node in tree.get("nodes", []): 14 | if node["focused"]: 15 | name = node["name"] 16 | for part in name.split(":"): 17 | if part.startswith("/") and os.path.exists(part): 18 | return part 19 | cwd = get_cwd(node) 20 | if cwd is not None: 21 | return cwd 22 | 23 | if os.path.isfile("/usr/bin/swaymsg"): 24 | prog = "swaymsg" 25 | elif os.path.isfile("/usr/bin/i3-msg"): 26 | prog = "i3-msg" 27 | else: 28 | raise Exception("Neither swaymsg or i3-msg was found in /usr/bin/") 29 | 30 | cmd = subprocess.run(f"{prog} -t get_tree", shell=True, check=True, capture_output=True) 31 | tree = json.loads(cmd.stdout) 32 | cwd = get_cwd(tree) 33 | if cwd is not None: 34 | if "alacritty" in TERMINAL: 35 | ARGS = ["--working-directory", cwd, *ARGS] 36 | else: 37 | ARGS = ["-d", cwd, *ARGS] 38 | 39 | for d in PATH.split(":"): 40 | path = os.path.join(d, TERMINAL) 41 | if not os.path.isfile(path): 42 | continue 43 | 44 | os.execl(path, path, *ARGS) 45 | 46 | raise Exception(f"Error: terminal '{TERMINAL}' was not found in $PATH ({PATH})") 47 | -------------------------------------------------------------------------------- /teams-attendance-parser.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | """ 4 | THE BEER-WARE LICENSE (Revision 42): 5 | Jakub Klinkovský wrote this file. As long as you retain this notice you 6 | can do whatever you want with this stuff. 
If we meet some day, and you think 7 | this stuff is worth it, you can buy me a beer in return. 8 | """ 9 | 10 | import argparse 11 | import os.path 12 | import datetime 13 | import sys 14 | 15 | # maybe depends on the locale in which MS Teams runs... 16 | TIMESTAMP_FORMATS = [ 17 | "%m/%d/%Y, %I:%M:%S %p", 18 | "%d. %m. %Y %H:%M:%S", 19 | ] 20 | 21 | CLASS_LENGTH = datetime.timedelta(minutes=100) 22 | 23 | def parse_timestamp(timestamp): 24 | last_error = None 25 | for format in TIMESTAMP_FORMATS: 26 | try: 27 | return datetime.datetime.strptime(timestamp, format) 28 | except ValueError as e: 29 | last_error = e 30 | continue 31 | raise last_error 32 | 33 | def parse_attendance_list(path): 34 | print(f"Parsing file {path}...") 35 | data = {} 36 | text = open(path, "r", encoding="utf-16").read() 37 | 38 | for line in text.splitlines(): 39 | # parse items on the line 40 | name, action, timestamp = line.split("\t") 41 | # skip header line 42 | if name == "Full Name" or name == "Celé jméno": 43 | continue 44 | 45 | # validate items 46 | assert "," in name, name 47 | assert action in {"Joined", "Left", "Připojeno", "Odpojil(a) se"}, f"unknown action: {action}" 48 | timestamp = parse_timestamp(timestamp) 49 | 50 | # initialize data 51 | user_actions = data.setdefault(name, []) 52 | 53 | # append action 54 | user_actions.append((action, timestamp)) 55 | 56 | return data 57 | 58 | def get_attendance(class_start, actions): 59 | class_end = class_start + CLASS_LENGTH 60 | 61 | # make sure actions are sorted by timestamp 62 | actions.sort(key=lambda a: a[1]) 63 | 64 | # calculate 65 | attendance = datetime.timedelta() 66 | joined = None 67 | for i, item in enumerate(actions): 68 | action, timestamp = item 69 | if action in {"Joined", "Připojeno"}: 70 | assert joined is None 71 | joined = timestamp 72 | elif action in {"Left", "Odpojil(a) se"}: 73 | assert joined is not None 74 | attendance += timestamp - joined 75 | joined = None 76 | else: 77 | assert False 78 | # handle 
#!/bin/bash
# Toggle touchpad status using libinput and xinput.
# BUG FIX: the original shebang was /bin/sh, but the script uses the
# bash-only [[ ]] test — it breaks when /bin/sh is dash/posh.

# Use xinput list and search for touchpads; take the first match's name.
device="$(xinput list | grep -P '(?<= )[\w\s:]*(?i)(touchpad|synaptics)(?-i).*?(?=\s*id)' -o | head -n1)"

# Flip the current state: disable when enabled, enable otherwise.
# An explicit if/else replaces the original `cond && disable || enable`,
# which would also run `enable` whenever `disable` itself failed.
if [[ "$(xinput list-props "$device" | grep -P ".*Device Enabled.*\K.(?=$)" -o)" == "1" ]]; then
    xinput disable "$device"
else
    xinput enable "$device"
fi
#! /usr/bin/env python

# Little script to "touch" a directory structure.
# Works like 'cp -r', but instead of copying full files, each new file
# is only "touched", so the tree structure is preserved and only empty
# files are created.

import sys
import os


class Main:
    def __init__(self, oldRoot, newRoot):
        # absolute path of the existing source tree
        self.oldRoot = oldRoot
        # absolute path of the tree to create (must not exist yet)
        self.newRoot = newRoot

    def browse(self, path):
        """Recursively mirror *path* (inside oldRoot) into newRoot."""
        for file in os.listdir(path):
            absPath = os.path.join(path, file)
            relPath = os.path.relpath(absPath, self.oldRoot)
            if os.path.isdir(absPath):
                os.mkdir(os.path.join(self.newRoot, relPath))
                self.browse(absPath)
            elif os.path.isfile(absPath):
                # create an empty file at the same relative path
                open(os.path.join(self.newRoot, relPath), "w").close()

    def touchTree(self):
        # BUG FIX: the original referenced the module-level global
        # 'newRoot' here instead of self.newRoot, so the class only
        # worked by accident when run as this script.
        os.mkdir(self.newRoot)
        self.browse(self.oldRoot)


if __name__ == "__main__":
    if len(sys.argv) != 3 or not os.path.isdir(sys.argv[1]) or os.path.exists(sys.argv[2]):
        sys.exit(1)

    oldRoot = os.path.abspath(sys.argv[1])
    newRoot = os.path.abspath(sys.argv[2])

    print(oldRoot + " => " + newRoot)
    main = Main(oldRoot, newRoot)
    main.touchTree()
#! /usr/bin/env python3

"""Waybar custom module: print today's remaining khal events as JSON."""

import subprocess
import json


def build_output(output):
    """Convert raw `khal list` output into the waybar JSON payload (a dict).

    The first remaining event becomes the bar text, all events form the
    tooltip; with no events, "text" is empty so the module hides.
    """
    lines = [line.strip() for line in output.split("\n")]

    # filter out lines that do not start with a number
    # (khal list includes headings like "Monday, 2025-03-31" for each day)
    lines = [line for line in lines if line and line[0].isdigit()]

    data = {}
    if lines:
        data["text"] = " " + lines[0]
        data["tooltip"] = "\n".join(lines)
    else:
        data["text"] = ""
    return data


def main():
    cmd = [
        "khal",
        "list",
        "now",
        "23:59",
        "--once",
        "--format",
        "{start-time} ({location}) {title}{repeat-symbol}{alarm-symbol}",
    ]
    output = subprocess.run(cmd, check=True, text=True, capture_output=True).stdout
    print(json.dumps(build_output(output)))


if __name__ == "__main__":
    main()
"$fname" 43 | ;; 44 | zst) 45 | zstd -dkv "$fname" 46 | ;; 47 | zip) 48 | unzip "$fname" -d "$basename" 49 | ;; 50 | rar) 51 | mkdir "$basename" 52 | pushd "$basename" 53 | unrar x "$fname" 54 | popd 55 | ;; 56 | 7z) 57 | 7za x "$fname" -o"$basename" 58 | ;; 59 | *) 60 | echo "extract: '$fname' cannot be extracted" 1>&2 61 | success=1 62 | ;; 63 | esac 64 | 65 | [[ $success == 0 ]] && success=$? 66 | 67 | # if destination directory contains only one file/dir, move it to cwd 68 | if [[ $success == 0 ]]; then 69 | count=$(find "$basename" -maxdepth 1 -mindepth 1 | wc -l) 70 | 71 | if [[ $count == 1 ]]; then 72 | name=$(basename "$(find "$basename" -maxdepth 1 -mindepth 1)") 73 | 74 | # can't move ./foo/foo into ./foo 75 | if [[ "$basename" == "$name" ]]; then 76 | tmp="$name.tmp" 77 | else 78 | tmp="$name" 79 | fi 80 | 81 | mv "$basename/$name" "$tmp" 82 | rmdir "$basename" 83 | mv "$tmp" "$name" 84 | fi 85 | fi 86 | } 87 | 88 | if [[ $# == 0 ]]; then 89 | echo "Usage: $0 file [file ...]" 90 | exit 1 91 | fi 92 | 93 | while [[ $# > 0 ]]; do 94 | if [[ -f "$1" ]]; then 95 | extract "$1" 96 | else 97 | echo "extract: '$1' is not a valid file" 98 | fi 99 | shift 100 | done 101 | --------------------------------------------------------------------------------