├── .gitignore ├── CopyGeoTags.py ├── GetGeoTags.py ├── NEOMagnitude.py ├── README.md ├── SetGeoTags.py ├── audio ├── extract_cover_arts.py ├── shannons.py ├── slim.py └── sort_cover_arts.py ├── backup-lv-snapshot.sh ├── backup-svn.sh ├── battery_life.py ├── cluster └── sugvc.py ├── dicmp.py ├── external_storage_tools ├── pbind ├── psync ├── switchoff └── switchon ├── genpasswd.py ├── hdr_viewer.py ├── hipparcos_main_catalogue_reader.py ├── lvsnap.sh ├── macos ├── get_default_gateway.sh ├── get_default_interface.sh ├── get_default_network_service.sh └── touchbar_deflicker.applescript ├── pfetch.py ├── timecap.sh ├── verify-local-repo.py ├── vid2gif.sh └── whatsnew.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.py[cod] 3 | *~ 4 | -------------------------------------------------------------------------------- /CopyGeoTags.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Copy GeoTags from reference photos. 3 | 4 | Syntax: 5 | CopyGeoTags.py reference target [method] 6 | 7 | reference - reference photo(s). Single file or directory. 8 | target - photo(s) that need GeoTags. Single file or directory. 9 | method - optional. Interpolation method, e.g., nearest, linear, cubic. 
10 | 11 | """ 12 | SupportedFileExt = ['.jpg','.jpeg'] 13 | R = 6400.0e3 14 | import piexif 15 | import numpy as np 16 | import sys 17 | import os 18 | from os import path 19 | from fractions import Fraction 20 | from datetime import datetime 21 | from scipy.interpolate import interp1d 22 | 23 | def float2rational(f): 24 | F = Fraction(f).limit_denominator() 25 | return (F.numerator, F.denominator) 26 | 27 | def dd2dms(dd): 28 | mnt, sec = divmod(dd*3600.0, 60) 29 | deg, mnt = divmod(mnt, 60) 30 | return deg, mnt, sec 31 | 32 | def exiftag2timestamp(exiftag): 33 | try: 34 | return ( 35 | datetime.strptime(exiftag[0x9003].decode(), 36 | r'%Y:%m:%d %H:%M:%S')- 37 | datetime.strptime("1970-01-01T00:00:00", 38 | r'%Y-%m-%dT%H:%M:%S') 39 | ).total_seconds() 40 | except KeyError: 41 | return None 42 | 43 | def geotag2xyzt(geotag): 44 | try: 45 | latref = geotag[0x0001] 46 | lattuple = geotag[0x0002] 47 | lonref = geotag[0x0003] 48 | lontuple = geotag[0x0004] 49 | altref = geotag[0x0005] 50 | alttuple = geotag[0x0006] 51 | HH,MM,SS = geotag[0x0007] 52 | lat = np.deg2rad( 53 | lattuple[0][0]/ 1.0/lattuple[0][1]+ 54 | lattuple[1][0]/ 60.0/lattuple[1][1]+ 55 | lattuple[2][0]/3600.0/lattuple[2][1] 56 | ) 57 | if latref == 'S': 58 | lat = 0 - lat 59 | lon = np.deg2rad( 60 | lontuple[0][0]/ 1.0/lontuple[0][1]+ 61 | lontuple[1][0]/ 60.0/lontuple[1][1]+ 62 | lontuple[2][0]/3600.0/lontuple[2][1] 63 | ) 64 | if lonref == 'W': 65 | lon = 0 - lon 66 | alt = alttuple[0]*1.0/alttuple[1] 67 | if altref == 1: 68 | alt = 0 - alt 69 | x = np.cos(lon)*np.cos(lat)*(R+alt) 70 | y = np.sin(lon)*np.cos(lat)*(R+alt) 71 | z = np.sin(lat)*(R+alt) 72 | t = HH[0]*3600.0/HH[1] + MM[0]*60.0/MM[1] + SS[0]*1.0/SS[1] + ( 73 | datetime.strptime(geotag[0x001d],r'%Y:%m:%d')-datetime(1970,1,1,0,0,0) 74 | ).total_seconds() 75 | return x,y,z,t 76 | except KeyError: 77 | return None 78 | 79 | try: 80 | method = sys.argv[3] 81 | except IndexError: 82 | method = 'nearest' 83 | 84 | ts = [] 85 | rs = [] 86 | if 
path.isdir(sys.argv[1]): 87 | for f in os.listdir(sys.argv[1]): 88 | if path.splitext(f)[1].lower() in SupportedFileExt: 89 | f = path.join(sys.argv[1],f) 90 | d = piexif.load(f) 91 | t = exiftag2timestamp(d['Exif']) 92 | r = geotag2xyzt(d['GPS']) 93 | if r is not None: 94 | ts.append(t) 95 | rs.append(r) 96 | else: 97 | print('Missing datetime and/or GPS data in {}.'.format(f)) 98 | elif path.isfile(sys.argv[1]): 99 | f = sys.argv[1] 100 | d = piexif.load(f) 101 | t = exiftag2timestamp(d['Exif']) 102 | r = geotag2xyzt(d['GPS']) 103 | if r is not None: 104 | ts.append(t) 105 | rs.append(r) 106 | else: 107 | print('Missing datetime and/or GPS data in {}.'.format(f)) 108 | if len(ts) == 0: 109 | raise StandardError('No reference found.') 110 | elif len(ts) == 1: 111 | xfunc = lambda t:rs[0][0] 112 | yfunc = lambda t:rs[0][1] 113 | zfunc = lambda t:rs[0][2] 114 | tfunc = lambda t:t-(ts[0]-rs[0][3]) 115 | else: 116 | xfunc = interp1d(np.double(ts), np.double(rs)[:,0], kind=method, bounds_error=False, fill_value='extrapolate') 117 | yfunc = interp1d(np.double(ts), np.double(rs)[:,1], kind=method, bounds_error=False, fill_value='extrapolate') 118 | zfunc = interp1d(np.double(ts), np.double(rs)[:,2], kind=method, bounds_error=False, fill_value='extrapolate') 119 | ofunc = interp1d(np.double(ts), np.double(ts)-np.double(rs)[:,3], kind='nearest', bounds_error=False, fill_value='extrapolate') 120 | tfunc = lambda t:t-ofunc(t) 121 | targets = [] 122 | if path.isdir(sys.argv[2]): 123 | for f in os.listdir(sys.argv[2]): 124 | if path.splitext(f)[1].lower() in SupportedFileExt: 125 | targets.append(path.join(sys.argv[2],f)) 126 | elif path.isfile(sys.argv[2]): 127 | targets.append(sys.argv[2]) 128 | for f in targets: 129 | d = piexif.load(f) 130 | t = exiftag2timestamp(d['Exif']) 131 | x = xfunc(t) 132 | y = yfunc(t) 133 | z = zfunc(t) 134 | utc = tfunc(t) 135 | r = float(sum((x**2.0,y**2.0,z**2.0))**0.5) 136 | alt = r-R 137 | lat = np.rad2deg(np.arcsin(z/r)) 138 | lon = 
np.rad2deg(np.arctan2(y,x)) 139 | if lat<0: 140 | latref = 'S' 141 | lat = abs(lat) 142 | else: 143 | latref = 'N' 144 | if lon<0: 145 | lonref = 'W' 146 | lon = abs(lon) 147 | else: 148 | lonref = 'E' 149 | if alt<0: 150 | altref = 1 151 | alt = abs(alt) 152 | else: 153 | altref = 0 154 | dd,mm,ss = dd2dms(lon) 155 | lontuple = (float2rational(dd),float2rational(mm),float2rational(ss)) 156 | dd,mm,ss = dd2dms(lat) 157 | lattuple = (float2rational(dd),float2rational(mm),float2rational(ss)) 158 | alttuple = float2rational(alt) 159 | dt = datetime.fromtimestamp(utc) 160 | timetuple = ((dt.hour,1),(dt.minute,1),(dt.second*1000+dt.microsecond/1000,1000)) 161 | datestr = datetime(dt.year,dt.month,dt.day).strftime(r'%Y:%m:%d') 162 | gps_ifd = { 163 | piexif.GPSIFD.GPSVersionID: (2,0,0,0), 164 | piexif.GPSIFD.GPSLatitudeRef: latref, 165 | piexif.GPSIFD.GPSLatitude: lattuple, 166 | piexif.GPSIFD.GPSLongitudeRef: lonref, 167 | piexif.GPSIFD.GPSLongitude: lontuple, 168 | piexif.GPSIFD.GPSAltitudeRef: altref, 169 | piexif.GPSIFD.GPSAltitude: alttuple, 170 | piexif.GPSIFD.GPSTimeStamp: timetuple, 171 | piexif.GPSIFD.GPSDateStamp: datestr, 172 | } 173 | d['GPS'] = gps_ifd 174 | piexif.insert(piexif.dump(d), f) 175 | -------------------------------------------------------------------------------- /GetGeoTags.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import piexif 3 | import sys 4 | exif_dict = piexif.load(sys.argv[1]) 5 | for tag in exif_dict["GPS"]: 6 | print(tag) 7 | print(piexif.TAGS["GPS"][tag]["name"], exif_dict["GPS"][tag]) 8 | -------------------------------------------------------------------------------- /NEOMagnitude.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import numpy as np 3 | import sys 4 | Vsun = -26.74 5 | Rsat = 1.0 # Radius of the satellite orbit in AU. 6 | Rsun = 1.0 # Radius of the Earth orbit in AU. 
7 | Rneo = 1.3 # Radius of the maximum perihelion in AU. 8 | Albd = 0.13 # Albedo of the NEO surface. 9 | AU = 1.496e11 # Astromonical Unit in meter. 10 | try: 11 | Dneo = eval(sys.argv[1]) 12 | except: 13 | Dneo = 140.0 # Diameter of the NEO in meter. 14 | Vneo = -2.5*np.log10((Albd*(Dneo/(Rneo*AU))**2.0)/(16.0*(Rneo-Rsat)**2.0))+Vsun 15 | print Vneo 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Toolbox 2 | 3 | My homemade automation toolbox. 4 | 5 | ## CopyGeoTags 6 | Copy GeoTags from reference photos. 7 | ### Syntax: 8 | ```bash 9 | CopyGeoTags.py reference target [method] 10 | ``` 11 | `reference` is reference photo(s) (single file or directory), e.g., photos shot with your smartphone. `target` is photo(s) that require GeoTags, e.g., photos shot with your DSLR. `method` indicates the interpolation method, e.g., nearest, linear, cubic, which is optional (Default: LINEAR). 12 | 13 | ## pfetch 14 | 15 | RSYNC with multi-threads parallelism and auto-retry. 16 | 17 | ### Examples: 18 | ```bash 19 | $ python pfetch.py -azu user@server:/path fetch_list ./ 20 | ``` 21 | 22 | 23 | ## GeoTag 24 | 25 | Get/Set geotags (GPS tags in EXIF) of photos. Requires piexif and PIL (pillow). 26 | 27 | ### Examples: 28 | 29 | ```bash 30 | $ python GetGeoTags.py photo.jpg 31 | $ python SetGeoTags.py 4.000000N 50.000000E 3.5 photo.jpg 32 | ``` 33 | 34 | 35 | 36 | ## Check battery in CLI 37 | 38 | Get Linux kernel reported battery percentage in command line interface. 39 | 40 | ### Example: 41 | 42 | ```bash 43 | $ python battery_life.py 44 | ``` 45 | 46 | ## Random password generator 47 | 48 | Generates random password with specific length. Digits, lowercase letters and uppercase letters are included. 49 | 50 | ### Example: 51 | 52 | The following command returns a random password with 8 characters. 
53 | 54 | ```bash 55 | $ python genpasswd.py 8 56 | ``` 57 | 58 | -------------------------------------------------------------------------------- /SetGeoTags.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Insert GPS tags to the photo. 3 | 4 | Syntax: 5 | python SetGeoTags.py latitude longtitude altitude|"" filename 6 | 7 | latitude is a decimal number suffixed a letter 'S' or 'N'. 8 | longitude is a decimal number suffixed a letter 'E' or 'W'. 9 | altitude is a decimal number or a null string "". 10 | 11 | Requirements: piexif and PIL 12 | 13 | Author: pigsboss@github 14 | """ 15 | 16 | from PIL import Image 17 | from fractions import Fraction 18 | import piexif 19 | import sys 20 | 21 | def float2rational(f): 22 | F = Fraction(f).limit_denominator() 23 | return (F.numerator, F.denominator) 24 | 25 | def dd2dms(dd): 26 | mnt, sec = divmod(dd*3600.0, 60) 27 | deg, mnt = divmod(mnt, 60) 28 | return deg, mnt, sec 29 | 30 | try: 31 | lat_str = sys.argv[1] 32 | lon_str = sys.argv[2] 33 | alt_str = sys.argv[3] 34 | filename = sys.argv[4] 35 | except: 36 | print(__doc__) 37 | sys.exit() 38 | 39 | lon_ref = lon_str[-1] 40 | lon_val = float(lon_str[:-1]) 41 | lat_ref = lat_str[-1] 42 | lat_val = float(lat_str[:-1]) 43 | if len(alt_str) > 0: 44 | alt_ref = 0 45 | alt_val = float(alt_str) 46 | else: 47 | alt_ref = None 48 | alt_val = None 49 | 50 | d,m,s = dd2dms(lon_val) 51 | lon_tuple = (float2rational(d), float2rational(m), float2rational(s)) 52 | d,m,s = dd2dms(lat_val) 53 | lat_tuple = (float2rational(d), float2rational(m), float2rational(s)) 54 | 55 | exif_dict = piexif.load(filename) 56 | gps_ifd = { 57 | piexif.GPSIFD.GPSVersionID: (2,0,0,0), 58 | piexif.GPSIFD.GPSLatitudeRef: lat_ref, 59 | piexif.GPSIFD.GPSLatitude: lat_tuple, 60 | piexif.GPSIFD.GPSLongitudeRef: lon_ref, 61 | piexif.GPSIFD.GPSLongitude: lon_tuple, 62 | } 63 | if alt_ref is not None: 64 | gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 
alt_ref 65 | gps_ifd[piexif.GPSIFD.GPSAltitude] = float2rational(alt_val) 66 | 67 | exif_dict["GPS"] = gps_ifd 68 | exif_bytes = piexif.dump(exif_dict) 69 | piexif.insert(exif_bytes, filename) 70 | -------------------------------------------------------------------------------- /audio/extract_cover_arts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #coding=utf-8 3 | """Extract album cover arts. 4 | 5 | Usage: 6 | extract_cover_arts.py [options] SRC DEST 7 | 8 | Options: 9 | -h print this help message. 10 | -v verbose. 11 | -o output filename. Default: cover.png. 12 | -i input audio file format(s). Default: all supported formats. 13 | """ 14 | from getopt import gnu_getopt 15 | from subprocess import run, PIPE, DEVNULL 16 | import sys, os 17 | from os import path 18 | opts, args = gnu_getopt(sys.argv[1:], 'hvo:i:') 19 | verbose = False 20 | outfile = 'cover.png' 21 | formats = ['dsf', 'flac'] 22 | for opt, val in opts: 23 | if opt == '-h': 24 | print(__doc__) 25 | sys.exit() 26 | elif opt == '-v': 27 | verbose = True 28 | elif opt == '-o': 29 | outfile = val 30 | elif opt == '-i': 31 | formats = val.split(',') 32 | else: 33 | assert False, "unhandled option" 34 | try: 35 | srcdir = args[0] 36 | outdir = args[1] 37 | except IndexError: 38 | print(__doc__) 39 | sys.exit() 40 | 41 | while not path.exists(srcdir): 42 | srcdir = input(u"{} does not exist. Please try again or press [Ctrl-c] to quit: ".format(srcdir)) 43 | while path.exists(outdir): 44 | outdir = input(u"{} already exists. 
Please try again or press [Ctrl-c] to quit: ".format(outdir)) 45 | os.makedirs(outdir) 46 | args = ['find', srcdir, '-type', 'f'] 47 | for fmt in formats: 48 | args += ['-name', '*.{}'.format(fmt), '-or'] 49 | result = run(args[:-1], check=True, stdout=PIPE).stdout 50 | albums = dict() 51 | for trkpath in result.decode().split('\n'): 52 | if path.isfile(trkpath): 53 | albpath, trkfile = path.split(trkpath) 54 | if albpath in albums: 55 | albums[albpath] += [trkfile] 56 | else: 57 | albums[albpath] = [trkfile] 58 | 59 | for alb in albums: 60 | outpath = path.join(outdir, path.relpath(alb, srcdir)) 61 | os.makedirs(outpath) 62 | trkpath = path.join(alb, albums[alb][0]) 63 | if verbose: 64 | sys.stdout.write(u' Extract from {}......'.format(trkpath)) 65 | sys.stdout.flush() 66 | run(["ffmpeg", "-i", trkpath, "-an", "-c:v", "png", path.join(outpath, outfile)], stdout=DEVNULL, stderr=DEVNULL) 67 | if verbose: 68 | sys.stdout.write(u'\r Extract from {}......OK\n'.format(trkpath)) 69 | sys.stdout.flush() 70 | -------------------------------------------------------------------------------- /audio/shannons.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #coding=utf-8 3 | """Estimate information entropy in bits of specified audio file. 
4 | 5 | Copyright: pigsboss@github 6 | """ 7 | import numpy as np 8 | import soundfile as sf 9 | import sys 10 | from getopt import gnu_getopt 11 | from os import path 12 | 13 | def analyze_soundfile(filepath, bits=28): 14 | data, samplerate = sf.read(filepath, always_2d=True) 15 | nframes, nchans = data.shape 16 | dmin = np.min(data[:,0]) 17 | dmax = np.max(data[:,0]) 18 | x = np.int64((data[:,0]-dmin)/(dmax-dmin)*(2**bits-1)+0.5) 19 | cts = np.bincount(x) 20 | idx = np.nonzero(cts) 21 | pmf = cts[idx] / np.sum(cts) 22 | shs = -np.sum(pmf * np.log2(pmf)) 23 | return { 24 | 'path':filepath, 25 | 'samplerate':samplerate, 26 | 'nframes':nframes, 27 | 'nchans':nchans, 28 | 'shannons':shs 29 | } 30 | 31 | def pprint(info_dict, display=True): 32 | if display: 33 | print("File path: {}".format(info_dict['path'])) 34 | print("Sample rate: {:d} Hz".format(info_dict['samplerate'])) 35 | print("Frames per channel: {:d}".format(info_dict['nframes'])) 36 | print("Channels: {:d}".format(info_dict['nchans'])) 37 | print("Entropy bits: {:.2f}".format(info_dict['shannons'])) 38 | else: 39 | print("{}: {:d} channels * {:d} Hz * {:.2f} bits".format( 40 | info_dict['path'], 41 | info_dict['nchans'], 42 | info_dict['samplerate'], 43 | info_dict['shannons'] 44 | )) 45 | 46 | if __name__ == '__main__': 47 | opts, args = gnu_getopt(sys.argv[1:], 'hdb:') 48 | display = False 49 | bits = 28 50 | for opt, val in opts: 51 | if opt == '-h': 52 | print(__doc__) 53 | sys.exit() 54 | elif opt == '-d': 55 | display = True 56 | elif opt == '-b': 57 | bits = int(val) 58 | info = analyze_soundfile(path.abspath(path.normpath(path.realpath(args[0]))), bits=bits) 59 | pprint(info, display=display) 60 | -------------------------------------------------------------------------------- /audio/slim.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #coding=utf-8 3 | """SLIM is a slim SONY Music Library Manager. 
4 | 5 | Usage: slim.py action [options] 6 | 7 | Actions: 8 | 9 | scan 10 | Scan SONY Music Library, generate metadata. 11 | Syntax: slim.py scan -s SRC DEST 12 | SRC is path of SONY Music Library. 13 | 14 | build 15 | Build SONY Music Library, generate metadata and checksums and extract album cover arts. 16 | Syntax: slim.py build -s SRC DEST 17 | SRC is path of SONY Music Library. 18 | 19 | print 20 | print pre-built SONY Music Library from user specified database. 21 | Syntax: slim.py print PATH_TO_DB 22 | 23 | export 24 | Export SONY Music Library to specified audio format. 25 | 26 | update 27 | Re-scan filesystem and update SONY Music Library. 28 | 29 | sort[_cover_arts] 30 | Sort album cover arts by user specified key. 31 | 32 | Options: 33 | -v verbose. 34 | -s source (SONY Music Library) path. 35 | -m match, in [artist]/[album]/[discnumber.][tracknumber - ][title]. 36 | -e '[s]kip', '[o]verwrite' or '[u]pdate' if user specified output file already exists. 37 | skip skip exporting. 38 | overwrite overwrite existing file. 39 | update skip if existing file has been exported from the same source. 40 | -p audio format preset. 41 | Available presets (case-insensitive): 42 | dxd (up to 384kHz/24bit FLAC). 43 | ldac (up to 96kHz/24bit FLAC). 44 | cd (up to 48kHz/24bit FLAC). 45 | itunes (256kbps 44.1kHz VBR AAC). 46 | aac (44.1kHz/48kHz VBR 5 AAC). 47 | opus (44.1kHz/48kHz 128bps Opus). 48 | radio (128kbps 44.1kHz VBR MP3). 49 | -b bitrate, in kbps (overrides preset default bitrate). 50 | -k key for sorting. Default: width. 51 | -r sort in reverse order. 52 | -o output. 
53 | 54 | Copyright: pigsboss@github 55 | """ 56 | 57 | import sys 58 | import os 59 | import hashlib 60 | import signal 61 | import pickle 62 | import warnings 63 | import csv 64 | import shutil 65 | import unicodedata 66 | import numpy as np 67 | from mutagen.mp3 import MP3 68 | from mutagen.dsf import DSF 69 | from mutagen.flac import FLAC, Picture 70 | from mutagen.mp4 import MP4, MP4Cover, MP4FreeForm 71 | from mutagen.id3 import ID3, APIC, ID3TimeStamp, TextFrame, COMM 72 | from mutagen.oggopus import OggOpus 73 | from multiprocessing import cpu_count, Pool, Process, Queue 74 | from time import time, sleep 75 | from os import path 76 | from getopt import gnu_getopt 77 | from subprocess import run, Popen, PIPE, DEVNULL, CalledProcessError 78 | from tempfile import TemporaryDirectory 79 | from mpi4py import MPI 80 | 81 | comm = MPI.COMM_WORLD 82 | 83 | ## Reference: 84 | ## https://wiki.hydrogenaud.io/index.php?title=Tag_Mapping 85 | ## https://mutagen.readthedocs.io/en/latest/api/vcomment.html#mutagen._vorbis.VCommentDict 86 | TAG_MAP = { 87 | 'ID3': { 88 | 'grouping' : 'TIT1', 89 | 'title' : 'TIT2', 90 | 'subtitle' : 'TIT3', 91 | 'album' : 'TALB', 92 | 'discsubtitle' : 'TSST', 93 | 'artist' : 'TPE1', 94 | 'albumartist' : 'TPE2', 95 | 'conductor' : 'TPE3', 96 | 'remixer' : 'TPE4', 97 | 'composer' : 'TCOM', 98 | 'lyricist' : 'TEXT', 99 | 'publisher' : 'TPUB', 100 | 'tracknumber' : 'TRCK', 101 | 'discnumber' : 'TPOS', 102 | 'date' : 'TDRC', 103 | 'year' : 'TYER', 104 | 'isrc' : 'TSRC', 105 | 'encoded-by' : 'TENC', 106 | 'encoder' : 'TSSE', 107 | 'compilation' : 'TCMP', 108 | 'genre' : 'TCON', 109 | 'comment' : 'COMM', 110 | 'copyright' : 'TCOP', 111 | 'language' : 'TLAN' 112 | }, 113 | 'MP4': { 114 | 'grouping' : '\xa9grp', 115 | 'title' : '\xa9nam', 116 | 'subtitle' : '----:com.apple.iTunes:SUBTITLE', 117 | 'album' : '\xa9alb', 118 | 'discsubtitle' : '----:com.apple.iTunes:DISCSUBTITLE', 119 | 'albumartist' : 'aART', 120 | 'artist' : '\xa9ART', 121 | 
'conductor' : '----:com.apple.iTunes:CONDUCTOR', 122 | 'remixer' : '----:com.apple.iTunes:REMIXER', 123 | 'composer' : '\xa9wrt', 124 | 'lyricist' : '----:com.apple.iTunes:LYRICIST', 125 | 'license' : '----:com.apple.iTunes:LICENSE', 126 | 'label' : '----:com.apple.iTunes:LABEL', 127 | 'tracknumber' : 'trkn', 128 | 'discnumber' : 'disk', 129 | 'year' : '\xa9day', 130 | 'isrc' : '----:com.apple.iTunes:ISRC', 131 | 'encoded-by' : '\xa9too', 132 | 'genre' : '\xa9gen', 133 | 'compilation' : 'cpil', 134 | 'comment' : '\xa9cmt', 135 | 'copyright' : 'cprt', 136 | 'language' : '----:com.apple.iTunes:LANGUAGE', 137 | 'description' : 'desc' 138 | }, 139 | 'Vorbis': { 140 | 'grouping' : 'grouping', 141 | 'title' : 'title', 142 | 'subtitle' : 'subtitle', 143 | 'album' : 'album', 144 | 'discsubtitle' : 'discsubtitle', 145 | 'albumartist' : 'albumartist', 146 | 'artist' : 'artist', 147 | 'conductor' : 'conductor', 148 | 'remixer' : 'remixer', 149 | 'composer' : 'composer', 150 | 'lyricist' : 'lyricist', 151 | 'performer' : 'performer', 152 | 'publisher' : 'publisher', 153 | 'label' : 'label', 154 | 'license' : 'license', 155 | 'tracknumber' : 'tracknumber', 156 | 'totaltracks' : 'totaltracks', 157 | 'tracktotal' : 'tracktotal', 158 | 'discnumber' : 'discnumber', 159 | 'totaldiscs' : 'totaldiscs', 160 | 'disctotal' : 'disctotal', 161 | 'date' : 'date', 162 | 'isrc' : 'isrc', 163 | 'encoded-by' : 'encoded-by', 164 | 'encoder' : 'encoder', 165 | 'genre' : 'genre', 166 | 'compilation' : 'compilation', 167 | 'comment' : 'comment', 168 | 'copyright' : 'copyright', 169 | 'language' : 'language', 170 | 'description' : 'description' 171 | } 172 | } 173 | 174 | PRESETS = { 175 | 'dxd': { 176 | 'max_sample_rate' : 384000, 177 | 'max_bits_per_sample' : 24, 178 | 'format' : 'FLAC', 179 | 'extension' : 'flac', 180 | 'art_format' : 'png', 181 | 'art_resolution' : None 182 | }, 183 | 'ldac': { 184 | 'max_sample_rate' : 96000, 185 | 'max_bits_per_sample' : 24, 186 | 'format' : 'FLAC', 187 | 
'extension' : 'flac', 188 | 'art_format' : 'png', 189 | 'art_resolution' : 800 190 | }, 191 | 'cd': { 192 | 'max_sample_rate' : 48000, 193 | 'max_bits_per_sample' : 24, 194 | 'format' : 'FLAC', 195 | 'extension' : 'flac', 196 | 'art_format' : 'png', 197 | 'art_resolution' : 800 198 | }, 199 | ## Reference: https://images.apple.com/itunes/mastered-for-itunes/docs/mastered_for_itunes.pdf 200 | 'itunes': { 201 | 'bitrate' : 256000, 202 | 'format' : 'M4A', 203 | 'extension' : 'm4a', 204 | 'art_format' : 'jpeg', 205 | 'art_resolution' : 640 206 | }, 207 | 'aac': { 208 | 'max_sample_rate' : 48000, 209 | # variable bitrate (-vbr) mode: 1, 2, 3, 4, and 5. (Reference: http://wiki.hydrogenaud.io/index.php?title=Fraunhofer_FDK_AAC#Bitrate_Modes) 210 | 'bitrate' : 5, 211 | 'format' : 'M4A', 212 | 'extension' : 'm4a', 213 | 'art_format' : 'jpeg', 214 | 'art_resolution' : 640 215 | }, 216 | 'opus': { 217 | 'max_sample_rate' : 48000, 218 | 'bitrate' : 128, ## kbps 219 | 'format' : 'OGG', 220 | 'extension' : 'opus', 221 | 'art_format' : 'jpeg', 222 | 'art_resolution' : 640 223 | }, 224 | 'radio': { 225 | 'max_sample_rate' : 48000, 226 | 'bitrate' : 128, ## kbps 227 | 'format' : 'MP3', 228 | 'extension' : 'mp3', 229 | 'art_format' : 'jpeg', 230 | 'art_resolution' : 200 231 | } 232 | } 233 | 234 | DEFAULT_CHECKSUM_PROG = 'sha224sum' 235 | SAFE_PATH_CHARS = ' _' 236 | 237 | def hostname(): 238 | return run(['hostname','-f'], check=True, stdout=PIPE).stdout.decode().splitlines()[0] 239 | 240 | def nwidechars(s): 241 | return sum(unicodedata.east_asian_width(x)=='W' for x in s) 242 | 243 | def width(s): 244 | return len(s)+nwidechars(s) 245 | 246 | def uljust(s, w): 247 | return s.ljust(w-nwidechars(s)) 248 | 249 | def wait_file(filepath, timeout=5.0): 250 | t = 0.0 251 | dt = 0.1 252 | tic = time() 253 | while (t < timeout) and (not path.isfile(filepath)): 254 | sleep(dt) 255 | t = time()-tic 256 | return path.isfile(filepath) 257 | 258 | def load_tags(audio_file): 259 | """Load tags 
from audio file. 260 | 261 | Reference: 262 | http://age.hobba.nl/audio/mirroredpages/ogg-tagging.html 263 | 264 | """ 265 | if audio_file.lower().endswith('.dsf'): 266 | audio = DSF(audio_file) 267 | scheme = 'ID3' 268 | elif audio_file.lower().endswith('.flac'): 269 | audio = FLAC(audio_file) 270 | scheme = 'Vorbis' 271 | elif audio_file.lower().endswith('.m4a'): 272 | audio = MP4(audio_file) 273 | scheme = 'MP4' 274 | elif audio_file.lower().endswith('.mp3'): 275 | audio = MP3(audio_file) 276 | scheme ='ID3' 277 | elif audio_file.lower().endswith('.opus'): 278 | audio = OggOpus(audio_file) 279 | scheme = 'Vorbis' 280 | else: 281 | raise TypeError(u'unsupported audio file format {}.'.format(audio_file)) 282 | meta = {} 283 | if scheme == 'ID3': 284 | for k in TAG_MAP[scheme]: 285 | if TAG_MAP[scheme][k] in audio.keys(): 286 | if k == 'date': 287 | meta[k] = audio[TAG_MAP[scheme][k]][0].get_text() 288 | elif k == 'discnumber': 289 | try: 290 | meta[k], meta['totaldiscs'] = map(int, audio[TAG_MAP[scheme][k]][0].split('/')) 291 | except ValueError: 292 | meta[k] = int(audio[TAG_MAP[scheme][k]][0]) 293 | meta['totaldiscs'] = 0 294 | elif k == 'tracknumber': 295 | try: 296 | meta[k], meta['totaltracks'] = map(int, audio[TAG_MAP[scheme][k]][0].split('/')) 297 | except ValueError: 298 | meta[k] = int(audio[TAG_MAP[scheme][k]][0]) 299 | meta['totaltracks'] = 0 300 | elif k == 'year': 301 | meta[k] = str(ID3TimeStamp(audio[TAG_MAP[scheme][k]][0]).year) 302 | if 'date' not in meta: 303 | meta['date'] = ID3TimeStamp(audio[TAG_MAP[scheme][k]][0]).get_text() 304 | elif k == 'compilation': 305 | meta[k] = bool(int(audio[TAG_MAP[scheme][k]][0])) 306 | elif k == 'genre': 307 | meta[k] = audio[TAG_MAP[scheme][k]].genres 308 | else: 309 | meta[k] = audio[TAG_MAP[scheme][k]].text[0] 310 | if k == 'comment': 311 | meta[k] = [] 312 | for kk in audio.keys(): 313 | if kk.lower().startswith('comm'): 314 | meta[k] += audio[kk].text 315 | elif scheme == 'MP4': 316 | for k in 
TAG_MAP[scheme]: 317 | if TAG_MAP[scheme][k] in audio.keys(): 318 | if k == 'date': 319 | meta[k] = audio[TAG_MAP[scheme][k]][0] 320 | meta['year'] = str(ID3TimeStamp(meta['date']).year) 321 | elif k == 'discnumber': 322 | try: 323 | meta[k], meta['totaldiscs'] = audio[TAG_MAP[scheme][k]][0] 324 | except ValueError: 325 | meta[k] = audio[TAG_MAP[scheme][k]][0] 326 | meta['totaldiscs'] = 0 327 | elif k == 'tracknumber': 328 | try: 329 | meta[k], meta['totaltracks'] = audio[TAG_MAP[scheme][k]][0] 330 | except ValueError: 331 | meta[k] = audio[TAG_MAP[scheme][k]][0] 332 | meta['totaltracks'] = 0 333 | elif k == 'year': 334 | meta[k] = str(ID3TimeStamp(audio[TAG_MAP[scheme][k]][0]).year) 335 | if 'date' not in meta: 336 | meta['date'] = ID3TimeStamp(audio[TAG_MAP[scheme][k]][0]).get_text() 337 | elif k == 'compilation': 338 | meta[k] = bool(int(audio[TAG_MAP[scheme][k]][0])) 339 | elif TAG_MAP[scheme][k].startswith('----'): 340 | ## MP4 freeform keys start with '----' and only accept bytearray instead of str. 
341 | meta[k] = list(map(MP4FreeForm.decode, audio[TAG_MAP[scheme][k]])) 342 | else: 343 | meta[k] = audio[TAG_MAP[scheme][k]][0] 344 | elif scheme == 'Vorbis': 345 | for k in TAG_MAP[scheme]: 346 | if TAG_MAP[scheme][k] in audio.keys(): 347 | if k == 'date': 348 | meta[k] = audio[TAG_MAP[scheme][k]][0] 349 | meta['year'] = str(ID3TimeStamp(meta['date']).year) 350 | elif k == 'discnumber': 351 | try: 352 | meta[k], meta['totaldiscs'] = map(int, audio[TAG_MAP[scheme][k]][0].split('/')) 353 | except ValueError: 354 | meta[k] = int(audio[TAG_MAP[scheme][k]][0]) 355 | meta['totaldiscs'] = 0 356 | elif k == 'tracknumber': 357 | try: 358 | meta[k], meta['totaltracks'] = map(int, audio[TAG_MAP[scheme][k]][0].split('/')) 359 | except ValueError: 360 | meta[k] = int(audio[TAG_MAP[scheme][k]][0]) 361 | meta['totaltracks'] = 0 362 | elif k in ['totaldiscs', 'disctotal']: 363 | meta['totaldiscs'] = int(audio[TAG_MAP[scheme][k]][0]) 364 | elif k in ['totaltracks', 'tracktotal']: 365 | meta['totaltracks'] = int(audio[TAG_MAP[scheme][k]][0]) 366 | elif k == 'year': 367 | meta[k] = str(ID3TimeStamp(audio[TAG_MAP[scheme][k]][0]).year) 368 | if 'date' not in meta: 369 | meta['date'] = ID3TimeStamp(audio[TAG_MAP[scheme][k]][0]).get_text() 370 | elif k == 'compilation': 371 | meta[k] = False 372 | if len(audio[TAG_MAP[scheme][k]][0]) > 0: 373 | try: 374 | meta[k] = bool(int(audio[TAG_MAP[scheme][k]][0])) 375 | except ValueError: 376 | meta[k] = True 377 | else: 378 | meta[k] = audio[TAG_MAP[scheme][k]][0] 379 | return meta 380 | 381 | def save_tags(meta, audio_file): 382 | """Save metadata to specified audio file. 
383 | """ 384 | if audio_file.lower().endswith('.dsf'): 385 | audio = DSF(audio_file) 386 | scheme = 'ID3' 387 | elif audio_file.lower().endswith('.flac'): 388 | audio = FLAC(audio_file) 389 | scheme = 'Vorbis' 390 | elif audio_file.lower().endswith('.m4a'): 391 | audio = MP4(audio_file) 392 | scheme = 'MP4' 393 | elif audio_file.lower().endswith('.mp3'): 394 | audio = MP3(audio_file) 395 | scheme ='ID3' 396 | elif audio_file.lower().endswith('.opus'): 397 | audio = OggOpus(audio_file) 398 | scheme = 'Vorbis' 399 | else: 400 | raise TypeError(u'unsupported audio file format {}.'.format(audio_file)) 401 | if scheme == 'ID3': 402 | for k in meta: 403 | if k in TAG_MAP[scheme]: 404 | if k == 'date': 405 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=[ID3TimeStamp(meta[k])]) 406 | elif k == 'discnumber': 407 | if meta['totaldiscs'] > 0: 408 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=['{:d}/{:d}'.format(meta[k], meta['totaldiscs'])]) 409 | else: 410 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=['{:d}'.format(meta[k])]) 411 | elif k == 'tracknumber': 412 | if meta['totaltracks'] > 0: 413 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=['{:d}/{:d}'.format(meta[k], meta['totaltracks'])]) 414 | else: 415 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=['{:d}'.format(meta[k])]) 416 | elif k == 'compilation': 417 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=[str(int(meta[k]))]) 418 | else: 419 | audio[TAG_MAP[scheme][k]] = TextFrame(encoding=3, text=[meta[k]]) 420 | elif scheme == 'MP4': 421 | for k in meta: 422 | if k in TAG_MAP[scheme]: 423 | if k == 'discnumber': 424 | audio[TAG_MAP[scheme][k]] = [(meta[k], meta['totaldiscs'])] 425 | elif k == 'tracknumber': 426 | audio[TAG_MAP[scheme][k]] = [(meta[k], meta['totaltracks'])] 427 | elif k == 'compilation': 428 | audio[TAG_MAP[scheme][k]] = int(meta[k]) 429 | elif TAG_MAP[scheme][k].startswith('----'): 430 | audio[TAG_MAP[scheme][k]] = list(map(lambda 
x:MP4FreeForm(x.encode('utf-8')), meta[k])) 431 | else: 432 | audio[TAG_MAP[scheme][k]] = meta[k] 433 | elif scheme == 'Vorbis': 434 | for k in meta: 435 | if k in TAG_MAP[scheme]: 436 | if k in ['discnumber', 'tracknumber', 'totaldiscs', 'totaldracks', 'compilation']: 437 | audio[TAG_MAP[scheme][k]] = '{:d}'.format(int(meta[k])) 438 | else: 439 | audio[TAG_MAP[scheme][k]] = meta[k] 440 | audio.save() 441 | 442 | def copy_tags(src, dest, keys=None): 443 | """Copy tags from source audio file to destined audio file. 444 | """ 445 | meta = load_tags(src) 446 | try: 447 | tags = {k:meta[k] for k in keys} 448 | except TypeError: 449 | tags = meta 450 | save_tags(tags, dest) 451 | 452 | def genpath(s): 453 | """Generate valid path from input string. 454 | """ 455 | p = '' 456 | for x in s: 457 | if x.isalpha() or x.isdigit() or x in SAFE_PATH_CHARS: 458 | p+=x 459 | else: 460 | p+='_' 461 | return p.strip() 462 | 463 | def add_cover_art(audio_file, picture_file): 464 | if audio_file.lower().endswith('.flac'): 465 | metadata = FLAC(audio_file) 466 | coverart = Picture() 467 | coverart.type = 3 468 | if picture_file.lower().endswith('.png'): 469 | mime = 'image/png' 470 | else: 471 | mime = 'image/jpeg' 472 | coverart.desc = 'front cover' 473 | with open(picture_file, 'rb') as f: 474 | coverart.date = f.read() 475 | metadata.add_picture(coverart) 476 | elif audio_file.endswith('.m4a'): 477 | metadata = MP4(audio_file) 478 | with open(picture_file, 'rb') as f: 479 | if picture_file.lower().endswith('.png'): 480 | metadata['covr'] = [MP4Cover(f.read(), imageformat=MP4Cover.FORMAT_PNG)] 481 | else: 482 | metadata['covr'] = [MP4Cover(f.read(), imageformat=MP4Cover.FORMAT_JPEG)] 483 | elif audio_file.endswith('.mp3'): 484 | metadata = ID3(audio_file) 485 | with open(picture_file, 'rb') as f: 486 | metadata['APIC'] = APIC( 487 | encoding=3, 488 | mime='image/jpeg', 489 | type=3, 490 | desc=u'Cover', 491 | data=f.read() 492 | ) 493 | else: 494 | assert False, 'unsupported audio 
file format.' 495 | metadata.save() 496 | 497 | def get_source_file_checksum(audio_file): 498 | prog = None 499 | csum = None 500 | tags = load_tags(audio_file) 501 | if 'comment' in tags: 502 | cmts = tags['comment'] 503 | if isinstance(cmts, list): 504 | cmts = '\n'.join(cmts) 505 | for cmt in cmts.splitlines(): 506 | if cmt.lower().startswith('source checksum program:'): 507 | prog = cmt.split(':')[1].strip() 508 | elif cmt.lower().startswith('source file checksum:'): 509 | csum = cmt.split(':')[1].strip() 510 | return {'program': prog, 'checksum': csum} 511 | 512 | def set_source_file_checksum(audio_file, csum, program=DEFAULT_CHECKSUM_PROG): 513 | if audio_file.lower().endswith('.flac'): 514 | metadata = FLAC(audio_file) 515 | scheme = 'Vorbis' 516 | elif audio_file.lower().endswith('.opus'): 517 | metadata = OggOpus(audio_file) 518 | scheme = 'Vorbis' 519 | elif audio_file.lower().endswith('.dsf'): 520 | metadata = DSF(audio_file) 521 | scheme = 'ID3' 522 | elif audio_file.lower().endswith('.mp3'): 523 | metadata = MP3(audio_file) 524 | scheme = 'ID3' 525 | elif audio_file.lower().endswith('.m4a'): 526 | metadata = MP4(audio_file) 527 | scheme = 'MP4' 528 | else: 529 | raise TypeError(u'unsupported audio format {}.'.format(audio_file)) 530 | if scheme=='ID3': 531 | if TAG_MAP[scheme]['comment'] in metadata.tags.keys(): 532 | metadata.tags[TAG_MAP[scheme]['comment']] = COMM(encoding=3, text=['\n'.join([ 533 | metadata.tags[TAG_MAP[scheme]['comment']][0], 534 | u'Source Checksum Program: {}'.format(program), 535 | u'Source File Checksum: {}'.format(csum)])]) 536 | else: 537 | metadata.tags[TAG_MAP[scheme]['comment']] = COMM(encoding=3, text=['\n'.join([ 538 | u'Source Checksum Program: {}'.format(program), 539 | u'Source File Checksum: {}'.format(csum)])]) 540 | else: 541 | if TAG_MAP[scheme]['comment'] in metadata.tags.keys(): 542 | if isinstance(metadata.tags[TAG_MAP[scheme]['comment']], str): 543 | cmt = '\n'.join([ 544 | 
metadata.tags[TAG_MAP[scheme]['comment']], 545 | u'Source Checksum Program: {}'.format(program), 546 | u'Source File Checksum: {}'.format(csum) 547 | ]) 548 | elif isinstance(metadata.tags[TAG_MAP[scheme]['comment']], list): 549 | cmt = '\n'.join([ 550 | '\n'.join(metadata.tags[TAG_MAP[scheme]['comment']]), 551 | u'Source Checksum Program: {}'.format(program), 552 | u'Source File Checksum: {}'.format(csum) 553 | ]) 554 | else: 555 | raise TypeError(u'target tag is neither str nor list.') 556 | metadata.tags[TAG_MAP[scheme]['comment']] = cmt 557 | else: 558 | metadata.tags[TAG_MAP[scheme]['comment']] = '\n'.join([ 559 | u'Source Checksum Program: {}'.format(program), 560 | u'Source File Checksum: {}'.format(csum)]) 561 | metadata.save() 562 | 563 | def find_tracks(srcdir): 564 | """Find all SONY Music tracks (*.flac and *.dsf). 565 | """ 566 | result = run([ 567 | 'find', path.abspath(srcdir), '-type', 'f', 568 | '-iname', '*.flac', '-or', '-iname', '*.dsf' 569 | ], check=True, stdout=PIPE, stderr=DEVNULL) 570 | tracks = [] 571 | for p in result.stdout.decode().splitlines(): 572 | if path.isfile(path.normpath(path.abspath(p))): 573 | tracks.append(path.normpath(path.abspath(p))) 574 | return tracks 575 | 576 | def gen_opus_tagopts(tags): 577 | """Generate opusenc metadata options. 578 | """ 579 | opts = [] 580 | for k in tags: 581 | if k in ['title', 'artist', 'album', 'tracknumber', 'date', 'genre']: 582 | opts += ['--{}'.format(k), '{}'.format(tags[k])] 583 | elif k == 'comment': 584 | opts += ['--comment', '{}={}'.format('comment', tags[k])] 585 | elif k in TAG_MAP['Vorbis']: 586 | opts += ['--comment', '{}={}'.format(k.upper(), tags[k])] 587 | return opts 588 | 589 | def gen_flac_tagopts(tags): 590 | """Generate FLAC Tagging options. 
591 | """ 592 | opts = [] 593 | for k in tags: 594 | if k in TAG_MAP['Vorbis']: 595 | if isinstance(tags[k], list): 596 | for opt in [['-T', '{}={}'.format(TAG_MAP['Vorbis'][k].upper(), v)] for v in tags[k]]: 597 | opts += opt 598 | else: 599 | opts += ['-T', '{}={}'.format(TAG_MAP['Vorbis'][k].upper(), tags[k])] 600 | return opts 601 | 602 | class AudioTrack(object): 603 | def __init__(self, filepath, checksum=True): 604 | ## examine path 605 | if not path.isfile(filepath): 606 | raise FileNotFoundError(u'Audio track file does not exist.'.format(filepath)) 607 | self.source = path.normpath(path.abspath(filepath)) 608 | self.file = { 609 | 'size' : path.getsize(self.source), 610 | 'ctime' : path.getctime(self.source) 611 | } 612 | extname = path.splitext(self.source)[1] 613 | if extname.lower() in ['.dsf']: 614 | self.format = 'DSD' 615 | elif extname.lower() in ['.flac']: 616 | self.format = 'FLAC' 617 | else: 618 | raise TypeError(u'Audio format {} is not supported.'.format(extname)) 619 | self.UpdateMetadata() 620 | if checksum: 621 | self.UpdateFileChecksum() 622 | self.id = hashlib.sha224('{}{}'.format(self.GenPath(), extname).encode('utf-8')).hexdigest() 623 | self.parent_id = hashlib.sha224(self.GenParentPath().encode('utf-8')).hexdigest() 624 | 625 | def GenFilename(self): 626 | if not hasattr(self, 'metadata'): 627 | self.UpdateMetadata() 628 | try: 629 | return '{:d}.{:02d} - {}'.format( 630 | self.metadata['discnumber'], 631 | self.metadata['tracknumber'], 632 | genpath(self.metadata['title']) 633 | ) 634 | except KeyError: 635 | return '{:02d} - {}'.format( 636 | self.metadata['tracknumber'], 637 | genpath(self.metadata['title']) 638 | ) 639 | 640 | def GenParentPath(self): 641 | if not hasattr(self, 'metadata'): 642 | self.UpdateMetadata() 643 | return path.join( 644 | genpath(self.metadata['albumartist']), 645 | genpath(self.metadata['album']) 646 | ) 647 | 648 | def GenPath(self): 649 | return path.join(self.GenParentPath(), self.GenFilename()) 650 | 
651 | def UpdateFileChecksum(self, program=DEFAULT_CHECKSUM_PROG): 652 | self.file_checksum = { 653 | 'program': program, 654 | 'checksum': run([ 655 | program, '-b', self.source 656 | ], check=True, stdout=PIPE, stderr=DEVNULL).stdout.decode().split()[0] 657 | } 658 | 659 | def UpdateMetadata(self): 660 | if self.format == 'DSD': 661 | scheme = 'ID3' 662 | metadata = DSF(self.source) 663 | elif self.format == 'FLAC': 664 | scheme = 'Vorbis' 665 | metadata = FLAC(self.source) 666 | else: 667 | assert False, 'unsupported format {}.'.format(self.formmat) 668 | self.metadata = load_tags(self.source) 669 | if 'albumartist' not in self.metadata: 670 | try: 671 | self.metadata['albumartist'] = self.metadata['artist'] 672 | except KeyError: 673 | self.metadata['artist'] = self.metadata['performer'] 674 | self.metadata['albumartist'] = self.metadata['performer'] 675 | self.metadata['info'] = { 676 | 'sample_rate' : metadata.info.sample_rate, 677 | 'bits_per_sample' : metadata.info.bits_per_sample, 678 | 'channels' : metadata.info.channels, 679 | 'bitrate' : metadata.info.bitrate, 680 | 'length' : metadata.info.length 681 | } 682 | return self.metadata 683 | 684 | def Export(self, filepath, preset, exists, bitrate): 685 | """Export this audio track with specified preset. 
        """
        ## NOTE(review): recovered from a flattened dump; indentation has
        ## been reconstructed.  In the FLAC branch the decode/resample/
        ## re-encode pipeline is taken to sit inside the sample-rate test
        ## (matching the '## resample is required.' comment), with the
        ## plain file copy in the else branch -- confirm against the
        ## original file.
        if not hasattr(self, 'file_checksum'):
            self.UpdateFileChecksum()
        if path.isfile(filepath):
            if exists.lower()[0] == 's':
                ## skip
                return filepath
            elif exists.lower()[0] == 'u':
                ## update
                ## unchanged source (matching recorded checksum): skip.
                if self.file_checksum == get_source_file_checksum(filepath):
                    return filepath
        coverart_path = path.join(path.split(filepath)[0], 'cover.{}'.format(PRESETS[preset]['art_format']))
        if preset.lower() in ['dxd', 'ldac', 'cd']:
            if self.format == 'DSD':
                ## dsf ------> aiff ------------> flac/opus
                ##     ffmpeg       flac/opusenc
                ## pick the highest 44.1k-family rate allowed by the preset;
                ## +6dB compensates the level convention of DSD sources.
                if self.metadata['info']['sample_rate'] > int(PRESETS[preset]['max_sample_rate']/48000+0.5)*44100*16:
                    sample_rate=int(PRESETS[preset]['max_sample_rate']/48000+0.5)*44100
                else:
                    sample_rate=int(self.metadata['info']['sample_rate']/44100/16+0.5)*44100
                ffmpeg = Popen([
                    'ffmpeg', '-y', '-i', self.source,
                    '-af', 'aresample=resampler=soxr:precision=28:dither_method=triangular:osr={:d},volume=+6dB'.format(sample_rate),
                    '-vn', '-map_metadata', '-1',
                    '-c:a', 'pcm_s24be',
                    '-f', 'aiff', '-'
                ], stdout=PIPE, stderr=DEVNULL)
                flac_enc = Popen([
                    'flac', '-', '-f',
                    '--picture', '3|image/png|Cover||{}'.format(coverart_path),
                    '--ignore-chunk-sizes', '--force-aiff-format',
                    *gen_flac_tagopts(self.metadata),
                    '-o', filepath
                ], stdin=ffmpeg.stdout, stderr=DEVNULL)
                flac_enc.communicate()
            else:
                ## q: oversampling factor relative to 44.1k family;
                ## b: base rate of the family (44100 or 48000).
                q = int(self.metadata['info']['sample_rate']/44100+0.5)
                b = self.metadata['info']['sample_rate'] // q
                if q > PRESETS[preset]['max_sample_rate']//48000:
                    sample_rate = PRESETS[preset]['max_sample_rate']//48000*b
                    ## resample is required.
                    flac_dec = Popen([
                        'flac', self.source, '-d', '-c'
                    ], stdout=PIPE, stderr=DEVNULL)
                    ffmpeg = Popen([
                        'ffmpeg', '-i', '-',
                        '-af', 'aresample=resampler=soxr:precision=28:dither_method=triangular:osr={:d}'.format(sample_rate),
                        '-vn', '-map_metadata', '-1',
                        '-c:a', 'pcm_s24be',
                        '-f', 'aiff', '-'
                    ], stdin=flac_dec.stdout, stdout=PIPE, stderr=DEVNULL)
                    flac_enc = Popen([
                        'flac', '-', '-f', '--lax',
                        '--picture', '3|image/png|Cover||{}'.format(path.join(path.split(filepath)[0], 'cover.png')),
                        '--ignore-chunk-sizes', '--force-aiff-format',
                        *gen_flac_tagopts(self.metadata),
                        '-o', filepath
                    ], stdin=ffmpeg.stdout, stdout=DEVNULL, stderr=DEVNULL)
                    flac_enc.communicate()
                else:
                    ## sample rate already within the preset limit: copy
                    ## the file as-is and only replace the cover art.
                    shutil.copyfile(self.source, filepath)
                    ## substitute cover art
                    audio = FLAC(filepath)
                    audio.clear_pictures()
                    audio.save()
                    add_cover_art(filepath, path.join(path.split(filepath)[0], 'cover.png'))
        elif preset.lower() in ['itunes']:
            ## macOS-only path: afconvert with SoundCheck, via temp files.
            with TemporaryDirectory(prefix=self.id, dir=path.split(filepath)[0]) as tmpdir:
                if self.format == 'DSD':
                    src = path.join(tmpdir, 'a.aiff')
                    run([
                        'ffmpeg', '-y', '-i', self.source,
                        '-af', 'aresample=resampler=soxr:precision=32:dither_method=triangular:osr=352800,volume=+6dB',
                        '-c:a', 'pcm_s24be',
                        '-f', 'aiff', src
                    ], check=True, stdout=DEVNULL, stderr=DEVNULL)
                else:
                    src = self.source
                if not wait_file(src):
                    raise FileNotFoundError(u'{} (source) not found.'.format(src))
                caff = path.join(tmpdir, 'a.caf')
                run([
                    'afconvert', src,
                    '-d', 'LEF32@44100',
                    '-f', 'caff',
                    '--soundcheck-generate',
                    '--src-complexity', 'bats',
                    '-r', '127', caff
                ], check=True)
                if not wait_file(caff):
                    raise FileNotFoundError(u'{} (caff) not found.'.format(caff))
                run([
                    'afconvert', caff,
                    '-d', 'aac',
                    '-f', 'm4af',
                    '-u', 'pgcm', '2',
                    '--soundcheck-read',
                    '-b', '{:d}'.format(PRESETS[preset]['bitrate']),
                    '-q', '127',
                    '-s', '2', filepath
                ], check=True)
                if not wait_file(filepath):
                    raise FileNotFoundError(u'{} (m4a) not found.'.format(filepath))
                copy_tags(self.source, filepath)
                add_cover_art(filepath, path.join(
                    path.split(filepath)[0],
                    'cover.{}'.format(PRESETS[preset]['art_format'])
                ))
        elif preset.lower() in ['aac']:
            if bitrate is None:
                bitrate = str(PRESETS[preset]['bitrate'])
            ## derive an output rate in the source's rate family capped by
            ## the preset's maximum.
            qu = self.metadata['info']['sample_rate']//44100
            br = self.metadata['info']['sample_rate']//qu
            sr = br * min((PRESETS[preset]['max_sample_rate']//44100), qu)
            if self.format == 'DSD':
                gain = ',volume=+6dB'
            else:
                gain = ''
            run([
                'ffmpeg', '-y', '-i', self.source,
                '-af', 'aresample=resampler=soxr:precision=24:dither_method=triangular:osr={:d}{}'.format(sr, gain),
                '-vn', '-c:a', 'libfdk_aac', '-vbr', bitrate, filepath
            ], check=True, stdout=DEVNULL, stderr=DEVNULL)
            add_cover_art(filepath, path.join(
                path.split(filepath)[0],
                'cover.{}'.format(PRESETS[preset]['art_format'])
            ))
        elif preset.lower() in ['opus']:
            b = 48000 ## according to official opus codec RFC 6716 MDCT (modified discrete cosine transform)
                      ## layer of opus encoder always operates on 48kHz sampling rate.
            if bitrate is None:
                bitrate = str(PRESETS[preset]['bitrate'])
            if self.format == 'DSD':
                gain = ',volume=+6dB'
            else:
                gain = ''
            ffmpeg = Popen([
                'ffmpeg', '-y', '-i', self.source,
                '-af', 'aresample=resampler=soxr:precision=28:dither_method=triangular:osr={:d}{}'.format(b, gain),
                '-vn', '-map_metadata', '-1',
                '-c:a', 'pcm_s24le',
                '-f', 'wav', '-'
            ], stdout=PIPE, stderr=DEVNULL)
            opus_enc = Popen([
                'opusenc', '-',
                '--picture', '3||Cover||{}'.format(coverart_path),
                '--raw', '--raw-bits', '24', '--raw-rate', '{:d}'.format(b), '--raw-chan', '2',
                '--music', '--framesize', '60', '--comp', '10', '--vbr',
                '--bitrate', '{}k'.format(bitrate),
                *gen_opus_tagopts(self.metadata),
                filepath
            ], stdin=ffmpeg.stdout, stderr=DEVNULL)
            opus_enc.communicate()
        elif preset.lower() in ['radio']:
            if bitrate is None:
                bitrate = str(PRESETS[preset]['bitrate'])
            q = self.metadata['info']['sample_rate']//44100
            b = self.metadata['info']['sample_rate']//q
            if self.format == 'DSD':
                gain = ',volume=+6dB'
            else:
                gain = ''
            run([
                'ffmpeg', '-y', '-i', self.source,
                '-af', 'aresample=resampler=soxr:precision=24:dither_method=triangular:osr={:d}{}'.format(b, gain),
                '-vn', '-c:a', 'libmp3lame', '-b:a', '{}k'.format(bitrate), filepath
            ], check=True, stdout=DEVNULL, stderr=DEVNULL)
            add_cover_art(filepath, path.join(
                path.split(filepath)[0],
                'cover.{}'.format(PRESETS[preset]['art_format'])
            ))
        else:
            raise TypeError(u'unsupported preset {}.'.format(preset))
        if not wait_file(filepath):
            raise FileNotFoundError(u'{} not found.'.format(filepath))
        ## record the source checksum so a later 'update' export can skip
        ## unchanged tracks.
        set_source_file_checksum(
            filepath,
            self.file_checksum['checksum'],
            program=self.file_checksum['program']
        )
        return filepath

    def ExtractCoverArt(self, filepath):
        try:
            ## preferred path: the cover art is embedded in the track.
            run(['ffmpeg', '-y', '-i', self.source,
                '-an', '-c:v', 'png', filepath
            ], check=True, stdout=DEVNULL, stderr=DEVNULL)
        except CalledProcessError:
            ## fall back to a picture file named after the album directory,
            ## e.g. <album dir>/<album dir name>.jpg.
            parentpath = path.dirname(self.source)
            parentname = path.basename(parentpath)
            coverartpath = path.join(parentpath, parentname)
            success = False
            for fmt in ['jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG']:
                p = coverartpath + '.' + fmt
                if path.isfile(p):
                    run(['convert', p, filepath], check=True, stdout=DEVNULL, stderr=DEVNULL)
                    success = True
                    break
            assert success
        return filepath

    def Print(self):
        """Print a human-readable summary of this track."""
        print(' {:<20}: {:<80}'.format('Source', self.source))
        print(' {:<20}: {:<80}'.format('Format', self.format))
        print(' {:<20}: {:<80}'.format('Track No.', self.metadata['tracknumber']))
        print(' {:<20}: {:<80}'.format('Title', self.metadata['title']))
        print(' {:<20}: {:<80}'.format('Album', self.metadata['album']))
        print(' {:<20}: {:<80}'.format('Artist', self.metadata['albumartist']))
        print(' {:<20}: {:<80}'.format('Genre', self.metadata['genre']))
        print(' {:<20}: {:.1f} kHz'.format('Sample rate', float(self.metadata['info']['sample_rate'])/1000.0))
        print(' {:<20}: {:<80}'.format('Bits per sample', self.metadata['info']['bits_per_sample']))
        print(' {:<20}: {:<80}'.format('Channels', self.metadata['info']['channels']))
        print(' {:<20}: {:.1f} kbits/s'.format('Bitrate', float(self.metadata['info']['bitrate'])/1000.0))
        return

class Album(list):
    """An album: a list of AudioTrack objects plus title/artist metadata."""
    def __init__(self, title=None, artist=None):
        self.title = title
        self.artist = artist
        ## content-addressed id derived from 'artist/title'.
        self.id = hashlib.sha224(self.GenPath().encode('utf-8')).hexdigest()

    def GenPath(self):
        """Library-relative directory for this album."""
        return path.join(genpath(self.artist), genpath(self.title))

    def ExtractCoverArt(self, prefix):
        """Extract cover art of the first track into prefix/<id>.png."""
        self.cover_art_path = path.join(prefix, '{}.png'.format(self.id))
        if not wait_file(self[0].ExtractCoverArt(self.cover_art_path)):
            raise FileNotFoundError(u'cover art picture file not found.')
        return self.GetCoverArtInfo()

    def GetCoverArtInfo(self):
        """Collect cover art properties via ImageMagick's identify.

        NOTE(review): parsing relies on splitting identify's output at
        '.png' -- fragile if the path contains '.png' elsewhere; confirm.
        """
        result = run([
            'identify', self.cover_art_path
        ], check=True, stdout=PIPE).stdout.decode()
        info = dict(zip([
            'format',
            'geometry',
            'page_geometry',
            'depth',
            'colorspace',
            'filesize',
            'user_time',
            'elapsed_time'
        ], result.split('.png')[1].split()))
        info['width'] = int(info['geometry'].split('x')[0])
        info['height'] = int(info['geometry'].split('x')[1])
        self.cover_art_info = info
        return info

def __import_worker__(q_in, q_out, checksum=True):
    ## worker process: file path in, AudioTrack object out; None terminates.
    pack_in = q_in.get()
    while pack_in is not None:
        q_out.put(AudioTrack(pack_in, checksum))
        pack_in = q_in.get()

def __export_worker__(q_in, q_out):
    ## worker process: (track, path, preset, exists, bitrate) in,
    ## exported file path out; None terminates.
    pack_in = q_in.get()
    while pack_in is not None:
        tobj, outfile, preset, exists, bitrate = pack_in
        q_out.put(tobj.Export(outfile, preset, exists, bitrate))
        pack_in = q_in.get()

def __extract_worker__(q_in, q_out):
    ## worker process: (album, prefix) in, album with cover-art info out;
    ## None terminates.
    pack_in = q_in.get()
    while pack_in is not None:
        aobj, prefix = pack_in
        aobj.ExtractCoverArt(prefix)
        q_out.put(aobj)
        pack_in = q_in.get()

class Library(object):
    """SONY Music Library.
    """
    def Scan(self, libroot, outdir):
        """Scan SONY Music Library.
        """
        self.source = path.normpath(path.abspath(libroot))
        if path.exists(outdir):
            assert False, 'output directory already exists.'
        os.makedirs(outdir)
        ##
        ## initialize containers.
968 | tracks = find_tracks(self.source) 969 | self.tracks = {} 970 | self.albums = {} 971 | self.arts_path = path.join(outdir, 'arts') 972 | self.checksum_path = path.join(outdir, '{}.txt'.format(DEFAULT_CHECKSUM_PROG)) 973 | self.ImportTracks(tracks, checksum=False) 974 | self.UpdateAlbums() 975 | ## self.ExtractCoverArts() 976 | print(u'{:d} audio tracks of {:d} album(s) loaded.'.format(len(self.tracks), len(self.albums))) 977 | return 978 | 979 | def Build(self, libroot, outdir): 980 | """Build SONY Music Library from source directory. 981 | """ 982 | self.source = path.normpath(path.abspath(libroot)) 983 | if path.exists(outdir): 984 | assert False, 'output directory already exists.' 985 | os.makedirs(outdir) 986 | ## 987 | ## initialize containers. 988 | tracks = find_tracks(self.source) 989 | self.tracks = {} 990 | self.albums = {} 991 | self.arts_path = path.join(outdir, 'arts') 992 | self.checksum_path = path.join(outdir, '{}.txt'.format(DEFAULT_CHECKSUM_PROG)) 993 | self.ImportTracks(tracks) 994 | self.UpdateAlbums() 995 | self.ExtractCoverArts() 996 | print(u'{:d} audio tracks of {:d} album(s) loaded.'.format(len(self.tracks), len(self.albums))) 997 | return 998 | 999 | def ImportTracks(self, tracks, checksum=True): 1000 | """Import SONY Music tracks. 
        """
        tic = time()
        new_tracks = []
        q_file = Queue()
        q_obj = Queue()
        workers = []
        nworkers = max(2, cpu_count())
        ntrks = len(tracks)
        ## fan out: worker processes turn file paths into AudioTrack objects.
        for i in range(nworkers):
            proc = Process(target=__import_worker__, args=(q_file, q_obj, checksum))
            proc.start()
            workers.append(proc)
        sys.stdout.write(u'Importing audio tracks......')
        sys.stdout.flush()
        for t in tracks:
            q_file.put(t)
        i = 0
        ## collect exactly one result per submitted track, with progress.
        while i < ntrks:
            tobj = q_obj.get()
            if tobj.id in self.tracks:
                print('Duplicate track found: {}'.format(tobj.source))
            else:
                self.tracks[tobj.id] = tobj
            i += 1
            sys.stdout.write(u'\rImporting audio tracks......{:d}/{:d} ({:5.1f}%)'.format(i, ntrks, 100.0*i/ntrks))
            sys.stdout.flush()
        ## one None sentinel per worker terminates them.
        for i in range(nworkers):
            q_file.put(None)
        for proc in workers:
            proc.join()
        ## restore terminal state clobbered by child processes.
        run(['stty', 'sane'], stdout=DEVNULL, stderr=DEVNULL)
        sys.stdout.write(u'\rImporting audio tracks......Finished. ({:.2f} seconds)\n'.format(time()-tic))
        sys.stdout.flush()
        ## NOTE(review): new_tracks is never populated above, so this loop
        ## appears to be dead code -- confirm before removing.
        for t in new_tracks:
            if t.id not in self.tracks:
                self.tracks[t.id] = t

    def UpdateAlbums(self):
        ## rebuild the album index (parent_id -> Album) from self.tracks.
        self.albums = {}
        tic = time()
        sys.stdout.write(u'Updating albums......')
        sys.stdout.flush()
        for t in self.tracks.values():
            if t.parent_id not in self.albums:
                self.albums[t.parent_id] = Album(title=t.metadata['album'], artist=t.metadata['albumartist'])
            self.albums[t.parent_id].append(t)
        sys.stdout.write(u'\rUpdating albums......Finished. ({:.2f} seconds)\n'.format(time()-tic))

    def ExtractCoverArts(self):
        """Extract album cover arts.
        """
        if not path.exists(self.arts_path):
            os.makedirs(self.arts_path)
        q_alb = Queue()
        q_pic = Queue()
        workers = []
        nworkers = cpu_count()
        nalbs = len(self.albums)
        ## fan out album objects to worker processes; each returns the album
        ## with cover_art_path/cover_art_info populated.
        for i in range(nworkers):
            proc = Process(target=__extract_worker__, args=(q_alb, q_pic))
            proc.start()
            workers.append(proc)
        for a in self.albums.values():
            q_alb.put((a, self.arts_path))
        tic = time()
        sys.stdout.write('Extracting album cover arts......')
        sys.stdout.flush()
        i = 0
        while i < nalbs:
            a = q_pic.get()
            ## replace the local album object with the worker's copy.
            self.albums[a.id] = a
            i += 1
            sys.stdout.write(u'\rExtracting album cover arts......{:d}/{:d} ({:5.1f}%)'.format(i, nalbs, 100.0*i/nalbs))
            sys.stdout.flush()
        sys.stdout.write(u'\rExtracting album cover arts......Finished. ({:.2f} seconds)\n'.format(time()-tic))
        sys.stdout.flush()
        ## restore terminal state clobbered by child processes.
        run(['stty', 'sane'], stdout=DEVNULL, stderr=DEVNULL)
        for i in range(nworkers):
            q_alb.put(None)
        for proc in workers:
            proc.join()

    def SortCoverArts(self, sortkey, reverse=False):
        """Sort album cover arts.

        sortkey - key into each album's cover_art_info dict (e.g. 'width').
        """
        if reverse:
            print("Album cover arts sorted by {} (reversed):".format(sortkey))
        else:
            print("Album cover arts sorted by {}:".format(sortkey))
        alb_sorted = sorted(self.albums.keys(), key=lambda alb:self.albums[alb].cover_art_info[sortkey], reverse=reverse)
        ## column widths padded for unicode-aware alignment via uljust().
        artist_width = max([width(self.albums[alb].artist) for alb in alb_sorted])
        albnam_width = max([width(self.albums[alb].title) for alb in alb_sorted])
        for alb in alb_sorted:
            print(u'{} {} {}'.format(
                uljust(self.albums[alb].artist, artist_width+4),
                uljust(self.albums[alb].title, albnam_width+4),
                self.albums[alb].cover_art_info[sortkey]
            ))

    def Update(self):
        """Update pre-built SONY Music Library.
        """
        ## drop tracks whose source files have disappeared.
        tracks = self.tracks
        self.tracks = {}
        for tid in tracks:
            if path.isfile(tracks[tid].source):
                self.tracks[tid] = tracks[tid]
        ## re-import files that are new, or whose ctime/size changed.
        new_tracks = []
        src_tracks = {t.source:t.file for t in self.tracks.values()}
        for t in find_tracks(self.source):
            if t in src_tracks:
                if path.getctime(t) > src_tracks[t]['ctime'] or path.getsize(t) != src_tracks[t]['size']:
                    new_tracks.append(t)
            else:
                new_tracks.append(t)
        self.ImportTracks(new_tracks)
        self.UpdateAlbums()
        self.ExtractCoverArts()
        print(u'{:d} audio tracks of {:d} album(s) loaded.'.format(len(self.tracks), len(self.albums)))

    def Export(self, match=None, prefix=None, preset='dxd', exists='skip', verbose=False, bitrate=None):
        """Export matched tracks.

        match - 'artist/album/track' substring filter, or None for all.
        """
        mpi_rank = comm.Get_rank()
        mpi_size = comm.Get_size()
        if match is None:
            artist_match = ''
            album_match = ''
            track_match = ''
        else:
            artist_match, album_match, track_match = match.split('/')
        if mpi_size == 1:
            ## non-mpi parallelism, but multiprocessing
            ## prepare albums
            nalbs = len(self.albums)
            i = 0
            tic = time()
            sys.stdout.write(u'Preparing album directories......')
            sys.stdout.flush()
            for a in self.albums.values():
                i+=1
                if artist_match in a.artist and album_match in a.title:
                    if not path.exists(path.join(prefix, a.GenPath())):
                        os.makedirs(path.join(prefix, a.GenPath()))
                    ## convert the extracted cover into the preset's format,
                    ## optionally bounded by the preset's art resolution.
                    if PRESETS[preset]['art_resolution'] is None:
                        args = [
                            'convert',
                            path.join(self.arts_path, '{}.png'.format(a.id)),
                            path.join(prefix, a.GenPath(), 'cover.{}'.format(PRESETS[preset]['art_format']))
                        ]
                    else:
                        args = [
                            'convert',
                            path.join(self.arts_path, '{}.png'.format(a.id)),
                            '-resize', '{:d}x{:d}>'.format(PRESETS[preset]['art_resolution'],
PRESETS[preset]['art_resolution']), 1156 | path.join(prefix, a.GenPath(), 'cover.{}'.format(PRESETS[preset]['art_format'])) 1157 | ] 1158 | run(args, check=True, stdout=DEVNULL, stderr=DEVNULL) 1159 | sys.stdout.write(u'\rPreparing album directories......{:d}/{:d} ({:5.1f}%)'.format(i, nalbs, 100.0*i/nalbs)) 1160 | sys.stdout.flush() 1161 | sys.stdout.write(u'\rPreparing album directories......Finished. ({:.2f} seconds)\n'.format(time()-tic)) 1162 | sys.stdout.flush() 1163 | ## export 1164 | to_path = [] 1165 | tracks = [] 1166 | for t in self.tracks.values(): 1167 | if artist_match in t.metadata['albumartist'] and album_match in t.metadata['album'] and track_match in t.GenFilename(): 1168 | tracks.append(t) 1169 | to_path.append(u'{}.{}'.format(path.join(prefix, t.GenPath()), PRESETS[preset]['extension'])) 1170 | q_obj = Queue() 1171 | q_out = Queue() 1172 | ntrks = len(tracks) 1173 | nworkers = max(2, cpu_count()) 1174 | workers = [] 1175 | for i in range(nworkers): 1176 | proc = Process(target=__export_worker__, args=(q_obj, q_out)) 1177 | proc.start() 1178 | workers.append(proc) 1179 | for i in range(ntrks): 1180 | q_obj.put((tracks[i], to_path[i], preset, exists, bitrate)) 1181 | tic = time() 1182 | i = 0 1183 | sys.stdout.write(u'Exporting audio tracks......') 1184 | sys.stdout.flush() 1185 | while i < ntrks: 1186 | outfile = q_out.get() 1187 | i += 1 1188 | sys.stdout.write( 1189 | u'\rExporting audio tracks......{:d}/{:d} ({:5.1f}%)'.format( 1190 | i, ntrks, 100.0*i/ntrks)) 1191 | sys.stdout.flush() 1192 | for i in range(nworkers): 1193 | q_obj.put(None) 1194 | for proc in workers: 1195 | proc.join() 1196 | sys.stdout.write(u'\r\rExporting audio tracks......Finished. 
({:.2f} seconds)\n'.format(time() - tic)) 1197 | sys.stdout.flush() 1198 | run(['stty', 'sane'], stdout=DEVNULL, stderr=DEVNULL) 1199 | else: 1200 | ## mpi parallelism 1201 | tic = time() 1202 | if mpi_rank == 0: 1203 | nalbs = len(self.albums) 1204 | i = 0 1205 | tic = time() 1206 | sys.stdout.write(u'Preparing album directories......') 1207 | sys.stdout.flush() 1208 | for a in self.albums.values(): 1209 | i+=1 1210 | if artist_match in a.artist and album_match in a.title: 1211 | if not path.exists(path.join(prefix, a.GenPath())): 1212 | os.makedirs(path.join(prefix, a.GenPath())) 1213 | if PRESETS[preset]['art_resolution'] is None: 1214 | args = [ 1215 | 'convert', 1216 | path.join(self.arts_path, '{}.png'.format(a.id)), 1217 | path.join(prefix, a.GenPath(), 'cover.{}'.format(PRESETS[preset]['art_format'])) 1218 | ] 1219 | else: 1220 | args = [ 1221 | 'convert', 1222 | path.join(self.arts_path, '{}.png'.format(a.id)), 1223 | '-resize', '{:d}x{:d}>'.format(PRESETS[preset]['art_resolution'], PRESETS[preset]['art_resolution']), 1224 | path.join(prefix, a.GenPath(), 'cover.{}'.format(PRESETS[preset]['art_format'])) 1225 | ] 1226 | run(args, check=True, stdout=DEVNULL, stderr=DEVNULL) 1227 | sys.stdout.write(u'\rPreparing album directories......{:d}/{:d} ({:5.1f}%)'.format(i, nalbs, 100.0*i/nalbs)) 1228 | sys.stdout.flush() 1229 | sys.stdout.write(u'\rPreparing album directories......Finished. 
({:.2f} seconds)\n'.format(time()-tic)) 1230 | sys.stdout.flush() 1231 | sleep(1.0) 1232 | to_path = [] 1233 | tracks = [] 1234 | for t in self.tracks.values(): 1235 | if artist_match in t.metadata['albumartist'] and album_match in t.metadata['album'] and track_match in t.GenFilename(): 1236 | tracks.append(t) 1237 | to_path.append(u'{}.{}'.format(path.join(prefix, t.GenPath()), PRESETS[preset]['extension'])) 1238 | else: 1239 | tracks = None 1240 | to_path = None 1241 | tracks = comm.bcast( tracks, root=0) 1242 | to_path = comm.bcast(to_path, root=0) 1243 | ntrks = len(tracks) 1244 | node = hostname() 1245 | node = comm.gather(node, root=0) 1246 | if mpi_rank == 0: 1247 | for i in range(mpi_size): 1248 | sleep(.1) 1249 | print(u'Process {}/{} is ready on [{}].'.format(i+1, mpi_size, node[i])) 1250 | print(u'All processes are ready.') 1251 | itrks = list(map(len, [tracks[i::(mpi_size-1)] for i in range(mpi_size-1)])) 1252 | t = np.zeros(mpi_size-1, dtype='int64') 1253 | while np.sum(t) $LATESTDUMP 28 | if [ -f $DEST/$REPOS-latest.dump.$COMPFORMAT ]; then 29 | echo Compressing previous dump... 30 | LASTDUMP=$(readlink $DEST/$REPOS-latest.dump.$COMPFORMAT) 31 | xdelta3 -q -s $LATESTDUMP $LASTDUMP $LASTDUMP.vcdiff 32 | rm $LASTDUMP 33 | fi 34 | ln -sf $LATESTDUMP $DEST/$REPOS-latest.dump.$COMPFORMAT 35 | echo Archiving svn repository database $REPOS... 36 | LATESTDB=$DEST/$REPOS-$DATE.db.tar.$COMPFORMAT 37 | tar cf - $REPOS | $COMPRESSOR -c > $LATESTDB 38 | if [ -f $DEST/$REPOS-latest.db.tar.$COMPFORMAT ]; then 39 | echo Compressing previous database... 40 | LASTDB=$(readlink $DEST/$REPOS-latest.db.tar.$COMPFORMAT) 41 | xdelta3 -q -s $LATESTDB $LASTDB $LASTDB.vcdiff 42 | rm $LASTDB 43 | fi 44 | ln -sf $LATESTDB $DEST/$REPOS-latest.db.tar.$COMPFORMAT 45 | else 46 | echo $REPOS is not a valid repository. 
47 | fi 48 | fi 49 | done 50 | -------------------------------------------------------------------------------- /battery_life.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | import sys 4 | import os 5 | from os import path 6 | TOP_DIR = '/sys/bus/acpi/drivers/battery' 7 | if path.isdir(TOP_DIR): 8 | full = {} 9 | now = {} 10 | for d in os.listdir(TOP_DIR): 11 | if path.isdir(path.join(TOP_DIR,d)): 12 | for b in os.listdir(path.join(TOP_DIR,d,'power_supply')): 13 | with open(path.join(TOP_DIR,d,'power_supply',b,'energy_full')) as f: 14 | full[b] = eval(f.readline()) 15 | with open(path.join(TOP_DIR,d,'power_supply',b,'energy_now')) as f: 16 | now[b] = eval(f.readline()) 17 | print '%s: %d/%d = %0.2f%%'%(b,now[b],full[b],now[b]*100.0/full[b]) 18 | else: 19 | print "System platform is not supported." 20 | sys.exit() 21 | 22 | full_t = 0.0 23 | now_t = 0.0 24 | for b in full: 25 | full_t += full[b] 26 | for b in now: 27 | now_t += now[b] 28 | print 'Total: %d/%d = %0.2f%%'%(now_t,full_t,now_t*100.0/full_t) 29 | -------------------------------------------------------------------------------- /cluster/sugvc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #coding=utf-8 3 | """Simple Unified Grouping Version Control. 

Syntax:
    sugvc up[date]|st[atus] path

Copyright:
    pigsboss@github
"""
import sys
import os
from os import path
import subprocess
## remember the starting directory so it can be restored at exit.
cwd = os.getcwd()
action = sys.argv[1]
grpath = path.normpath(path.abspath(path.realpath(sys.argv[2])))
assert path.isdir(grpath), '{} is not accessible.'.format(grpath)
## dispatch per sub-directory on the working-copy type found in it.
for sub in os.listdir(grpath):
    subpath = path.normpath(path.abspath(path.realpath(path.join(grpath, sub))))
    print(subpath)
    if path.isdir(path.join(subpath, '.svn')):
        if action.startswith('up'):
            print(r'Update SVN local working copy: {}'.format(subpath))
            subprocess.run(['svn', 'up', subpath], check=True)
        elif action.startswith('st'):
            print(r'Check SVN local working copy: {}'.format(subpath))
            subprocess.run(['svn', 'st', subpath], check=True)
    elif path.isdir(path.join(subpath, '.git')):
        ## git commands operate on the current directory, hence the chdir.
        if action.startswith('up'):
            print(r'Update Git local branch: {}'.format(subpath))
            os.chdir(subpath)
            subprocess.run(['git', 'pull'], check=True)
        elif action.startswith('st'):
            print(r'Check Git local branch: {}'.format(subpath))
            os.chdir(subpath)
            subprocess.run(['git', 'status', '-s'], check=True)
## restore the original working directory.
os.chdir(cwd)
--------------------------------------------------------------------------------
/dicmp.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""dicmp
Dictionary Compare.
#!/usr/bin/env python
"""dicmp
Dictionary Compare.

Compare two checksum listings (lines of '<checksum> <path>', as produced
by md5sum and friends) and report every path present in both listings
whose checksums disagree.
"""
import sys
from os import path


def expand_path(s):
    """Split path *s* into its components, e.g. 'a/b/c' -> ['a', 'b', 'c'].

    The loop stops as soon as path.split() stops making progress, so
    absolute paths no longer spin forever on the root component (the
    original `while len(a) > 0` never terminated for '/a/b').
    """
    parts = []
    head, tail = path.split(s)
    parts.insert(0, tail)
    while head and head != s:
        s = head
        head, tail = path.split(s)
        parts.insert(0, tail)
    return parts


def _load_checksums(filename):
    """Read a '<checksum> <path>' listing into {normalized path: checksum}.

    Paths are normalized through expand_path/path.join so listings made on
    different systems compare equal.  NOTE: line.split() assumes paths
    contain no whitespace, as in the original.
    """
    d = {}
    with open(filename, 'r') as f:
        for line in f:
            val, p = line.split()
            key = path.join(*expand_path(p))
            d[key] = val.lower()
    return d


def main():
    # The original `from six import iteritems()` was a syntax error and
    # dict.has_key() is Python-2-only; plain dict iteration and the `in`
    # operator work on both Python 2 and 3.
    da = _load_checksums(sys.argv[1])
    db = _load_checksums(sys.argv[2])
    for key, val in da.items():
        if key in db and val != db[key]:
            print('{} in {}: {}'.format(key, sys.argv[1], da[key]))
            print('{} in {}: {}'.format(key, sys.argv[2], db[key]))


if __name__ == '__main__':
    main()
#!/usr/bin/perl
#
# psync -- synchronise configured host directories with an external storage
# device.  The device carries one INI config per host (".<hostname>.ini" at
# the device root); each INI section is one sync task with host_dir and
# device_dir keys.
#
# Usage:
#   psync [-v] -f|-t|-w PATH
# Options:
#   -v verbose
#   -t sync to
#   -f sync from
#   -w sync with
#
# Copyright pigsboss@github
#

use strict;
use warnings;
use Config::IniFiles;
use Sys::Hostname;
use File::Spec;
use Getopt::Std;
use File::Find;

our($opt_v, $opt_t, $opt_f, $opt_w);
our $deviceroot;
my $syncmode;
getopts('vt:f:w:');
# NOTE(review): if several of -t/-f/-w are given, the last test below wins
# silently -- presumably the options are meant to be mutually exclusive.
if($opt_t){
    $syncmode = 'to';
    $deviceroot = $opt_t;
}
if($opt_f){
    $syncmode = 'from';
    $deviceroot = $opt_f;
}
if($opt_w){
    $syncmode = 'with';
    $deviceroot = $opt_w;
}

# Locate and load the per-host configuration from the device root.
my $hostname = hostname;
my $cfgfile = File::Spec->catfile($deviceroot, ".$hostname.ini");
my $cfg;
if(-d $deviceroot){
    if(-e -r $cfgfile){
        $cfg = Config::IniFiles->new(-file=>$cfgfile);
    }else{
        print("Configuration file for the current host ($cfgfile) does not exits.\n");
        exit 1;
    }
}else{
    print("User specified device root $deviceroot does not exist.\n");
    exit 1;
}

# Run every task (INI section) in the requested direction.
my @tasks = $cfg->Sections();
our($host_dir, $device_dir);
for(@tasks){
    $host_dir = $cfg->val($_, "host_dir");
    $device_dir = File::Spec->catdir(($deviceroot, $cfg->val($_, "device_dir")));
    print("Task [$_] synchronising $host_dir $syncmode $device_dir...\n");
    if($syncmode eq 'from'){
        sync_from_device();
    }elsif($syncmode eq 'to'){
        sync_to_device();
    }elsif($syncmode eq 'with'){
        # 'with' = both directions: push host changes first, then pull
        # device-side updates back.
        sync_to_device();
        sync_from_device();
    }else{
        print("User specified sync mode is not supported.\n");
        exit 2;
    }
    print("Task [$_] is finished.\n");
    print("----------------------------------------------\n");
}

# Pull direction: walk the host directory tree and refresh files that also
# exist on the device (see update_exists).
sub sync_from_device{
    chdir $host_dir;
    find(\&update_exists, '.');
}

# Push direction: archive-copy (-a) the host directory into the device's
# parent directory, overwriting only older files (-u).
sub sync_to_device{
    print(" Copying $host_dir to $device_dir...") if($opt_v);
    system('cp', '-a', '-u', $host_dir, File::Spec->catdir($device_dir, '../'));
    print("\r Copying $host_dir to $device_dir... OK.\n") if($opt_v);
}

# File::Find callback (cwd is inside $host_dir, $File::Find::name is the
# relative path of the current file): copy the device's counterpart over the
# host copy when the device one is newer (-u).
sub update_exists{
    my $curpath = $File::Find::name;
    my $devicepath;
    my $hostpath;
    # NOTE(review): rel2abs($host_dir, $curpath) resolves $host_dir *against*
    # $curpath; the intent looks like the opposite argument order
    # (resolve $curpath against $host_dir) -- confirm before touching.
    if(-e File::Spec->rel2abs($host_dir, $curpath)){
        $devicepath = File::Spec->catfile($device_dir, $curpath);
        $hostpath = File::Spec->catfile($host_dir, $curpath);
        if(-e -f $devicepath){
            print(" Updating $hostpath...") if($opt_v);
            system('cp', '-d', '--preserve=all', '-u', $devicepath, $hostpath);
            print("\r Updating $hostpath... OK.\n") if($opt_v);
        }
    }
}
def is_valid(passwd):
    """Return True if *passwd* contains at least one ASCII digit, one
    lowercase letter and one uppercase letter.

    The original scanned each candidate character with manual loops and
    combined the flags with the bitwise `&`; `any()` plus logical `and`
    expresses the same check idiomatically.
    """
    has_digit = any('0' <= c <= '9' for c in passwd)
    has_lower = any('a' <= c <= 'z' for c in passwd)
    has_upper = any('A' <= c <= 'Z' for c in passwd)
    return has_digit and has_lower and has_upper

def randstr(length):
    """Return a random alphanumeric string of the given *length*.

    Each character is drawn uniformly from the 62-symbol alphabet
    [0-9a-zA-Z].  NOTE(review): numpy's PRNG is not cryptographically
    secure; for real password generation the stdlib `secrets` module would
    be preferable (unavailable on Python 2, which this script targets).
    """
    # Map uniform floats in [0, 1) to integer codes 0..61.
    codes = np.uint8(np.random.rand(length)*(10.0+26.0*2))
    chars = []          # renamed from `str`, which shadowed the builtin
    for c in codes:
        if c < 10:
            chars.append(chr(ord('0')+c))      # codes 0-9   -> digits
        elif c < 36:
            chars.append(chr(ord('a')+c-10))   # codes 10-35 -> lowercase
        else:
            chars.append(chr(ord('A')+c-36))   # codes 36-61 -> uppercase
    return ''.join(chars)
#!/bin/bash
# timecap.sh -- create rotating GNU-tar incremental backups of SRC in DEST.
#
# Usage: timecap.sh SRC DEST
#
# Optional per-source config SRC/.timecap may set SRCNAME and MAXDEPTH.
# Backups are named $SRCNAME-$DEPTH.tar; after MAXDEPTH increments a fresh
# level-0 backup (and a fresh snapshot file) is started.
DATE=$(date +%Y%m%d-%H%M%S)   # kept: available to the sourced .timecap config
SRC=$(readlink -f "$1")
DEST=$(readlink -f "$2")
if [ ! -d "$SRC" ]; then
    echo "Invalid source directory: $SRC"
    exit 1
fi
if [ ! -d "$DEST" ]; then
    echo "Invalid destination directory: $DEST"
    exit 1
fi
CFGFILE="$SRC/.timecap"
if [ -f "$CFGFILE" ]; then
    source "$CFGFILE"
else
    echo "TimeCap config file is missing."
    echo "Use default configuration."
    SRCNAME=$(basename "$SRC")
    MAXDEPTH=7
fi
echo "Name: $SRCNAME"
echo "Maximum depth: $MAXDEPTH"
SNAR=$(readlink -f "$DEST/$SRCNAME.snar")
if [ -f "$SNAR" ]; then
    echo "Found snapshot file $SNAR of incremental backups."
    LATEST=$(readlink -f "$DEST/$SRCNAME-latest.tar")
    # The depth is the number after the LAST '-' in the file name
    # ($SRCNAME-$DEPTH.tar).  The previous `grep -o -E '[0-9]+'` returned
    # every digit run, so a source name containing digits (e.g. photos2020)
    # yielded a multi-line, wrong LASTDEPTH.
    LASTDEPTH=$(basename "$LATEST" .tar)
    LASTDEPTH=${LASTDEPTH##*-}
    echo "Found previous backup $LATEST (depth: $LASTDEPTH)."
else
    LASTDEPTH=-1
fi
if [ $((LASTDEPTH+1)) -lt "$MAXDEPTH" ]; then
    # Room left in the depth budget: append one more incremental level.
    CURDEPTH=$((LASTDEPTH+1))
    cd "$SRC" || exit 1
    tar -c -g "$SNAR" -f "$DEST/$SRCNAME-$CURDEPTH.tar" ./
    ln -sf "$DEST/$SRCNAME-$CURDEPTH.tar" "$DEST/$SRCNAME-latest.tar"
else
    # Budget exhausted: start a fresh level-0 backup with a new snapshot,
    # replacing the old snapshot only after tar succeeds writing the new one.
    CURDEPTH=0
    cd "$SRC" || exit 1
    tar -c -g "$SNAR.new" -f "$DEST/$SRCNAME-$CURDEPTH.tar" ./
    mv "$SNAR.new" "$SNAR"
    ln -sf "$DEST/$SRCNAME-$CURDEPTH.tar" "$DEST/$SRCNAME-latest.tar"
fi
echo "Created $CURDEPTH-depth backup $(readlink -f "$DEST/$SRCNAME-latest.tar")."
#!/usr/bin/env python
#coding=utf-8
"""Verify local repository.

Syntax:
  verify-local-repo.py server-checksum [checksum-algorithm] [output]

Each line of the server checksum file is '<checksum> <path>'.  Files that
are missing locally, or whose local checksum conflicts with the server's,
are written to the output file (default: local-repo-missing.txt).  Files
may also be present locally as gzip-compressed '<path>.gz'.
"""

import sys
from subprocess import run, PIPE, DEVNULL, Popen
from os import path

server_checksum_file = sys.argv[1]
try:
    checksum_algorithm = sys.argv[2]
except IndexError:  # bare `except:` also hid KeyboardInterrupt and real bugs
    checksum_algorithm = None
try:
    output_file = sys.argv[3]
except IndexError:
    output_file = 'local-repo-missing.txt'

with open(output_file, 'w') as g:
    with open(server_checksum_file, 'r') as f:
        for line in f:
            checksum, fits = line.split()
            if path.exists(fits):
                if checksum_algorithm is not None:
                    # Checksum the plain file directly with the named tool.
                    checksum_local = run([checksum_algorithm, '-b', fits], check=True, stdout=PIPE).stdout.decode().split()[0].lower()
                    if checksum_local == checksum.lower():
                        print("{:<120}: OK".format(fits))
                    else:
                        print("{:<120}: Conflict".format(fits))
                        g.write('{} {}\n'.format(checksum, fits))
                else:
                    # No algorithm given: mere existence counts as OK.
                    print("{:<120}: OK".format(fits))
            elif path.exists(fits+'.gz'):
                if checksum_algorithm is not None:
                    # Pipe `gzip -dc` into the checksum tool so the .gz copy
                    # is verified without a temporary decompressed file.
                    gzip_proc = Popen(['gzip', '-d', '-c', '{}.gz'.format(fits)], stdout=PIPE)
                    checksum_proc = Popen([checksum_algorithm, '-b'], stdin=gzip_proc.stdout, stdout=PIPE)
                    gzip_proc.stdout.close()  # let gzip get SIGPIPE if the reader exits early
                    checksum_local = checksum_proc.communicate()[0].decode().split()[0].lower()
                    gzip_proc.wait()  # reap gzip; it was previously left unreaped
                    if checksum_local == checksum.lower():
                        print("{:<120}: OK".format(fits+'.gz'))
                    else:
                        print("{:<120}: Conflict".format(fits+'.gz'))
                        g.write('{} {}\n'.format(checksum, fits))
                else:
                    print("{:<120}: OK".format(fits+'.gz'))
            else:
                print("{:<120}: Miss".format(fits))
                g.write('{} {}\n'.format(checksum, fits))
#!/bin/bash
# vid2gif.sh -- convert a video clip to an optimized GIF via a two-pass
# ffmpeg palette workflow (palettegen, then paletteuse).
usage="Convert video clip to GIF.
Usage: $(basename $0) [options] DEST
  -i input video clip file.
  -s (optional) resize, in W:H.
  -r (optional) frame rate.
  -b (optional) input video clip begins at position.
  -t (optional) limit input video clip to duration.
"
# Fix: 'b:' and 't:' were missing from the getopts spec, so the -b/-t
# branches below were unreachable and those documented options were
# rejected as unknown.  'o:' is kept for backward compatibility even
# though no case handles it.
while getopts ":hi:o:s:r:b:t:" opt; do
    case "${opt}" in
        h)
            echo "${usage}"
            exit
            ;;
        i)
            inputfile="${OPTARG}"
            ;;
        s)
            # Split W:H into the size array.
            IFS=':' read -r -a size <<< "${OPTARG}"
            ;;
        r)
            fps="${OPTARG}"
            ;;
        b)
            ssopt=("-ss" "${OPTARG}")
            ;;
        t)
            topt=("-t" "${OPTARG}")
            ;;
        \?)
            # With a leading ':' in the spec, the offending option letter is
            # in OPTARG (opt itself is just '?').
            echo "-${OPTARG} is not recognized."
            echo "${usage}"
            exit
            ;;
    esac
done
shift $((OPTIND -1))
dest="$*"
# Pass 1: build a palette tuned to the whole clip; pass 2: render the GIF
# with that palette.
ffmpeg -i "${inputfile}" ${ssopt[@]} ${topt[@]} -vf "scale=${size[0]}:${size[1]}:flags=lanczos,palettegen=stats_mode=full" palette.png
ffmpeg -i "${inputfile}" -i palette.png ${ssopt[@]} ${topt[@]} -lavfi "fps=${fps},scale=${size[0]}:${size[1]}:flags=lanczos [x]; [x][1:v] paletteuse=dither=sierra2_4a" "${dest}"
#!/usr/bin/env python3
#coding=utf-8
"""whatsnew.py compares signatures of two directories A and B in order to find files contained in A but not in B.

Syntax:
  whatsnew.py A.sig B.sig
A.sig and B.sig are signatures of directories A and B, generated as:
  find A -type f -exec CHECKSUM {} \; > A.sig
where CHECKSUM can be md5sum, sha224sum or other compatible programs.

Copyright: pigsboss@github
"""
import sys
from os import path


def compare_dict(a, b):
    """Return the subset of *a* whose keys are absent from *b*.

    Keys are checksums, values are file paths, so the result describes
    files present in A's signature but not in B's.
    """
    return {key: a[key] for key in a if key not in b}


def make_dict(input_file):
    """Parse a signature file into a {checksum: path} dict.

    Each line is '<checksum> <path>'; maxsplit=1 keeps paths containing
    spaces intact.
    """
    resolved = path.normpath(path.abspath(path.realpath(input_file)))
    entries = {}
    with open(resolved, 'r') as fp:
        for record in fp.read().splitlines():
            checksum, name = record.split(maxsplit=1)
            entries[checksum] = name
    return entries


if __name__ == '__main__':
    sig_a, sig_b = sys.argv[1], sys.argv[2]
    only_in_a = compare_dict(make_dict(sig_a), make_dict(sig_b))
    if only_in_a:
        print("New files:")
        for v in only_in_a.values():
            print("  {}".format(v))