├── .gitignore ├── KextExtractor.command ├── KextExtractor.py ├── LICENSE ├── README.md └── Scripts ├── __init__.py ├── bdmesg.py ├── disk.py ├── diskdump ├── plist.py ├── run.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Omit our settings file 7 | settings.json 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # Environments 88 | .env 89 | .venv 90 | env/ 91 | venv/ 92 | ENV/ 93 | env.bak/ 94 | venv.bak/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | -------------------------------------------------------------------------------- /KextExtractor.command: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Get the curent directory, the script name 4 | # and the script name with "py" substituted for the extension. 5 | args=( "$@" ) 6 | dir="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)" 7 | script="${0##*/}" 8 | target="${script%.*}.py" 9 | 10 | # use_py3: 11 | # TRUE = Use if found, use py2 otherwise 12 | # FALSE = Use py2 13 | # FORCE = Use py3 14 | use_py3="TRUE" 15 | 16 | # We'll parse if the first argument passed is 17 | # --install-python and if so, we'll just install 18 | just_installing="FALSE" 19 | 20 | tempdir="" 21 | 22 | compare_to_version () { 23 | # Compares our OS version to the passed OS version, and 24 | # return a 1 if we match the passed compare type, or a 0 if we don't. 25 | # $1 = 0 (equal), 1 (greater), 2 (less), 3 (gequal), 4 (lequal) 26 | # $2 = OS version to compare ours to 27 | if [ -z "$1" ] || [ -z "$2" ]; then 28 | # Missing info - bail. 
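        # (nothing is echoed in this case, so callers that compare the result to "1",
        #  such as set_use_py3_if below, simply treat it as no match)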
29 | return 30 | fi 31 | local current_os= comp= 32 | current_os="$(sw_vers -productVersion)" 33 | comp="$(vercomp "$current_os" "$2")" 34 | # Check gequal and lequal first 35 | if [[ "$1" == "3" && ("$comp" == "1" || "$comp" == "0") ]] || [[ "$1" == "4" && ("$comp" == "2" || "$comp" == "0") ]] || [[ "$comp" == "$1" ]]; then 36 | # Matched 37 | echo "1" 38 | else 39 | # No match 40 | echo "0" 41 | fi 42 | } 43 | 44 | set_use_py3_if () { 45 | # Auto sets the "use_py3" variable based on 46 | # conditions passed 47 | # $1 = 0 (equal), 1 (greater), 2 (less), 3 (gequal), 4 (lequal) 48 | # $2 = OS version to compare 49 | # $3 = TRUE/FALSE/FORCE in case of match 50 | if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then 51 | # Missing vars - bail with no changes. 52 | return 53 | fi 54 | if [ "$(compare_to_version "$1" "$2")" == "1" ]; then 55 | use_py3="$3" 56 | fi 57 | } 58 | 59 | get_remote_py_version () { 60 | local pyurl= py_html= py_vers= py_num="3" 61 | pyurl="https://www.python.org/downloads/macos/" 62 | py_html="$(curl -L $pyurl --compressed 2>&1)" 63 | if [ -z "$use_py3" ]; then 64 | use_py3="TRUE" 65 | fi 66 | if [ "$use_py3" == "FALSE" ]; then 67 | py_num="2" 68 | fi 69 | py_vers="$(echo "$py_html" | grep -i "Latest Python $py_num Release" | awk '{print $8}' | cut -d'<' -f1)" 70 | echo "$py_vers" 71 | } 72 | 73 | download_py () { 74 | local vers="$1" url= 75 | clear 76 | echo " ### ###" 77 | echo " # Downloading Python #" 78 | echo "### ###" 79 | echo 80 | if [ -z "$vers" ]; then 81 | echo "Gathering latest version..." 82 | vers="$(get_remote_py_version)" 83 | fi 84 | if [ -z "$vers" ]; then 85 | # Didn't get it still - bail 86 | print_error 87 | fi 88 | echo "Located Version: $vers" 89 | echo 90 | echo "Building download url..." 91 | url="$(curl -L https://www.python.org/downloads/release/python-${vers//./}/ --compressed 2>&1 | grep -iE "python-$vers-macos.*.pkg\"" | awk -F'"' '{ print $2 }')" 92 | if [ -z "$url" ]; then 93 | # Couldn't get the URL - bail 94 | print_error 95 | fi 96 | echo " - $url" 97 | echo 98 | echo "Downloading..." 99 | echo 100 | # Create a temp dir and download to it 101 | tempdir="$(mktemp -d 2>/dev/null || mktemp -d -t 'tempdir')" 102 | curl "$url" -o "$tempdir/python.pkg" 103 | if [ "$?" != "0" ]; then 104 | echo 105 | echo " - Failed to download python installer!" 106 | echo 107 | exit $? 108 | fi 109 | echo 110 | echo "Running python install package..." 111 | echo 112 | sudo installer -pkg "$tempdir/python.pkg" -target / 113 | if [ "$?" != "0" ]; then 114 | echo 115 | echo " - Failed to install python!" 116 | echo 117 | exit $? 118 | fi 119 | # Now we expand the package and look for a shell update script 120 | pkgutil --expand "$tempdir/python.pkg" "$tempdir/python" 121 | if [ -e "$tempdir/python/Python_Shell_Profile_Updater.pkg/Scripts/postinstall" ]; then 122 | # Run the script 123 | echo 124 | echo "Updating PATH..." 125 | echo 126 | "$tempdir/python/Python_Shell_Profile_Updater.pkg/Scripts/postinstall" 127 | fi 128 | vers_folder="Python $(echo "$vers" | cut -d'.' -f1 -f2)" 129 | if [ -f "/Applications/$vers_folder/Install Certificates.command" ]; then 130 | # Certs script exists - let's execute that to make sure our certificates are updated 131 | echo 132 | echo "Updating Certificates..." 133 | echo 134 | "/Applications/$vers_folder/Install Certificates.command" 135 | fi 136 | echo 137 | echo "Cleaning up..." 138 | cleanup 139 | echo 140 | if [ "$just_installing" == "TRUE" ]; then 141 | echo "Done." 
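    # (when the script was launched with --install-python we stop here after installing;
    #  otherwise the else branch below rechecks for python and re-runs main)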
142 | else 143 | # Now we check for py again 144 | echo "Rechecking py..." 145 | downloaded="TRUE" 146 | clear 147 | main 148 | fi 149 | } 150 | 151 | cleanup () { 152 | if [ -d "$tempdir" ]; then 153 | rm -Rf "$tempdir" 154 | fi 155 | } 156 | 157 | print_error() { 158 | clear 159 | cleanup 160 | echo " ### ###" 161 | echo " # Python Not Found #" 162 | echo "### ###" 163 | echo 164 | echo "Python is not installed or not found in your PATH var." 165 | echo 166 | if [ "$kernel" == "Darwin" ]; then 167 | echo "Please go to https://www.python.org/downloads/macos/ to" 168 | echo "download and install the latest version, then try again." 169 | else 170 | echo "Please install python through your package manager and" 171 | echo "try again." 172 | fi 173 | echo 174 | exit 1 175 | } 176 | 177 | print_target_missing() { 178 | clear 179 | cleanup 180 | echo " ### ###" 181 | echo " # Target Not Found #" 182 | echo "### ###" 183 | echo 184 | echo "Could not locate $target!" 185 | echo 186 | exit 1 187 | } 188 | 189 | format_version () { 190 | local vers="$1" 191 | echo "$(echo "$1" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }')" 192 | } 193 | 194 | vercomp () { 195 | # Modified from: https://apple.stackexchange.com/a/123408/11374 196 | local ver1="$(format_version "$1")" ver2="$(format_version "$2")" 197 | if [ $ver1 -gt $ver2 ]; then 198 | echo "1" 199 | elif [ $ver1 -lt $ver2 ]; then 200 | echo "2" 201 | else 202 | echo "0" 203 | fi 204 | } 205 | 206 | get_local_python_version() { 207 | # $1 = Python bin name (defaults to python3) 208 | # Echoes the path to the highest version of the passed python bin if any 209 | local py_name="$1" max_version= python= python_version= python_path= 210 | if [ -z "$py_name" ]; then 211 | py_name="python3" 212 | fi 213 | py_list="$(which -a "$py_name" 2>/dev/null)" 214 | # Walk that newline separated list 215 | while read python; do 216 | if [ -z "$python" ]; then 217 | # Got a blank line - skip 218 | continue 219 | fi 220 | if [ "$check_py3_stub" == "1" ] && [ "$python" == "/usr/bin/python3" ]; then 221 | # See if we have a valid developer path 222 | xcode-select -p > /dev/null 2>&1 223 | if [ "$?" != "0" ]; then 224 | # /usr/bin/python3 path - but no valid developer dir 225 | continue 226 | fi 227 | fi 228 | python_version="$(get_python_version $python)" 229 | if [ -z "$python_version" ]; then 230 | # Didn't find a py version - skip 231 | continue 232 | fi 233 | # Got the py version - compare to our max 234 | if [ -z "$max_version" ] || [ "$(vercomp "$python_version" "$max_version")" == "1" ]; then 235 | # Max not set, or less than the current - update it 236 | max_version="$python_version" 237 | python_path="$python" 238 | fi 239 | done <<< "$py_list" 240 | echo "$python_path" 241 | } 242 | 243 | get_python_version() { 244 | local py_path="$1" py_version= 245 | # Get the python version by piping stderr into stdout (for py2), then grepping the output for 246 | # the word "python", getting the second element, and grepping for an alphanumeric version number 247 | py_version="$($py_path -V 2>&1 | grep -i python | cut -d' ' -f2 | grep -E "[A-Za-z\d\.]+")" 248 | if [ ! 
-z "$py_version" ]; then 249 | echo "$py_version" 250 | fi 251 | } 252 | 253 | prompt_and_download() { 254 | if [ "$downloaded" != "FALSE" ] || [ "$kernel" != "Darwin" ]; then 255 | # We already tried to download, or we're not on macOS - just bail 256 | print_error 257 | fi 258 | clear 259 | echo " ### ###" 260 | echo " # Python Not Found #" 261 | echo "### ###" 262 | echo 263 | target_py="Python 3" 264 | printed_py="Python 2 or 3" 265 | if [ "$use_py3" == "FORCE" ]; then 266 | printed_py="Python 3" 267 | elif [ "$use_py3" == "FALSE" ]; then 268 | target_py="Python 2" 269 | printed_py="Python 2" 270 | fi 271 | echo "Could not locate $printed_py!" 272 | echo 273 | echo "This script requires $printed_py to run." 274 | echo 275 | while true; do 276 | read -p "Would you like to install the latest $target_py now? (y/n): " yn 277 | case $yn in 278 | [Yy]* ) download_py;break;; 279 | [Nn]* ) print_error;; 280 | esac 281 | done 282 | } 283 | 284 | main() { 285 | local python= version= 286 | # Verify our target exists 287 | if [ ! -f "$dir/$target" ]; then 288 | # Doesn't exist 289 | print_target_missing 290 | fi 291 | if [ -z "$use_py3" ]; then 292 | use_py3="TRUE" 293 | fi 294 | if [ "$use_py3" != "FALSE" ]; then 295 | # Check for py3 first 296 | python="$(get_local_python_version python3)" 297 | fi 298 | if [ "$use_py3" != "FORCE" ] && [ -z "$python" ]; then 299 | # We aren't using py3 explicitly, and we don't already have a path 300 | python="$(get_local_python_version python2)" 301 | if [ -z "$python" ]; then 302 | # Try just looking for "python" 303 | python="$(get_local_python_version python)" 304 | fi 305 | fi 306 | if [ -z "$python" ]; then 307 | # Didn't ever find it - prompt 308 | prompt_and_download 309 | return 1 310 | fi 311 | # Found it - start our script and pass all args 312 | "$python" "$dir/$target" "${args[@]}" 313 | } 314 | 315 | # Keep track of whether or not we're on macOS to determine if 316 | # we can download and install python for the user as needed. 317 | kernel="$(uname -s)" 318 | # Check to see if we need to force based on 319 | # macOS version. 10.15 has a dummy python3 version 320 | # that can trip up some py3 detection in other scripts. 321 | # set_use_py3_if "3" "10.15" "FORCE" 322 | downloaded="FALSE" 323 | # Check for the aforementioned /usr/bin/python3 stub if 324 | # our OS version is 10.15 or greater. 
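# As a small illustration of the helper used below: compare_to_version "3" "10.15"
# echoes "1" when the running macOS version is 10.15 or newer (mode 3 = greater-or-equal),
# so check_py3_stub is set to 1 and get_local_python_version will only accept
# /usr/bin/python3 when xcode-select reports a valid developer directory.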
325 | check_py3_stub="$(compare_to_version "3" "10.15")" 326 | trap cleanup EXIT 327 | if [ "$1" == "--install-python" ] && [ "$kernel" == "Darwin" ]; then 328 | just_installing="TRUE" 329 | download_py 330 | else 331 | main 332 | fi 333 | -------------------------------------------------------------------------------- /KextExtractor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 0.0.0 3 | from Scripts import bdmesg, disk, plist, run, utils 4 | import os, tempfile, datetime, shutil, time, plistlib, json, sys, glob, argparse, re 5 | 6 | class KextExtractor: 7 | def __init__(self, **kwargs): 8 | self.r = run.Run() 9 | self.d = disk.Disk() 10 | self.u = utils.Utils("KextExtractor") 11 | self.boot_manager = bdmesg.get_bootloader_uuid() 12 | self.clover = None 13 | self.efi = None 14 | self.exclude = None 15 | # Get the tools we need 16 | self.script_folder = "Scripts" 17 | self.settings_file = os.path.join("Scripts", "settings.json") 18 | cwd = os.getcwd() 19 | os.chdir(os.path.dirname(os.path.realpath(__file__))) 20 | if self.settings_file and os.path.exists(self.settings_file): 21 | self.settings = json.load(open(self.settings_file)) 22 | else: 23 | self.settings = { 24 | # Default settings here 25 | "archive" : False, 26 | "full" : False, 27 | "efi" : None, 28 | "kexts" : None, 29 | "exclude": None 30 | } 31 | # Ensure the exclude is valid regex, and that kexts exists 32 | try: self.exclude = re.compile(self.settings.get("exclude")) 33 | except: pass 34 | if self.settings.get("kexts") and not os.path.exists(self.settings["kexts"]): 35 | self.settings["kexts"] = None 36 | # Flush the settings to start 37 | self.flush_settings() 38 | os.chdir(cwd) 39 | 40 | def flush_settings(self): 41 | if self.settings_file: 42 | cwd = os.getcwd() 43 | os.chdir(os.path.dirname(os.path.realpath(__file__))) 44 | json.dump(self.settings, open(self.settings_file, "w"), indent=2) 45 | os.chdir(cwd) 46 | 47 | def get_binary(self, name): 48 | # Check the system, and local Scripts dir for the passed binary 49 | found = self.r.run({"args":["which", name]})[0].split("\n")[0].split("\r")[0] 50 | if len(found): 51 | # Found it on the system 52 | return found 53 | if os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)), name)): 54 | # Found it locally 55 | return os.path.join(os.path.dirname(os.path.realpath(__file__)), name) 56 | # Check the scripts folder 57 | if os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)), self.script_folder, name)): 58 | # Found it locally -> Scripts 59 | return os.path.join(os.path.dirname(os.path.realpath(__file__)), self.script_folder, name) 60 | # Not found 61 | return None 62 | 63 | def get_efi(self,allow_main=True): 64 | while True: 65 | self.d.update() 66 | pad = 4 67 | disk_string = "\n" 68 | if not self.settings.get("full"): 69 | boot_disk = self.d.get_parent(self.boot_manager) 70 | mounts = self.d.get_mounted_volume_dicts() 71 | # Gather some formatting info 72 | name_pad = size_pad = type_pad = 0 73 | index_pad = len(str(len(mounts))) 74 | for x in mounts: 75 | if len(str(x["name"])) > name_pad: name_pad = len(str(x["name"])) 76 | if len(x["size"]) > size_pad: size_pad = len(x["size"]) 77 | if len(str(x["readable_type"])) > type_pad: type_pad = len(str(x["readable_type"])) 78 | for i,d in enumerate(mounts,start=1): 79 | disk_string += "{}. 
{} | {} | {} | {}".format( 80 | str(i).rjust(index_pad), 81 | str(d["name"]).ljust(name_pad), 82 | d["size"].rjust(size_pad), 83 | str(d["readable_type"]).ljust(type_pad), 84 | d["identifier"] 85 | ) 86 | if boot_disk and self.d.get_parent(d["identifier"]) == boot_disk: 87 | disk_string += " *" 88 | disk_string += "\n" 89 | else: 90 | mounts = self.d.get_disks_and_partitions_dict() 91 | disks = list(mounts) 92 | index_pad = len(str(len(disks))) 93 | # Gather some formatting info 94 | name_pad = size_pad = type_pad = 0 95 | for d in disks: 96 | for x in mounts[d]["partitions"]: 97 | name = "Container for {}".format(x["container_for"]) if "container_for" in x else str(x["name"]) 98 | if len(name) > name_pad: name_pad = len(name) 99 | if len(x["size"]) > size_pad: size_pad = len(x["size"]) 100 | if len(str(x["readable_type"])) > type_pad: type_pad = len(str(x["readable_type"])) 101 | for i,d in enumerate(disks,start=1): 102 | disk_string+= "{}. {} ({}):\n".format( 103 | str(i).rjust(index_pad), 104 | d, 105 | mounts[d]["size"] 106 | ) 107 | if mounts[d].get("scheme"): 108 | disk_string += " {}\n".format(mounts[d]["scheme"]) 109 | if mounts[d].get("physical_stores"): 110 | disk_string += " Physical Store{} on {}\n".format( 111 | "" if len(mounts[d]["physical_stores"])==1 else "s", 112 | ", ".join(mounts[d]["physical_stores"]) 113 | ) 114 | parts = mounts[d]["partitions"] 115 | part_list = [] 116 | for p in parts: 117 | name = "Container for {}".format(p["container_for"]) if "container_for" in p else p["name"] 118 | p_text = " - {} | {} | {} | {}".format( 119 | str(name).ljust(name_pad), 120 | p["size"].rjust(size_pad), 121 | str(p["readable_type"]).ljust(type_pad), 122 | p["identifier"] 123 | ) 124 | if self.boot_manager and p["disk_uuid"] == self.boot_manager: 125 | # Got boot manager 126 | p_text += " *" 127 | part_list.append(p_text) 128 | if len(part_list): 129 | disk_string += "\n".join(part_list) + "\n" 130 | disk_string += "\nS. Switch to {} Output\n".format("Slim" if self.settings.get("full") else "Full") 131 | disk_string += "B. Select the Boot Drive's EFI\n" 132 | if self.boot_manager: 133 | disk_string += "C. Select the Booted EFI (Clover/OC)\n" 134 | disk_string += ("\nM. Main" if allow_main else "") + "\nQ. 
Quit\n" 135 | if self.boot_manager: 136 | disk_string += "\n(* denotes the booted EFI (Clover/OC)" 137 | height = max(len(disk_string.split("\n"))+pad,24) 138 | width = max((len(x) for x in disk_string.split("\n"))) 139 | if self.settings.get("resize_window",True): self.u.resize(max(80,width), height) 140 | self.u.head() 141 | print(disk_string) 142 | menu = self.u.grab("Pick the drive containing your EFI: ") 143 | if not len(menu): 144 | continue 145 | if menu.lower() == "q": 146 | if self.settings.get("resize_window",True): self.u.resize(80,24) 147 | self.u.custom_quit() 148 | elif allow_main and menu.lower() == "m": 149 | if self.settings.get("resize_window",True): self.u.resize(80,24) 150 | return 151 | elif menu.lower() == "s": 152 | self.settings["full"] = not self.settings.get("full") 153 | continue 154 | elif menu.lower() == "b": 155 | disk = "/" 156 | elif menu.lower() == "c" and self.boot_manager: 157 | disk = self.boot_manager 158 | else: 159 | try: disk = mounts[int(menu)-1]["identifier"] if isinstance(mounts,list) else list(mounts)[int(menu)-1] 160 | except: disk = menu 161 | if self.settings.get("resize_window",True): self.u.resize(80,24) 162 | iden = self.d.get_identifier(disk) 163 | if not iden: 164 | self.u.head("Invalid Disk") 165 | print("") 166 | print("'{}' is not a valid disk!".format(disk)) 167 | print("") 168 | self.u.grab("Returning in 5 seconds...", timeout=5) 169 | continue 170 | # Valid disk! 171 | efi = self.d.get_efi(iden) 172 | if not efi: 173 | self.u.head("No EFI Partition") 174 | print("") 175 | print("There is no EFI partition associated with {}!".format(iden)) 176 | print("") 177 | self.u.grab("Returning in 5 seconds...", timeout=5) 178 | continue 179 | return efi 180 | 181 | def qprint(self, message, quiet): 182 | if not quiet: 183 | print(message) 184 | 185 | def path_is_valid(self, test_path): 186 | # Check if any of the path elements end with .kext, or equal __MACOSX 187 | # as we don't want to find sub-kexts, extended attributes, or similar. 188 | return not any(x.lower().endswith(".kext") or x == "__MACOSX" for x in os.path.normpath(test_path).split(os.path.sep)) 189 | 190 | def get_kext_version(self, kext_path): 191 | # Walk the contents of the passed kext path and look for an Info.plist. 192 | # Pull the CFBundleShortVersionString, if any - fall back to the 193 | # CFBundleVersion otherwise. 194 | plist_full_path = version = None 195 | for kpath, ksubdirs, kfiles in os.walk(kext_path): 196 | for kname in kfiles: 197 | if kname.lower() == "info.plist": 198 | plist_full_path = os.path.join(kpath,kname) 199 | break 200 | if plist_full_path: break # Found it - break 201 | if plist_full_path: 202 | # Try to parse the plist 203 | try: 204 | with open(plist_full_path,"rb") as f: 205 | plist_data = plist.load(f) 206 | assert isinstance(plist_data,dict) 207 | version = plist_data.get("CFBundleShortVersionString",plist_data.get("CFBundleVersion")) 208 | except Exception: 209 | pass 210 | return version or "?.?.?" 211 | 212 | def mount_and_copy(self, disk = None, package = None, quiet = False, exclude = None, folder_path = None): 213 | # Mounts the passed disk and extracts the package target to the destination 214 | if not quiet: 215 | self.u.head("Extracting {} to {}...".format(os.path.basename(package), os.path.basename(folder_path) if folder_path else disk)) 216 | print("") 217 | if not package: 218 | print("No kext package passed! 
Aborting...") 219 | return False 220 | mounted = True # Default to avoid unmounting if not needed 221 | if not disk: 222 | if not folder_path: 223 | print("No disk or folder path provided! Aborting...") 224 | return False 225 | temp_path = self.u.check_path(folder_path) 226 | if not temp_path: 227 | print("{} was not found! Aborting...") 228 | return False 229 | if not os.path.isdir(temp_path): 230 | print("{} is not a directory! Aborting...") 231 | return False 232 | folder_path = temp_path # Set the path explicitly 233 | # Set our default paths 234 | clover_path = oc_path = None 235 | # Check if we have folder_path/EFI 236 | if os.path.isdir(os.path.join(folder_path,"EFI")): 237 | clover_path = os.path.join(folder_path,"EFI","CLOVER") 238 | oc_path = os.path.join(folder_path,"EFI","OC") 239 | # Check for folder_path/CLOVER|OC 240 | if os.path.isdir(os.path.join(folder_path,"CLOVER")): 241 | clover_path = os.path.join(folder_path,"CLOVER") 242 | if os.path.isdir(os.path.join(folder_path,"OC")): 243 | oc_path = os.path.join(folder_path,"OC") 244 | # Check for folder_path/OpenCore.efi|Clover.efi 245 | if os.path.isfile(os.path.join(folder_path,"Clover.efi")): 246 | clover_path = folder_path 247 | if os.path.isfile(os.path.join(folder_path,"OpenCore.efi")): 248 | oc_path = folder_path 249 | if not clover_path and not oc_path: 250 | print("Could not locate any valid Clover or OC install! Aborting...") 251 | return False 252 | else: 253 | self.d.update() 254 | mounted = self.d.is_mounted(disk) 255 | # Mount the EFI if needed 256 | if not mounted: 257 | self.qprint("Mounting {}...".format(disk), quiet) 258 | out = self.d.mount_partition(disk) 259 | if not out[2] == 0: 260 | print(out[1]) 261 | return False 262 | self.qprint(out[0].strip("\n"), quiet) 263 | self.qprint(" ", quiet) 264 | mp = self.d.get_mount_point(disk) 265 | clover_path = os.path.join(mp,"EFI","CLOVER") 266 | oc_path = os.path.join(mp,"EFI","OC") 267 | kexts = [] 268 | temp = None 269 | # We need to parse some lists 270 | # First we need to get a list of zips and extract them to the temp folder 271 | self.qprint("Gathering files...",quiet) 272 | zips = [x for x in os.listdir(package) if not x.startswith(".") and x.lower().endswith(".zip")] 273 | if len(zips): 274 | self.qprint("\n - Extracting zip files...",quiet) 275 | # Create a temp folder 276 | temp = tempfile.mkdtemp() 277 | for f in zips: 278 | ztemp = tempfile.mkdtemp(dir=temp) 279 | args = [ 280 | "unzip", 281 | os.path.join(package, f), 282 | "-d", 283 | ztemp 284 | ] 285 | self.qprint(" --> Extracting {}...".format(f), quiet) 286 | self.r.run({"args":args, "stream":False}) 287 | # Let's iterate through the temp dir 288 | self.qprint("\n - Walking temp folder...",quiet) 289 | for path, subdirs, files in os.walk(temp): 290 | if not self.path_is_valid(path): 291 | continue 292 | for name in subdirs: 293 | if name.lower().endswith(".kext"): 294 | # Save it 295 | self.qprint(" --> {}".format(name),quiet) 296 | kexts.append(os.path.join(path, name)) 297 | self.qprint("\n - Walking {}".format(package),quiet) 298 | for path, subdirs, files in os.walk(package): 299 | if not self.path_is_valid(path): 300 | continue 301 | for name in subdirs: 302 | if name.lower().endswith(".kext"): 303 | # Save it 304 | self.qprint(" --> {}".format(name),quiet) 305 | kexts.append(os.path.join(path, name)) 306 | # Got our lists 307 | if not len(kexts): 308 | self.qprint("\nNothing to install!", quiet) 309 | if temp: shutil.rmtree(temp, ignore_errors=True) 310 | return 311 | self.qprint("", 
quiet) 312 | for clear,k_f in ((clover_path,os.path.join(clover_path,"kexts") if clover_path else None),\ 313 | (oc_path,os.path.join(oc_path,"Kexts") if oc_path else None)): 314 | if not k_f: continue # Missing a path 315 | print("Checking for {}...".format(k_f)) 316 | if not os.path.exists(k_f): 317 | print(" - Not found! Skipping...\n".format(k_f)) 318 | continue 319 | print(" - Located! Iterating...") 320 | # Let's get a list of installed kexts - we'll want to omit any nested plugins though 321 | installed_kexts = {} 322 | for path, subdirs, files in os.walk(k_f): 323 | if not self.path_is_valid(path): 324 | continue 325 | for name in subdirs: 326 | if name.lower().endswith(".kext"): 327 | if not name.lower() in installed_kexts: 328 | installed_kexts[name.lower()] = [] 329 | installed_kexts[name.lower()].append(os.path.join(path, name)) 330 | # Let's walk our new kexts and update as we go 331 | for k in sorted(kexts, key=lambda x: os.path.basename(x).lower()): 332 | new_version = self.get_kext_version(k) 333 | k_name = os.path.basename(k) 334 | if not k_name.lower() in installed_kexts: 335 | continue 336 | for path in installed_kexts[k_name.lower()]: 337 | old_version = self.get_kext_version(path) 338 | dir_path = os.path.dirname(path)[len(clear):].lstrip("/") 339 | if exclude and exclude.match(k_name): 340 | # Excluded - print that we're skipping it 341 | print(" --> Excluding {}/{} ({}) per regex...".format( 342 | dir_path, 343 | k_name, 344 | old_version 345 | )) 346 | continue 347 | # Format the version string to show changes 348 | if old_version == new_version: 349 | version_string = old_version 350 | else: 351 | version_string = "{} -> {}".format(old_version,new_version) 352 | print(" --> Replacing {}/{} ({})...".format( 353 | dir_path, 354 | k_name, 355 | version_string 356 | )) 357 | if path.lower() == k.lower(): 358 | print(" ----> Source and target paths are the same - skipping!") 359 | continue 360 | # Back up if need be 361 | if self.settings.get("archive", False): 362 | print(" ----> Archiving...") 363 | cwd = os.getcwd() 364 | os.chdir(os.path.dirname(path)) 365 | zip_name = "{}-{}-Backup-{:%Y-%m-%d %H.%M.%S}.zip".format(k_name,old_version,datetime.datetime.now()) 366 | args = ["zip","-r",zip_name,os.path.basename(path)] 367 | out = self.r.run({"args":args, "stream":False}) 368 | os.chdir(cwd) 369 | if not out[2] == 0: 370 | print(" ------> Couldn't backup {} ({}) - skipping!".format(k_name,old_version)) 371 | continue 372 | # Replace the kext 373 | try: 374 | shutil.rmtree(path,ignore_errors=True) 375 | except: 376 | print(" ----> Could not remove target kext!") 377 | continue 378 | try: 379 | shutil.copytree(k,path) 380 | except: 381 | print(" ----> Failed to copy new kext!") 382 | continue 383 | print("") 384 | if temp: shutil.rmtree(temp, ignore_errors=True) 385 | # Unmount if need be 386 | if not mounted: 387 | self.qprint("Unmounting {}...\n".format(disk),quiet) 388 | self.d.unmount_partition(disk) 389 | 390 | def get_folder(self): 391 | self.u.head() 392 | print(" ") 393 | print("Q. Quit") 394 | print("M. 
Main Menu") 395 | print(" ") 396 | kexts = self.u.grab("Please drag and drop a folder containing kexts to copy: ") 397 | if kexts.lower() == "q": 398 | self.u.custom_quit() 399 | elif kexts.lower() == "m": 400 | return None 401 | kexts = self.u.check_path(kexts) 402 | if not kexts: 403 | self.u.grab("Folder doesn't exist!", timeout=5) 404 | self.get_folder() 405 | return kexts 406 | 407 | def default_folder(self): 408 | self.u.head() 409 | print(" ") 410 | print("Q. Quit") 411 | print("M. Main Menu") 412 | print(" ") 413 | kexts = self.u.grab("Please drag and drop a default folder containing kexts: ") 414 | if kexts.lower() == "q": 415 | self.u.custom_quit() 416 | elif kexts.lower() == "m": 417 | return self.settings.get("kexts",None) 418 | kexts = self.u.check_path(kexts) 419 | if not kexts: 420 | self.u.grab("Folder doesn't exist!", timeout=5) 421 | return self.default_folder() 422 | return kexts 423 | 424 | def default_disk(self): 425 | self.d.update() 426 | clover = bdmesg.get_bootloader_uuid() 427 | self.u.resize(80, 24) 428 | self.u.head("Select Default Disk") 429 | print(" ") 430 | print("1. None") 431 | print("2. Boot Disk") 432 | if clover: 433 | print("3. Booted Clover/OC") 434 | print(" ") 435 | print("M. Main Menu") 436 | print("Q. Quit") 437 | print(" ") 438 | menu = self.u.grab("Please pick a default disk: ") 439 | if not len(menu): 440 | return self.default_disk() 441 | menu = menu.lower() 442 | if menu in ["1","2"]: 443 | return [None, "boot"][int(menu)-1] 444 | elif menu == "3" and clover: 445 | return "clover" 446 | elif menu == "m": 447 | return self.settings.get("efi",None) 448 | elif menu == "q": 449 | self.u.custom_quit() 450 | return self.default_disk() 451 | 452 | def get_regex(self): 453 | while True: 454 | self.u.head("Exclusion Regex") 455 | print("") 456 | print("Current Exclusion: {}".format(None if self.exclude is None else self.exclude.pattern)) 457 | print("") 458 | print("Eg: To case-insenitively exclude any kext starting with \"hello\",") 459 | print("you can use the following:") 460 | print("") 461 | print("(?i)hello.*\\.kext") 462 | print("") 463 | print("C. Clear Exclusions") 464 | print("M. Return to Menu") 465 | print("Q. Quit") 466 | print("") 467 | menu = self.u.grab("Please enter the exclusion regex: ") 468 | if not len(menu): continue 469 | if menu.lower() == "m": return self.exclude 470 | elif menu.lower() == "q": self.u.custom_quit() 471 | elif menu.lower() == "c": return None 472 | try: 473 | regex = re.compile(menu) 474 | except Exception as e: 475 | self.u.head("Regex Compile Error") 476 | print("") 477 | print("That regex is not valid:\n\n{}".format(repr(e))) 478 | print("") 479 | self.u.head("Press [enter] to return...") 480 | continue 481 | return regex 482 | 483 | def main(self): 484 | efi = self.settings.get("efi", None) 485 | if efi == "clover": 486 | efi = self.d.get_identifier(bdmesg.get_bootloader_uuid()) 487 | elif efi == "boot": 488 | efi = self.d.get_efi("/") 489 | kexts = self.settings.get("kexts", None) 490 | while True: 491 | self.u.head("Kext Extractor") 492 | print(" ") 493 | print("Target EFI: "+str(efi)) 494 | print("Source Folder: "+str(kexts)) 495 | print("Archive: "+str(self.settings.get("archive", False))) 496 | print("Exclusion: {}".format(None if self.exclude is None else self.exclude.pattern)) 497 | print(" ") 498 | print("1. Select Target EFI") 499 | print("2. Select Source Kext Folder") 500 | print(" ") 501 | print("3. Toggle Archive") 502 | print("4. Pick Default Target EFI") 503 | print("5. 
Pick Default Source Kext Folder") 504 | print("6. Set Exclusion Regex") 505 | print(" ") 506 | print("7. Extract") 507 | print(" ") 508 | print("Q. Quit") 509 | print(" ") 510 | menu = self.u.grab("Please select an option: ") 511 | if not len(menu): 512 | continue 513 | menu = menu.lower() 514 | if menu == "q": 515 | self.u.custom_quit() 516 | elif menu == "1": 517 | temp_efi = self.get_efi() 518 | efi = temp_efi or efi 519 | elif menu == "2": 520 | k = self.get_folder() 521 | if not k: 522 | continue 523 | kexts = k 524 | elif menu == "3": 525 | arch = self.settings.get("archive", False) 526 | self.settings["archive"] = not arch 527 | self.flush_settings() 528 | elif menu == "4": 529 | efi = self.default_disk() 530 | self.settings["efi"] = efi 531 | if efi == "clover": 532 | efi = self.d.get_identifier(bdmesg.get_bootloader_uuid()) 533 | elif efi == "boot": 534 | efi = self.d.get_efi("/") 535 | self.flush_settings() 536 | elif menu == "5": 537 | kexts = self.default_folder() 538 | self.settings["kexts"] = kexts 539 | self.flush_settings() 540 | elif menu == "6": 541 | self.exclude = self.get_regex() 542 | self.settings["exclude"] = None if self.exclude is None else self.exclude.pattern 543 | self.flush_settings() 544 | elif menu == "7": 545 | if not efi: 546 | efi = self.get_efi() 547 | if not efi: 548 | continue 549 | if not kexts: 550 | k = self.get_folder() 551 | if not k: 552 | continue 553 | kexts = k 554 | # Got folder and EFI - let's do something... 555 | self.mount_and_copy(disk=efi,package=kexts,quiet=False,exclude=self.exclude) 556 | self.u.grab("Press [enter] to return...") 557 | 558 | def quiet_copy(self, args, explicit_disk = False, exclude = None, folder_path = None, quiet = True): 559 | # Iterate through the args 560 | func = self.d.get_identifier if explicit_disk else self.d.get_efi 561 | arg_pairs = zip(*[iter(args)]*2) 562 | for pair in arg_pairs: 563 | disk = folder_path = None 564 | if pair[1].lower().startswith("f="): 565 | folder_path = pair[1][2:] 566 | else: 567 | disk = func(pair[1]) 568 | if disk or folder_path: 569 | try: 570 | self.mount_and_copy(disk=disk,package=pair[0],quiet=quiet,exclude=exclude,folder_path=folder_path) 571 | except Exception as e: 572 | print(str(e)) 573 | 574 | if __name__ == '__main__': 575 | # Setup the cli args 576 | parser = argparse.ArgumentParser(prog="KextExtractor.command", description="KextExtractor - a py script that extracts and updates kexts.") 577 | parser.add_argument("kexts_and_disks",nargs="*", help="path pairs for source kexts and target disk - can also take f=/path/to/EFI instead of a disk (eg. 
kextpath1 disk1 kextpath2 disk2 kextpath3 f=/folder/path1)") 578 | parser.add_argument("-d", "--explicit-disk", help="treat all mount points/identifiers explicitly without resolving to EFI", action="store_true") 579 | parser.add_argument("-e", "--exclude", help="regex to exclude kexts by name matching (overrides settings.json, cli-only)") 580 | parser.add_argument("-x", "--disable-exclude", help="disable regex name exclusions (overrides --exclude and settings.json, cli-only)", action="store_true") 581 | parser.add_argument("-v", "--verbose", help="Uses verbose output instead of quiet", action="store_true") 582 | 583 | args = parser.parse_args() 584 | 585 | # Check for args 586 | if args.kexts_and_disks and len(args.kexts_and_disks) % 2: 587 | print("Kext folder and target disk arguments must be in pairs!") 588 | exit(1) 589 | 590 | c = KextExtractor() 591 | if args.kexts_and_disks: 592 | if args.disable_exclude: # Override any regex exclusions 593 | regex = None 594 | elif args.exclude: # Attempt to compile the regex override 595 | try: regex = re.compile(args.exclude) 596 | except: 597 | print("Passed regex is invalid!") 598 | exit(1) 599 | else: # Fall back on the original value 600 | regex = c.exclude 601 | c.quiet_copy(args.kexts_and_disks,explicit_disk=args.explicit_disk,exclude=regex,quiet=not args.verbose) 602 | else: 603 | c.main() 604 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 CorpNewt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # KextExtractor 2 | Small py script to extract kext files from a target folder and update them by name on a target drive's EFI partition. 3 | 4 | *** 5 | 6 | ## To install: 7 | 8 | Do the following one line at a time in Terminal: 9 | 10 | git clone https://github.com/corpnewt/KextExtractor 11 | cd KextExtractor 12 | chmod +x KextExtractor.command 13 | 14 | Then run with either `./KextExtractor.command` or by double-clicking *KextExtractor.command* 15 | 16 | *** 17 | 18 | ## Usage: 19 | 20 | Starting the script with no arguments will open it in interactive mode. 
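
Choices made in interactive mode (the default target EFI, the default source kext folder, the Archive toggle, and the exclusion regex) are remembered between runs in `Scripts/settings.json`. As a rough illustration only - the kexts path below is just a placeholder - a saved settings file mirrors the script's defaults and looks something like:

    {
      "archive": false,
      "full": false,
      "efi": "boot",
      "kexts": "/path/to/your/Kexts",
      "exclude": null
    }
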
21 | 22 | If you want it to auto extract & copy, you can pass pairs of arguments to it like so (assumes you have a Kexts folder on the Desktop, and plan to extract it to the boot drive's EFI): 23 | 24 | ./KextExtractor.command ~/Desktop/Kexts / 25 | 26 | You can also pass multiple sets of argument pairs to extract multiple Kexts folders to EFIs. With our above example, if we also wanted to extract that same folder to `disk5`'s EFI, we could do: 27 | 28 | ./KextExtractor.command ~/Desktop/Kexts / ~/Desktop/Kexts disk5 29 | 30 | *** 31 | 32 | ## Thanks To: 33 | 34 | * Slice, apianti, vit9696, Download Fritz, Zenith432, STLVNUB, JrCs,cecekpawon, Needy, cvad, Rehabman, philip_petev, ErmaC and the rest of the Clover crew for Clover and bdmesg 35 | -------------------------------------------------------------------------------- /Scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from os.path import dirname, basename, isfile 2 | import glob 3 | modules = glob.glob(dirname(__file__)+"/*.py") 4 | __all__ = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')] -------------------------------------------------------------------------------- /Scripts/bdmesg.py: -------------------------------------------------------------------------------- 1 | import binascii, subprocess, sys 2 | 3 | def get_clover_uuid(): 4 | bd = bdmesg() 5 | if not len(bd): 6 | return "" 7 | # Get bdmesg output - then parse for SelfDevicePath 8 | if not "SelfDevicePath=" in bd: 9 | # Not found 10 | return "" 11 | try: 12 | # Split to just the contents of that line 13 | line = bd.split("SelfDevicePath=")[1].split("\n")[0] 14 | # Get the HD section 15 | hd = line.split("HD(")[1].split(")")[0] 16 | # Get the UUID 17 | uuid = hd.split(",")[2] 18 | return uuid 19 | except: 20 | pass 21 | return "" 22 | 23 | def get_oc_uuid(): 24 | p = subprocess.Popen(["nvram","4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102:boot-path"], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 25 | oc, oe = p.communicate() 26 | oc = _decode(oc) 27 | try: 28 | path = oc.split("GPT,")[1].split(",")[0] 29 | except: 30 | path = "" 31 | return path 32 | 33 | def get_bootloader_uuid(): 34 | b_uuid = get_clover_uuid() 35 | if not b_uuid: 36 | b_uuid = get_oc_uuid() 37 | return b_uuid 38 | 39 | def bdmesg(just_clover = True): 40 | b = "" if just_clover else _bdmesg(["ioreg","-l","-p","IOService","-w0"]) 41 | if b == "": 42 | b = _bdmesg(["ioreg","-l","-p","IODeviceTree","-w0"]) 43 | return b 44 | 45 | def _decode(var): 46 | if sys.version_info >= (3,0) and isinstance(var, bytes): 47 | var = var.decode("utf-8","ignore") 48 | return var 49 | 50 | def _bdmesg(comm): 51 | # Runs ioreg -l -p IODeviceTree -w0 and searches for "boot-log" 52 | p = subprocess.Popen(comm, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 53 | bd, be = p.communicate() 54 | bd = _decode(bd) 55 | for line in bd.split("\n"): 56 | # We're just looking for the "boot-log" property, then we need to format it 57 | if not '"boot-log"' in line: 58 | # Skip it! 
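            # (only lines containing the "boot-log" property are kept; a matching ioreg
            #  line looks roughly like  "boot-log" = <48656c6c6f...>  and the hex between
            #  the angle brackets is unhexlified and decoded below)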
59 | continue 60 | # Must have found it - let's try to split it, then get the hex data and process it 61 | try: 62 | # Split it up, then convert from hex to ascii 63 | ascii_bytes = binascii.unhexlify(line.split("<")[1].split(">")[0].encode("utf-8")) 64 | ascii_bytes = _decode(ascii_bytes) 65 | return ascii_bytes 66 | except: 67 | # Failed to convert 68 | return "" 69 | # Didn't find it 70 | return "" 71 | -------------------------------------------------------------------------------- /Scripts/disk.py: -------------------------------------------------------------------------------- 1 | import os, sys, shutil, re 2 | sys.path.append(os.path.abspath(os.path.dirname(os.path.realpath(__file__)))) 3 | import run, plist 4 | 5 | # Info pulled from: https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs 6 | GPT_GUIDS = { 7 | "-": { 8 | "00000000-0000-0000-0000-000000000000": "Unused entry", 9 | "024DEE41-33E7-11D3-9D69-0008C781F39F": "MBR", 10 | "C12A7328-F81F-11D2-BA4B-00A0C93EC93B": "EFI", 11 | "21686148-6449-6E6F-744E-656564454649": "BIOS boot", 12 | "D3BFE2DE-3DAF-11DF-BA40-E3A556D89593": "Intel Fast Flash", 13 | "F4019732-066E-4E12-8273-346C5641494F": "Sony boot", 14 | "BFBFAFE7-A34F-448A-9A5B-6213EB736C22": "Lenovo boot" 15 | }, 16 | "Windows": { 17 | "E3C9E316-0B5C-4DB8-817D-F92DF00215AE": "Microsoft Reserved", 18 | "EBD0A0A2-B9E5-4433-87C0-68B6B72699C7": "Microsoft basic data", 19 | "5808C8AA-7E8F-42E0-85D2-E1E90434CFB3": "Logical Disk Manager metadata", 20 | "AF9B60A0-1431-4F62-BC68-3311714A69AD": "Logical Disk Manager data", 21 | "DE94BBA4-06D1-4D40-A16A-BFD50179D6AC": "Windows Recovery", 22 | "37AFFC90-EF7D-4E96-91C3-2D7AE055B174": "IBM General Parallel File System", 23 | "E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D": "Storage Spaces", 24 | "558D43C5-A1AC-43C0-AAC8-D1472B2923D1": "Storage Replica" 25 | }, 26 | "HP-UX": { 27 | "75894C1E-3AEB-11D3-B7C1-7B03A0000000": "Data", 28 | "E2A1E728-32E3-11D6-A682-7B03A0000000": "Service" 29 | }, 30 | "Linux": { 31 | "0FC63DAF-8483-4772-8E79-3D69D8477DE4": "Linux filesystem data", 32 | "A19D880F-05FC-4D3B-A006-743F0F84911E": "RAID", 33 | "44479540-F297-41B2-9AF7-D131D5F0458A": "Root (x86)", 34 | "4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709": "Root (x86-64)", 35 | "69DAD710-2CE4-4E3C-B16C-21A1D49ABED3": "Root (32-bit ARM)", 36 | "B921B045-1DF0-41C3-AF44-4C6F280D3FAE": "Root (64-bit ARM/AArch64)", 37 | "BC13C2FF-59E6-4262-A352-B275FD6F7172": "/boot", 38 | "0657FD6D-A4AB-43C4-84E5-0933C84B4F4F": "Swap", 39 | "E6D6D379-F507-44C2-A23C-238F2A3DF928": "Logical Volume Manager", 40 | "933AC7E1-2EB4-4F13-B844-0E14E2AEF915": "/home", 41 | "3B8F8425-20E0-4F3B-907F-1A25A76F98E8": "/srv", 42 | "7FFEC5C9-2D00-49B7-8941-3EA10A5586B7": "Plain dm-crypt", 43 | "CA7D7CCB-63ED-4C53-861C-1742536059CC": "LUKS", 44 | "8DA63339-0007-60C0-C436-083AC8230908": "Reserved" 45 | }, 46 | "FreeBSD": { 47 | "83BD6B9D-7F41-11DC-BE0B-001560B84F0F": "Boot", 48 | "516E7CB4-6ECF-11D6-8FF8-00022D09712B": "BSD disklabel", 49 | "516E7CB5-6ECF-11D6-8FF8-00022D09712B": "Swap", 50 | "516E7CB6-6ECF-11D6-8FF8-00022D09712B": "Unix File System", 51 | "516E7CB8-6ECF-11D6-8FF8-00022D09712B": "Vinum volume manager", 52 | "516E7CBA-6ECF-11D6-8FF8-00022D09712B": "ZFS", 53 | "74BA7DD9-A689-11E1-BD04-00E081286ACF": "nandfs" 54 | }, 55 | "macOS Darwin": { 56 | "48465300-0000-11AA-AA11-00306543ECAC": "Apple HFS+", 57 | "7C3457EF-0000-11AA-AA11-00306543ECAC": "Apple APFS container", 58 | "55465300-0000-11AA-AA11-00306543ECAC": "Apple UFS container", 59 | "6A898CC3-1DD2-11B2-99A6-080020736631": "ZFS", 
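        # (note: this ZFS GUID also appears below under "Solaris illumos" as "/usr";
        #  the same partition type GUID shows up for both in this table)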
60 | "52414944-0000-11AA-AA11-00306543ECAC": "Apple RAID", 61 | "52414944-5F4F-11AA-AA11-00306543ECAC": "Apple RAID, offline", 62 | "426F6F74-0000-11AA-AA11-00306543ECAC": "Apple Boot", 63 | "4C616265-6C00-11AA-AA11-00306543ECAC": "Apple Label", 64 | "5265636F-7665-11AA-AA11-00306543ECAC": "Apple TV Recovery", 65 | "53746F72-6167-11AA-AA11-00306543ECAC": "Apple Core Storage Container", 66 | "69646961-6700-11AA-AA11-00306543ECAC": "Apple APFS Preboot", 67 | "52637672-7900-11AA-AA11-00306543ECAC": "Apple APFS Recovery" 68 | }, 69 | "Solaris illumos": { 70 | "6A82CB45-1DD2-11B2-99A6-080020736631": "Boot", 71 | "6A85CF4D-1DD2-11B2-99A6-080020736631": "Root", 72 | "6A87C46F-1DD2-11B2-99A6-080020736631": "Swap", 73 | "6A8B642B-1DD2-11B2-99A6-080020736631": "Backup", 74 | "6A898CC3-1DD2-11B2-99A6-080020736631": "/usr", 75 | "6A8EF2E9-1DD2-11B2-99A6-080020736631": "/var", 76 | "6A90BA39-1DD2-11B2-99A6-080020736631": "/home", 77 | "6A9283A5-1DD2-11B2-99A6-080020736631": "Alternate sector", 78 | "6A945A3B-1DD2-11B2-99A6-080020736631": "Reserved", 79 | "6A9630D1-1DD2-11B2-99A6-080020736631": "Reserved", 80 | "6A980767-1DD2-11B2-99A6-080020736631": "Reserved", 81 | "6A96237F-1DD2-11B2-99A6-080020736631": "Reserved", 82 | "6A8D2AC7-1DD2-11B2-99A6-080020736631": "Reserved" 83 | }, 84 | "NetBSD": { 85 | "49F48D32-B10E-11DC-B99B-0019D1879648": "Swap", 86 | "49F48D5A-B10E-11DC-B99B-0019D1879648": "FFS", 87 | "49F48D82-B10E-11DC-B99B-0019D1879648": "LFS", 88 | "49F48DAA-B10E-11DC-B99B-0019D1879648": "RAID", 89 | "2DB519C4-B10F-11DC-B99B-0019D1879648": "Concatenated", 90 | "2DB519EC-B10F-11DC-B99B-0019D1879648": "Encrypted" 91 | }, 92 | "ChromeOS": { 93 | "FE3A2A5D-4F32-41A7-B725-ACCC3285A309": "ChromeOS kernel", 94 | "3CB8E202-3B7E-47DD-8A3C-7FF2A13CFCEC": "ChromeOS rootfs", 95 | "CAB6E88E-ABF3-4102-A07A-D4BB9BE3C1D3": "ChromeOS firmware", 96 | "2E0A753D-9E48-43B0-8337-B15192CB1B5E": "ChromeOS future use", 97 | "09845860-705F-4BB5-B16C-8A8A099CAF52": "ChromeOS miniOS", 98 | "3F0F8318-F146-4E6B-8222-C28C8F02E0D5": "ChromeOS hibernate" 99 | }, 100 | "Container Linux by CoreOS": { 101 | "5DFBF5F4-2848-4BAC-AA5E-0D9A20B745A6": "/usr", 102 | "3884DD41-8582-4404-B9A8-E9B84F2DF50E": "Resizable rootfs", 103 | "C95DC21A-DF0E-4340-8D7B-26CBFA9A03E0": "OEM customizations", 104 | "BE9067B9-EA49-4F15-B4F6-F36F8C9E1818": "Root filesystem on RAID" 105 | }, 106 | "Haiku": { 107 | "42465331-3BA3-10F1-802A-4861696B7521": "Haiku BFS" 108 | }, 109 | "MidnightBSD": { 110 | "85D5E45E-237C-11E1-B4B3-E89A8F7FC3A7": "Boot", 111 | "85D5E45A-237C-11E1-B4B3-E89A8F7FC3A7": "Data", 112 | "85D5E45B-237C-11E1-B4B3-E89A8F7FC3A7": "Swap", 113 | "0394EF8B-237E-11E1-B4B3-E89A8F7FC3A7": "Unix File System", 114 | "85D5E45C-237C-11E1-B4B3-E89A8F7FC3A7": "Vinum volume manager", 115 | "85D5E45D-237C-11E1-B4B3-E89A8F7FC3A7": "ZFS" 116 | }, 117 | "Ceph": { 118 | "45B0969E-9B03-4F30-B4C6-B4B80CEFF106": "Journal", 119 | "45B0969E-9B03-4F30-B4C6-5EC00CEFF106": "dm-crypt journal", 120 | "4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D": "OSD", 121 | "4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D": "dm-crypt OSD", 122 | "89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE": "Disk in creation", 123 | "89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE": "dm-crypt disk in creation", 124 | "CAFECAFE-9B03-4F30-B4C6-B4B80CEFF106": "Block", 125 | "30CD0809-C2B2-499C-8879-2D6B78529876": "Block DB", 126 | "5CE17FCE-4087-4169-B7FF-056CC58473F9": "Block write-ahead log", 127 | "FB3AABF9-D25F-47CC-BF5E-721D1816496B": "Lockbox for dm-crypt keys", 128 | "4FBD7E29-8AE0-4982-BF9D-5A8D867AF560": "Multipath OSD", 129 | 
"45B0969E-8AE0-4982-BF9D-5A8D867AF560": "Multipath journal", 130 | "CAFECAFE-8AE0-4982-BF9D-5A8D867AF560": "Multipath block", 131 | "7F4A666A-16F3-47A2-8445-152EF4D03F6C": "Multipath block", 132 | "EC6D6385-E346-45DC-BE91-DA2A7C8B3261": "Multipath block DB", 133 | "01B41E1B-002A-453C-9F17-88793989FF8F": "Multipath block write-ahead log", 134 | "CAFECAFE-9B03-4F30-B4C6-5EC00CEFF106": "dm-crypt block", 135 | "93B0052D-02D9-4D8A-A43B-33A3EE4DFBC3": "dm-crypt block DB", 136 | "306E8683-4FE2-4330-B7C0-00A917C16966": "dm-crypt block write-ahead log", 137 | "45B0969E-9B03-4F30-B4C6-35865CEFF106": "dm-crypt LUKS journal", 138 | "CAFECAFE-9B03-4F30-B4C6-35865CEFF106": "dm-crypt LUKS block", 139 | "166418DA-C469-4022-ADF4-B30AFD37F176": "dm-crypt LUKS block DB", 140 | "86A32090-3647-40B9-BBBD-38D8C573AA86": "dm-crypt LUKS block write-ahead log", 141 | "4FBD7E29-9D25-41B8-AFD0-35865CEFF05D": "dm-crypt LUKS OSD" 142 | }, 143 | "OpenBSD": { 144 | "824CC7A0-36A8-11E3-890A-952519AD3F61": "Data" 145 | }, 146 | "QNX": { 147 | "CEF5A9AD-73BC-4601-89F3-CDEEEEE321A1": "Power-safe (QNX6) file system" 148 | }, 149 | "Plan 9": { 150 | "C91818F9-8025-47AF-89D2-F030D7000C2C": "Plan 9" 151 | }, 152 | "VMware ESX": { 153 | "9D275380-40AD-11DB-BF97-000C2911D1B8": "vmkcore", 154 | "AA31E02A-400F-11DB-9590-000C2911D1B8": "VMFS filesystem", 155 | "9198EFFC-31C0-11DB-8F78-000C2911D1B8": "VMware Reserved" 156 | }, 157 | "Android-IA": { 158 | "2568845D-2332-4675-BC39-8FA5A4748D15": "Bootloader", 159 | "114EAFFE-1552-4022-B26E-9B053604CF84": "Bootloader2", 160 | "49A4D17F-93A3-45C1-A0DE-F50B2EBE2599": "Boot", 161 | "4177C722-9E92-4AAB-8644-43502BFD5506": "Recovery", 162 | "EF32A33B-A409-486C-9141-9FFB711F6266": "Misc", 163 | "20AC26BE-20B7-11E3-84C5-6CFDB94711E9": "Metadata", 164 | "38F428E6-D326-425D-9140-6E0EA133647C": "System", 165 | "A893EF21-E428-470A-9E55-0668FD91A2D9": "Cache", 166 | "DC76DDA9-5AC1-491C-AF42-A82591580C0D": "Data", 167 | "EBC597D0-2053-4B15-8B64-E0AAC75F4DB1": "Persistent", 168 | "C5A0AEEC-13EA-11E5-A1B1-001E67CA0C3C": "Vendor", 169 | "BD59408B-4514-490D-BF12-9878D963F378": "Config", 170 | "8F68CC74-C5E5-48DA-BE91-A0C8C15E9C80": "Factory", 171 | "9FDAA6EF-4B3F-40D2-BA8D-BFF16BFB887B": "Factory", 172 | "767941D0-2085-11E3-AD3B-6CFDB94711E9": "Fastboot / Tertiary", 173 | "AC6D7924-EB71-4DF8-B48D-E267B27148FF": "OEM" 174 | }, 175 | "Android 6.0+ ARM": { 176 | "19A710A2-B3CA-11E4-B026-10604B889DCF": "Android Meta", 177 | "193D1EA4-B3CA-11E4-B075-10604B889DCF": "Android EXT" 178 | }, 179 | "Open Network Install Environment (ONIE)": { 180 | "7412F7D5-A156-4B13-81DC-867174929325": "Boot", 181 | "D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149": "Config" 182 | }, 183 | "PowerPC": { 184 | "9E1A2D38-C612-4316-AA26-8B49521E5A8B": "PReP boot" 185 | }, 186 | "freedesktop.org OSes (Linux, etc.)": { 187 | "BC13C2FF-59E6-4262-A352-B275FD6F7172": "Shared boot loader configuration" 188 | }, 189 | "Atari TOS": { 190 | "734E5AFE-F61A-11E6-BC64-92361F002671": "Basic data" 191 | }, 192 | "VeraCrypt": { 193 | "8C8F8EFF-AC95-4770-814A-21994F2DBC8F": "Encrypted data" 194 | }, 195 | "OS/2": { 196 | "90B6FF38-B98F-4358-A21F-48F35B4A8AD3": "ArcaOS Type 1" 197 | }, 198 | "Storage Performance Development Kit (SPDK)": { 199 | "7C5222BD-8F5D-4087-9C00-BF9843C7B58C": "SPDK block device" 200 | }, 201 | "barebox bootloader": { 202 | "4778ED65-BF42-45FA-9C5B-287A1DC4AAB1": "barebox-state" 203 | }, 204 | "U-Boot bootloader": { 205 | "3DE21764-95BD-54BD-A5C3-4ABE786F38A8": "U-Boot" 206 | }, 207 | "SoftRAID": { 208 | 
"B6FA30DA-92D2-4A9A-96F1-871EC6486200": "SoftRAID_Status", 209 | "2E313465-19B9-463F-8126-8A7993773801": "SoftRAID_Scratch", 210 | "FA709C7E-65B1-4593-BFD5-E71D61DE9B02": "SoftRAID_Volume", 211 | "BBBA6DF5-F46F-4A89-8F59-8765B2727503": "SoftRAID_Cache" 212 | }, 213 | "Fuchsia standard partitions": { 214 | "FE8A2634-5E2E-46BA-99E3-3A192091A350": "Bootloader", 215 | "D9FD4535-106C-4CEC-8D37-DFC020CA87CB": "Durable mutable encrypted system data", 216 | "A409E16B-78AA-4ACC-995C-302352621A41": "Durable mutable bootloader data", 217 | "F95D940E-CABA-4578-9B93-BB6C90F29D3E": "Factory-provisioned read-only system data", 218 | "10B8DBAA-D2BF-42A9-98C6-A7C5DB3701E7": "Factory-provisioned read-only bootloader data", 219 | "49FD7CB8-DF15-4E73-B9D9-992070127F0F": "Fuchsia Volume Manager", 220 | "421A8BFC-85D9-4D85-ACDA-B64EEC0133E9": "Verified boot metadata", 221 | "9B37FFF6-2E58-466A-983A-F7926D0B04E0": "Zircon boot image" 222 | }, 223 | "Fuchsia legacy partitions": { 224 | "C12A7328-F81F-11D2-BA4B-00A0C93EC93B": "fuchsia-esp", 225 | "606B000B-B7C7-4653-A7D5-B737332C899D": "fuchsia-system", 226 | "08185F0C-892D-428A-A789-DBEEC8F55E6A": "fuchsia-data", 227 | "48435546-4953-2041-494E-5354414C4C52": "fuchsia-install", 228 | "2967380E-134C-4CBB-B6DA-17E7CE1CA45D": "fuchsia-blob", 229 | "41D0E340-57E3-954E-8C1E-17ECAC44CFF5": "fuchsia-fvm", 230 | "DE30CC86-1F4A-4A31-93C4-66F147D33E05": "Zircon boot image", 231 | "23CC04DF-C278-4CE7-8471-897D1A4BCDF7": "Zircon boot image", 232 | "A0E5CF57-2DEF-46BE-A80C-A2067C37CD49": "Zircon boot image", 233 | "4E5E989E-4C86-11E8-A15B-480FCF35F8E6": "sys-config", 234 | "5A3A90BE-4C86-11E8-A15B-480FCF35F8E6": "factory-config", 235 | "5ECE94FE-4C86-11E8-A15B-480FCF35F8E6": "bootloader", 236 | "8B94D043-30BE-4871-9DFA-D69556E8C1F3": "guid-test", 237 | "A13B4D9A-EC5F-11E8-97D8-6C3BE52705BF": "Verified boot metadata", 238 | "A288ABF2-EC5F-11E8-97D8-6C3BE52705BF": "Verified boot metadata", 239 | "6A2460C3-CD11-4E8B-80A8-12CCE268ED0A": "Verified boot metadata", 240 | "1D75395D-F2C6-476B-A8B7-45CC1C97B476": "misc", 241 | "900B0FC5-90CD-4D4F-84F9-9F8ED579DB88": "emmc-boot1", 242 | "B2B2E8D1-7C10-4EBC-A2D0-4614568260AD": "emmc-boot2" 243 | } 244 | } 245 | 246 | class Disk: 247 | def __init__(self): 248 | self.r = run.Run() 249 | self.version_re = re.compile(r"diskdump ([a-zA-z\d]+\.[a-zA-Z\d]+\.[a-zA-Z\d]+)") 250 | self.diskdump,self.diskdump_version = self.check_diskdump() 251 | self.full_os_version = self.r.run({"args":["sw_vers", "-productVersion"]})[0] 252 | if len(self.full_os_version.split(".")) < 3: 253 | # Ensure the format is XX.YY.ZZ 254 | self.full_os_version += ".0" 255 | self.os_version = ".".join(self.full_os_version.split(".")[:2]) 256 | self.sudo_mount_version = "10.13.6" 257 | self.efi_guids = ["C12A7328-F81F-11D2-BA4B-00A0C93EC93B"] 258 | self.disks = self.get_disks() 259 | 260 | def is_guid(self, guid = None): 261 | try: 262 | guid_parts = guid.split("-") 263 | assert len(guid_parts) == 5 264 | assert len(guid_parts[0]) == 8 265 | assert all((len(x) == 4 for x in guid_parts[1:4])) 266 | assert len(guid_parts[-1]) == 12 267 | assert all((x in "0123456789ABCDEF" for x in "".join(guid_parts))) 268 | except: # Not a GUID - return 269 | return False 270 | return True 271 | 272 | def get_size(self, size, suffix=None, use_1024=False, round_to=1, strip_zeroes=True): 273 | # size is the number of bytes 274 | # suffix is the target suffix to locate (B, KB, MB, etc) - if found 275 | # use_2014 denotes whether or not we display in MiB vs MB 276 | # round_to is the number of 
dedimal points to round our result to (0-15) 277 | # strip_zeroes denotes whether we strip out zeroes 278 | 279 | # Failsafe in case our size is unknown 280 | if size == -1: 281 | return "Unknown" 282 | # Get our suffixes based on use_1024 283 | ext = ["B","KiB","MiB","GiB","TiB","PiB"] if use_1024 else ["B","KB","MB","GB","TB","PB"] 284 | div = 1024 if use_1024 else 1000 285 | s = float(size) 286 | s_dict = {} # Initialize our dict 287 | # Iterate the ext list, and divide by 1000 or 1024 each time to setup the dict {ext:val} 288 | for e in ext: 289 | s_dict[e] = s 290 | s /= div 291 | # Get our suffix if provided - will be set to None if not found, or if started as None 292 | suffix = next((x for x in ext if x.lower() == suffix.lower()),None) if suffix else suffix 293 | # Get the largest value that's still over 1 294 | biggest = suffix if suffix else next((x for x in ext[::-1] if s_dict[x] >= 1), "B") 295 | # Determine our rounding approach - first make sure it's an int; default to 2 on error 296 | try:round_to=int(round_to) 297 | except:round_to=2 298 | round_to = 0 if round_to < 0 else 15 if round_to > 15 else round_to # Ensure it's between 0 and 15 299 | bval = round(s_dict[biggest], round_to) 300 | # Split our number based on decimal points 301 | a,b = str(bval).split(".") 302 | # Check if we need to strip or pad zeroes 303 | b = b.rstrip("0") if strip_zeroes else b.ljust(round_to,"0") if round_to > 0 else "" 304 | return "{:,}{} {}".format(int(a),"" if not b else "."+b,biggest) 305 | 306 | def get_diskdump_version(self, diskdump_path): 307 | # Helper to attempt to extract the version from 308 | # "diskdump version" given a path. 309 | version = "0.0.0" 310 | if os.path.exists(diskdump_path): 311 | if "com.apple.quarantine" in self.r.run({"args":["xattr",diskdump_path]})[0]: 312 | self.r.run({"args":["xattr","-d","com.apple.quarantine",diskdump_path]}) 313 | if not os.access(diskdump_path, os.X_OK): 314 | self.r.run({"args":["chmod","+x",diskdump_path]}) 315 | try: 316 | # Attempt to extract "diskdump A.B.C" from the "diskdump version" 317 | # results - and pull the "A.B.C" group from the regex match 318 | version = self.version_re.match( 319 | self.r.run({"args":[diskdump_path,"version"]})[0].split("\n")[0].strip() 320 | ).group(1) 321 | except: 322 | pass 323 | return version 324 | 325 | def check_diskdump(self): 326 | ddpath = os.path.join(os.path.dirname(os.path.realpath(__file__)),"diskdump") 327 | if not os.path.exists(ddpath): 328 | raise FileNotFoundError("Could not locate diskdump") 329 | # Get the local version, falling back to 0.0.0 if there's no match 330 | local_version = self.get_diskdump_version(ddpath) 331 | # Check if we're running as root, or if we're not in a folder nested within 332 | # ~/Desktop or ~/Downloads - as we don't have to worry about retaining 333 | # local copies of diskdump in those situations 334 | desktop = os.path.realpath(os.path.expanduser(os.path.join("~","Desktop")))+os.sep 335 | downloads = os.path.realpath(os.path.expanduser(os.path.join("~","Downloads")))+os.sep 336 | if os.getuid() == 0 or not ddpath.startswith((desktop,downloads)): 337 | return (ddpath,local_version) 338 | # If diskdump is run from a folder nested under Desktop or Downloads, it will 339 | # need sudo even for basic (non-ESP) mounts and unmounts. To work around this 340 | # we'll ensure we place it in the Application Support folder. 
We'll check the 341 | # version there, and see if it matches our local copy - and if not, we'll 342 | # replace it to ensure we're using the one we expect to use/packaged with disk.py 343 | ddfolder = os.path.expanduser(os.path.join("~","Library","Application Support","CorpNewt","DiskDump")) 344 | if not os.path.exists(ddfolder): 345 | # Attempt to create it 346 | try: 347 | os.makedirs(ddfolder) 348 | except Exception as e: 349 | raise FileNotFoundError("Failed to create ~/Library/Application Support/CorpNewt/DiskDump: {}".format(e)) 350 | # Got the folder - let's check if there's a diskdump bin already there, then 351 | # try to get the version, and compare with our own 352 | ddtarget = os.path.join(ddfolder,"diskdump") 353 | installed_version = self.get_diskdump_version(ddtarget) 354 | if self.compare_version(local_version,installed_version): 355 | # We need to replace the installed bin to match our local copy 356 | try: 357 | shutil.copy(ddpath,ddtarget) 358 | except Exception as e: 359 | raise FileNotFoundError("Failed to copy diskdump to ~/Library/Application Support/CorpNewt/DiskDump: {}".format(e)) 360 | # Update the installed_version to reflect the local_version we just 361 | # copied in 362 | installed_version = local_version 363 | return (ddtarget,installed_version) 364 | 365 | def update(self): 366 | # Refresh our disk list 367 | self.disks = self.get_disks() 368 | return self.disks 369 | 370 | def get_disks(self): 371 | # Check for our binary - and ensure it's setup to run 372 | if not os.path.exists(self.diskdump): return {} 373 | # Get our diskdump info. 374 | diskstring = self.r.run({"args":[self.diskdump]})[0] 375 | if not diskstring: return {} 376 | diskdump = plist.loads(diskstring) 377 | return diskdump 378 | 379 | def get_identifier(self, disk = None, disk_dict = None): 380 | # Should be able to take a mount point, disk name, or disk identifier, 381 | # and return the disk's identifier 382 | if isinstance(disk,dict): disk = disk.get("DAMediaBSDName") 383 | if not disk: return 384 | disk_dict = disk_dict or self.disks # Normalize the dict 385 | disk = disk[6:] if disk.lower().startswith("/dev/rdisk") else disk[5:] if disk.lower().startswith("/dev/disk") else disk 386 | if disk.lower() in disk_dict.get("AllDisks",[]): return disk 387 | for d in disk_dict.get("AllDisksAndPartitions", []): 388 | # Check the parent disk 389 | if any((disk.lower()==d.get(x,"").lower() for x in ("DAMediaBSDName","DAVolumeName","DAVolumeUUID","DAMediaUUID","DAVolumePath"))): 390 | return d.get("DAMediaBSDName") 391 | # Check the partitions 392 | for p in d.get("Partitions", []): 393 | if any((disk.lower()==p.get(x,"").lower() for x in ("DAMediaBSDName","DAVolumeName","DAVolumeUUID","DAMediaUUID","DAVolumePath"))): 394 | return p.get("DAMediaBSDName") 395 | # At this point, we didn't find it 396 | return None 397 | 398 | def get_parent(self, disk = None, disk_dict = None): 399 | # For backward compatibility with the old disk.py approach 400 | return self.get_physical_parent_identifiers(disk,disk_dict=disk_dict) 401 | 402 | def get_parent_identifier(self, disk = None, disk_dict = None): 403 | # Resolves the passed disk value and returns the parent disk/container. 404 | # i.e. 
Passing disk5s2s1 would return disk5 405 | disk = self.get_identifier(disk,disk_dict=disk_dict) 406 | if not disk: return 407 | return "disk"+disk.lower().split("disk")[1].split("s")[0] 408 | 409 | def get_physical_parent_identifier(self, disk = None, disk_dict = None): 410 | # Returns the first hit from get_physical_parent_identifiers() 411 | return next(iter(self.get_physical_parent_identifiers(disk, disk_dict=disk_dict) or []), None) 412 | 413 | def get_physical_parent_identifiers(self, disk = None, disk_dict = None): 414 | # Resolves the passed disk to the physical parent disk identifiers. Useful for APFS 415 | # and Core Storage volumes which are logical - and can span multiple disks. 416 | # If you have an APFS container on disk4 and its Physical Store lists 417 | # disk2s2, disk3s2 - this would return [disk2, disk3] 418 | return [self.get_identifier(x,disk_dict=disk_dict) for x in self.get_physical_parent_disks(disk,disk_dict=disk_dict)] 419 | 420 | def get_physical_parent_disks(self, disk = None, disk_dict = None): 421 | # Resolves the passed disk to the physical parent disk dicts. Useful for APFS 422 | # and Core Storage volumes which are logcial and can span multiple physical 423 | # disks. If you have an APFS container on disk4 and its Physical Store is on 424 | # disk2s2 and disk3s2 - this would return the disk dicts for disk2 and disk3. 425 | parent = self.get_parent_disk(disk, disk_dict=disk_dict) 426 | if not parent: return [] 427 | if not "physical_stores" in parent: return [parent] 428 | return [self.get_parent_disk(x,disk_dict=disk_dict) for x in parent.get("physical_stores",[])] 429 | 430 | def get_parent_disk(self, disk = None, disk_dict = None): 431 | # Returns the dict info for the parent of the passed mount point, name, identifier, etc 432 | return self.get_disk(self.get_parent_identifier(disk,disk_dict=disk_dict),disk_dict=disk_dict) 433 | 434 | def get_disk(self, disk = None, disk_dict = None): 435 | # Returns the dict info for the passed mount point, name, identifier, etc 436 | disk = self.get_identifier(disk,disk_dict=disk_dict) 437 | if not disk: return 438 | parent = self.get_parent_identifier(disk,disk_dict=disk_dict) 439 | # Walk AllDisksAndPartitions, and return the first hit 440 | for d in (disk_dict or self.disks).get("AllDisksAndPartitions",[]): 441 | d_ident = d.get("DAMediaBSDName") 442 | if d_ident == disk: 443 | return d # Got the disk 444 | elif d_ident == parent: 445 | # Got the parent - iterate the partitions 446 | return next((p for p in d.get("Partitions",[]) if p.get("DAMediaBSDName")==disk),None) 447 | return None # Didn't find it 448 | 449 | def get_efis(self, disk = None, disk_dict = None): 450 | # Returns the identifiers for any EFI partitions attached to the 451 | # parent disk(s) of the passed disk 452 | efis = [] 453 | for parent in self.get_physical_parent_identifiers(disk,disk_dict=disk_dict): 454 | parent_dict = self.get_disk(parent,disk_dict=disk_dict) 455 | if not parent_dict: continue 456 | for part in parent_dict.get("Partitions",[]): 457 | # Use the GUID instead of media name - as that can vary 458 | if part.get("DAMediaContent","").upper() in self.efi_guids: 459 | efis.append(part["DAMediaBSDName"]) 460 | # Normalize case for the DAMediaName; 461 | # macOS disks: "EFI System Partition", Windows disks: "EFI system partition" 462 | # Maybe use this approach as a fallback at some point - but for now, just use the GUID 463 | # if part.get("DAMediaName").lower() == "efi system partition": 464 | # efis.append(part["DAMediaBSDName"]) 
465 | return efis 466 | 467 | def get_efi(self, disk = None, disk_dict = None): 468 | # Returns the identifier for the first EFI partition found for 469 | # the passed disk 470 | return next(iter(self.get_efis(disk,disk_dict=disk_dict) or []), None) 471 | 472 | def get_partition_type(self, disk = None, disk_dict = None): 473 | # Checks if we have a matched DAMediaContent GUID in the GPT_GUIDS dict, and returns 474 | # the resolved type. If it doesn't match - and is a GUID, we return it as is. If it's 475 | # not a GUID - we return none. 476 | disk = self.get_disk(disk,disk_dict=disk_dict) 477 | if not disk: return 478 | guid = disk.get("DAMediaContent","").upper() 479 | # Ensure we have a GUID 480 | if not self.is_guid(guid): return 481 | # At this point - we have a GUID 482 | for os in GPT_GUIDS: 483 | if guid in GPT_GUIDS[os]: 484 | return GPT_GUIDS[os][guid] 485 | # We didn't find it - return the GUID as-is 486 | return guid 487 | 488 | def get_volume_type(self, disk = None, disk_dict = None): 489 | # Returns teh DAVolumeType or DAVolumeKind of the passed disk if any 490 | disk = self.get_disk(disk,disk_dict=disk_dict) 491 | if not disk: return 492 | if "DAVolumeType" in disk: return disk["DAVolumeType"] 493 | if "DAVolumeKind" in disk: return disk["DAVolumeKind"].upper() 494 | 495 | def get_readable_type(self, disk = None, disk_dict = None): 496 | # Attempts to get the type of the passed disk. First - it tries to get the 497 | # partition type, then it falls back on the volume type. 498 | disk = self.get_disk(disk,disk_dict=disk_dict) 499 | if not disk: return 500 | if disk.get("DAMediaWhole") and not disk.get("DAMediaLeaf"): 501 | # Check if we have a partition scheme and return that 502 | scheme = self.get_readable_partition_scheme(disk,disk_dict=disk_dict) 503 | if scheme: return scheme 504 | part = self.get_partition_type(disk,disk_dict=disk_dict) 505 | if part and not self.is_guid(part): # We got a valid partition type 506 | return part 507 | vol = self.get_volume_type(disk,disk_dict=disk_dict) 508 | # Return the volume type - if we got one - or the partition type, whatever 509 | # it may be. 
510 | return vol or part 511 | 512 | def get_readable_size(self, disk = None, disk_dict = None): 513 | # Returns the readable, rounded (to one decimal point) size of the passed disk 514 | disk = self.get_disk(disk,disk_dict=disk_dict) 515 | if not disk or not "DAMediaSize" in disk: return "Unknown" 516 | return self.get_size(disk["DAMediaSize"]) 517 | 518 | def get_mounted_volumes(self, disk_dict = None): 519 | # Returns a list of mounted volumes 520 | return (disk_dict or self.disks).get("MountPointsFromDisks",[]) 521 | 522 | def get_mounted_volume_dicts(self, disk_dict = None): 523 | # Returns a list of dicts of name, identifier, mount point dicts 524 | vol_list = [] 525 | for v in (disk_dict or self.disks).get("MountPointsFromDisks"): 526 | i = self.get_disk(v,disk_dict=disk_dict) 527 | if not i: continue # Skip - as it didn't resolve 528 | mount_point = self.get_mount_point(i,disk_dict=disk_dict) 529 | # Check if we're either not mounted - or not mounted in /Volumes/ 530 | if not v or not (v == "/" or v.lower().startswith("/volumes/")): 531 | continue 532 | vol = { 533 | "name": self.get_volume_name(i,disk_dict=disk_dict), 534 | "identifier": self.get_identifier(i,disk_dict=disk_dict), 535 | "mount_point": v, 536 | "disk_uuid": self.get_disk_uuid(i,disk_dict=disk_dict), 537 | "volume_uuid": self.get_volume_uuid(i,disk_dict=disk_dict), 538 | "size_bytes": i.get("DAMediaSize",-1), 539 | "size": self.get_readable_size(i,disk_dict=disk_dict), 540 | "readable_type": self.get_readable_type(i,disk_dict=disk_dict), 541 | "volume_type": self.get_volume_type(i,disk_dict=disk_dict), 542 | "partition_type": self.get_partition_type(i,disk_dict=disk_dict) 543 | } 544 | if "container_for" in i: vol["container_for"] = i["container_for"] 545 | vol_list.append(vol) 546 | return sorted(vol_list,key=lambda x:x["identifier"]) 547 | 548 | def get_disks_and_partitions_dict(self, disk_dict = None): 549 | # Returns a list of dictionaries like so: 550 | # { "disk0" : { 551 | # "container": true/false, 552 | # "physical_stores": [ 553 | # "diskAsB", 554 | # "diskXsY" 555 | # ], 556 | # "scheme": "Guid_partition_scheme", 557 | # "size": "X.Y GB", 558 | # "size_bytes" 123456, 559 | # "partitions" : [ 560 | # { 561 | # "identifier" : "disk0s1", 562 | # "name" : "EFI", 563 | # "mount_point" : "/Volumes/EFI", 564 | # "readable_type" : readable type - either from GPT_GUIDS or file system, 565 | # "volume_type" : "MS-DOS (FAT32)", 566 | # "partition_type" : "Microsoft System Reserved"/GUID, 567 | # "size": "X.Y GB", 568 | # "size_bytes" 123456, 569 | # "container_for": "diskCsD" 570 | # } 571 | # ] } } 572 | disks = {} 573 | for d in sorted((disk_dict or self.disks).get("AllDisksAndPartitions"),key=lambda x:x.get("DAMediaBSDName")): 574 | if not "DAMediaBSDName" in d: continue # Malformed 575 | parent = d["DAMediaBSDName"] 576 | disks[parent] = {"partitions":[]} 577 | # Save if the disk is logical - and a l)ist of its physical stores 578 | for x in ("container","physical_stores"): 579 | if x in d: disks[parent][x] = d[x] 580 | disks[parent]["scheme"] = self.get_readable_partition_scheme(d,disk_dict=disk_dict) 581 | disks[parent]["size_bytes"] = d.get("DAMediaSize",-1) 582 | disks[parent]["size"] = self.get_readable_size(d,disk_dict=disk_dict) 583 | # Check if this disk is also a volume - i.e. 
also a leaf, and insert it in the partitions list 584 | partitions = d.get("Partitions",[]) 585 | if d.get("DAMediaLeaf"): 586 | partitions.insert(0,d) 587 | for p in d.get("Partitions",[]): 588 | part = { 589 | "name": self.get_volume_name(p,disk_dict=disk_dict), 590 | "identifier": self.get_identifier(p,disk_dict=disk_dict), 591 | "mount_point": self.get_mount_point(p,disk_dict=disk_dict), 592 | "disk_uuid": self.get_disk_uuid(p,disk_dict=disk_dict), 593 | "volume_uuid": self.get_volume_uuid(p,disk_dict=disk_dict), 594 | "readable_type": self.get_readable_type(p,disk_dict=disk_dict), 595 | "volume_type": self.get_volume_type(p,disk_dict=disk_dict), 596 | "partition_type": self.get_partition_type(p,disk_dict=disk_dict), 597 | "size_bytes": p.get("DAMediaSize",-1), 598 | "size": self.get_readable_size(p,disk_dict=disk_dict) 599 | } 600 | if "container_for" in p: part["container_for"] = p["container_for"] 601 | disks[parent]["partitions"].append(part) 602 | disks[parent]["partitions"].sort(key=lambda x:x["identifier"]) 603 | return disks 604 | 605 | def _get_value(self, disk = None, value = None, disk_dict = None): 606 | if not disk or not value: return # Missing info 607 | if isinstance(disk,dict): return disk.get(value) 608 | try: return self.get_disk(disk,disk_dict=disk_dict).get(value) 609 | except: return 610 | 611 | def _is_uuid(self, value): 612 | # Helper to return whether a passed value is a UUID 613 | # 7C3CFDDF-920A-4924-AED6-7CD4AF6E4512 614 | if not isinstance(value,str): return False # Wrong type 615 | value = value.lower() 616 | # Check that all chars are hex or the separator 617 | if not all((x in "-0123456789abcdef" for x in value)): return False 618 | len_list = (8,4,4,4,12) 619 | chunks = value.split("-") 620 | # Make sure we have the right number of chunks - and 621 | # each chunk is the right length. 
622 | if not len(chunks)==len(len_list): return False 623 | for i,chunk in enumerate(chunks): 624 | if not len(chunk)==len_list[i]: return False 625 | # Passed all the checks 626 | return True 627 | 628 | def get_partition_scheme(self, disk, allow_logical = True, disk_dict = None): 629 | # let's resolve the disk to its physical parents 630 | comm = self.get_parent_disk if allow_logical else self.get_physical_parent_disks 631 | p = comm(disk,disk_dict=disk_dict) 632 | if p: 633 | if isinstance(p,(list,tuple)): p = p[0] # Extract the first parent if need be 634 | if p.get("apfs"): return "APFS_container_scheme" 635 | elif p.get("core_storage"): return "Core_Storage_container_scheme" 636 | content = self.get_content(p,disk_dict=disk_dict) 637 | if content.lower().endswith("scheme"): 638 | return content 639 | 640 | def get_readable_partition_scheme(self, disk, allow_logical = True, disk_dict = None): 641 | s = self.get_partition_scheme(disk,disk_dict=disk_dict) 642 | if not s: return 643 | # We want to convert GUID_partition_scheme to GUID 644 | # We also want to translate FDisk to MBR 645 | joined = " ".join(["MBR" if x.lower() == "fdisk" else x.capitalize() if x!=x.upper() else x for x in s.replace("_"," ").split() if x]) 646 | return joined 647 | 648 | def get_content(self, disk, disk_dict = None): 649 | return self._get_value(disk,"DAMediaContent",disk_dict=disk_dict) 650 | 651 | def get_volume_name(self, disk, disk_dict = None): 652 | return self._get_value(disk,"DAVolumeName",disk_dict=disk_dict) 653 | 654 | def get_volume_uuid(self, disk, disk_dict = None): 655 | return self._get_value(disk,"DAVolumeUUID",disk_dict=disk_dict) 656 | 657 | def get_disk_uuid(self, disk, disk_dict = None): 658 | return self._get_value(disk,"DAMediaUUID",disk_dict=disk_dict) 659 | 660 | def get_mount_point(self, disk, disk_dict = None): 661 | return self._get_value(disk,"DAVolumePath",disk_dict=disk_dict) 662 | 663 | def open_mount_point(self, disk, new_window = False, disk_dict = None): 664 | disk = self.get_identifier(disk,disk_dict=disk_dict) 665 | if not disk: return 666 | mount = self.get_mount_point(disk) 667 | if not mount: return 668 | return self.r.run({"args":["open", mount]})[2] == 0 669 | 670 | def compare_version(self, v1, v2): 671 | # Splits the version numbers by periods and compare each value 672 | # Allows 0.0.10 > 0.0.9 where normal string comparison would return false 673 | # Also strips out any non-numeric values from each segment to avoid conflicts 674 | # 675 | # Returns True if v1 > v2, None if v1 == v2, and False if v1 < v2 676 | if not all((isinstance(x,str) for x in (v1,v2))): 677 | # Wrong types 678 | return False 679 | v1_seg = v1.split(".") 680 | v2_seg = v2.split(".") 681 | # Pad with 0s to ensure common length 682 | v1_seg += ["0"]*(len(v2_seg)-len(v1_seg)) 683 | v2_seg += ["0"]*(len(v1_seg)-len(v2_seg)) 684 | # Compare each segment - stripping non-numbers as needed 685 | for i in range(len(v1_seg)): 686 | a,b = v1_seg[i],v2_seg[i] 687 | try: a = int("".join([x for x in a if x.isdigit()])) 688 | except: a = 0 689 | try: b = int("".join([x for x in b if x.isdigit()])) 690 | except: b = 0 691 | if a > b: return True 692 | if a < b: return False 693 | # If we're here, both versions are the same 694 | return None 695 | 696 | def needs_sudo(self, disk = None, disk_dict = None): 697 | # Default to EFI if we didn't pass a disk 698 | if not disk: return self.compare_version(self.full_os_version,self.sudo_mount_version) in (True,None) 699 | return 
self.compare_version(self.full_os_version,self.sudo_mount_version) in (True,None) and self.get_content(disk,disk_dict=disk_dict).upper() in self.efi_guids 700 | 701 | def mount_partition(self, disk, disk_dict = None): 702 | disk = self.get_identifier(disk,disk_dict=disk_dict) 703 | if not disk: return 704 | sudo = self.needs_sudo(disk,disk_dict=disk_dict) 705 | out = self.r.run({"args":[self.diskdump,"mount",disk],"sudo":sudo}) 706 | self.update() 707 | return out 708 | 709 | def unmount_partition(self, disk, disk_dict = None, force = False): 710 | disk = self.get_identifier(disk,disk_dict=disk_dict) 711 | if not disk: return 712 | out = self.r.run({"args":[self.diskdump,"forceunmount" if force else "unmount",disk]}) 713 | self.update() 714 | return out 715 | 716 | def is_mounted(self, disk, disk_dict = None): 717 | disk = self.get_identifier(disk,disk_dict=disk_dict) 718 | if not disk: return 719 | m = self.get_mount_point(disk,disk_dict=disk_dict) 720 | return (m != None and len(m)) 721 | 722 | def get_volumes(self, disk_dict = None): 723 | # Returns a list object with all volumes from disks 724 | return sorted((disk_dict or self.disks).get("VolumesFromDisks",[])) 725 | 726 | if __name__ == '__main__': 727 | d = Disk() 728 | # Gather the args 729 | errors = [] 730 | args = [] 731 | for x in sys.argv[1:]: 732 | if x == "/": 733 | args.append(x) 734 | continue 735 | if x.endswith("/"): 736 | x = x[:-1] 737 | if not x.lower().startswith("/volumes/") or len(x.split("/")) > 3: 738 | errors.append("'{}' is not a volume.".format(x)) 739 | continue 740 | if not os.path.exists(x): 741 | # Doesn't exist, skip it 742 | errors.append("'{}' does not exist.".format(x)) 743 | continue 744 | args.append(x) 745 | mount_list = [] 746 | needs_sudo = d.needs_sudo() 747 | for x in args: 748 | name = d.get_volume_name(x) 749 | if not name: name = "Untitled" 750 | name = name.replace('"','\\"') # Escape double quotes in names 751 | diskdump = d.diskdump.replace('"','\\\\\\"') # Escape double quotes in names 752 | efi = d.get_efi(x) 753 | if efi: mount_list.append((efi,name,d.is_mounted(efi),"\\\"{}\\\" mount {}".format(diskdump,efi))) 754 | else: errors.append("'{}' has no ESP.".format(name)) 755 | if mount_list: 756 | # We have something to mount 757 | efis = [x[-1] for x in mount_list if not x[2]] # Only mount those that aren't mounted 758 | names = [x[1] for x in mount_list if not x[2]] 759 | if efis: # We have something to mount here 760 | command = "do shell script \"{}\" with prompt \"MountEFI would like to mount the ESP{} on {}\"{}".format( 761 | "; ".join(efis), 762 | "s" if len(names) > 1 else "", 763 | ", ".join(names), 764 | " with administrator privileges" if needs_sudo else "") 765 | o,e,r = d.r.run({"args":["osascript","-e",command]}) 766 | if r > 0 and len(e.strip()) and e.strip().lower().endswith("(-128)"): exit() # User canceled, bail 767 | # Update the disks 768 | d.update() 769 | # Walk the mounts and find out which aren't mounted 770 | for efi,name,mounted,comm in mount_list: 771 | mounted_at = d.get_mount_point(efi) 772 | if mounted_at: d.open_mount_point(mounted_at) 773 | else: errors.append("ESP for '{}' failed to mount.".format(name)) 774 | else: 775 | errors.append("No disks with ESPs selected.") 776 | if errors: 777 | # Display our errors before we leave 778 | d.r.run({"args":["osascript","-e","display dialog \"{}\" buttons {{\"OK\"}} default button \"OK\" with icon caution".format("\n".join(errors))]}) 779 | 
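The Disk class above is the disk-handling layer the rest of the project leans on: it shells out to the bundled diskdump binary, caches the resulting plist, and resolves mount points, volume names, UUIDs, or BSD identifiers back to the same disk dictionaries. As a rough usage sketch - not code from the repository, and assuming the Scripts package is importable as shown - mounting and inspecting the ESP behind the boot volume might look like this:

from Scripts import disk

d = disk.Disk()                        # runs diskdump and caches its plist output
boot = d.get_identifier("/")           # resolve the boot volume to its BSD name, e.g. diskXsY
esp = d.get_efi(boot)                  # first EFI partition on the physical parent disk(s)
if esp and not d.is_mounted(esp):
    d.mount_partition(esp)             # asks for sudo on 10.13.6+ when the target is an ESP
print(d.get_mount_point(esp))          # e.g. /Volumes/EFI once mounted
for vol in d.get_mounted_volume_dicts():
    print(vol["identifier"], vol["name"], vol["size"], vol["readable_type"])
d.unmount_partition(esp)

Because everything funnels through get_identifier(), the same calls accept "disk0s1", "/Volumes/EFI", a volume name, or a volume/media UUID interchangeably.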
-------------------------------------------------------------------------------- /Scripts/diskdump: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/corpnewt/KextExtractor/122d5d2516df9378921b88a1cc1b4e80523144dd/Scripts/diskdump -------------------------------------------------------------------------------- /Scripts/plist.py: -------------------------------------------------------------------------------- 1 | ### ### 2 | # Imports # 3 | ### ### 4 | 5 | import datetime, os, plistlib, struct, sys, itertools, binascii 6 | from io import BytesIO 7 | 8 | if sys.version_info < (3,0): 9 | # Force use of StringIO instead of cStringIO as the latter 10 | # has issues with Unicode strings 11 | from StringIO import StringIO 12 | else: 13 | from io import StringIO 14 | 15 | try: 16 | basestring # Python 2 17 | unicode 18 | except NameError: 19 | basestring = str # Python 3 20 | unicode = str 21 | 22 | try: 23 | FMT_XML = plistlib.FMT_XML 24 | FMT_BINARY = plistlib.FMT_BINARY 25 | except AttributeError: 26 | FMT_XML = "FMT_XML" 27 | FMT_BINARY = "FMT_BINARY" 28 | 29 | ### ### 30 | # Helper Methods # 31 | ### ### 32 | 33 | def wrap_data(value): 34 | if not _check_py3(): return plistlib.Data(value) 35 | return value 36 | 37 | def extract_data(value): 38 | if not _check_py3() and isinstance(value,plistlib.Data): return value.data 39 | return value 40 | 41 | def _check_py3(): 42 | return sys.version_info >= (3, 0) 43 | 44 | def _is_binary(fp): 45 | if isinstance(fp, basestring): 46 | return fp.startswith(b"bplist00") 47 | header = fp.read(32) 48 | fp.seek(0) 49 | return header[:8] == b'bplist00' 50 | 51 | ### ### 52 | # Deprecated Functions - Remapped # 53 | ### ### 54 | 55 | def readPlist(pathOrFile): 56 | if not isinstance(pathOrFile, basestring): 57 | return load(pathOrFile) 58 | with open(pathOrFile, "rb") as f: 59 | return load(f) 60 | 61 | def writePlist(value, pathOrFile): 62 | if not isinstance(pathOrFile, basestring): 63 | return dump(value, pathOrFile, fmt=FMT_XML, sort_keys=True, skipkeys=False) 64 | with open(pathOrFile, "wb") as f: 65 | return dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False) 66 | 67 | ### ### 68 | # Remapped Functions # 69 | ### ### 70 | 71 | def load(fp, fmt=None, use_builtin_types=None, dict_type=dict): 72 | if _is_binary(fp): 73 | use_builtin_types = False if use_builtin_types is None else use_builtin_types 74 | try: 75 | p = _BinaryPlistParser(use_builtin_types=use_builtin_types, dict_type=dict_type) 76 | except: 77 | # Python 3.9 removed use_builtin_types 78 | p = _BinaryPlistParser(dict_type=dict_type) 79 | return p.parse(fp) 80 | elif _check_py3(): 81 | use_builtin_types = True if use_builtin_types is None else use_builtin_types 82 | # We need to monkey patch this to allow for hex integers - code taken/modified from 83 | # https://github.com/python/cpython/blob/3.8/Lib/plistlib.py 84 | if fmt is None: 85 | header = fp.read(32) 86 | fp.seek(0) 87 | for info in plistlib._FORMATS.values(): 88 | if info['detect'](header): 89 | P = info['parser'] 90 | break 91 | else: 92 | raise plistlib.InvalidFileException() 93 | else: 94 | P = plistlib._FORMATS[fmt]['parser'] 95 | try: 96 | p = P(use_builtin_types=use_builtin_types, dict_type=dict_type) 97 | except: 98 | # Python 3.9 removed use_builtin_types 99 | p = P(dict_type=dict_type) 100 | if isinstance(p,plistlib._PlistParser): 101 | # Monkey patch! 
102 | def end_integer(): 103 | d = p.get_data() 104 | value = int(d,16) if d.lower().startswith("0x") else int(d) 105 | if -1 << 63 <= value < 1 << 64: 106 | p.add_object(value) 107 | else: 108 | raise OverflowError("Integer overflow at line {}".format(p.parser.CurrentLineNumber)) 109 | def end_data(): 110 | try: 111 | p.add_object(plistlib._decode_base64(p.get_data())) 112 | except Exception as e: 113 | raise Exception("Data error at line {}: {}".format(p.parser.CurrentLineNumber,e)) 114 | p.end_integer = end_integer 115 | p.end_data = end_data 116 | return p.parse(fp) 117 | else: 118 | # Is not binary - assume a string - and try to load 119 | # We avoid using readPlistFromString() as that uses 120 | # cStringIO and fails when Unicode strings are detected 121 | # Don't subclass - keep the parser local 122 | from xml.parsers.expat import ParserCreate 123 | # Create a new PlistParser object - then we need to set up 124 | # the values and parse. 125 | p = plistlib.PlistParser() 126 | parser = ParserCreate() 127 | parser.StartElementHandler = p.handleBeginElement 128 | parser.EndElementHandler = p.handleEndElement 129 | parser.CharacterDataHandler = p.handleData 130 | # We also need to monkey patch this to allow for other dict_types, hex int support 131 | # proper line output for data errors, and for unicode string decoding 132 | def begin_dict(attrs): 133 | d = dict_type() 134 | p.addObject(d) 135 | p.stack.append(d) 136 | def end_integer(): 137 | d = p.getData() 138 | value = int(d,16) if d.lower().startswith("0x") else int(d) 139 | if -1 << 63 <= value < 1 << 64: 140 | p.addObject(value) 141 | else: 142 | raise OverflowError("Integer overflow at line {}".format(parser.CurrentLineNumber)) 143 | def end_data(): 144 | try: 145 | p.addObject(plistlib.Data.fromBase64(p.getData())) 146 | except Exception as e: 147 | raise Exception("Data error at line {}: {}".format(parser.CurrentLineNumber,e)) 148 | def end_string(): 149 | d = p.getData() 150 | if isinstance(d,unicode): 151 | d = d.encode("utf-8") 152 | p.addObject(d) 153 | p.begin_dict = begin_dict 154 | p.end_integer = end_integer 155 | p.end_data = end_data 156 | p.end_string = end_string 157 | if isinstance(fp, unicode): 158 | # Encode unicode -> string; use utf-8 for safety 159 | fp = fp.encode("utf-8") 160 | if isinstance(fp, basestring): 161 | # It's a string - let's wrap it up 162 | fp = StringIO(fp) 163 | # Parse it 164 | parser.ParseFile(fp) 165 | return p.root 166 | 167 | def loads(value, fmt=None, use_builtin_types=None, dict_type=dict): 168 | if _check_py3() and isinstance(value, basestring): 169 | # If it's a string - encode it 170 | value = value.encode() 171 | try: 172 | return load(BytesIO(value),fmt=fmt,use_builtin_types=use_builtin_types,dict_type=dict_type) 173 | except: 174 | # Python 3.9 removed use_builtin_types 175 | return load(BytesIO(value),fmt=fmt,dict_type=dict_type) 176 | 177 | def dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False): 178 | if fmt == FMT_BINARY: 179 | # Assume binary at this point 180 | writer = _BinaryPlistWriter(fp, sort_keys=sort_keys, skipkeys=skipkeys) 181 | writer.write(value) 182 | elif fmt == FMT_XML: 183 | if _check_py3(): 184 | plistlib.dump(value, fp, fmt=fmt, sort_keys=sort_keys, skipkeys=skipkeys) 185 | else: 186 | # We need to monkey patch a bunch here too in order to avoid auto-sorting 187 | # of keys 188 | writer = plistlib.PlistWriter(fp) 189 | def writeDict(d): 190 | if d: 191 | writer.beginElement("dict") 192 | items = sorted(d.items()) if sort_keys else d.items() 193 | 
for key, value in items: 194 | if not isinstance(key, basestring): 195 | if skipkeys: 196 | continue 197 | raise TypeError("keys must be strings") 198 | writer.simpleElement("key", key) 199 | writer.writeValue(value) 200 | writer.endElement("dict") 201 | else: 202 | writer.simpleElement("dict") 203 | writer.writeDict = writeDict 204 | writer.writeln("") 205 | writer.writeValue(value) 206 | writer.writeln("") 207 | else: 208 | # Not a proper format 209 | raise ValueError("Unsupported format: {}".format(fmt)) 210 | 211 | def dumps(value, fmt=FMT_XML, skipkeys=False, sort_keys=True): 212 | # We avoid using writePlistToString() as that uses 213 | # cStringIO and fails when Unicode strings are detected 214 | f = BytesIO() if _check_py3() else StringIO() 215 | dump(value, f, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys) 216 | value = f.getvalue() 217 | if _check_py3(): 218 | value = value.decode("utf-8") 219 | return value 220 | 221 | ### ### 222 | # Binary Plist Stuff For Py2 # 223 | ### ### 224 | 225 | # From the python 3 plistlib.py source: https://github.com/python/cpython/blob/3.11/Lib/plistlib.py 226 | # Tweaked to function on both Python 2 and 3 227 | 228 | class UID: 229 | def __init__(self, data): 230 | if not isinstance(data, int): 231 | raise TypeError("data must be an int") 232 | # It seems Apple only uses 32-bit unsigned ints for UIDs. Although the comment in 233 | # CoreFoundation's CFBinaryPList.c detailing the binary plist format theoretically 234 | # allows for 64-bit UIDs, most functions in the same file use 32-bit unsigned ints, 235 | # with the sole function hinting at 64-bits appearing to be a leftover from copying 236 | # and pasting integer handling code internally, and this code has not changed since 237 | # it was added. (In addition, code in CFPropertyList.c to handle CF$UID also uses a 238 | # 32-bit unsigned int.) 239 | # 240 | # if data >= 1 << 64: 241 | # raise ValueError("UIDs cannot be >= 2**64") 242 | if data >= 1 << 32: 243 | raise ValueError("UIDs cannot be >= 2**32 (4294967296)") 244 | if data < 0: 245 | raise ValueError("UIDs must be positive") 246 | self.data = data 247 | 248 | def __index__(self): 249 | return self.data 250 | 251 | def __repr__(self): 252 | return "%s(%s)" % (self.__class__.__name__, repr(self.data)) 253 | 254 | def __reduce__(self): 255 | return self.__class__, (self.data,) 256 | 257 | def __eq__(self, other): 258 | if not isinstance(other, UID): 259 | return NotImplemented 260 | return self.data == other.data 261 | 262 | def __hash__(self): 263 | return hash(self.data) 264 | 265 | class InvalidFileException (ValueError): 266 | def __init__(self, message="Invalid file"): 267 | ValueError.__init__(self, message) 268 | 269 | _BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'} 270 | 271 | _undefined = object() 272 | 273 | class _BinaryPlistParser: 274 | """ 275 | Read or write a binary plist file, following the description of the binary 276 | format. Raise InvalidFileException in case of error, otherwise return the 277 | root object. 278 | see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c 279 | """ 280 | def __init__(self, use_builtin_types, dict_type): 281 | self._use_builtin_types = use_builtin_types 282 | self._dict_type = dict_type 283 | 284 | def parse(self, fp): 285 | try: 286 | # The basic file format: 287 | # HEADER 288 | # object... 289 | # refid->offset... 
290 | # TRAILER 291 | self._fp = fp 292 | self._fp.seek(-32, os.SEEK_END) 293 | trailer = self._fp.read(32) 294 | if len(trailer) != 32: 295 | raise InvalidFileException() 296 | ( 297 | offset_size, self._ref_size, num_objects, top_object, 298 | offset_table_offset 299 | ) = struct.unpack('>6xBBQQQ', trailer) 300 | self._fp.seek(offset_table_offset) 301 | self._object_offsets = self._read_ints(num_objects, offset_size) 302 | self._objects = [_undefined] * num_objects 303 | return self._read_object(top_object) 304 | 305 | except (OSError, IndexError, struct.error, OverflowError, 306 | UnicodeDecodeError): 307 | raise InvalidFileException() 308 | 309 | def _get_size(self, tokenL): 310 | """ return the size of the next object.""" 311 | if tokenL == 0xF: 312 | m = self._fp.read(1)[0] 313 | if not _check_py3(): 314 | m = ord(m) 315 | m = m & 0x3 316 | s = 1 << m 317 | f = '>' + _BINARY_FORMAT[s] 318 | return struct.unpack(f, self._fp.read(s))[0] 319 | 320 | return tokenL 321 | 322 | def _read_ints(self, n, size): 323 | data = self._fp.read(size * n) 324 | if size in _BINARY_FORMAT: 325 | return struct.unpack('>' + _BINARY_FORMAT[size] * n, data) 326 | else: 327 | if not size or len(data) != size * n: 328 | raise InvalidFileException() 329 | return tuple(int(binascii.hexlify(data[i: i + size]),16) 330 | for i in range(0, size * n, size)) 331 | '''return tuple(int.from_bytes(data[i: i + size], 'big') 332 | for i in range(0, size * n, size))''' 333 | 334 | def _read_refs(self, n): 335 | return self._read_ints(n, self._ref_size) 336 | 337 | def _read_object(self, ref): 338 | """ 339 | read the object by reference. 340 | May recursively read sub-objects (content of an array/dict/set) 341 | """ 342 | result = self._objects[ref] 343 | if result is not _undefined: 344 | return result 345 | 346 | offset = self._object_offsets[ref] 347 | self._fp.seek(offset) 348 | token = self._fp.read(1)[0] 349 | if not _check_py3(): 350 | token = ord(token) 351 | tokenH, tokenL = token & 0xF0, token & 0x0F 352 | 353 | if token == 0x00: # \x00 or 0x00 354 | result = None 355 | 356 | elif token == 0x08: # \x08 or 0x08 357 | result = False 358 | 359 | elif token == 0x09: # \x09 or 0x09 360 | result = True 361 | 362 | # The referenced source code also mentions URL (0x0c, 0x0d) and 363 | # UUID (0x0e), but neither can be generated using the Cocoa libraries. 364 | 365 | elif token == 0x0f: # \x0f or 0x0f 366 | result = b'' 367 | 368 | elif tokenH == 0x10: # int 369 | result = int(binascii.hexlify(self._fp.read(1 << tokenL)),16) 370 | if tokenL >= 3: # Signed - adjust 371 | result = result-((result & 0x8000000000000000) << 1) 372 | 373 | elif token == 0x22: # real 374 | result = struct.unpack('>f', self._fp.read(4))[0] 375 | 376 | elif token == 0x23: # real 377 | result = struct.unpack('>d', self._fp.read(8))[0] 378 | 379 | elif token == 0x33: # date 380 | f = struct.unpack('>d', self._fp.read(8))[0] 381 | # timestamp 0 of binary plists corresponds to 1/1/2001 382 | # (year of Mac OS X 10.0), instead of 1/1/1970. 
383 | result = (datetime.datetime(2001, 1, 1) + 384 | datetime.timedelta(seconds=f)) 385 | 386 | elif tokenH == 0x40: # data 387 | s = self._get_size(tokenL) 388 | if self._use_builtin_types or not hasattr(plistlib, "Data"): 389 | result = self._fp.read(s) 390 | else: 391 | result = plistlib.Data(self._fp.read(s)) 392 | 393 | elif tokenH == 0x50: # ascii string 394 | s = self._get_size(tokenL) 395 | result = self._fp.read(s).decode('ascii') 396 | result = result 397 | 398 | elif tokenH == 0x60: # unicode string 399 | s = self._get_size(tokenL) 400 | result = self._fp.read(s * 2).decode('utf-16be') 401 | 402 | elif tokenH == 0x80: # UID 403 | # used by Key-Archiver plist files 404 | result = UID(int(binascii.hexlify(self._fp.read(1 + tokenL)),16)) 405 | 406 | elif tokenH == 0xA0: # array 407 | s = self._get_size(tokenL) 408 | obj_refs = self._read_refs(s) 409 | result = [] 410 | self._objects[ref] = result 411 | result.extend(self._read_object(x) for x in obj_refs) 412 | 413 | # tokenH == 0xB0 is documented as 'ordset', but is not actually 414 | # implemented in the Apple reference code. 415 | 416 | # tokenH == 0xC0 is documented as 'set', but sets cannot be used in 417 | # plists. 418 | 419 | elif tokenH == 0xD0: # dict 420 | s = self._get_size(tokenL) 421 | key_refs = self._read_refs(s) 422 | obj_refs = self._read_refs(s) 423 | result = self._dict_type() 424 | self._objects[ref] = result 425 | for k, o in zip(key_refs, obj_refs): 426 | key = self._read_object(k) 427 | if hasattr(plistlib, "Data") and isinstance(key, plistlib.Data): 428 | key = key.data 429 | result[key] = self._read_object(o) 430 | 431 | else: 432 | raise InvalidFileException() 433 | 434 | self._objects[ref] = result 435 | return result 436 | 437 | def _count_to_size(count): 438 | if count < 1 << 8: 439 | return 1 440 | 441 | elif count < 1 << 16: 442 | return 2 443 | 444 | elif count < 1 << 32: 445 | return 4 446 | 447 | else: 448 | return 8 449 | 450 | _scalars = (str, int, float, datetime.datetime, bytes) 451 | 452 | class _BinaryPlistWriter (object): 453 | def __init__(self, fp, sort_keys, skipkeys): 454 | self._fp = fp 455 | self._sort_keys = sort_keys 456 | self._skipkeys = skipkeys 457 | 458 | def write(self, value): 459 | 460 | # Flattened object list: 461 | self._objlist = [] 462 | 463 | # Mappings from object->objectid 464 | # First dict has (type(object), object) as the key, 465 | # second dict is used when object is not hashable and 466 | # has id(object) as the key. 467 | self._objtable = {} 468 | self._objidtable = {} 469 | 470 | # Create list of all objects in the plist 471 | self._flatten(value) 472 | 473 | # Size of object references in serialized containers 474 | # depends on the number of objects in the plist. 
475 | num_objects = len(self._objlist) 476 | self._object_offsets = [0]*num_objects 477 | self._ref_size = _count_to_size(num_objects) 478 | 479 | self._ref_format = _BINARY_FORMAT[self._ref_size] 480 | 481 | # Write file header 482 | self._fp.write(b'bplist00') 483 | 484 | # Write object list 485 | for obj in self._objlist: 486 | self._write_object(obj) 487 | 488 | # Write refnum->object offset table 489 | top_object = self._getrefnum(value) 490 | offset_table_offset = self._fp.tell() 491 | offset_size = _count_to_size(offset_table_offset) 492 | offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects 493 | self._fp.write(struct.pack(offset_format, *self._object_offsets)) 494 | 495 | # Write trailer 496 | sort_version = 0 497 | trailer = ( 498 | sort_version, offset_size, self._ref_size, num_objects, 499 | top_object, offset_table_offset 500 | ) 501 | self._fp.write(struct.pack('>5xBBBQQQ', *trailer)) 502 | 503 | def _flatten(self, value): 504 | # First check if the object is in the object table, not used for 505 | # containers to ensure that two subcontainers with the same contents 506 | # will be serialized as distinct values. 507 | if isinstance(value, _scalars): 508 | if (type(value), value) in self._objtable: 509 | return 510 | 511 | elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data): 512 | if (type(value.data), value.data) in self._objtable: 513 | return 514 | 515 | elif id(value) in self._objidtable: 516 | return 517 | 518 | # Add to objectreference map 519 | refnum = len(self._objlist) 520 | self._objlist.append(value) 521 | if isinstance(value, _scalars): 522 | self._objtable[(type(value), value)] = refnum 523 | elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data): 524 | self._objtable[(type(value.data), value.data)] = refnum 525 | else: 526 | self._objidtable[id(value)] = refnum 527 | 528 | # And finally recurse into containers 529 | if isinstance(value, dict): 530 | keys = [] 531 | values = [] 532 | items = value.items() 533 | if self._sort_keys: 534 | items = sorted(items) 535 | 536 | for k, v in items: 537 | if not isinstance(k, basestring): 538 | if self._skipkeys: 539 | continue 540 | raise TypeError("keys must be strings") 541 | keys.append(k) 542 | values.append(v) 543 | 544 | for o in itertools.chain(keys, values): 545 | self._flatten(o) 546 | 547 | elif isinstance(value, (list, tuple)): 548 | for o in value: 549 | self._flatten(o) 550 | 551 | def _getrefnum(self, value): 552 | if isinstance(value, _scalars): 553 | return self._objtable[(type(value), value)] 554 | elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data): 555 | return self._objtable[(type(value.data), value.data)] 556 | else: 557 | return self._objidtable[id(value)] 558 | 559 | def _write_size(self, token, size): 560 | if size < 15: 561 | self._fp.write(struct.pack('>B', token | size)) 562 | 563 | elif size < 1 << 8: 564 | self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size)) 565 | 566 | elif size < 1 << 16: 567 | self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size)) 568 | 569 | elif size < 1 << 32: 570 | self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size)) 571 | 572 | else: 573 | self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size)) 574 | 575 | def _write_object(self, value): 576 | ref = self._getrefnum(value) 577 | self._object_offsets[ref] = self._fp.tell() 578 | if value is None: 579 | self._fp.write(b'\x00') 580 | 581 | elif value is False: 582 | self._fp.write(b'\x08') 583 | 584 | elif value is True: 585 | 
self._fp.write(b'\x09') 586 | 587 | elif isinstance(value, int): 588 | if value < 0: 589 | try: 590 | self._fp.write(struct.pack('>Bq', 0x13, value)) 591 | except struct.error: 592 | raise OverflowError(value) # from None 593 | elif value < 1 << 8: 594 | self._fp.write(struct.pack('>BB', 0x10, value)) 595 | elif value < 1 << 16: 596 | self._fp.write(struct.pack('>BH', 0x11, value)) 597 | elif value < 1 << 32: 598 | self._fp.write(struct.pack('>BL', 0x12, value)) 599 | elif value < 1 << 63: 600 | self._fp.write(struct.pack('>BQ', 0x13, value)) 601 | elif value < 1 << 64: 602 | self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True)) 603 | else: 604 | raise OverflowError(value) 605 | 606 | elif isinstance(value, float): 607 | self._fp.write(struct.pack('>Bd', 0x23, value)) 608 | 609 | elif isinstance(value, datetime.datetime): 610 | f = (value - datetime.datetime(2001, 1, 1)).total_seconds() 611 | self._fp.write(struct.pack('>Bd', 0x33, f)) 612 | 613 | elif (_check_py3() and isinstance(value, (bytes, bytearray))) or (hasattr(plistlib, "Data") and isinstance(value, plistlib.Data)): 614 | if not isinstance(value, (bytes, bytearray)): 615 | value = value.data # Unpack it 616 | self._write_size(0x40, len(value)) 617 | self._fp.write(value) 618 | 619 | elif isinstance(value, basestring): 620 | try: 621 | t = value.encode('ascii') 622 | self._write_size(0x50, len(value)) 623 | except UnicodeEncodeError: 624 | t = value.encode('utf-16be') 625 | self._write_size(0x60, len(t) // 2) 626 | self._fp.write(t) 627 | 628 | elif isinstance(value, UID) or (hasattr(plistlib,"UID") and isinstance(value, plistlib.UID)): 629 | if value.data < 0: 630 | raise ValueError("UIDs must be positive") 631 | elif value.data < 1 << 8: 632 | self._fp.write(struct.pack('>BB', 0x80, value)) 633 | elif value.data < 1 << 16: 634 | self._fp.write(struct.pack('>BH', 0x81, value)) 635 | elif value.data < 1 << 32: 636 | self._fp.write(struct.pack('>BL', 0x83, value)) 637 | # elif value.data < 1 << 64: 638 | # self._fp.write(struct.pack('>BQ', 0x87, value)) 639 | else: 640 | raise OverflowError(value) 641 | 642 | elif isinstance(value, (list, tuple)): 643 | refs = [self._getrefnum(o) for o in value] 644 | s = len(refs) 645 | self._write_size(0xA0, s) 646 | self._fp.write(struct.pack('>' + self._ref_format * s, *refs)) 647 | 648 | elif isinstance(value, dict): 649 | keyRefs, valRefs = [], [] 650 | 651 | if self._sort_keys: 652 | rootItems = sorted(value.items()) 653 | else: 654 | rootItems = value.items() 655 | 656 | for k, v in rootItems: 657 | if not isinstance(k, basestring): 658 | if self._skipkeys: 659 | continue 660 | raise TypeError("keys must be strings") 661 | keyRefs.append(self._getrefnum(k)) 662 | valRefs.append(self._getrefnum(v)) 663 | 664 | s = len(keyRefs) 665 | self._write_size(0xD0, s) 666 | self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs)) 667 | self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs)) 668 | 669 | else: 670 | raise TypeError(value) 671 | -------------------------------------------------------------------------------- /Scripts/run.py: -------------------------------------------------------------------------------- 1 | import sys, subprocess, time, threading, shlex 2 | try: 3 | from Queue import Queue, Empty 4 | except: 5 | from queue import Queue, Empty 6 | 7 | ON_POSIX = 'posix' in sys.builtin_module_names 8 | 9 | class Run: 10 | 11 | def __init__(self): 12 | return 13 | 14 | def _read_output(self, pipe, q): 15 | try: 16 | for line in iter(lambda: pipe.read(1), b''): 
17 | q.put(line) 18 | except ValueError: 19 | pass 20 | pipe.close() 21 | 22 | def _create_thread(self, output): 23 | # Creates a new queue and thread object to watch based on the output pipe sent 24 | q = Queue() 25 | t = threading.Thread(target=self._read_output, args=(output, q)) 26 | t.daemon = True 27 | return (q,t) 28 | 29 | def _stream_output(self, comm, shell = False): 30 | output = error = "" 31 | p = None 32 | try: 33 | if shell and type(comm) is list: 34 | comm = " ".join(shlex.quote(x) for x in comm) 35 | if not shell and type(comm) is str: 36 | comm = shlex.split(comm) 37 | p = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines=True, close_fds=ON_POSIX) 38 | # Setup the stdout thread/queue 39 | q,t = self._create_thread(p.stdout) 40 | qe,te = self._create_thread(p.stderr) 41 | # Start both threads 42 | t.start() 43 | te.start() 44 | 45 | while True: 46 | c = z = "" 47 | try: c = q.get_nowait() 48 | except Empty: pass 49 | else: 50 | sys.stdout.write(c) 51 | output += c 52 | sys.stdout.flush() 53 | try: z = qe.get_nowait() 54 | except Empty: pass 55 | else: 56 | sys.stderr.write(z) 57 | error += z 58 | sys.stderr.flush() 59 | if not c==z=="": continue # Keep going until empty 60 | # No output - see if still running 61 | p.poll() 62 | if p.returncode != None: 63 | # Subprocess ended 64 | break 65 | # No output, but subprocess still running - stall for 20ms 66 | time.sleep(0.02) 67 | 68 | o, e = p.communicate() 69 | return (output+o, error+e, p.returncode) 70 | except: 71 | if p: 72 | try: o, e = p.communicate() 73 | except: o = e = "" 74 | return (output+o, error+e, p.returncode) 75 | return ("", "Command not found!", 1) 76 | 77 | def _decode(self, value, encoding="utf-8", errors="ignore"): 78 | # Helper method to only decode if bytes type 79 | if sys.version_info >= (3,0) and isinstance(value, bytes): 80 | return value.decode(encoding,errors) 81 | return value 82 | 83 | def _run_command(self, comm, shell = False): 84 | c = None 85 | try: 86 | if shell and type(comm) is list: 87 | comm = " ".join(shlex.quote(x) for x in comm) 88 | if not shell and type(comm) is str: 89 | comm = shlex.split(comm) 90 | p = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 91 | c = p.communicate() 92 | except: 93 | if c == None: 94 | return ("", "Command not found!", 1) 95 | return (self._decode(c[0]), self._decode(c[1]), p.returncode) 96 | 97 | def run(self, command_list, leave_on_fail = False): 98 | # Command list should be an array of dicts 99 | if type(command_list) is dict: 100 | # We only have one command 101 | command_list = [command_list] 102 | output_list = [] 103 | for comm in command_list: 104 | args = comm.get("args", []) 105 | shell = comm.get("shell", False) 106 | stream = comm.get("stream", False) 107 | sudo = comm.get("sudo", False) 108 | stdout = comm.get("stdout", False) 109 | stderr = comm.get("stderr", False) 110 | mess = comm.get("message", None) 111 | show = comm.get("show", False) 112 | 113 | if not mess == None: 114 | print(mess) 115 | 116 | if not len(args): 117 | # nothing to process 118 | continue 119 | if sudo: 120 | # Check if we have sudo 121 | out = self._run_command(["which", "sudo"]) 122 | if "sudo" in out[0]: 123 | # Can sudo 124 | if type(args) is list: 125 | args.insert(0, out[0].replace("\n", "")) # add to start of list 126 | elif type(args) is str: 127 | args = out[0].replace("\n", "") + " " + args # add to start of string 128 | 129 | if show: 130 | print(" 
".join(args)) 131 | 132 | if stream: 133 | # Stream it! 134 | out = self._stream_output(args, shell) 135 | else: 136 | # Just run and gather output 137 | out = self._run_command(args, shell) 138 | if stdout and len(out[0]): 139 | print(out[0]) 140 | if stderr and len(out[1]): 141 | print(out[1]) 142 | # Append output 143 | output_list.append(out) 144 | # Check for errors 145 | if leave_on_fail and out[2] != 0: 146 | # Got an error - leave 147 | break 148 | if len(output_list) == 1: 149 | # We only ran one command - just return that output 150 | return output_list[0] 151 | return output_list 152 | -------------------------------------------------------------------------------- /Scripts/utils.py: -------------------------------------------------------------------------------- 1 | import sys, os, time, re, json, datetime, ctypes, subprocess 2 | 3 | if os.name == "nt": 4 | # Windows 5 | import msvcrt 6 | else: 7 | # Not Windows \o/ 8 | import select 9 | 10 | class Utils: 11 | 12 | def __init__(self, name = "Python Script"): 13 | self.name = name 14 | # Init our colors before we need to print anything 15 | cwd = os.getcwd() 16 | os.chdir(os.path.dirname(os.path.realpath(__file__))) 17 | if os.path.exists("colors.json"): 18 | self.colors_dict = json.load(open("colors.json")) 19 | else: 20 | self.colors_dict = {} 21 | os.chdir(cwd) 22 | 23 | def check_admin(self): 24 | # Returns whether or not we're admin 25 | try: 26 | is_admin = os.getuid() == 0 27 | except AttributeError: 28 | is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0 29 | return is_admin 30 | 31 | def elevate(self, file): 32 | # Runs the passed file as admin 33 | if self.check_admin(): 34 | return 35 | if os.name == "nt": 36 | ctypes.windll.shell32.ShellExecuteW(None, "runas", '"{}"'.format(sys.executable), '"{}"'.format(file), None, 1) 37 | else: 38 | try: 39 | p = subprocess.Popen(["which", "sudo"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) 40 | c = p.communicate()[0].decode("utf-8", "ignore").replace("\n", "") 41 | os.execv(c, [ sys.executable, 'python'] + sys.argv) 42 | except: 43 | exit(1) 44 | 45 | def compare_versions(self, vers1, vers2, **kwargs): 46 | # Helper method to compare ##.## strings 47 | # 48 | # vers1 < vers2 = True 49 | # vers1 = vers2 = None 50 | # vers1 > vers2 = False 51 | 52 | # Sanitize the pads 53 | pad = str(kwargs.get("pad", "")) 54 | sep = str(kwargs.get("separator", ".")) 55 | 56 | ignore_case = kwargs.get("ignore_case", True) 57 | 58 | # Cast as strings 59 | vers1 = str(vers1) 60 | vers2 = str(vers2) 61 | 62 | if ignore_case: 63 | vers1 = vers1.lower() 64 | vers2 = vers2.lower() 65 | 66 | # Split and pad lists 67 | v1_parts, v2_parts = self.pad_length(vers1.split(sep), vers2.split(sep)) 68 | 69 | # Iterate and compare 70 | for i in range(len(v1_parts)): 71 | # Remove non-numeric 72 | v1 = ''.join(c.lower() for c in v1_parts[i] if c.isalnum()) 73 | v2 = ''.join(c.lower() for c in v2_parts[i] if c.isalnum()) 74 | # Equalize the lengths 75 | v1, v2 = self.pad_length(v1, v2) 76 | # Compare 77 | if str(v1) < str(v2): 78 | return True 79 | elif str(v1) > str(v2): 80 | return False 81 | # Never differed - return None, must be equal 82 | return None 83 | 84 | def pad_length(self, var1, var2, pad = "0"): 85 | # Pads the vars on the left side to make them equal length 86 | pad = "0" if len(str(pad)) < 1 else str(pad)[0] 87 | if not type(var1) == type(var2): 88 | # Type mismatch! 
Just return what we got 89 | return (var1, var2) 90 | if len(var1) < len(var2): 91 | if type(var1) is list: 92 | var1.extend([str(pad) for x in range(len(var2) - len(var1))]) 93 | else: 94 | var1 = "{}{}".format((pad*(len(var2)-len(var1))), var1) 95 | elif len(var2) < len(var1): 96 | if type(var2) is list: 97 | var2.extend([str(pad) for x in range(len(var1) - len(var2))]) 98 | else: 99 | var2 = "{}{}".format((pad*(len(var1)-len(var2))), var2) 100 | return (var1, var2) 101 | 102 | def check_path(self, path): 103 | # Let's loop until we either get a working path, or no changes 104 | test_path = path 105 | last_path = None 106 | while True: 107 | # Bail if we've looped at least once and the path didn't change 108 | if last_path != None and last_path == test_path: return None 109 | last_path = test_path 110 | # Check if we stripped everything out 111 | if not len(test_path): return None 112 | # Check if we have a valid path 113 | if os.path.exists(test_path): 114 | return os.path.abspath(test_path) 115 | # Check for quotes 116 | if test_path[0] == test_path[-1] and test_path[0] in ('"',"'"): 117 | test_path = test_path[1:-1] 118 | continue 119 | # Check for a tilde and expand if needed 120 | if test_path[0] == "~": 121 | tilde_expanded = os.path.expanduser(test_path) 122 | if tilde_expanded != test_path: 123 | # Got a change 124 | test_path = tilde_expanded 125 | continue 126 | # Let's check for spaces - strip from the left first, then the right 127 | if test_path[0] in (" ","\t"): 128 | test_path = test_path[1:] 129 | continue 130 | if test_path[-1] in (" ","\t"): 131 | test_path = test_path[:-1] 132 | continue 133 | # Maybe we have escapes to handle? 134 | test_path = "\\".join([x.replace("\\", "") for x in test_path.split("\\\\")]) 135 | 136 | def grab(self, prompt, **kwargs): 137 | # Takes a prompt, a default, and a timeout and shows it with that timeout 138 | # returning the result 139 | timeout = kwargs.get("timeout", 0) 140 | default = kwargs.get("default", None) 141 | # If we don't have a timeout - then skip the timed sections 142 | if timeout <= 0: 143 | if sys.version_info >= (3, 0): 144 | return input(prompt) 145 | else: 146 | return str(raw_input(prompt)) 147 | # Write our prompt 148 | sys.stdout.write(prompt) 149 | sys.stdout.flush() 150 | if os.name == "nt": 151 | start_time = time.time() 152 | i = '' 153 | while True: 154 | if msvcrt.kbhit(): 155 | c = msvcrt.getche() 156 | if ord(c) == 13: # enter_key 157 | break 158 | elif ord(c) >= 32: #space_char 159 | i += c 160 | if len(i) == 0 and (time.time() - start_time) > timeout: 161 | break 162 | else: 163 | i, o, e = select.select( [sys.stdin], [], [], timeout ) 164 | if i: 165 | i = sys.stdin.readline().strip() 166 | print('') # needed to move to next line 167 | if len(i) > 0: 168 | return i 169 | else: 170 | return default 171 | 172 | def cls(self): 173 | os.system('cls' if os.name=='nt' else 'clear') 174 | 175 | def cprint(self, message, **kwargs): 176 | strip_colors = kwargs.get("strip_colors", False) 177 | if os.name == "nt": 178 | strip_colors = True 179 | reset = u"\u001b[0m" 180 | # Requires sys import 181 | for c in self.colors: 182 | if strip_colors: 183 | message = message.replace(c["find"], "") 184 | else: 185 | message = message.replace(c["find"], c["replace"]) 186 | if strip_colors: 187 | return message 188 | sys.stdout.write(message) 189 | print(reset) 190 | 191 | # Needs work to resize the string if color chars exist 192 | '''# Header drawing method 193 | def head(self, text = None, width = 55): 194 | if text == 
None: 195 | text = self.name 196 | self.cls() 197 | print(" {}".format("#"*width)) 198 | len_text = self.cprint(text, strip_colors=True) 199 | mid_len = int(round(width/2-len(len_text)/2)-2) 200 | middle = " #{}{}{}#".format(" "*mid_len, len_text, " "*((width - mid_len - len(len_text))-2)) 201 | if len(middle) > width+1: 202 | # Get the difference 203 | di = len(middle) - width 204 | # Add the padding for the ...# 205 | di += 3 206 | # Trim the string 207 | middle = middle[:-di] 208 | newlen = len(middle) 209 | middle += "...#" 210 | find_list = [ c["find"] for c in self.colors ] 211 | 212 | # Translate colored string to len 213 | middle = middle.replace(len_text, text + self.rt_color) # always reset just in case 214 | self.cprint(middle) 215 | print("#"*width)''' 216 | 217 | # Header drawing method 218 | def head(self, text = None, width = 55): 219 | if text == None: 220 | text = self.name 221 | self.cls() 222 | print(" {}".format("#"*width)) 223 | mid_len = int(round(width/2-len(text)/2)-2) 224 | middle = " #{}{}{}#".format(" "*mid_len, text, " "*((width - mid_len - len(text))-2)) 225 | if len(middle) > width+1: 226 | # Get the difference 227 | di = len(middle) - width 228 | # Add the padding for the ...# 229 | di += 3 230 | # Trim the string 231 | middle = middle[:-di] + "...#" 232 | print(middle) 233 | print("#"*width) 234 | 235 | def resize(self, width, height): 236 | print('\033[8;{};{}t'.format(height, width)) 237 | 238 | def custom_quit(self): 239 | self.head() 240 | print("by CorpNewt\n") 241 | print("Thanks for testing it out, for bugs/comments/complaints") 242 | print("send me a message on Reddit, or check out my GitHub:\n") 243 | print("www.reddit.com/u/corpnewt") 244 | print("www.github.com/corpnewt\n") 245 | # Get the time and wish them a good morning, afternoon, evening, and night 246 | hr = datetime.datetime.now().time().hour 247 | if hr > 3 and hr < 12: 248 | print("Have a nice morning!\n\n") 249 | elif hr >= 12 and hr < 17: 250 | print("Have a nice afternoon!\n\n") 251 | elif hr >= 17 and hr < 21: 252 | print("Have a nice evening!\n\n") 253 | else: 254 | print("Have a nice night!\n\n") 255 | exit(0) 256 | --------------------------------------------------------------------------------
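Scripts/plist.py above is a drop-in wrapper around plistlib: the same load/loads/dump/dumps calls work on Python 2 and 3, detect binary plists automatically, and - per the monkey patch in load() - accept hexadecimal <integer> values in XML input. A minimal round-trip sketch, assuming the Scripts package is importable and using a made-up key name:

from Scripts import plist

xml = """<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
    <key>Limit</key>
    <integer>0x10</integer>
</dict>
</plist>"""

data = plist.loads(xml)                   # the patched parser accepts the 0x10 hex integer
print(data["Limit"])                      # -> 16

out = plist.dumps(data, sort_keys=False)  # serialize back to XML without re-sorting keys
assert plist.loads(out)["Limit"] == 16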
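Scripts/run.py's Run class is the command runner used throughout: run() takes a single dict - or a list of dicts - with an "args" list plus optional "shell", "stream", "sudo", "stdout", "stderr", "message", and "show" keys, and returns a (stdout, stderr, returncode) tuple per command. A small sketch of both call styles; the commands themselves are just illustrative:

from Scripts import run

r = run.Run()

# One command -> one (stdout, stderr, returncode) tuple
out, err, code = r.run({"args": ["sw_vers", "-productVersion"]})
print(out.strip(), code)

# Several commands -> a list of tuples; leave_on_fail stops at the first non-zero return code
results = r.run([
    {"args": ["echo", "hello"], "stdout": True},                  # also prints the captured stdout
    {"args": ["ls", "/nonexistent"], "message": "Listing a missing path..."},
], leave_on_fail=True)

Passing "stream": True switches to _stream_output(), which echoes stdout/stderr live while still returning the captured text.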
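Scripts/utils.py rounds things out with small interactive helpers: compare_versions() returns True/None/False for lower/equal/higher, check_path() keeps stripping quotes, escapes, stray whitespace, and tildes until it finds an existing path (or gives up with None), and grab() is an input prompt with an optional timeout and default. A quick sketch; the prompt text and paths are illustrative:

from Scripts import utils

u = utils.Utils("Example Script")

print(u.compare_versions("10.13.6", "10.14"))  # -> True  (first version is lower)
print(u.compare_versions("1.2.0", "1.2"))      # -> None  (equal once padded)

print(u.check_path(' "~/Desktop" '))           # -> e.g. /Users/you/Desktop if it exists, else None

choice = u.grab("Continue? (y/n): ", timeout=10, default="n")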