├── bin
│   ├── jancox.prop
│   └── linux
│       ├── 7za
│       ├── __init__.py
│       ├── __pycache__
│       │   ├── blockimgdiff.cpython-36.pyc
│       │   ├── common.cpython-36.pyc
│       │   ├── ext4.cpython-36.pyc
│       │   ├── rangelib.cpython-36.pyc
│       │   └── sparse_img.cpython-36.pyc
│       ├── blockimgdiff.py
│       ├── brotli
│       ├── busybox
│       ├── common.py
│       ├── ext4.py
│       ├── img2sdat.py
│       ├── imgextractor.py
│       ├── magiskboot
│       ├── make_ext4fs
│       ├── rangelib.py
│       ├── rimg2sdat
│       ├── sdat2img.py
│       ├── sparse_img.py
│       └── utility.sh
├── cleanup.sh
├── credits.txt
├── repack.sh
└── unpack.sh
/bin/jancox.prop: -------------------------------------------------------------------------------- 1 | brotli.level=1 2 | archive.zip=enable 3 | clean.tmp=disable 4 | date.file.rom=2009-09-27 -------------------------------------------------------------------------------- /bin/linux/7za: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/7za -------------------------------------------------------------------------------- /bin/linux/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'unix3dgforce [MiuiPro.by DEV Team]' 2 | __copyright__ = 'Copyright (c) 2018 Miuipro.by' -------------------------------------------------------------------------------- /bin/linux/__pycache__/blockimgdiff.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/__pycache__/blockimgdiff.cpython-36.pyc -------------------------------------------------------------------------------- /bin/linux/__pycache__/common.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/__pycache__/common.cpython-36.pyc -------------------------------------------------------------------------------- /bin/linux/__pycache__/ext4.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/__pycache__/ext4.cpython-36.pyc -------------------------------------------------------------------------------- /bin/linux/__pycache__/rangelib.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/__pycache__/rangelib.cpython-36.pyc -------------------------------------------------------------------------------- /bin/linux/__pycache__/sparse_img.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/__pycache__/sparse_img.cpython-36.pyc -------------------------------------------------------------------------------- /bin/linux/blockimgdiff.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2014 The Android Open Source Project 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from __future__ import print_function 16 | 17 | from collections import deque, OrderedDict 18 | from hashlib import sha1 19 | import array 20 | import common 21 | import functools 22 | import heapq 23 | import itertools 24 | import multiprocessing 25 | import os 26 | import re 27 | import subprocess 28 | import threading 29 | import time 30 | import tempfile 31 | 32 | from rangelib import RangeSet 33 | 34 | 35 | __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"] 36 | 37 | 38 | def compute_patch(src, tgt, imgdiff=False): 39 | srcfd, srcfile = tempfile.mkstemp(prefix="src-") 40 | tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-") 41 | patchfd, patchfile = tempfile.mkstemp(prefix="patch-") 42 | os.close(patchfd) 43 | 44 | try: 45 | with os.fdopen(srcfd, "wb") as f_src: 46 | for p in src: 47 | f_src.write(p) 48 | 49 | with os.fdopen(tgtfd, "wb") as f_tgt: 50 | for p in tgt: 51 | f_tgt.write(p) 52 | try: 53 | os.unlink(patchfile) 54 | except OSError: 55 | pass 56 | if imgdiff: 57 | p = subprocess.call(["imgdiff", "-z", srcfile, tgtfile, patchfile], 58 | stdout=open("/dev/null", "a"), 59 | stderr=subprocess.STDOUT) 60 | else: 61 | p = subprocess.call(["bsdiff", srcfile, tgtfile, patchfile]) 62 | 63 | if p: 64 | raise ValueError("diff failed: " + str(p)) 65 | 66 | with open(patchfile, "rb") as f: 67 | return f.read() 68 | finally: 69 | try: 70 | os.unlink(srcfile) 71 | os.unlink(tgtfile) 72 | os.unlink(patchfile) 73 | except OSError: 74 | pass 75 | 76 | 77 | class Image(object): 78 | def ReadRangeSet(self, ranges): 79 | raise NotImplementedError 80 | 81 | def TotalSha1(self, include_clobbered_blocks=False): 82 | raise NotImplementedError 83 | 84 | 85 | class EmptyImage(Image): 86 | """A zero-length image.""" 87 | blocksize = 4096 88 | care_map = RangeSet() 89 | clobbered_blocks = RangeSet() 90 | extended = RangeSet() 91 | total_blocks = 0 92 | file_map = {} 93 | def ReadRangeSet(self, ranges): 94 | return () 95 | def TotalSha1(self, include_clobbered_blocks=False): 96 | # EmptyImage always carries empty clobbered_blocks, so 97 | # include_clobbered_blocks can be ignored. 
98 | assert self.clobbered_blocks.size() == 0 99 | return sha1().hexdigest() 100 | 101 | 102 | class DataImage(Image): 103 | """An image wrapped around a single string of data.""" 104 | 105 | def __init__(self, data, trim=False, pad=False): 106 | self.data = data 107 | self.blocksize = 4096 108 | 109 | assert not (trim and pad) 110 | 111 | partial = len(self.data) % self.blocksize 112 | padded = False 113 | if partial > 0: 114 | if trim: 115 | self.data = self.data[:-partial] 116 | elif pad: 117 | self.data += '\0' * (self.blocksize - partial) 118 | padded = True 119 | else: 120 | raise ValueError(("data for DataImage must be multiple of %d bytes " 121 | "unless trim or pad is specified") % 122 | (self.blocksize,)) 123 | 124 | assert len(self.data) % self.blocksize == 0 125 | 126 | self.total_blocks = len(self.data) / self.blocksize 127 | self.care_map = RangeSet(data=(0, self.total_blocks)) 128 | # When the last block is padded, we always write the whole block even for 129 | # incremental OTAs. Because otherwise the last block may get skipped if 130 | # unchanged for an incremental, but would fail the post-install 131 | # verification if it has non-zero contents in the padding bytes. 132 | # Bug: 23828506 133 | if padded: 134 | clobbered_blocks = [self.total_blocks-1, self.total_blocks] 135 | else: 136 | clobbered_blocks = [] 137 | self.clobbered_blocks = clobbered_blocks 138 | self.extended = RangeSet() 139 | 140 | zero_blocks = [] 141 | nonzero_blocks = [] 142 | reference = '\0' * self.blocksize 143 | 144 | for i in range(self.total_blocks-1 if padded else self.total_blocks): 145 | d = self.data[i*self.blocksize : (i+1)*self.blocksize] 146 | if d == reference: 147 | zero_blocks.append(i) 148 | zero_blocks.append(i+1) 149 | else: 150 | nonzero_blocks.append(i) 151 | nonzero_blocks.append(i+1) 152 | 153 | assert zero_blocks or nonzero_blocks or clobbered_blocks 154 | 155 | self.file_map = dict() 156 | if zero_blocks: 157 | self.file_map["__ZERO"] = RangeSet(data=zero_blocks) 158 | if nonzero_blocks: 159 | self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks) 160 | if clobbered_blocks: 161 | self.file_map["__COPY"] = RangeSet(data=clobbered_blocks) 162 | 163 | def ReadRangeSet(self, ranges): 164 | return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges] 165 | 166 | def TotalSha1(self, include_clobbered_blocks=False): 167 | if not include_clobbered_blocks: 168 | ranges = self.care_map.subtract(self.clobbered_blocks) 169 | return sha1(self.ReadRangeSet(ranges)).hexdigest() 170 | else: 171 | return sha1(self.data).hexdigest() 172 | 173 | 174 | class Transfer(object): 175 | def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, style, by_id): 176 | self.tgt_name = tgt_name 177 | self.src_name = src_name 178 | self.tgt_ranges = tgt_ranges 179 | self.src_ranges = src_ranges 180 | self.style = style 181 | self.intact = (getattr(tgt_ranges, "monotonic", False) and 182 | getattr(src_ranges, "monotonic", False)) 183 | 184 | # We use OrderedDict rather than dict so that the output is repeatable; 185 | # otherwise it would depend on the hash values of the Transfer objects. 
186 | self.goes_before = OrderedDict() 187 | self.goes_after = OrderedDict() 188 | 189 | self.stash_before = [] 190 | self.use_stash = [] 191 | 192 | self.id = len(by_id) 193 | by_id.append(self) 194 | 195 | def NetStashChange(self): 196 | return (sum(sr.size() for (_, sr) in self.stash_before) - 197 | sum(sr.size() for (_, sr) in self.use_stash)) 198 | 199 | def ConvertToNew(self): 200 | assert self.style != "new" 201 | self.use_stash = [] 202 | self.style = "new" 203 | self.src_ranges = RangeSet() 204 | 205 | def __str__(self): 206 | return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style + 207 | " to " + str(self.tgt_ranges) + ">") 208 | 209 | 210 | @functools.total_ordering 211 | class HeapItem(object): 212 | def __init__(self, item): 213 | self.item = item 214 | # Negate the score since python's heap is a min-heap and we want 215 | # the maximum score. 216 | self.score = -item.score 217 | def clear(self): 218 | self.item = None 219 | def __bool__(self): 220 | return self.item is not None 221 | def __eq__(self, other): 222 | return self.score == other.score 223 | def __le__(self, other): 224 | return self.score <= other.score 225 | 226 | 227 | # BlockImageDiff works on two image objects. An image object is 228 | # anything that provides the following attributes: 229 | # 230 | # blocksize: the size in bytes of a block, currently must be 4096. 231 | # 232 | # total_blocks: the total size of the partition/image, in blocks. 233 | # 234 | # care_map: a RangeSet containing which blocks (in the range [0, 235 | # total_blocks) we actually care about; i.e. which blocks contain 236 | # data. 237 | # 238 | # file_map: a dict that partitions the blocks contained in care_map 239 | # into smaller domains that are useful for doing diffs on. 240 | # (Typically a domain is a file, and the key in file_map is the 241 | # pathname.) 242 | # 243 | # clobbered_blocks: a RangeSet containing which blocks contain data 244 | # but may be altered by the FS. They need to be excluded when 245 | # verifying the partition integrity. 246 | # 247 | # ReadRangeSet(): a function that takes a RangeSet and returns the 248 | # data contained in the image blocks of that RangeSet. The data 249 | # is returned as a list or tuple of strings; concatenating the 250 | # elements together should produce the requested data. 251 | # Implementations are free to break up the data into list/tuple 252 | # elements in any way that is convenient. 253 | # 254 | # TotalSha1(): a function that returns (as a hex string) the SHA-1 255 | # hash of all the data in the image (ie, all the blocks in the 256 | # care_map minus clobbered_blocks, or including the clobbered 257 | # blocks if include_clobbered_blocks is True). 258 | # 259 | # When creating a BlockImageDiff, the src image may be None, in which 260 | # case the list of transfers produced will never read from the 261 | # original image.
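# A minimal usage sketch (editorial illustration only; the file names and the
# pad=True choice are assumptions, not part of this repo):
#
#   tgt = DataImage(open("target.raw", "rb").read(), pad=True)
#   BlockImageDiff(tgt, src=None, version=4).Compute("system")
#
# would write system.transfer.list, system.new.dat and system.patch.dat; with
# src=None, every transfer becomes a "new"/"zero" command that never reads
# from a source image.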
262 | 263 | class BlockImageDiff(object): 264 | def __init__(self, tgt, src=None, version=4, threads=None, 265 | disable_imgdiff=False): 266 | if threads is None: 267 | threads = multiprocessing.cpu_count() // 2 268 | if threads == 0: 269 | threads = 1 270 | self.threads = threads 271 | self.version = version 272 | self.transfers = [] 273 | self.src_basenames = {} 274 | self.src_numpatterns = {} 275 | self._max_stashed_size = 0 276 | self.touched_src_ranges = RangeSet() 277 | self.touched_src_sha1 = None 278 | self.disable_imgdiff = disable_imgdiff 279 | 280 | assert version in (1, 2, 3, 4) 281 | 282 | self.tgt = tgt 283 | if src is None: 284 | src = EmptyImage() 285 | self.src = src 286 | 287 | # The updater code that installs the patch always uses 4k blocks. 288 | assert tgt.blocksize == 4096 289 | assert src.blocksize == 4096 290 | 291 | # The range sets in each filemap should comprise a partition of 292 | # the care map. 293 | self.AssertPartition(src.care_map, src.file_map.values()) 294 | self.AssertPartition(tgt.care_map, tgt.file_map.values()) 295 | 296 | @property 297 | def max_stashed_size(self): 298 | return self._max_stashed_size 299 | 300 | def Compute(self, prefix): 301 | # When looking for a source file to use as the diff input for a 302 | # target file, we try: 303 | # 1) an exact path match if available, otherwise 304 | # 2) an exact basename match if available, otherwise 305 | # 3) a basename match after all runs of digits are replaced by 306 | # "#" if available, otherwise 307 | # 4) we have no source for this target. 308 | self.AbbreviateSourceNames() 309 | self.FindTransfers() 310 | 311 | # Find the ordering dependencies among transfers (this is O(n^2) 312 | # in the number of transfers). 313 | self.GenerateDigraph() 314 | # Find a sequence of transfers that satisfies as many ordering 315 | # dependencies as possible (heuristically). 316 | self.FindVertexSequence() 317 | # Fix up the ordering dependencies that the sequence didn't 318 | # satisfy. 319 | if self.version == 1: 320 | self.RemoveBackwardEdges() 321 | else: 322 | self.ReverseBackwardEdges() 323 | self.ImproveVertexSequence() 324 | 325 | # Ensure the runtime stash size is under the limit. 326 | if self.version >= 2 and common.OPTIONS.cache_size is not None: 327 | self.ReviseStashSize() 328 | 329 | # Double-check our work. 330 | self.AssertSequenceGood() 331 | 332 | self.ComputePatches(prefix) 333 | self.WriteTransfers(prefix) 334 | 335 | def HashBlocks(self, source, ranges): # pylint: disable=no-self-use 336 | data = source.ReadRangeSet(ranges) 337 | ctx = sha1() 338 | 339 | for p in data: 340 | ctx.update(p) 341 | 342 | return ctx.hexdigest() 343 | 344 | def WriteTransfers(self, prefix): 345 | def WriteTransfersZero(out, to_zero): 346 | """Limit the number of blocks in command zero to 1024 blocks.
347 | 348 | This prevents the target size of one command from being too large; and 349 | might help to avoid fsync errors on some devices.""" 350 | 351 | zero_blocks_limit = 1024 352 | total = 0 353 | while to_zero.size() > 0: 354 | zero_blocks = to_zero.first(zero_blocks_limit) 355 | out.append("zero %s\n" % (zero_blocks.to_string_raw(),)) 356 | total += zero_blocks.size() 357 | to_zero = to_zero.subtract(zero_blocks) 358 | return total 359 | 360 | out = [] 361 | 362 | total = 0 363 | 364 | stashes = {} 365 | stashed_blocks = 0 366 | max_stashed_blocks = 0 367 | 368 | free_stash_ids = [] 369 | next_stash_id = 0 370 | 371 | for xf in self.transfers: 372 | 373 | if self.version < 2: 374 | assert not xf.stash_before 375 | assert not xf.use_stash 376 | 377 | for s, sr in xf.stash_before: 378 | assert s not in stashes 379 | if free_stash_ids: 380 | sid = heapq.heappop(free_stash_ids) 381 | else: 382 | sid = next_stash_id 383 | next_stash_id += 1 384 | stashes[s] = sid 385 | if self.version == 2: 386 | stashed_blocks += sr.size() 387 | out.append("stash %d %s\n" % (sid, sr.to_string_raw())) 388 | else: 389 | sh = self.HashBlocks(self.src, sr) 390 | if sh in stashes: 391 | stashes[sh] += 1 392 | else: 393 | stashes[sh] = 1 394 | stashed_blocks += sr.size() 395 | self.touched_src_ranges = self.touched_src_ranges.union(sr) 396 | out.append("stash %s %s\n" % (sh, sr.to_string_raw())) 397 | 398 | if stashed_blocks > max_stashed_blocks: 399 | max_stashed_blocks = stashed_blocks 400 | 401 | free_string = [] 402 | free_size = 0 403 | 404 | if self.version == 1: 405 | src_str = xf.src_ranges.to_string_raw() if xf.src_ranges.size() > 0 else "" 406 | elif self.version >= 2: 407 | 408 | # <# blocks> <src ranges> 409 | # OR 410 | # <# blocks> <src ranges> <src locs> <stash refs...> 411 | # OR 412 | # <# blocks> - <stash refs...> 413 | 414 | size = xf.src_ranges.size() 415 | src_str = [str(size)] 416 | 417 | unstashed_src_ranges = xf.src_ranges 418 | mapped_stashes = [] 419 | for s, sr in xf.use_stash: 420 | sid = stashes.pop(s) 421 | unstashed_src_ranges = unstashed_src_ranges.subtract(sr) 422 | sh = self.HashBlocks(self.src, sr) 423 | sr = xf.src_ranges.map_within(sr) 424 | mapped_stashes.append(sr) 425 | if self.version == 2: 426 | src_str.append("%d:%s" % (sid, sr.to_string_raw())) 427 | # A stash will be used only once. We need to free the stash 428 | # immediately after the use, instead of waiting for the automatic 429 | # clean-up at the end. Because otherwise it may take up extra space 430 | # and lead to OTA failures.
# Bug: 23119955 432 | free_string.append("free %d\n" % (sid,)) 433 | free_size += sr.size() 434 | else: 435 | assert sh in stashes 436 | src_str.append("%s:%s" % (sh, sr.to_string_raw())) 437 | stashes[sh] -= 1 438 | if stashes[sh] == 0: 439 | free_size += sr.size() 440 | free_string.append("free %s\n" % (sh)) 441 | stashes.pop(sh) 442 | heapq.heappush(free_stash_ids, sid) 443 | 444 | if unstashed_src_ranges.size() > 0: 445 | src_str.insert(1, unstashed_src_ranges.to_string_raw()) 446 | if xf.use_stash: 447 | mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges) 448 | src_str.insert(2, mapped_unstashed.to_string_raw()) 449 | mapped_stashes.append(mapped_unstashed) 450 | self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes) 451 | else: 452 | src_str.insert(1, "-") 453 | self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes) 454 | 455 | src_str = " ".join(src_str) 456 | 457 | # all versions: 458 | # zero <tgt rangeset> 459 | # new <tgt rangeset> 460 | # erase <tgt rangeset> 461 | # 462 | # version 1: 463 | # bsdiff patchstart patchlen <src rangeset> <tgt rangeset> 464 | # imgdiff patchstart patchlen <src rangeset> <tgt rangeset> 465 | # move <src rangeset> <tgt rangeset> 466 | # 467 | # version 2: 468 | # bsdiff patchstart patchlen <tgt rangeset> <src_str> 469 | # imgdiff patchstart patchlen <tgt rangeset> <src_str> 470 | # move <tgt rangeset> <src_str> 471 | # 472 | # version 3: 473 | # bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str> 474 | # imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str> 475 | # move hash <tgt rangeset> <src_str> 476 | 477 | tgt_size = xf.tgt_ranges.size() 478 | 479 | if xf.style == "new": 480 | assert xf.tgt_ranges 481 | out.append("%s %s\n" % (xf.style, xf.tgt_ranges.to_string_raw())) 482 | total += tgt_size 483 | elif xf.style == "move": 484 | assert xf.tgt_ranges 485 | assert xf.src_ranges.size() == tgt_size 486 | if xf.src_ranges != xf.tgt_ranges: 487 | if self.version == 1: 488 | out.append("%s %s %s\n" % ( 489 | xf.style, 490 | xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw())) 491 | elif self.version == 2: 492 | out.append("%s %s %s\n" % ( 493 | xf.style, 494 | xf.tgt_ranges.to_string_raw(), src_str)) 495 | elif self.version >= 3: 496 | # take into account automatic stashing of overlapping blocks 497 | if xf.src_ranges.overlaps(xf.tgt_ranges): 498 | temp_stash_usage = stashed_blocks + xf.src_ranges.size() 499 | if temp_stash_usage > max_stashed_blocks: 500 | max_stashed_blocks = temp_stash_usage 501 | 502 | self.touched_src_ranges = self.touched_src_ranges.union( 503 | xf.src_ranges) 504 | 505 | out.append("%s %s %s %s\n" % ( 506 | xf.style, 507 | self.HashBlocks(self.tgt, xf.tgt_ranges), 508 | xf.tgt_ranges.to_string_raw(), src_str)) 509 | total += tgt_size 510 | elif xf.style in ("bsdiff", "imgdiff"): 511 | assert xf.tgt_ranges 512 | assert xf.src_ranges 513 | if self.version == 1: 514 | out.append("%s %d %d %s %s\n" % ( 515 | xf.style, xf.patch_start, xf.patch_len, 516 | xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw())) 517 | elif self.version == 2: 518 | out.append("%s %d %d %s %s\n" % ( 519 | xf.style, xf.patch_start, xf.patch_len, 520 | xf.tgt_ranges.to_string_raw(), src_str)) 521 | elif self.version >= 3: 522 | # take into account automatic stashing of overlapping blocks 523 | if xf.src_ranges.overlaps(xf.tgt_ranges): 524 | temp_stash_usage = stashed_blocks + xf.src_ranges.size() 525 | if temp_stash_usage > max_stashed_blocks: 526 | max_stashed_blocks = temp_stash_usage 527 | 528 | self.touched_src_ranges = self.touched_src_ranges.union( 529 | xf.src_ranges) 530 | 531 | out.append("%s %d %d %s %s %s %s\n" % ( 532 | xf.style, 533 | xf.patch_start, xf.patch_len, 534 | self.HashBlocks(self.src, xf.src_ranges), 535 | self.HashBlocks(self.tgt,
xf.tgt_ranges), 536 | xf.tgt_ranges.to_string_raw(), src_str)) 537 | total += tgt_size 538 | elif xf.style == "zero": 539 | assert xf.tgt_ranges 540 | to_zero = xf.tgt_ranges.subtract(xf.src_ranges) 541 | assert WriteTransfersZero(out, to_zero) == to_zero.size() 542 | total += to_zero.size() 543 | else: 544 | raise ValueError("unknown transfer style '%s'\n" % xf.style) 545 | 546 | if free_string: 547 | out.append("".join(free_string)) 548 | stashed_blocks -= free_size 549 | 550 | if self.version >= 2 and common.OPTIONS.cache_size is not None: 551 | # Sanity check: abort if we're going to need more stash space than 552 | # the allowed size (cache_size * threshold). There are two purposes 553 | # of having a threshold here. a) Part of the cache may have been 554 | # occupied by some recovery logs. b) It will buy us some time to deal 555 | # with the oversize issue. 556 | cache_size = common.OPTIONS.cache_size 557 | stash_threshold = common.OPTIONS.stash_threshold 558 | max_allowed = cache_size * stash_threshold 559 | assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \ 560 | 'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % ( 561 | max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks, 562 | self.tgt.blocksize, max_allowed, cache_size, 563 | stash_threshold) 564 | 565 | if self.version >= 3: 566 | self.touched_src_sha1 = self.HashBlocks( 567 | self.src, self.touched_src_ranges) 568 | 569 | # Zero out extended blocks as a workaround for bug 20881595. 570 | if self.tgt.extended.size() > 0: 571 | assert (WriteTransfersZero(out, self.tgt.extended) == 572 | self.tgt.extended.size()) 573 | total += self.tgt.extended.size() 574 | 575 | # We erase all the blocks on the partition that a) don't contain useful 576 | # data in the new image; b) will not be touched by dm-verity. Out of those 577 | # blocks, we erase the ones that won't be used in this update at the 578 | # beginning of an update. The rest would be erased at the end. This is to 579 | # work around the eMMC issue observed on some devices, which may otherwise 580 | # be starved of clean blocks and thus fail the update.
(b/28347095) 581 | all_tgt = RangeSet(data=(0, self.tgt.total_blocks)) 582 | all_tgt_minus_extended = all_tgt.subtract(self.tgt.extended) 583 | new_dontcare = all_tgt_minus_extended.subtract(self.tgt.care_map) 584 | 585 | erase_first = new_dontcare.subtract(self.touched_src_ranges) 586 | if erase_first.size() > 0: 587 | out.insert(0, "erase %s\n" % (erase_first.to_string_raw(),)) 588 | 589 | erase_last = new_dontcare.subtract(erase_first) 590 | if erase_last.size() > 0: 591 | out.append("erase %s\n" % (erase_last.to_string_raw(),)) 592 | 593 | out.insert(0, "%d\n" % (self.version,)) # format version number 594 | out.insert(1, "%d\n" % (total,)) 595 | if self.version >= 2: 596 | # version 2 only: after the total block count, we give the number 597 | # of stash slots needed, and the maximum size needed (in blocks) 598 | out.insert(2, str(next_stash_id) + "\n") 599 | out.insert(3, str(max_stashed_blocks) + "\n") 600 | 601 | with open(prefix + ".transfer.list", "wb") as f: 602 | for i in out: 603 | f.write(i.encode("UTF-8")) 604 | 605 | if self.version >= 2: 606 | self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize 607 | OPTIONS = common.OPTIONS 608 | if OPTIONS.cache_size is not None: 609 | max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold 610 | print("max stashed blocks: %d (%d bytes), " 611 | "limit: %d bytes (%.2f%%)\n" % ( 612 | max_stashed_blocks, self._max_stashed_size, max_allowed, 613 | self._max_stashed_size * 100.0 / max_allowed)) 614 | else: 615 | print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % ( 616 | max_stashed_blocks, self._max_stashed_size)) 617 | 618 | def ReviseStashSize(self): 619 | print("Revising stash size...") 620 | stashes = {} 621 | 622 | # Create the map between a stash and its def/use points. For example, for a 623 | # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd). 624 | for xf in self.transfers: 625 | # Command xf defines (stores) all the stashes in stash_before. 626 | for idx, sr in xf.stash_before: 627 | stashes[idx] = (sr, xf) 628 | 629 | # Record all the stashes command xf uses. 630 | for idx, _ in xf.use_stash: 631 | stashes[idx] += (xf,) 632 | 633 | # Compute the maximum blocks available for stash based on /cache size and 634 | # the threshold. 635 | cache_size = common.OPTIONS.cache_size 636 | stash_threshold = common.OPTIONS.stash_threshold 637 | max_allowed = cache_size * stash_threshold / self.tgt.blocksize 638 | 639 | stashed_blocks = 0 640 | new_blocks = 0 641 | 642 | # Now go through all the commands. Compute the required stash size on the 643 | # fly. If a command requires more stash than is available, we delete the 644 | # stash by replacing the command that uses the stash with a "new" command 645 | # instead. 646 | for xf in self.transfers: 647 | replaced_cmds = [] 648 | 649 | # xf.stash_before generates explicit stash commands. 650 | for idx, sr in xf.stash_before: 651 | if stashed_blocks + sr.size() > max_allowed: 652 | # We cannot stash this one for a later command. Find out the command 653 | # that will use this stash and replace the command with "new". 654 | use_cmd = stashes[idx][2] 655 | replaced_cmds.append(use_cmd) 656 | print("%10d %9s %s" % (sr.size(), "explicit", use_cmd)) 657 | else: 658 | stashed_blocks += sr.size() 659 | 660 | # xf.use_stash generates free commands. 661 | for _, sr in xf.use_stash: 662 | stashed_blocks -= sr.size() 663 | 664 | # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to 665 | # ComputePatches(), they both have the style of "diff".
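# For example (hypothetical numbers): with max_allowed = 1000 blocks and
# stashed_blocks = 900, a "diff" whose src and tgt ranges overlap and whose
# src_ranges covers 200 blocks would need an implicit stash of
# 900 + 200 = 1100 > 1000 blocks, so the check below flags it and
# ConvertToNew() later downgrades it to a "new" command.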
666 | if xf.style == "diff" and self.version >= 3: 667 | assert xf.tgt_ranges and xf.src_ranges 668 | if xf.src_ranges.overlaps(xf.tgt_ranges): 669 | if stashed_blocks + xf.src_ranges.size() > max_allowed: 670 | replaced_cmds.append(xf) 671 | print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf)) 672 | 673 | # Replace the commands in replaced_cmds with "new"s. 674 | for cmd in replaced_cmds: 675 | # It no longer uses any commands in "use_stash". Remove the def points 676 | # for all those stashes. 677 | for idx, sr in cmd.use_stash: 678 | def_cmd = stashes[idx][1] 679 | assert (idx, sr) in def_cmd.stash_before 680 | def_cmd.stash_before.remove((idx, sr)) 681 | 682 | # Add up the blocks that violate the space limit; the total is printed 683 | # to the screen later. 684 | new_blocks += cmd.tgt_ranges.size() 685 | cmd.ConvertToNew() 686 | 687 | num_of_bytes = new_blocks * self.tgt.blocksize 688 | print(" Total %d blocks (%d bytes) are packed as new blocks due to " 689 | "insufficient cache size." % (new_blocks, num_of_bytes)) 690 | 691 | def ComputePatches(self, prefix): 692 | print("Reticulating splines...") 693 | diff_q = [] 694 | patch_num = 0 695 | with open(prefix + ".new.dat", "wb") as new_f: 696 | for xf in self.transfers: 697 | if xf.style == "zero": 698 | pass 699 | elif xf.style == "new": 700 | for piece in self.tgt.ReadRangeSet(xf.tgt_ranges): 701 | new_f.write(piece) 702 | elif xf.style == "diff": 703 | src = self.src.ReadRangeSet(xf.src_ranges) 704 | tgt = self.tgt.ReadRangeSet(xf.tgt_ranges) 705 | 706 | # We can't compare src and tgt directly because they may have 707 | # the same content but be broken up into blocks differently, eg: 708 | # 709 | # ["he", "llo"] vs ["h", "ello"] 710 | # 711 | # We want those to compare equal, ideally without having to 712 | # actually concatenate the strings (these may be tens of 713 | # megabytes). 714 | 715 | src_sha1 = sha1() 716 | for p in src: 717 | src_sha1.update(p) 718 | tgt_sha1 = sha1() 719 | tgt_size = 0 720 | for p in tgt: 721 | tgt_sha1.update(p) 722 | tgt_size += len(p) 723 | 724 | if src_sha1.digest() == tgt_sha1.digest(): 725 | # These are identical; we don't need to generate a patch, 726 | # just issue copy commands on the device. 727 | xf.style = "move" 728 | else: 729 | # For files in zip format (eg, APKs, JARs, etc.) we would 730 | # like to use imgdiff -z if possible (because it usually 731 | # produces significantly smaller patches than bsdiff). 732 | # This is permissible if: 733 | # 734 | # - imgdiff is not disabled, and 735 | # - the source and target files are monotonic (ie, the 736 | # data is stored with blocks in increasing order), and 737 | # - we haven't removed any blocks from the source set. 738 | # 739 | # If these conditions are satisfied then appending all the 740 | # blocks in the set together in order will produce a valid 741 | # zip file (plus possibly extra zeros in the last block), 742 | # which is what imgdiff needs to operate. (imgdiff is 743 | # fine with extra zeros at the end of the file.) 744 | imgdiff = (not self.disable_imgdiff and xf.intact and 745 | xf.tgt_name.split(".")[-1].lower() 746 | in ("apk", "jar", "zip")) 747 | xf.style = "imgdiff" if imgdiff else "bsdiff" 748 | diff_q.append((tgt_size, src, tgt, xf, patch_num)) 749 | patch_num += 1 750 | 751 | else: 752 | assert False, "unknown style " + xf.style 753 | 754 | if diff_q: 755 | if self.threads > 1: 756 | print("Computing patches (using %d threads)..."
% (self.threads,)) 757 | else: 758 | print("Computing patches...") 759 | diff_q.sort() 760 | 761 | patches = [None] * patch_num 762 | 763 | # TODO: Rewrite with multiprocessing.ThreadPool? 764 | lock = threading.Lock() 765 | def diff_worker(): 766 | while True: 767 | with lock: 768 | if not diff_q: 769 | return 770 | tgt_size, src, tgt, xf, patchnum = diff_q.pop() 771 | patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff")) 772 | size = len(patch) 773 | with lock: 774 | patches[patchnum] = (patch, xf) 775 | print("%10d %10d (%6.2f%%) %7s %s" % ( 776 | size, tgt_size, size * 100.0 / tgt_size, xf.style, 777 | xf.tgt_name if xf.tgt_name == xf.src_name else ( 778 | xf.tgt_name + " (from " + xf.src_name + ")"))) 779 | 780 | threads = [threading.Thread(target=diff_worker) 781 | for _ in range(self.threads)] 782 | for th in threads: 783 | th.start() 784 | while threads: 785 | threads.pop().join() 786 | else: 787 | patches = [] 788 | 789 | p = 0 790 | with open(prefix + ".patch.dat", "wb") as patch_f: 791 | for patch, xf in patches: 792 | xf.patch_start = p 793 | xf.patch_len = len(patch) 794 | patch_f.write(patch) 795 | p += len(patch) 796 | 797 | def AssertSequenceGood(self): 798 | # Simulate the sequences of transfers we will output, and check that: 799 | # - we never read a block after writing it, and 800 | # - we write every block we care about exactly once. 801 | 802 | # Start with no blocks having been touched yet. 803 | touched = array.array("B", (0,) * self.tgt.total_blocks) 804 | 805 | # Imagine processing the transfers in order. 806 | for xf in self.transfers: 807 | # Check that the input blocks for this transfer haven't yet been touched. 808 | 809 | x = xf.src_ranges 810 | if self.version >= 2: 811 | for _, sr in xf.use_stash: 812 | x = x.subtract(sr) 813 | 814 | for s, e in x: 815 | # Source image could be larger. Don't check the blocks that are in the 816 | # source image only. Since they are not in 'touched', and won't ever 817 | # be touched. 818 | for i in range(s, min(e, self.tgt.total_blocks)): 819 | assert touched[i] == 0 820 | 821 | # Check that the output blocks for this transfer haven't yet 822 | # been touched, and touch all the blocks written by this 823 | # transfer. 824 | for s, e in xf.tgt_ranges: 825 | for i in range(s, e): 826 | assert touched[i] == 0 827 | touched[i] = 1 828 | 829 | # Check that we've written every target block. 830 | for s, e in self.tgt.care_map: 831 | for i in range(s, e): 832 | assert touched[i] == 1 833 | 834 | def ImproveVertexSequence(self): 835 | print("Improving vertex order...") 836 | 837 | # At this point our digraph is acyclic; we reversed any edges that 838 | # were backwards in the heuristically-generated sequence. The 839 | # previously-generated order is still acceptable, but we hope to 840 | # find a better order that needs less memory for stashed data. 841 | # Now we do a topological sort to generate a new vertex order, 842 | # using a greedy algorithm to choose which vertex goes next 843 | # whenever we have a choice. 844 | 845 | # Make a copy of the edge set; this copy will get destroyed by the 846 | # algorithm. 847 | for xf in self.transfers: 848 | xf.incoming = xf.goes_after.copy() 849 | xf.outgoing = xf.goes_before.copy() 850 | 851 | L = [] # the new vertex order 852 | 853 | # S is the set of sources in the remaining graph; we always choose 854 | # the one that leaves the least amount of stashed data after it's 855 | # executed. 
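# For example (hypothetical values): if the available sources have
# NetStashChange() of +8, 0 and -4 blocks, the heap yields the -4 transfer
# first, since executing it frees the most stashed data; ties fall back to
# u.order, which keeps the resulting sequence deterministic.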
856 | S = [(u.NetStashChange(), u.order, u) for u in self.transfers 857 | if not u.incoming] 858 | heapq.heapify(S) 859 | 860 | while S: 861 | _, _, xf = heapq.heappop(S) 862 | L.append(xf) 863 | for u in xf.outgoing: 864 | del u.incoming[xf] 865 | if not u.incoming: 866 | heapq.heappush(S, (u.NetStashChange(), u.order, u)) 867 | 868 | # if this fails then our graph had a cycle. 869 | assert len(L) == len(self.transfers) 870 | 871 | self.transfers = L 872 | for i, xf in enumerate(L): 873 | xf.order = i 874 | 875 | def RemoveBackwardEdges(self): 876 | print("Removing backward edges...") 877 | in_order = 0 878 | out_of_order = 0 879 | lost_source = 0 880 | 881 | for xf in self.transfers: 882 | lost = 0 883 | size = xf.src_ranges.size() 884 | for u in xf.goes_before: 885 | # xf should go before u 886 | if xf.order < u.order: 887 | # it does, hurray! 888 | in_order += 1 889 | else: 890 | # it doesn't, boo. trim the blocks that u writes from xf's 891 | # source, so that xf can go after u. 892 | out_of_order += 1 893 | assert xf.src_ranges.overlaps(u.tgt_ranges) 894 | xf.src_ranges = xf.src_ranges.subtract(u.tgt_ranges) 895 | xf.intact = False 896 | 897 | if xf.style == "diff" and not xf.src_ranges: 898 | # nothing left to diff from; treat as new data 899 | xf.style = "new" 900 | 901 | lost = size - xf.src_ranges.size() 902 | lost_source += lost 903 | 904 | print((" %d/%d dependencies (%.2f%%) were violated; " 905 | "%d source blocks removed.") % 906 | (out_of_order, in_order + out_of_order, 907 | (out_of_order * 100.0 / (in_order + out_of_order)) 908 | if (in_order + out_of_order) else 0.0, 909 | lost_source)) 910 | 911 | def ReverseBackwardEdges(self): 912 | print("Reversing backward edges...") 913 | in_order = 0 914 | out_of_order = 0 915 | stashes = 0 916 | stash_size = 0 917 | 918 | for xf in self.transfers: 919 | for u in xf.goes_before.copy(): 920 | # xf should go before u 921 | if xf.order < u.order: 922 | # it does, hurray! 923 | in_order += 1 924 | else: 925 | # it doesn't, boo. modify u to stash the blocks that it 926 | # writes that xf wants to read, and then require u to go 927 | # before xf. 928 | out_of_order += 1 929 | 930 | overlap = xf.src_ranges.intersect(u.tgt_ranges) 931 | assert overlap 932 | 933 | u.stash_before.append((stashes, overlap)) 934 | xf.use_stash.append((stashes, overlap)) 935 | stashes += 1 936 | stash_size += overlap.size() 937 | 938 | # reverse the edge direction; now xf must go after u 939 | del xf.goes_before[u] 940 | del u.goes_after[xf] 941 | xf.goes_after[u] = None # value doesn't matter 942 | u.goes_before[xf] = None 943 | 944 | print((" %d/%d dependencies (%.2f%%) were violated; " 945 | "%d source blocks stashed.") % 946 | (out_of_order, in_order + out_of_order, 947 | (out_of_order * 100.0 / (in_order + out_of_order)) 948 | if (in_order + out_of_order) else 0.0, 949 | stash_size)) 950 | 951 | def FindVertexSequence(self): 952 | print("Finding vertex sequence...") 953 | 954 | # This is based on "A Fast & Effective Heuristic for the Feedback 955 | # Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of 956 | # it as starting with the digraph G and moving all the vertices to 957 | # be on a horizontal line in some order, trying to minimize the 958 | # number of edges that end up pointing to the left. Left-pointing 959 | # edges will get removed to turn the digraph into a DAG. 
In this 960 | # case each edge has a weight which is the number of source blocks 961 | # we'll lose if that edge is removed; we try to minimize the total 962 | # weight rather than just the number of edges. 963 | 964 | # Make a copy of the edge set; this copy will get destroyed by the 965 | # algorithm. 966 | for xf in self.transfers: 967 | xf.incoming = xf.goes_after.copy() 968 | xf.outgoing = xf.goes_before.copy() 969 | xf.score = sum(xf.outgoing.values()) - sum(xf.incoming.values()) 970 | 971 | # We use an OrderedDict instead of just a set so that the output 972 | # is repeatable; otherwise it would depend on the hash values of 973 | # the transfer objects. 974 | G = OrderedDict() 975 | for xf in self.transfers: 976 | G[xf] = None 977 | s1 = deque() # the left side of the sequence, built from left to right 978 | s2 = deque() # the right side of the sequence, built from right to left 979 | 980 | heap = [] 981 | for xf in self.transfers: 982 | xf.heap_item = HeapItem(xf) 983 | heap.append(xf.heap_item) 984 | heapq.heapify(heap) 985 | 986 | sinks = set(u for u in G if not u.outgoing) 987 | sources = set(u for u in G if not u.incoming) 988 | 989 | def adjust_score(iu, delta): 990 | iu.score += delta 991 | iu.heap_item.clear() 992 | iu.heap_item = HeapItem(iu) 993 | heapq.heappush(heap, iu.heap_item) 994 | 995 | while G: 996 | # Put all sinks at the end of the sequence. 997 | while sinks: 998 | new_sinks = set() 999 | for u in sinks: 1000 | if u not in G: continue 1001 | s2.appendleft(u) 1002 | del G[u] 1003 | for iu in u.incoming: 1004 | adjust_score(iu, -iu.outgoing.pop(u)) 1005 | if not iu.outgoing: new_sinks.add(iu) 1006 | sinks = new_sinks 1007 | 1008 | # Put all the sources at the beginning of the sequence. 1009 | while sources: 1010 | new_sources = set() 1011 | for u in sources: 1012 | if u not in G: continue 1013 | s1.append(u) 1014 | del G[u] 1015 | for iu in u.outgoing: 1016 | adjust_score(iu, +iu.incoming.pop(u)) 1017 | if not iu.incoming: new_sources.add(iu) 1018 | sources = new_sources 1019 | 1020 | if not G: break 1021 | 1022 | # Find the "best" vertex to put next. "Best" is the one that 1023 | # maximizes the net difference in source blocks saved we get by 1024 | # pretending it's a source rather than a sink. 1025 | 1026 | while True: 1027 | u = heapq.heappop(heap) 1028 | if u and u.item in G: 1029 | u = u.item 1030 | break 1031 | 1032 | s1.append(u) 1033 | del G[u] 1034 | for iu in u.outgoing: 1035 | adjust_score(iu, +iu.incoming.pop(u)) 1036 | if not iu.incoming: sources.add(iu) 1037 | 1038 | for iu in u.incoming: 1039 | adjust_score(iu, -iu.outgoing.pop(u)) 1040 | if not iu.outgoing: sinks.add(iu) 1041 | 1042 | # Now record the sequence in the 'order' field of each transfer, 1043 | # and by rearranging self.transfers to be in the chosen sequence. 1044 | 1045 | new_transfers = [] 1046 | for x in itertools.chain(s1, s2): 1047 | x.order = len(new_transfers) 1048 | new_transfers.append(x) 1049 | del x.incoming 1050 | del x.outgoing 1051 | 1052 | self.transfers = new_transfers 1053 | 1054 | def GenerateDigraph(self): 1055 | print("Generating digraph...") 1056 | 1057 | # Each item of source_ranges will be: 1058 | # - None, if that block is not used as a source, 1059 | # - a transfer, if one transfer uses it as a source, or 1060 | # - a set of transfers. 
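# For example (hypothetical ranges): if transfer A reads blocks 0-9 and
# transfer B reads blocks 5-14, then source_ranges[2] is A,
# source_ranges[12] is B, and source_ranges[7] is the set {A, B}.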
1061 | source_ranges = [] 1062 | for b in self.transfers: 1063 | for s, e in b.src_ranges: 1064 | if e > len(source_ranges): 1065 | source_ranges.extend([None] * (e-len(source_ranges))) 1066 | for i in range(s, e): 1067 | if source_ranges[i] is None: 1068 | source_ranges[i] = b 1069 | else: 1070 | if not isinstance(source_ranges[i], set): 1071 | source_ranges[i] = set([source_ranges[i]]) 1072 | source_ranges[i].add(b) 1073 | 1074 | for a in self.transfers: 1075 | intersections = set() 1076 | for s, e in a.tgt_ranges: 1077 | for i in range(s, e): 1078 | if i >= len(source_ranges): break 1079 | b = source_ranges[i] 1080 | if b is not None: 1081 | if isinstance(b, set): 1082 | intersections.update(b) 1083 | else: 1084 | intersections.add(b) 1085 | 1086 | for b in intersections: 1087 | if a is b: continue 1088 | 1089 | # If the blocks written by A are read by B, then B needs to go before A. 1090 | i = a.tgt_ranges.intersect(b.src_ranges) 1091 | if i: 1092 | if b.src_name == "__ZERO": 1093 | # the cost of removing source blocks for the __ZERO domain 1094 | # is (nearly) zero. 1095 | size = 0 1096 | else: 1097 | size = i.size() 1098 | b.goes_before[a] = size 1099 | a.goes_after[b] = size 1100 | 1101 | def FindTransfers(self): 1102 | """Parse the file_map to generate all the transfers.""" 1103 | 1104 | def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id, 1105 | split=False): 1106 | """Wrapper function for adding a Transfer(). 1107 | 1108 | For BBOTA v3, we need to stash source blocks to support the resumable 1109 | update feature. However, as files grow and the cache partition shrinks, 1110 | source blocks may become too large to stash. If a file occupies too many 1111 | blocks (more than MAX_BLOCKS_PER_DIFF_TRANSFER), we split it into smaller 1112 | pieces by creating multiple Transfer()s. 1113 | 1114 | The downside is that after splitting, we may increase the package size 1115 | since the split pieces don't align well. According to our experiments, 1116 | 1/8 of the cache size as the per-piece limit appears to be optimal. 1117 | Compared to the fixed 1024-block limit, it reduces the overall package 1118 | size by 30% for volantis, and 20% for angler and bullhead.""" 1119 | 1120 | # We care about diff transfers only. 1121 | if style != "diff" or not split: 1122 | Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id) 1123 | return 1124 | 1125 | pieces = 0 1126 | cache_size = common.OPTIONS.cache_size 1127 | split_threshold = 0.125 1128 | max_blocks_per_transfer = int(cache_size * split_threshold / 1129 | self.tgt.blocksize) 1130 | 1131 | # Change nothing for small files. 1132 | if (tgt_ranges.size() <= max_blocks_per_transfer and 1133 | src_ranges.size() <= max_blocks_per_transfer): 1134 | Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id) 1135 | return 1136 | 1137 | while (tgt_ranges.size() > max_blocks_per_transfer and 1138 | src_ranges.size() > max_blocks_per_transfer): 1139 | tgt_split_name = "%s-%d" % (tgt_name, pieces) 1140 | src_split_name = "%s-%d" % (src_name, pieces) 1141 | tgt_first = tgt_ranges.first(max_blocks_per_transfer) 1142 | src_first = src_ranges.first(max_blocks_per_transfer) 1143 | 1144 | Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style, 1145 | by_id) 1146 | 1147 | tgt_ranges = tgt_ranges.subtract(tgt_first) 1148 | src_ranges = src_ranges.subtract(src_first) 1149 | pieces += 1 1150 | 1151 | # Handle remaining blocks. 1152 | if tgt_ranges.size() or src_ranges.size(): 1153 | # Must be both non-empty.
1154 | assert tgt_ranges.size() and src_ranges.size() 1155 | tgt_split_name = "%s-%d" % (tgt_name, pieces) 1156 | src_split_name = "%s-%d" % (src_name, pieces) 1157 | Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style, 1158 | by_id) 1159 | 1160 | empty = RangeSet() 1161 | for tgt_fn, tgt_ranges in self.tgt.file_map.items(): 1162 | if tgt_fn == "__ZERO": 1163 | # the special "__ZERO" domain is all the blocks not contained 1164 | # in any file and that are filled with zeros. We have a 1165 | # special transfer style for zero blocks. 1166 | src_ranges = self.src.file_map.get("__ZERO", empty) 1167 | AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges, 1168 | "zero", self.transfers) 1169 | continue 1170 | 1171 | elif tgt_fn == "__COPY": 1172 | # "__COPY" domain includes all the blocks not contained in any 1173 | # file and that need to be copied unconditionally to the target. 1174 | AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers) 1175 | continue 1176 | 1177 | elif tgt_fn in self.src.file_map: 1178 | # Look for an exact pathname match in the source. 1179 | AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn], 1180 | "diff", self.transfers, self.version >= 3) 1181 | continue 1182 | 1183 | b = os.path.basename(tgt_fn) 1184 | if b in self.src_basenames: 1185 | # Look for an exact basename match in the source. 1186 | src_fn = self.src_basenames[b] 1187 | AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn], 1188 | "diff", self.transfers, self.version >= 3) 1189 | continue 1190 | 1191 | b = re.sub("[0-9]+", "#", b) 1192 | if b in self.src_numpatterns: 1193 | # Look for a 'number pattern' match (a basename match after 1194 | # all runs of digits are replaced by "#"). (This is useful 1195 | # for .so files that contain version numbers in the filename 1196 | # that get bumped.) 
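# For example (hypothetical names): "libfoo.so.12.4" in the target and
# "libfoo.so.11.9" in the source both normalize to "libfoo.so.#.#", so the
# old library is chosen as the diff source for the new one.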
1197 | src_fn = self.src_numpatterns[b] 1198 | AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn], 1199 | "diff", self.transfers, self.version >= 3) 1200 | continue 1201 | 1202 | AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers) 1203 | 1204 | def AbbreviateSourceNames(self): 1205 | for k in self.src.file_map.keys(): 1206 | b = os.path.basename(k) 1207 | self.src_basenames[b] = k 1208 | b = re.sub("[0-9]+", "#", b) 1209 | self.src_numpatterns[b] = k 1210 | 1211 | @staticmethod 1212 | def AssertPartition(total, seq): 1213 | """Assert that all the RangeSets in 'seq' form a partition of the 1214 | 'total' RangeSet (ie, they are nonintersecting and their union 1215 | equals 'total').""" 1216 | 1217 | so_far = RangeSet() 1218 | for i in seq: 1219 | assert not so_far.overlaps(i) 1220 | so_far = so_far.union(i) 1221 | assert so_far == total 1222 | -------------------------------------------------------------------------------- /bin/linux/brotli: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/brotli -------------------------------------------------------------------------------- /bin/linux/busybox: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/busybox -------------------------------------------------------------------------------- /bin/linux/common.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2008 The Android Open Source Project 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from __future__ import print_function 16 | 17 | import copy 18 | import errno 19 | import getopt 20 | import getpass 21 | import imp 22 | import os 23 | import platform 24 | import re 25 | import shlex 26 | import shutil 27 | import subprocess 28 | import sys 29 | import tempfile 30 | import threading 31 | import time 32 | import zipfile 33 | 34 | import blockimgdiff 35 | 36 | from hashlib import sha1 as sha1 37 | 38 | 39 | class Options(object): 40 | def __init__(self): 41 | platform_search_path = { 42 | "linux2": "out/host/linux-x86", 43 | "darwin": "out/host/darwin-x86", 44 | } 45 | 46 | self.search_path = platform_search_path.get(sys.platform, None) 47 | self.signapk_path = "framework/signapk.jar" # Relative to search_path 48 | self.signapk_shared_library_path = "lib64" # Relative to search_path 49 | self.extra_signapk_args = [] 50 | self.java_path = "java" # Use the one on the path by default. 51 | self.java_args = ["-Xmx2048m"] # The default JVM args. 
52 | self.public_key_suffix = ".x509.pem" 53 | self.private_key_suffix = ".pk8" 54 | # use otatools built boot_signer by default 55 | self.boot_signer_path = "boot_signer" 56 | self.boot_signer_args = [] 57 | self.verity_signer_path = None 58 | self.verity_signer_args = [] 59 | self.verbose = False 60 | self.tempfiles = [] 61 | self.device_specific = None 62 | self.extras = {} 63 | self.info_dict = None 64 | self.source_info_dict = None 65 | self.target_info_dict = None 66 | self.worker_threads = None 67 | # Stash size cannot exceed cache_size * threshold. 68 | self.cache_size = None 69 | self.stash_threshold = 0.8 70 | 71 | 72 | OPTIONS = Options() 73 | 74 | 75 | # Values for "certificate" in apkcerts that mean special things. 76 | SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") 77 | 78 | class ErrorCode(object): 79 | """Define error_codes for failures that happen during the actual 80 | update package installation. 81 | 82 | Error codes 0-999 are reserved for failures before the package 83 | installation (e.g. low battery, package verification failure). 84 | Detailed code in 'bootable/recovery/error_code.h' """ 85 | 86 | SYSTEM_VERIFICATION_FAILURE = 1000 87 | SYSTEM_UPDATE_FAILURE = 1001 88 | SYSTEM_UNEXPECTED_CONTENTS = 1002 89 | SYSTEM_NONZERO_CONTENTS = 1003 90 | SYSTEM_RECOVER_FAILURE = 1004 91 | VENDOR_VERIFICATION_FAILURE = 2000 92 | VENDOR_UPDATE_FAILURE = 2001 93 | VENDOR_UNEXPECTED_CONTENTS = 2002 94 | VENDOR_NONZERO_CONTENTS = 2003 95 | VENDOR_RECOVER_FAILURE = 2004 96 | OEM_PROP_MISMATCH = 3000 97 | FINGERPRINT_MISMATCH = 3001 98 | THUMBPRINT_MISMATCH = 3002 99 | OLDER_BUILD = 3003 100 | DEVICE_MISMATCH = 3004 101 | BAD_PATCH_FILE = 3005 102 | INSUFFICIENT_CACHE_SPACE = 3006 103 | TUNE_PARTITION_FAILURE = 3007 104 | APPLY_PATCH_FAILURE = 3008 105 | 106 | class ExternalError(RuntimeError): 107 | pass 108 | 109 | 110 | def Run(args, **kwargs): 111 | """Create and return a subprocess.Popen object, printing the command 112 | line on the terminal if -v was specified.""" 113 | if OPTIONS.verbose: 114 | print(" running: ", " ".join(args)) 115 | return subprocess.Popen(args, **kwargs) 116 | 117 | 118 | def CloseInheritedPipes(): 119 | """ Gmake on Mac OS has a file descriptor (PIPE) leak. We close those fds 120 | before doing other work.""" 121 | if platform.system() != "Darwin": 122 | return 123 | for d in range(3, 1025): 124 | try: 125 | stat = os.fstat(d) 126 | if stat is not None: 127 | pipebit = stat[0] & 0x1000 128 | if pipebit != 0: 129 | os.close(d) 130 | except OSError: 131 | pass 132 | 133 | 134 | def LoadInfoDict(input_file, input_dir=None): 135 | """Read and parse the META/misc_info.txt key/value pairs from the 136 | input target files and return a dict.""" 137 | 138 | def read_helper(fn): 139 | if isinstance(input_file, zipfile.ZipFile): 140 | return input_file.read(fn) 141 | else: 142 | path = os.path.join(input_file, *fn.split("/")) 143 | try: 144 | with open(path) as f: 145 | return f.read() 146 | except IOError as e: 147 | if e.errno == errno.ENOENT: 148 | raise KeyError(fn) 149 | d = {} 150 | try: 151 | d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n")) 152 | except KeyError: 153 | # ok if misc_info.txt doesn't exist 154 | pass 155 | 156 | # backwards compatibility: These values used to be in their own 157 | # files. Look for them, in case we're processing an old 158 | # target_files zip.
159 | 160 | if "mkyaffs2_extra_flags" not in d: 161 | try: 162 | d["mkyaffs2_extra_flags"] = read_helper( 163 | "META/mkyaffs2-extra-flags.txt").strip() 164 | except KeyError: 165 | # ok if flags don't exist 166 | pass 167 | 168 | if "recovery_api_version" not in d: 169 | try: 170 | d["recovery_api_version"] = read_helper( 171 | "META/recovery-api-version.txt").strip() 172 | except KeyError: 173 | raise ValueError("can't find recovery API version in input target-files") 174 | 175 | if "tool_extensions" not in d: 176 | try: 177 | d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip() 178 | except KeyError: 179 | # ok if extensions don't exist 180 | pass 181 | 182 | if "fstab_version" not in d: 183 | d["fstab_version"] = "1" 184 | 185 | # A few properties are stored as links to the files in the out/ directory. 186 | # It works fine with the build system. However, they are no longer available 187 | # when (re)generating from target_files zip. If input_dir is not None, we 188 | # are doing repacking. Redirect those properties to the actual files in the 189 | # unzipped directory. 190 | if input_dir is not None: 191 | # We carry a copy of file_contexts.bin under META/. If not available, 192 | # search BOOT/RAMDISK/. Note that sometimes we may need a different file 193 | # to build images than the one running on device, such as when enabling 194 | # system_root_image. In that case, we must have the one for image 195 | # generation copied to META/. 196 | fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts")) 197 | fc_config = os.path.join(input_dir, "META", fc_basename) 198 | if d.get("system_root_image") == "true": 199 | assert os.path.exists(fc_config) 200 | if not os.path.exists(fc_config): 201 | fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename) 202 | if not os.path.exists(fc_config): 203 | fc_config = None 204 | 205 | if fc_config: 206 | d["selinux_fc"] = fc_config 207 | 208 | # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config". 209 | if d.get("system_root_image") == "true": 210 | d["ramdisk_dir"] = os.path.join(input_dir, "ROOT") 211 | d["ramdisk_fs_config"] = os.path.join( 212 | input_dir, "META", "root_filesystem_config.txt") 213 | 214 | # Redirect {system,vendor}_base_fs_file. 
215 | if "system_base_fs_file" in d: 216 | basename = os.path.basename(d["system_base_fs_file"]) 217 | system_base_fs_file = os.path.join(input_dir, "META", basename) 218 | if os.path.exists(system_base_fs_file): 219 | d["system_base_fs_file"] = system_base_fs_file 220 | else: 221 | print("Warning: failed to find system base fs file: %s" % ( 222 | system_base_fs_file,)) 223 | del d["system_base_fs_file"] 224 | 225 | if "vendor_base_fs_file" in d: 226 | basename = os.path.basename(d["vendor_base_fs_file"]) 227 | vendor_base_fs_file = os.path.join(input_dir, "META", basename) 228 | if os.path.exists(vendor_base_fs_file): 229 | d["vendor_base_fs_file"] = vendor_base_fs_file 230 | else: 231 | print("Warning: failed to find vendor base fs file: %s" % ( 232 | vendor_base_fs_file,)) 233 | del d["vendor_base_fs_file"] 234 | 235 | try: 236 | data = read_helper("META/imagesizes.txt") 237 | for line in data.split("\n"): 238 | if not line: 239 | continue 240 | name, value = line.split(" ", 1) 241 | if not value: 242 | continue 243 | if name == "blocksize": 244 | d[name] = value 245 | else: 246 | d[name + "_size"] = value 247 | except KeyError: 248 | pass 249 | 250 | def makeint(key): 251 | if key in d: 252 | d[key] = int(d[key], 0) 253 | 254 | makeint("recovery_api_version") 255 | makeint("blocksize") 256 | makeint("system_size") 257 | makeint("vendor_size") 258 | makeint("userdata_size") 259 | makeint("cache_size") 260 | makeint("recovery_size") 261 | makeint("boot_size") 262 | makeint("fstab_version") 263 | 264 | if d.get("no_recovery", False) == "true": 265 | d["fstab"] = None 266 | else: 267 | d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"], 268 | d.get("system_root_image", False)) 269 | d["build.prop"] = LoadBuildProp(read_helper) 270 | return d 271 | 272 | def LoadBuildProp(read_helper): 273 | try: 274 | data = read_helper("SYSTEM/build.prop") 275 | except KeyError: 276 | print("Warning: could not find SYSTEM/build.prop in %s" % zip) 277 | data = "" 278 | return LoadDictionaryFromLines(data.split("\n")) 279 | 280 | def LoadDictionaryFromLines(lines): 281 | d = {} 282 | for line in lines: 283 | line = line.strip() 284 | if not line or line.startswith("#"): 285 | continue 286 | if "=" in line: 287 | name, value = line.split("=", 1) 288 | d[name] = value 289 | return d 290 | 291 | def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False): 292 | class Partition(object): 293 | def __init__(self, mount_point, fs_type, device, length, device2, context): 294 | self.mount_point = mount_point 295 | self.fs_type = fs_type 296 | self.device = device 297 | self.length = length 298 | self.device2 = device2 299 | self.context = context 300 | 301 | try: 302 | data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab") 303 | except KeyError: 304 | print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab") 305 | data = "" 306 | 307 | if fstab_version == 1: 308 | d = {} 309 | for line in data.split("\n"): 310 | line = line.strip() 311 | if not line or line.startswith("#"): 312 | continue 313 | pieces = line.split() 314 | if not 3 <= len(pieces) <= 4: 315 | raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) 316 | options = None 317 | if len(pieces) >= 4: 318 | if pieces[3].startswith("/"): 319 | device2 = pieces[3] 320 | if len(pieces) >= 5: 321 | options = pieces[4] 322 | else: 323 | device2 = None 324 | options = pieces[3] 325 | else: 326 | device2 = None 327 | 328 | mount_point = pieces[0] 329 | length = 0 330 | if options: 331 | options = 
options.split(",") 332 | for i in options: 333 | if i.startswith("length="): 334 | length = int(i[7:]) 335 | else: 336 | print("%s: unknown option \"%s\"" % (mount_point, i)) 337 | 338 | d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1], 339 | device=pieces[2], length=length, 340 | device2=device2) 341 | 342 | elif fstab_version == 2: 343 | d = {} 344 | for line in data.split("\n"): 345 | line = line.strip() 346 | if not line or line.startswith("#"): 347 | continue 348 | # 349 | pieces = line.split() 350 | if len(pieces) != 5: 351 | raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) 352 | 353 | # Ignore entries that are managed by vold 354 | options = pieces[4] 355 | if "voldmanaged=" in options: 356 | continue 357 | 358 | # It's a good line, parse it 359 | length = 0 360 | options = options.split(",") 361 | for i in options: 362 | if i.startswith("length="): 363 | length = int(i[7:]) 364 | else: 365 | # Ignore all unknown options in the unified fstab 366 | continue 367 | 368 | mount_flags = pieces[3] 369 | # Honor the SELinux context if present. 370 | context = None 371 | for i in mount_flags.split(","): 372 | if i.startswith("context="): 373 | context = i 374 | 375 | mount_point = pieces[1] 376 | d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2], 377 | device=pieces[0], length=length, 378 | device2=None, context=context) 379 | 380 | else: 381 | raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,)) 382 | 383 | # / is used for the system mount point when the root directory is included in 384 | # system. Other areas assume system is always at "/system" so point /system 385 | # at /. 386 | if system_root_image: 387 | assert not d.has_key("/system") and d.has_key("/") 388 | d["/system"] = d["/"] 389 | return d 390 | 391 | 392 | def DumpInfoDict(d): 393 | for k, v in sorted(d.items()): 394 | print("%-25s = (%s) %s" % (k, type(v).__name__, v)) 395 | 396 | 397 | def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None, 398 | has_ramdisk=False): 399 | """Build a bootable image from the specified sourcedir. 400 | 401 | Take a kernel, cmdline, and optionally a ramdisk directory from the input (in 402 | 'sourcedir'), and turn them into a boot image. 
403 |   None if sourcedir does not appear to contain files for building the
404 |   requested image."""
405 | 
406 |   def make_ramdisk():
407 |     ramdisk_img = tempfile.NamedTemporaryFile()
408 | 
409 |     if os.access(fs_config_file, os.F_OK):
410 |       cmd = ["mkbootfs", "-f", fs_config_file,
411 |              os.path.join(sourcedir, "RAMDISK")]
412 |     else:
413 |       cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
414 |     p1 = Run(cmd, stdout=subprocess.PIPE)
415 |     p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
416 | 
417 |     p2.wait()
418 |     p1.wait()
419 |     assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
420 |     assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
421 | 
422 |     return ramdisk_img
423 | 
424 |   if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
425 |     return None
426 | 
427 |   if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
428 |     return None
429 | 
430 |   if info_dict is None:
431 |     info_dict = OPTIONS.info_dict
432 | 
433 |   img = tempfile.NamedTemporaryFile()
434 | 
435 |   if has_ramdisk:
436 |     ramdisk_img = make_ramdisk()
437 | 
438 |   # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
439 |   mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
440 | 
441 |   cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
442 | 
443 |   fn = os.path.join(sourcedir, "second")
444 |   if os.access(fn, os.F_OK):
445 |     cmd.append("--second")
446 |     cmd.append(fn)
447 | 
448 |   fn = os.path.join(sourcedir, "cmdline")
449 |   if os.access(fn, os.F_OK):
450 |     cmd.append("--cmdline")
451 |     cmd.append(open(fn).read().rstrip("\n"))
452 | 
453 |   fn = os.path.join(sourcedir, "base")
454 |   if os.access(fn, os.F_OK):
455 |     cmd.append("--base")
456 |     cmd.append(open(fn).read().rstrip("\n"))
457 | 
458 |   fn = os.path.join(sourcedir, "pagesize")
459 |   if os.access(fn, os.F_OK):
460 |     cmd.append("--pagesize")
461 |     cmd.append(open(fn).read().rstrip("\n"))
462 | 
463 |   args = info_dict.get("mkbootimg_args", None)
464 |   if args and args.strip():
465 |     cmd.extend(shlex.split(args))
466 | 
467 |   args = info_dict.get("mkbootimg_version_args", None)
468 |   if args and args.strip():
469 |     cmd.extend(shlex.split(args))
470 | 
471 |   if has_ramdisk:
472 |     cmd.extend(["--ramdisk", ramdisk_img.name])
473 | 
474 |   img_unsigned = None
475 |   if info_dict.get("vboot", None):
476 |     img_unsigned = tempfile.NamedTemporaryFile()
477 |     cmd.extend(["--output", img_unsigned.name])
478 |   else:
479 |     cmd.extend(["--output", img.name])
480 | 
481 |   p = Run(cmd, stdout=subprocess.PIPE)
482 |   p.communicate()
483 |   assert p.returncode == 0, "mkbootimg of %s image failed" % (
484 |       os.path.basename(sourcedir),)
485 | 
486 |   if (info_dict.get("boot_signer", None) == "true" and
487 |       info_dict.get("verity_key", None)):
488 |     path = "/" + os.path.basename(sourcedir).lower()
489 |     cmd = [OPTIONS.boot_signer_path]
490 |     cmd.extend(OPTIONS.boot_signer_args)
491 |     cmd.extend([path, img.name,
492 |                 info_dict["verity_key"] + ".pk8",
493 |                 info_dict["verity_key"] + ".x509.pem", img.name])
494 |     p = Run(cmd, stdout=subprocess.PIPE)
495 |     p.communicate()
496 |     assert p.returncode == 0, "boot_signer of %s image failed" % path
497 | 
498 |   # Sign the image if vboot is non-empty.
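  # (Sketch of the command assembled in the branch below, assuming typical
  #  info_dict values: [vboot_signer_cmd, futility, unsigned_img, key.vbpubk,
  #  key.vbprivk, subkey.vbprivk, keyblock_tmp, signed_img].)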
499 |   elif info_dict.get("vboot", None):
500 |     path = "/" + os.path.basename(sourcedir).lower()
501 |     img_keyblock = tempfile.NamedTemporaryFile()
502 |     cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
503 |            img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
504 |            info_dict["vboot_key"] + ".vbprivk",
505 |            info_dict["vboot_subkey"] + ".vbprivk",
506 |            img_keyblock.name,
507 |            img.name]
508 |     p = Run(cmd, stdout=subprocess.PIPE)
509 |     p.communicate()
510 |     assert p.returncode == 0, "vboot_signer of %s image failed" % path
511 | 
512 |     # Clean up the temp files.
513 |     img_unsigned.close()
514 |     img_keyblock.close()
515 | 
516 |   img.seek(0, os.SEEK_SET)
517 |   data = img.read()
518 | 
519 |   if has_ramdisk:
520 |     ramdisk_img.close()
521 |   img.close()
522 | 
523 |   return data
524 | 
525 | 
526 | def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
527 |                      info_dict=None):
528 |   """Return a File object with the desired bootable image.
529 | 
530 |   Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
531 |   otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
532 |   the source files in 'unpack_dir'/'tree_subdir'."""
533 | 
534 |   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
535 |   if os.path.exists(prebuilt_path):
536 |     print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
537 |     return File.FromLocalFile(name, prebuilt_path)
538 | 
539 |   prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
540 |   if os.path.exists(prebuilt_path):
541 |     print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
542 |     return File.FromLocalFile(name, prebuilt_path)
543 | 
544 |   print("building image from target_files %s..." % (tree_subdir,))
545 | 
546 |   if info_dict is None:
547 |     info_dict = OPTIONS.info_dict
548 | 
549 |   # With system_root_image == "true", we don't pack ramdisk into the boot image.
550 |   # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
551 |   # for recovery.
552 |   has_ramdisk = (info_dict.get("system_root_image") != "true" or
553 |                  prebuilt_name != "boot.img" or
554 |                  info_dict.get("recovery_as_boot") == "true")
555 | 
556 |   fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
557 |   data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
558 |                              os.path.join(unpack_dir, fs_config),
559 |                              info_dict, has_ramdisk)
560 |   if data:
561 |     return File(name, data)
562 |   return None
563 | 
564 | 
565 | def UnzipTemp(filename, pattern=None):
566 |   """Unzip the given archive into a temporary directory and return the name.
567 | 
568 |   If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
569 |   temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
570 | 
571 |   Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
572 |   main file), open for reading.
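  A minimal usage sketch (filename and pattern are illustrative):

    tmp, input_zip = UnzipTemp("target_files.zip", pattern=["META/*"])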
573 | """ 574 | 575 | tmp = tempfile.mkdtemp(prefix="targetfiles-") 576 | OPTIONS.tempfiles.append(tmp) 577 | 578 | def unzip_to_dir(filename, dirname): 579 | cmd = ["unzip", "-o", "-q", filename, "-d", dirname] 580 | if pattern is not None: 581 | cmd.extend(pattern) 582 | p = Run(cmd, stdout=subprocess.PIPE) 583 | p.communicate() 584 | if p.returncode != 0: 585 | raise ExternalError("failed to unzip input target-files \"%s\"" % 586 | (filename,)) 587 | 588 | m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE) 589 | if m: 590 | unzip_to_dir(m.group(1), tmp) 591 | unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES")) 592 | filename = m.group(1) 593 | else: 594 | unzip_to_dir(filename, tmp) 595 | 596 | return tmp, zipfile.ZipFile(filename, "r") 597 | 598 | 599 | def GetKeyPasswords(keylist): 600 | """Given a list of keys, prompt the user to enter passwords for 601 | those which require them. Return a {key: password} dict. password 602 | will be None if the key has no password.""" 603 | 604 | no_passwords = [] 605 | need_passwords = [] 606 | key_passwords = {} 607 | devnull = open("/dev/null", "w+b") 608 | for k in sorted(keylist): 609 | # We don't need a password for things that aren't really keys. 610 | if k in SPECIAL_CERT_STRINGS: 611 | no_passwords.append(k) 612 | continue 613 | 614 | p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix, 615 | "-inform", "DER", "-nocrypt"], 616 | stdin=devnull.fileno(), 617 | stdout=devnull.fileno(), 618 | stderr=subprocess.STDOUT) 619 | p.communicate() 620 | if p.returncode == 0: 621 | # Definitely an unencrypted key. 622 | no_passwords.append(k) 623 | else: 624 | p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix, 625 | "-inform", "DER", "-passin", "pass:"], 626 | stdin=devnull.fileno(), 627 | stdout=devnull.fileno(), 628 | stderr=subprocess.PIPE) 629 | _, stderr = p.communicate() 630 | if p.returncode == 0: 631 | # Encrypted key with empty string as password. 632 | key_passwords[k] = '' 633 | elif stderr.startswith('Error decrypting key'): 634 | # Definitely encrypted key. 635 | # It would have said "Error reading key" if it didn't parse correctly. 636 | need_passwords.append(k) 637 | else: 638 | # Potentially, a type of key that openssl doesn't understand. 639 | # We'll let the routines in signapk.jar handle it. 640 | no_passwords.append(k) 641 | devnull.close() 642 | 643 | key_passwords.update(PasswordManager().GetPasswords(need_passwords)) 644 | key_passwords.update(dict.fromkeys(no_passwords, None)) 645 | return key_passwords 646 | 647 | 648 | def GetMinSdkVersion(apk_name): 649 | """Get the minSdkVersion delared in the APK. This can be both a decimal number 650 | (API Level) or a codename. 651 | """ 652 | 653 | p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE) 654 | output, err = p.communicate() 655 | if err: 656 | raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s" 657 | % (p.returncode,)) 658 | 659 | for line in output.split("\n"): 660 | # Looking for lines such as sdkVersion:'23' or sdkVersion:'M' 661 | m = re.match(r'sdkVersion:\'([^\']*)\'', line) 662 | if m: 663 | return m.group(1) 664 | raise ExternalError("No minSdkVersion returned by aapt") 665 | 666 | 667 | def GetMinSdkVersionInt(apk_name, codename_to_api_level_map): 668 | """Get the minSdkVersion declared in the APK as a number (API Level). If 669 | minSdkVersion is set to a codename, it is translated to a number using the 670 | provided map. 
671 | """ 672 | 673 | version = GetMinSdkVersion(apk_name) 674 | try: 675 | return int(version) 676 | except ValueError: 677 | # Not a decimal number. Codename? 678 | if version in codename_to_api_level_map: 679 | return codename_to_api_level_map[version] 680 | else: 681 | raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s" 682 | % (version, codename_to_api_level_map)) 683 | 684 | 685 | def SignFile(input_name, output_name, key, password, min_api_level=None, 686 | codename_to_api_level_map=dict(), 687 | whole_file=False): 688 | """Sign the input_name zip/jar/apk, producing output_name. Use the 689 | given key and password (the latter may be None if the key does not 690 | have a password. 691 | 692 | If whole_file is true, use the "-w" option to SignApk to embed a 693 | signature that covers the whole file in the archive comment of the 694 | zip file. 695 | 696 | min_api_level is the API Level (int) of the oldest platform this file may end 697 | up on. If not specified for an APK, the API Level is obtained by interpreting 698 | the minSdkVersion attribute of the APK's AndroidManifest.xml. 699 | 700 | codename_to_api_level_map is needed to translate the codename which may be 701 | encountered as the APK's minSdkVersion. 702 | """ 703 | 704 | java_library_path = os.path.join( 705 | OPTIONS.search_path, OPTIONS.signapk_shared_library_path) 706 | 707 | cmd = [OPTIONS.java_path, OPTIONS.java_args, 708 | "-Djava.library.path=" + java_library_path, 709 | "-jar", 710 | os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] 711 | cmd.extend(OPTIONS.extra_signapk_args) 712 | if whole_file: 713 | cmd.append("-w") 714 | 715 | min_sdk_version = min_api_level 716 | if min_sdk_version is None: 717 | if not whole_file: 718 | min_sdk_version = GetMinSdkVersionInt( 719 | input_name, codename_to_api_level_map) 720 | if min_sdk_version is not None: 721 | cmd.extend(["--min-sdk-version", str(min_sdk_version)]) 722 | 723 | cmd.extend([key + OPTIONS.public_key_suffix, 724 | key + OPTIONS.private_key_suffix, 725 | input_name, output_name]) 726 | 727 | p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) 728 | if password is not None: 729 | password += "\n" 730 | p.communicate(password) 731 | if p.returncode != 0: 732 | raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,)) 733 | 734 | 735 | def CheckSize(data, target, info_dict): 736 | """Check the data string passed against the max size limit, if 737 | any, for the given target. Raise exception if the data is too big. 
738 |   Print a warning if the data is nearing the maximum size."""
739 | 
740 |   if target.endswith(".img"):
741 |     target = target[:-4]
742 |   mount_point = "/" + target
743 | 
744 |   fs_type = None
745 |   limit = None
746 |   if info_dict["fstab"]:
747 |     if mount_point == "/userdata":
748 |       mount_point = "/data"
749 |     p = info_dict["fstab"][mount_point]
750 |     fs_type = p.fs_type
751 |     device = p.device
752 |     if "/" in device:
753 |       device = device[device.rfind("/")+1:]
754 |     limit = info_dict.get(device + "_size", None)
755 |   if not fs_type or not limit:
756 |     return
757 | 
758 |   if fs_type == "yaffs2":
759 |     # image size should be increased by 1/64th to account for the
760 |     # spare area (64 bytes per 2k page)
761 |     limit = limit / 2048 * (2048+64)
762 |   size = len(data)
763 |   pct = float(size) * 100.0 / limit
764 |   msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
765 |   if pct >= 99.0:
766 |     raise ExternalError(msg)
767 |   elif pct >= 95.0:
768 |     print("\n WARNING: %s\n" % (msg,))
769 |   elif OPTIONS.verbose:
770 |     print(" ", msg)
771 | 
772 | 
773 | def ReadApkCerts(tf_zip):
774 |   """Given a target_files ZipFile, parse the META/apkcerts.txt file
775 |   and return a {package: cert} dict."""
776 |   certmap = {}
777 |   for line in tf_zip.read("META/apkcerts.txt").split("\n"):
778 |     line = line.strip()
779 |     if not line:
780 |       continue
781 |     m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
782 |                  r'private_key="(.*)"$', line)
783 |     if m:
784 |       name, cert, privkey = m.groups()
785 |       public_key_suffix_len = len(OPTIONS.public_key_suffix)
786 |       private_key_suffix_len = len(OPTIONS.private_key_suffix)
787 |       if cert in SPECIAL_CERT_STRINGS and not privkey:
788 |         certmap[name] = cert
789 |       elif (cert.endswith(OPTIONS.public_key_suffix) and
790 |             privkey.endswith(OPTIONS.private_key_suffix) and
791 |             cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
792 |         certmap[name] = cert[:-public_key_suffix_len]
793 |       else:
794 |         raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
795 |   return certmap
796 | 
797 | 
798 | COMMON_DOCSTRING = """
799 |   -p (--path) <dir>
800 |       Prepend <dir>/bin to the list of places to search for binaries
801 |       run by this script, and expect to find jars in <dir>/framework.
802 | 
803 |   -s (--device_specific) <file>
804 |       Path to the python module containing device-specific
805 |       releasetools code.
806 | 
807 |   -x (--extra) <key=value>
808 |       Add a key/value pair to the 'extras' dict, which device-specific
809 |       extension code may look at.
810 | 
811 |   -v (--verbose)
812 |       Show command lines being executed.
813 | 
814 |   -h (--help)
815 |       Display this usage message and exit.
816 | """
817 | 
818 | def Usage(docstring):
819 |   print(docstring.rstrip("\n"))
820 |   print(COMMON_DOCSTRING)
821 | 
822 | 
823 | def ParseOptions(argv,
824 |                  docstring,
825 |                  extra_opts="", extra_long_opts=(),
826 |                  extra_option_handler=None):
827 |   """Parse the options in argv and return any arguments that aren't
828 |   flags. docstring is the calling module's docstring, to be displayed
829 |   for errors and -h. extra_opts and extra_long_opts are for flags
830 |   defined by the caller, which are processed by passing them to
831 |   extra_option_handler."""
832 | 
833 |   try:
834 |     opts, args = getopt.getopt(
835 |         argv, "hvp:s:x:" + extra_opts,
836 |         ["help", "verbose", "path=", "signapk_path=",
837 |          "signapk_shared_library_path=", "extra_signapk_args=",
838 |          "java_path=", "java_args=", "public_key_suffix=",
839 |          "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
840 |          "verity_signer_path=", "verity_signer_args=", "device_specific=",
841 |          "extra="] +
842 |         list(extra_long_opts))
843 |   except getopt.GetoptError as err:
844 |     Usage(docstring)
845 |     print("**", str(err), "**")
846 |     sys.exit(2)
847 | 
848 |   for o, a in opts:
849 |     if o in ("-h", "--help"):
850 |       Usage(docstring)
851 |       sys.exit()
852 |     elif o in ("-v", "--verbose"):
853 |       OPTIONS.verbose = True
854 |     elif o in ("-p", "--path"):
855 |       OPTIONS.search_path = a
856 |     elif o in ("--signapk_path",):
857 |       OPTIONS.signapk_path = a
858 |     elif o in ("--signapk_shared_library_path",):
859 |       OPTIONS.signapk_shared_library_path = a
860 |     elif o in ("--extra_signapk_args",):
861 |       OPTIONS.extra_signapk_args = shlex.split(a)
862 |     elif o in ("--java_path",):
863 |       OPTIONS.java_path = a
864 |     elif o in ("--java_args",):
865 |       OPTIONS.java_args = shlex.split(a)
866 |     elif o in ("--public_key_suffix",):
867 |       OPTIONS.public_key_suffix = a
868 |     elif o in ("--private_key_suffix",):
869 |       OPTIONS.private_key_suffix = a
870 |     elif o in ("--boot_signer_path",):
871 |       OPTIONS.boot_signer_path = a
872 |     elif o in ("--boot_signer_args",):
873 |       OPTIONS.boot_signer_args = shlex.split(a)
874 |     elif o in ("--verity_signer_path",):
875 |       OPTIONS.verity_signer_path = a
876 |     elif o in ("--verity_signer_args",):
877 |       OPTIONS.verity_signer_args = shlex.split(a)
878 |     elif o in ("-s", "--device_specific"):
879 |       OPTIONS.device_specific = a
880 |     elif o in ("-x", "--extra"):
881 |       key, value = a.split("=", 1)
882 |       OPTIONS.extras[key] = value
883 |     else:
884 |       if extra_option_handler is None or not extra_option_handler(o, a):
885 |         assert False, "unknown option \"%s\"" % (o,)
886 | 
887 |   if OPTIONS.search_path:
888 |     os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
889 |                           os.pathsep + os.environ["PATH"])
890 | 
891 |   return args
892 | 
893 | 
894 | def MakeTempFile(prefix=None, suffix=None):
895 |   """Make a temp file and add it to the list of things to be deleted
896 |   when Cleanup() is called. Return the filename."""
897 |   fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
898 |   os.close(fd)
899 |   OPTIONS.tempfiles.append(fn)
900 |   return fn
901 | 
902 | 
903 | def Cleanup():
904 |   for i in OPTIONS.tempfiles:
905 |     if os.path.isdir(i):
906 |       shutil.rmtree(i)
907 |     else:
908 |       os.remove(i)
909 | 
910 | 
911 | class PasswordManager(object):
912 |   def __init__(self):
913 |     self.editor = os.getenv("EDITOR", None)
914 |     self.pwfile = os.getenv("ANDROID_PW_FILE", None)
915 | 
916 |   def GetPasswords(self, items):
917 |     """Get passwords corresponding to each string in 'items',
918 |     returning a dict. (The dict may have keys in addition to the
919 |     values in 'items'.)
920 | 
921 |     Uses the passwords in $ANDROID_PW_FILE if available, letting the
922 |     user edit that file to add more needed passwords. If no editor is
923 |     available, or $ANDROID_PW_FILE isn't defined, prompts the user
924 |     interactively in the ordinary way.
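    Entries in that file take the form "[[[ <password> ]]] <key>", one per
    line, as written by UpdateAndReadFile() and parsed by ReadFile() below.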
925 | """ 926 | 927 | current = self.ReadFile() 928 | 929 | first = True 930 | while True: 931 | missing = [] 932 | for i in items: 933 | if i not in current or not current[i]: 934 | missing.append(i) 935 | # Are all the passwords already in the file? 936 | if not missing: 937 | return current 938 | 939 | for i in missing: 940 | current[i] = "" 941 | 942 | if not first: 943 | print("key file %s still missing some passwords." % (self.pwfile,)) 944 | answer = raw_input("try to edit again? [y]> ").strip() 945 | if answer and answer[0] not in 'yY': 946 | raise RuntimeError("key passwords unavailable") 947 | first = False 948 | 949 | current = self.UpdateAndReadFile(current) 950 | 951 | def PromptResult(self, current): # pylint: disable=no-self-use 952 | """Prompt the user to enter a value (password) for each key in 953 | 'current' whose value is fales. Returns a new dict with all the 954 | values. 955 | """ 956 | result = {} 957 | for k, v in sorted(current.iteritems()): 958 | if v: 959 | result[k] = v 960 | else: 961 | while True: 962 | result[k] = getpass.getpass( 963 | "Enter password for %s key> " % k).strip() 964 | if result[k]: 965 | break 966 | return result 967 | 968 | def UpdateAndReadFile(self, current): 969 | if not self.editor or not self.pwfile: 970 | return self.PromptResult(current) 971 | 972 | f = open(self.pwfile, "w") 973 | os.chmod(self.pwfile, 0o600) 974 | f.write("# Enter key passwords between the [[[ ]]] brackets.\n") 975 | f.write("# (Additional spaces are harmless.)\n\n") 976 | 977 | first_line = None 978 | sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()]) 979 | for i, (_, k, v) in enumerate(sorted_list): 980 | f.write("[[[ %s ]]] %s\n" % (v, k)) 981 | if not v and first_line is None: 982 | # position cursor on first line with no password. 983 | first_line = i + 4 984 | f.close() 985 | 986 | p = Run([self.editor, "+%d" % (first_line,), self.pwfile]) 987 | _, _ = p.communicate() 988 | 989 | return self.ReadFile() 990 | 991 | def ReadFile(self): 992 | result = {} 993 | if self.pwfile is None: 994 | return result 995 | try: 996 | f = open(self.pwfile, "r") 997 | for line in f: 998 | line = line.strip() 999 | if not line or line[0] == '#': 1000 | continue 1001 | m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line) 1002 | if not m: 1003 | print("failed to parse password file: ", line) 1004 | else: 1005 | result[m.group(2)] = m.group(1) 1006 | f.close() 1007 | except IOError as e: 1008 | if e.errno != errno.ENOENT: 1009 | print("error reading password file: ", str(e)) 1010 | return result 1011 | 1012 | 1013 | def ZipWrite(zip_file, filename, arcname=None, perms=0o644, 1014 | compress_type=None): 1015 | import datetime 1016 | 1017 | # http://b/18015246 1018 | # Python 2.7's zipfile implementation wrongly thinks that zip64 is required 1019 | # for files larger than 2GiB. We can work around this by adjusting their 1020 | # limit. Note that `zipfile.writestr()` will not work for strings larger than 1021 | # 2GiB. The Python interpreter sometimes rejects strings that large (though 1022 | # it isn't clear to me exactly what circumstances cause this). 1023 | # `zipfile.write()` must be used directly to work around this. 1024 | # 1025 | # This mess can be avoided if we port to python3. 
1026 | saved_zip64_limit = zipfile.ZIP64_LIMIT 1027 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 1028 | 1029 | if compress_type is None: 1030 | compress_type = zip_file.compression 1031 | if arcname is None: 1032 | arcname = filename 1033 | 1034 | saved_stat = os.stat(filename) 1035 | 1036 | try: 1037 | # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the 1038 | # file to be zipped and reset it when we're done. 1039 | os.chmod(filename, perms) 1040 | 1041 | # Use a fixed timestamp so the output is repeatable. 1042 | epoch = datetime.datetime.fromtimestamp(0) 1043 | timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds() 1044 | os.utime(filename, (timestamp, timestamp)) 1045 | 1046 | zip_file.write(filename, arcname=arcname, compress_type=compress_type) 1047 | finally: 1048 | os.chmod(filename, saved_stat.st_mode) 1049 | os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime)) 1050 | zipfile.ZIP64_LIMIT = saved_zip64_limit 1051 | 1052 | 1053 | def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None, 1054 | compress_type=None): 1055 | """Wrap zipfile.writestr() function to work around the zip64 limit. 1056 | 1057 | Even with the ZIP64_LIMIT workaround, it won't allow writing a string 1058 | longer than 2GiB. It gives 'OverflowError: size does not fit in an int' 1059 | when calling crc32(bytes). 1060 | 1061 | But it still works fine to write a shorter string into a large zip file. 1062 | We should use ZipWrite() whenever possible, and only use ZipWriteStr() 1063 | when we know the string won't be too long. 1064 | """ 1065 | 1066 | saved_zip64_limit = zipfile.ZIP64_LIMIT 1067 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 1068 | 1069 | if not isinstance(zinfo_or_arcname, zipfile.ZipInfo): 1070 | zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname) 1071 | zinfo.compress_type = zip_file.compression 1072 | if perms is None: 1073 | perms = 0o100644 1074 | else: 1075 | zinfo = zinfo_or_arcname 1076 | 1077 | # If compress_type is given, it overrides the value in zinfo. 1078 | if compress_type is not None: 1079 | zinfo.compress_type = compress_type 1080 | 1081 | # If perms is given, it has a priority. 1082 | if perms is not None: 1083 | # If perms doesn't set the file type, mark it as a regular file. 1084 | if perms & 0o770000 == 0: 1085 | perms |= 0o100000 1086 | zinfo.external_attr = perms << 16 1087 | 1088 | # Use a fixed timestamp so the output is repeatable. 1089 | zinfo.date_time = (2009, 1, 1, 0, 0, 0) 1090 | 1091 | zip_file.writestr(zinfo, data) 1092 | zipfile.ZIP64_LIMIT = saved_zip64_limit 1093 | 1094 | 1095 | def ZipClose(zip_file): 1096 | # http://b/18015246 1097 | # zipfile also refers to ZIP64_LIMIT during close() when it writes out the 1098 | # central directory. 
1099 | saved_zip64_limit = zipfile.ZIP64_LIMIT 1100 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 1101 | 1102 | zip_file.close() 1103 | 1104 | zipfile.ZIP64_LIMIT = saved_zip64_limit 1105 | 1106 | 1107 | class DeviceSpecificParams(object): 1108 | module = None 1109 | def __init__(self, **kwargs): 1110 | """Keyword arguments to the constructor become attributes of this 1111 | object, which is passed to all functions in the device-specific 1112 | module.""" 1113 | for k, v in kwargs.iteritems(): 1114 | setattr(self, k, v) 1115 | self.extras = OPTIONS.extras 1116 | 1117 | if self.module is None: 1118 | path = OPTIONS.device_specific 1119 | if not path: 1120 | return 1121 | try: 1122 | if os.path.isdir(path): 1123 | info = imp.find_module("releasetools", [path]) 1124 | else: 1125 | d, f = os.path.split(path) 1126 | b, x = os.path.splitext(f) 1127 | if x == ".py": 1128 | f = b 1129 | info = imp.find_module(f, [d]) 1130 | print("loaded device-specific extensions from", path) 1131 | self.module = imp.load_module("device_specific", *info) 1132 | except ImportError: 1133 | print("unable to load device-specific module; assuming none") 1134 | 1135 | def _DoCall(self, function_name, *args, **kwargs): 1136 | """Call the named function in the device-specific module, passing 1137 | the given args and kwargs. The first argument to the call will be 1138 | the DeviceSpecific object itself. If there is no module, or the 1139 | module does not define the function, return the value of the 1140 | 'default' kwarg (which itself defaults to None).""" 1141 | if self.module is None or not hasattr(self.module, function_name): 1142 | return kwargs.get("default", None) 1143 | return getattr(self.module, function_name)(*((self,) + args), **kwargs) 1144 | 1145 | def FullOTA_Assertions(self): 1146 | """Called after emitting the block of assertions at the top of a 1147 | full OTA package. Implementations can add whatever additional 1148 | assertions they like.""" 1149 | return self._DoCall("FullOTA_Assertions") 1150 | 1151 | def FullOTA_InstallBegin(self): 1152 | """Called at the start of full OTA installation.""" 1153 | return self._DoCall("FullOTA_InstallBegin") 1154 | 1155 | def FullOTA_InstallEnd(self): 1156 | """Called at the end of full OTA installation; typically this is 1157 | used to install the image for the device's baseband processor.""" 1158 | return self._DoCall("FullOTA_InstallEnd") 1159 | 1160 | def IncrementalOTA_Assertions(self): 1161 | """Called after emitting the block of assertions at the top of an 1162 | incremental OTA package. 
Implementations can add whatever 1163 | additional assertions they like.""" 1164 | return self._DoCall("IncrementalOTA_Assertions") 1165 | 1166 | def IncrementalOTA_VerifyBegin(self): 1167 | """Called at the start of the verification phase of incremental 1168 | OTA installation; additional checks can be placed here to abort 1169 | the script before any changes are made.""" 1170 | return self._DoCall("IncrementalOTA_VerifyBegin") 1171 | 1172 | def IncrementalOTA_VerifyEnd(self): 1173 | """Called at the end of the verification phase of incremental OTA 1174 | installation; additional checks can be placed here to abort the 1175 | script before any changes are made.""" 1176 | return self._DoCall("IncrementalOTA_VerifyEnd") 1177 | 1178 | def IncrementalOTA_InstallBegin(self): 1179 | """Called at the start of incremental OTA installation (after 1180 | verification is complete).""" 1181 | return self._DoCall("IncrementalOTA_InstallBegin") 1182 | 1183 | def IncrementalOTA_InstallEnd(self): 1184 | """Called at the end of incremental OTA installation; typically 1185 | this is used to install the image for the device's baseband 1186 | processor.""" 1187 | return self._DoCall("IncrementalOTA_InstallEnd") 1188 | 1189 | def VerifyOTA_Assertions(self): 1190 | return self._DoCall("VerifyOTA_Assertions") 1191 | 1192 | class File(object): 1193 | def __init__(self, name, data): 1194 | self.name = name 1195 | self.data = data 1196 | self.size = len(data) 1197 | self.sha1 = sha1(data).hexdigest() 1198 | 1199 | @classmethod 1200 | def FromLocalFile(cls, name, diskname): 1201 | f = open(diskname, "rb") 1202 | data = f.read() 1203 | f.close() 1204 | return File(name, data) 1205 | 1206 | def WriteToTemp(self): 1207 | t = tempfile.NamedTemporaryFile() 1208 | t.write(self.data) 1209 | t.flush() 1210 | return t 1211 | 1212 | def AddToZip(self, z, compression=None): 1213 | ZipWriteStr(z, self.name, self.data, compress_type=compression) 1214 | 1215 | DIFF_PROGRAM_BY_EXT = { 1216 | ".gz" : "imgdiff", 1217 | ".zip" : ["imgdiff", "-z"], 1218 | ".jar" : ["imgdiff", "-z"], 1219 | ".apk" : ["imgdiff", "-z"], 1220 | ".img" : "imgdiff", 1221 | } 1222 | 1223 | class Difference(object): 1224 | def __init__(self, tf, sf, diff_program=None): 1225 | self.tf = tf 1226 | self.sf = sf 1227 | self.patch = None 1228 | self.diff_program = diff_program 1229 | 1230 | def ComputePatch(self): 1231 | """Compute the patch (as a string of data) needed to turn sf into 1232 | tf. 
Returns the same tuple as GetPatch().""" 1233 | 1234 | tf = self.tf 1235 | sf = self.sf 1236 | 1237 | if self.diff_program: 1238 | diff_program = self.diff_program 1239 | else: 1240 | ext = os.path.splitext(tf.name)[1] 1241 | diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff") 1242 | 1243 | ttemp = tf.WriteToTemp() 1244 | stemp = sf.WriteToTemp() 1245 | 1246 | ext = os.path.splitext(tf.name)[1] 1247 | 1248 | try: 1249 | ptemp = tempfile.NamedTemporaryFile() 1250 | if isinstance(diff_program, list): 1251 | cmd = copy.copy(diff_program) 1252 | else: 1253 | cmd = [diff_program] 1254 | cmd.append(stemp.name) 1255 | cmd.append(ttemp.name) 1256 | cmd.append(ptemp.name) 1257 | p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 1258 | err = [] 1259 | def run(): 1260 | _, e = p.communicate() 1261 | if e: 1262 | err.append(e) 1263 | th = threading.Thread(target=run) 1264 | th.start() 1265 | th.join(timeout=300) # 5 mins 1266 | if th.is_alive(): 1267 | print("WARNING: diff command timed out") 1268 | p.terminate() 1269 | th.join(5) 1270 | if th.is_alive(): 1271 | p.kill() 1272 | th.join() 1273 | 1274 | if err or p.returncode != 0: 1275 | print("WARNING: failure running %s:\n%s\n" % ( 1276 | diff_program, "".join(err))) 1277 | self.patch = None 1278 | return None, None, None 1279 | diff = ptemp.read() 1280 | finally: 1281 | ptemp.close() 1282 | stemp.close() 1283 | ttemp.close() 1284 | 1285 | self.patch = diff 1286 | return self.tf, self.sf, self.patch 1287 | 1288 | 1289 | def GetPatch(self): 1290 | """Return a tuple (target_file, source_file, patch_data). 1291 | patch_data may be None if ComputePatch hasn't been called, or if 1292 | computing the patch failed.""" 1293 | return self.tf, self.sf, self.patch 1294 | 1295 | 1296 | def ComputeDifferences(diffs): 1297 | """Call ComputePatch on all the Difference objects in 'diffs'.""" 1298 | print(len(diffs), "diffs to compute") 1299 | 1300 | # Do the largest files first, to try and reduce the long-pole effect. 1301 | by_size = [(i.tf.size, i) for i in diffs] 1302 | by_size.sort(reverse=True) 1303 | by_size = [i[1] for i in by_size] 1304 | 1305 | lock = threading.Lock() 1306 | diff_iter = iter(by_size) # accessed under lock 1307 | 1308 | def worker(): 1309 | try: 1310 | lock.acquire() 1311 | for d in diff_iter: 1312 | lock.release() 1313 | start = time.time() 1314 | d.ComputePatch() 1315 | dur = time.time() - start 1316 | lock.acquire() 1317 | 1318 | tf, sf, patch = d.GetPatch() 1319 | if sf.name == tf.name: 1320 | name = tf.name 1321 | else: 1322 | name = "%s (%s)" % (tf.name, sf.name) 1323 | if patch is None: 1324 | print("patching failed! %s" % (name,)) 1325 | else: 1326 | print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % ( 1327 | dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)) 1328 | lock.release() 1329 | except Exception as e: 1330 | print(e) 1331 | raise 1332 | 1333 | # start worker threads; wait for them all to finish. 
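  # Each worker pulls the next Difference from diff_iter while holding the
  # lock, then releases it for the duration of the (slow) ComputePatch call,
  # so up to OPTIONS.worker_threads patches are computed concurrently.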
1334 | threads = [threading.Thread(target=worker) 1335 | for i in range(OPTIONS.worker_threads)] 1336 | for th in threads: 1337 | th.start() 1338 | while threads: 1339 | threads.pop().join() 1340 | 1341 | 1342 | class BlockDifference(object): 1343 | def __init__(self, partition, tgt, src=None, check_first_block=False, 1344 | version=None, disable_imgdiff=False): 1345 | self.tgt = tgt 1346 | self.src = src 1347 | self.partition = partition 1348 | self.check_first_block = check_first_block 1349 | self.disable_imgdiff = disable_imgdiff 1350 | 1351 | if version is None: 1352 | version = 1 1353 | if OPTIONS.info_dict: 1354 | version = max( 1355 | int(i) for i in 1356 | OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) 1357 | self.version = version 1358 | 1359 | b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads, 1360 | version=self.version, 1361 | disable_imgdiff=self.disable_imgdiff) 1362 | tmpdir = tempfile.mkdtemp() 1363 | OPTIONS.tempfiles.append(tmpdir) 1364 | self.path = os.path.join(tmpdir, partition) 1365 | b.Compute(self.path) 1366 | self._required_cache = b.max_stashed_size 1367 | self.touched_src_ranges = b.touched_src_ranges 1368 | self.touched_src_sha1 = b.touched_src_sha1 1369 | 1370 | 1371 | @property 1372 | def required_cache(self): 1373 | return self._required_cache 1374 | 1375 | def WriteScript(self, script, output_zip, progress=None): 1376 | if not self.src: 1377 | # write the output unconditionally 1378 | script.Print("Patching %s image unconditionally..." % (self.partition,)) 1379 | else: 1380 | script.Print("Patching %s image after verification." % (self.partition,)) 1381 | 1382 | if progress: 1383 | script.ShowProgress(progress, 0) 1384 | self._WriteUpdate(script, output_zip) 1385 | if OPTIONS.verify: 1386 | self._WritePostInstallVerifyScript(script) 1387 | 1388 | def WriteStrictVerifyScript(self, script): 1389 | """Verify all the blocks in the care_map, including clobbered blocks. 1390 | 1391 | This differs from the WriteVerifyScript() function: a) it prints different 1392 | error messages; b) it doesn't allow half-way updated images to pass the 1393 | verification.""" 1394 | 1395 | partition = self.partition 1396 | script.Print("Verifying %s..." % (partition,)) 1397 | ranges = self.tgt.care_map 1398 | ranges_str = ranges.to_string_raw() 1399 | script.AppendExtra('range_sha1("%s", "%s") == "%s" && ' 1400 | 'ui_print(" Verified.") || ' 1401 | 'ui_print("\\"%s\\" has unexpected contents.");' % ( 1402 | self.device, ranges_str, 1403 | self.tgt.TotalSha1(include_clobbered_blocks=True), 1404 | self.device)) 1405 | script.AppendExtra("") 1406 | 1407 | def WriteVerifyScript(self, script, touched_blocks_only=False): 1408 | partition = self.partition 1409 | 1410 | # full OTA 1411 | if not self.src: 1412 | script.Print("Image %s will be patched unconditionally." % (partition,)) 1413 | 1414 | # incremental OTA 1415 | else: 1416 | if touched_blocks_only and self.version >= 3: 1417 | ranges = self.touched_src_ranges 1418 | expected_sha1 = self.touched_src_sha1 1419 | else: 1420 | ranges = self.src.care_map.subtract(self.src.clobbered_blocks) 1421 | expected_sha1 = self.src.TotalSha1() 1422 | 1423 | # No blocks to be checked, skipping. 
1424 | if not ranges: 1425 | return 1426 | 1427 | ranges_str = ranges.to_string_raw() 1428 | if self.version >= 4: 1429 | script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || ' 1430 | 'block_image_verify("%s", ' 1431 | 'package_extract_file("%s.transfer.list"), ' 1432 | '"%s.new.dat", "%s.patch.dat")) then') % ( 1433 | self.device, ranges_str, expected_sha1, 1434 | self.device, partition, partition, partition)) 1435 | elif self.version == 3: 1436 | script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || ' 1437 | 'block_image_verify("%s", ' 1438 | 'package_extract_file("%s.transfer.list"), ' 1439 | '"%s.new.dat", "%s.patch.dat")) then') % ( 1440 | self.device, ranges_str, expected_sha1, 1441 | self.device, partition, partition, partition)) 1442 | else: 1443 | script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( 1444 | self.device, ranges_str, self.src.TotalSha1())) 1445 | script.Print('Verified %s image...' % (partition,)) 1446 | script.AppendExtra('else') 1447 | 1448 | if self.version >= 4: 1449 | 1450 | # Bug: 21124327 1451 | # When generating incrementals for the system and vendor partitions in 1452 | # version 4 or newer, explicitly check the first block (which contains 1453 | # the superblock) of the partition to see if it's what we expect. If 1454 | # this check fails, give an explicit log message about the partition 1455 | # having been remounted R/W (the most likely explanation). 1456 | if self.check_first_block: 1457 | script.AppendExtra('check_first_block("%s");' % (self.device,)) 1458 | 1459 | # If version >= 4, try block recovery before abort update 1460 | if partition == "system": 1461 | code = ErrorCode.SYSTEM_RECOVER_FAILURE 1462 | else: 1463 | code = ErrorCode.VENDOR_RECOVER_FAILURE 1464 | script.AppendExtra(( 1465 | 'ifelse (block_image_recover("{device}", "{ranges}") && ' 1466 | 'block_image_verify("{device}", ' 1467 | 'package_extract_file("{partition}.transfer.list"), ' 1468 | '"{partition}.new.dat", "{partition}.patch.dat"), ' 1469 | 'ui_print("{partition} recovered successfully."), ' 1470 | 'abort("E{code}: {partition} partition fails to recover"));\n' 1471 | 'endif;').format(device=self.device, ranges=ranges_str, 1472 | partition=partition, code=code)) 1473 | 1474 | # Abort the OTA update. Note that the incremental OTA cannot be applied 1475 | # even if it may match the checksum of the target partition. 1476 | # a) If version < 3, operations like move and erase will make changes 1477 | # unconditionally and damage the partition. 1478 | # b) If version >= 3, it won't even reach here. 1479 | else: 1480 | if partition == "system": 1481 | code = ErrorCode.SYSTEM_VERIFICATION_FAILURE 1482 | else: 1483 | code = ErrorCode.VENDOR_VERIFICATION_FAILURE 1484 | script.AppendExtra(( 1485 | 'abort("E%d: %s partition has unexpected contents");\n' 1486 | 'endif;') % (code, partition)) 1487 | 1488 | def _WritePostInstallVerifyScript(self, script): 1489 | partition = self.partition 1490 | script.Print('Verifying the updated %s image...' % (partition,)) 1491 | # Unlike pre-install verification, clobbered_blocks should not be ignored. 1492 | ranges = self.tgt.care_map 1493 | ranges_str = ranges.to_string_raw() 1494 | script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( 1495 | self.device, ranges_str, 1496 | self.tgt.TotalSha1(include_clobbered_blocks=True))) 1497 | 1498 | # Bug: 20881595 1499 | # Verify that extended blocks are really zeroed out. 
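    # Extended blocks are the blocks added beyond the filesystem contents to
    # pad the image out; if the update worked they must hash like all-zero
    # 4096-byte blocks, which _HashZeroBlocks() below computes.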
1500 | if self.tgt.extended: 1501 | ranges_str = self.tgt.extended.to_string_raw() 1502 | script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( 1503 | self.device, ranges_str, 1504 | self._HashZeroBlocks(self.tgt.extended.size()))) 1505 | script.Print('Verified the updated %s image.' % (partition,)) 1506 | if partition == "system": 1507 | code = ErrorCode.SYSTEM_NONZERO_CONTENTS 1508 | else: 1509 | code = ErrorCode.VENDOR_NONZERO_CONTENTS 1510 | script.AppendExtra( 1511 | 'else\n' 1512 | ' abort("E%d: %s partition has unexpected non-zero contents after ' 1513 | 'OTA update");\n' 1514 | 'endif;' % (code, partition)) 1515 | else: 1516 | script.Print('Verified the updated %s image.' % (partition,)) 1517 | 1518 | if partition == "system": 1519 | code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS 1520 | else: 1521 | code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS 1522 | 1523 | script.AppendExtra( 1524 | 'else\n' 1525 | ' abort("E%d: %s partition has unexpected contents after OTA ' 1526 | 'update");\n' 1527 | 'endif;' % (code, partition)) 1528 | 1529 | def _WriteUpdate(self, script, output_zip): 1530 | ZipWrite(output_zip, 1531 | '{}.transfer.list'.format(self.path), 1532 | '{}.transfer.list'.format(self.partition)) 1533 | ZipWrite(output_zip, 1534 | '{}.new.dat'.format(self.path), 1535 | '{}.new.dat'.format(self.partition)) 1536 | ZipWrite(output_zip, 1537 | '{}.patch.dat'.format(self.path), 1538 | '{}.patch.dat'.format(self.partition), 1539 | compress_type=zipfile.ZIP_STORED) 1540 | 1541 | if self.partition == "system": 1542 | code = ErrorCode.SYSTEM_UPDATE_FAILURE 1543 | else: 1544 | code = ErrorCode.VENDOR_UPDATE_FAILURE 1545 | 1546 | call = ('block_image_update("{device}", ' 1547 | 'package_extract_file("{partition}.transfer.list"), ' 1548 | '"{partition}.new.dat", "{partition}.patch.dat") ||\n' 1549 | ' abort("E{code}: Failed to update {partition} image.");'.format( 1550 | device=self.device, partition=self.partition, code=code)) 1551 | script.AppendExtra(script.WordWrap(call)) 1552 | 1553 | def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use 1554 | data = source.ReadRangeSet(ranges) 1555 | ctx = sha1() 1556 | 1557 | for p in data: 1558 | ctx.update(p) 1559 | 1560 | return ctx.hexdigest() 1561 | 1562 | def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use 1563 | """Return the hash value for all zero blocks.""" 1564 | zero_block = '\x00' * 4096 1565 | ctx = sha1() 1566 | for _ in range(num_blocks): 1567 | ctx.update(zero_block) 1568 | 1569 | return ctx.hexdigest() 1570 | 1571 | 1572 | DataImage = blockimgdiff.DataImage 1573 | 1574 | # map recovery.fstab's fs_types to mount/format "partition types" 1575 | PARTITION_TYPES = { 1576 | "yaffs2": "MTD", 1577 | "mtd": "MTD", 1578 | "ext4": "EMMC", 1579 | "emmc": "EMMC", 1580 | "f2fs": "EMMC", 1581 | "squashfs": "EMMC" 1582 | } 1583 | 1584 | def GetTypeAndDevice(mount_point, info): 1585 | fstab = info["fstab"] 1586 | if fstab: 1587 | return (PARTITION_TYPES[fstab[mount_point].fs_type], 1588 | fstab[mount_point].device) 1589 | else: 1590 | raise KeyError 1591 | 1592 | 1593 | def ParseCertificate(data): 1594 | """Parse a PEM-format certificate.""" 1595 | cert = [] 1596 | save = False 1597 | for line in data.split("\n"): 1598 | if "--END CERTIFICATE--" in line: 1599 | break 1600 | if save: 1601 | cert.append(line) 1602 | if "--BEGIN CERTIFICATE--" in line: 1603 | save = True 1604 | cert = "".join(cert).decode('base64') 1605 | return cert 1606 | 1607 | def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img, 
1608 | info_dict=None): 1609 | """Generate a binary patch that creates the recovery image starting 1610 | with the boot image. (Most of the space in these images is just the 1611 | kernel, which is identical for the two, so the resulting patch 1612 | should be efficient.) Add it to the output zip, along with a shell 1613 | script that is run from init.rc on first boot to actually do the 1614 | patching and install the new recovery image. 1615 | 1616 | recovery_img and boot_img should be File objects for the 1617 | corresponding images. info should be the dictionary returned by 1618 | common.LoadInfoDict() on the input target_files. 1619 | """ 1620 | 1621 | if info_dict is None: 1622 | info_dict = OPTIONS.info_dict 1623 | 1624 | full_recovery_image = info_dict.get("full_recovery_image", None) == "true" 1625 | system_root_image = info_dict.get("system_root_image", None) == "true" 1626 | 1627 | if full_recovery_image: 1628 | output_sink("etc/recovery.img", recovery_img.data) 1629 | 1630 | else: 1631 | diff_program = ["imgdiff"] 1632 | path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat") 1633 | if os.path.exists(path): 1634 | diff_program.append("-b") 1635 | diff_program.append(path) 1636 | bonus_args = "-b /system/etc/recovery-resource.dat" 1637 | else: 1638 | bonus_args = "" 1639 | 1640 | d = Difference(recovery_img, boot_img, diff_program=diff_program) 1641 | _, _, patch = d.ComputePatch() 1642 | output_sink("recovery-from-boot.p", patch) 1643 | 1644 | try: 1645 | # The following GetTypeAndDevice()s need to use the path in the target 1646 | # info_dict instead of source_info_dict. 1647 | boot_type, boot_device = GetTypeAndDevice("/boot", info_dict) 1648 | recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict) 1649 | except KeyError: 1650 | return 1651 | 1652 | if full_recovery_image: 1653 | sh = """#!/system/bin/sh 1654 | if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then 1655 | applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed" 1656 | else 1657 | log -t recovery "Recovery image already installed" 1658 | fi 1659 | """ % {'type': recovery_type, 1660 | 'device': recovery_device, 1661 | 'sha1': recovery_img.sha1, 1662 | 'size': recovery_img.size} 1663 | else: 1664 | sh = """#!/system/bin/sh 1665 | if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then 1666 | applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed" 1667 | else 1668 | log -t recovery "Recovery image already installed" 1669 | fi 1670 | """ % {'boot_size': boot_img.size, 1671 | 'boot_sha1': boot_img.sha1, 1672 | 'recovery_size': recovery_img.size, 1673 | 'recovery_sha1': recovery_img.sha1, 1674 | 'boot_type': boot_type, 1675 | 'boot_device': boot_device, 1676 | 'recovery_type': recovery_type, 1677 | 'recovery_device': recovery_device, 1678 | 'bonus_args': bonus_args} 1679 | 1680 | # The install script location moved from /system/etc to /system/bin 1681 | # in the L release. Parse init.*.rc files to find out where the 1682 | # target-files expects it to be, and put it there. 
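  # For example, an init rc line like
  #   service flash_recovery /system/bin/install-recovery.sh
  # makes the loop below set sh_location to "bin/install-recovery.sh".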
1683 | sh_location = "etc/install-recovery.sh" 1684 | found = False 1685 | if system_root_image: 1686 | init_rc_dir = os.path.join(input_dir, "ROOT") 1687 | else: 1688 | init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK") 1689 | init_rc_files = os.listdir(init_rc_dir) 1690 | for init_rc_file in init_rc_files: 1691 | if (not init_rc_file.startswith('init.') or 1692 | not init_rc_file.endswith('.rc')): 1693 | continue 1694 | 1695 | with open(os.path.join(init_rc_dir, init_rc_file)) as f: 1696 | for line in f: 1697 | m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line) 1698 | if m: 1699 | sh_location = m.group(1) 1700 | found = True 1701 | break 1702 | 1703 | if found: 1704 | break 1705 | 1706 | print("putting script in", sh_location) 1707 | 1708 | output_sink(sh_location, sh) 1709 | -------------------------------------------------------------------------------- /bin/linux/ext4.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import functools 3 | import io 4 | import math 5 | import queue 6 | 7 | def wcscmp(str_a, str_b): 8 | for a, b in zip(str_a, str_b): 9 | tmp = ord(a) - ord(b) 10 | if tmp != 0: return -1 if tmp < 0 else 1 11 | 12 | tmp = len(str_a) - len(str_b) 13 | return -1 if tmp < 0 else 1 if tmp > 0 else 0 14 | 15 | 16 | class Ext4Error(Exception): 17 | pass 18 | 19 | 20 | class BlockMapError(Ext4Error): 21 | pass 22 | 23 | 24 | class EndOfStreamError(Ext4Error): 25 | pass 26 | 27 | 28 | class MagicError(Ext4Error): 29 | pass 30 | 31 | 32 | # ----------------------------- LOW LEVEL ------------------------------ 33 | 34 | class ext4_struct(ctypes.LittleEndianStructure): 35 | def __getattr__(self, name): 36 | try: 37 | # Combining *_lo and *_hi fields 38 | lo_field = ctypes.LittleEndianStructure.__getattribute__(type(self), name + "_lo") 39 | size = lo_field.size 40 | 41 | lo = lo_field.__get__(self) 42 | hi = ctypes.LittleEndianStructure.__getattribute__(self, name + "_hi") 43 | 44 | return (hi << (8 * size)) | lo 45 | except AttributeError: 46 | return ctypes.LittleEndianStructure.__getattribute__(self, name) 47 | 48 | def __setattr__(self, name, value): 49 | try: 50 | # Combining *_lo and *_hi fields 51 | lo_field = lo_field = ctypes.LittleEndianStructure.__getattribute__(type(self), name + "_lo") 52 | size = lo_field.size 53 | 54 | lo_field.__set__(self, value & ((1 << (8 * size)) - 1)) 55 | ctypes.LittleEndianStructure.__setattr__(self, name + "_hi", value >> (8 * size)) 56 | except AttributeError: 57 | ctypes.LittleEndianStructure.__setattr__(self, name, value) 58 | 59 | 60 | class ext4_dir_entry_2(ext4_struct): 61 | _fields_ = [ 62 | ("inode", ctypes.c_uint), # 0x0 63 | ("rec_len", ctypes.c_ushort), # 0x4 64 | ("name_len", ctypes.c_ubyte), # 0x6 65 | ("file_type", ctypes.c_ubyte) # 0x7 66 | # Variable length field "name" missing at 0x8 67 | ] 68 | 69 | def _from_buffer_copy(raw, offset=0, platform64=True): 70 | struct = ext4_dir_entry_2.from_buffer_copy(raw, offset) 71 | struct.name = raw[offset + 0x8: offset + 0x8 + struct.name_len] 72 | return struct 73 | 74 | 75 | class ext4_extent(ext4_struct): 76 | _fields_ = [ 77 | ("ee_block", ctypes.c_uint), # 0x0000 78 | ("ee_len", ctypes.c_ushort), # 0x0004 79 | ("ee_start_hi", ctypes.c_ushort), # 0x0006 80 | ("ee_start_lo", ctypes.c_uint) # 0x0008 81 | ] 82 | 83 | 84 | class ext4_extent_header(ext4_struct): 85 | _fields_ = [ 86 | ("eh_magic", ctypes.c_ushort), # 0x0000, Must be 0xF30A 87 | ("eh_entries", ctypes.c_ushort), # 0x0002 88 | ("eh_max", 
ctypes.c_ushort), # 0x0004 89 | ("eh_depth", ctypes.c_ushort), # 0x0006 90 | ("eh_generation", ctypes.c_uint) # 0x0008 91 | ] 92 | 93 | 94 | class ext4_extent_idx(ext4_struct): 95 | _fields_ = [ 96 | ("ei_block", ctypes.c_uint), # 0x0000 97 | ("ei_leaf_lo", ctypes.c_uint), # 0x0004 98 | ("ei_leaf_hi", ctypes.c_ushort), # 0x0008 99 | ("ei_unused", ctypes.c_ushort) # 0x000A 100 | ] 101 | 102 | 103 | class ext4_group_descriptor(ext4_struct): 104 | _fields_ = [ 105 | ("bg_block_bitmap_lo", ctypes.c_uint), # 0x0000 106 | ("bg_inode_bitmap_lo", ctypes.c_uint), # 0x0004 107 | ("bg_inode_table_lo", ctypes.c_uint), # 0x0008 108 | ("bg_free_blocks_count_lo", ctypes.c_ushort), # 0x000C 109 | ("bg_free_inodes_count_lo", ctypes.c_ushort), # 0x000E 110 | ("bg_used_dirs_count_lo", ctypes.c_ushort), # 0x0010 111 | ("bg_flags", ctypes.c_ushort), # 0x0012 112 | ("bg_exclude_bitmap_lo", ctypes.c_uint), # 0x0014 113 | ("bg_block_bitmap_csum_lo", ctypes.c_ushort), # 0x0018 114 | ("bg_inode_bitmap_csum_lo", ctypes.c_ushort), # 0x001A 115 | ("bg_itable_unused_lo", ctypes.c_ushort), # 0x001C 116 | ("bg_checksum", ctypes.c_ushort), # 0x001E 117 | 118 | # 64-bit fields 119 | ("bg_block_bitmap_hi", ctypes.c_uint), # 0x0020 120 | ("bg_inode_bitmap_hi", ctypes.c_uint), # 0x0024 121 | ("bg_inode_table_hi", ctypes.c_uint), # 0x0028 122 | ("bg_free_blocks_count_hi", ctypes.c_ushort), # 0x002C 123 | ("bg_free_inodes_count_hi", ctypes.c_ushort), # 0x002E 124 | ("bg_used_dirs_count_hi", ctypes.c_ushort), # 0x0030 125 | ("bg_itable_unused_hi", ctypes.c_ushort), # 0x0032 126 | ("bg_exclude_bitmap_hi", ctypes.c_uint), # 0x0034 127 | ("bg_block_bitmap_csum_hi", ctypes.c_ushort), # 0x0038 128 | ("bg_inode_bitmap_csum_hi", ctypes.c_ushort), # 0x003A 129 | ("bg_reserved", ctypes.c_uint), # 0x003C 130 | ] 131 | 132 | def _from_buffer_copy(raw, platform64=True): 133 | struct = ext4_group_descriptor.from_buffer_copy(raw) 134 | 135 | if not platform64: 136 | struct.bg_block_bitmap_hi = 0 137 | struct.bg_inode_bitmap_hi = 0 138 | struct.bg_inode_table_hi = 0 139 | struct.bg_free_blocks_count_hi = 0 140 | struct.bg_free_inodes_count_hi = 0 141 | struct.bg_used_dirs_count_hi = 0 142 | struct.bg_itable_unused_hi = 0 143 | struct.bg_exclude_bitmap_hi = 0 144 | struct.bg_block_bitmap_csum_hi = 0 145 | struct.bg_inode_bitmap_csum_hi = 0 146 | struct.bg_reserved = 0 147 | 148 | return struct 149 | 150 | 151 | class ext4_inode(ext4_struct): 152 | EXT2_GOOD_OLD_INODE_SIZE = 128 # Every field passing 128 bytes is "additional data", whose size is specified by i_extra_isize. 
153 | 154 | # i_mode 155 | S_IXOTH = 0x1 # Others can execute 156 | S_IWOTH = 0x2 # Others can write 157 | S_IROTH = 0x4 # Others can read 158 | S_IXGRP = 0x8 # Group can execute 159 | S_IWGRP = 0x10 # Group can write 160 | S_IRGRP = 0x20 # Group can read 161 | S_IXUSR = 0x40 # Owner can execute 162 | S_IWUSR = 0x80 # Owner can write 163 | S_IRUSR = 0x100 # Owner can read 164 | S_ISVTX = 0x200 # Sticky bit (only owner can delete) 165 | S_ISGID = 0x400 # Set GID (execute with privileges of group owner of the file's group) 166 | S_ISUID = 0x800 # Set UID (execute with privileges of the file's owner) 167 | S_IFIFO = 0x1000 # FIFO device (named pipe) 168 | S_IFCHR = 0x2000 # Character device (raw, unbuffered, aligned, direct access to hardware storage) 169 | S_IFDIR = 0x4000 # Directory 170 | S_IFBLK = 0x6000 # Block device (buffered, arbitrary access to storage) 171 | S_IFREG = 0x8000 # Regular file 172 | S_IFLNK = 0xA000 # Symbolic link 173 | S_IFSOCK = 0xC000 # Socket 174 | 175 | # i_flags 176 | EXT4_INDEX_FL = 0x1000 # Uses hash trees 177 | EXT4_EXTENTS_FL = 0x80000 # Uses extents 178 | EXT4_EA_INODE_FL = 0x200000 # Inode stores large xattr 179 | EXT4_INLINE_DATA_FL = 0x10000000 # Has inline data 180 | 181 | _fields_ = [ 182 | ("i_mode", ctypes.c_ushort), # 0x0000 183 | ("i_uid_lo", ctypes.c_ushort), # 0x0002, Originally named i_uid 184 | ("i_size_lo", ctypes.c_uint), # 0x0004 185 | ("i_atime", ctypes.c_uint), # 0x0008 186 | ("i_ctime", ctypes.c_uint), # 0x000C 187 | ("i_mtime", ctypes.c_uint), # 0x0010 188 | ("i_dtime", ctypes.c_uint), # 0x0014 189 | ("i_gid_lo", ctypes.c_ushort), # 0x0018, Originally named i_gid 190 | ("i_links_count", ctypes.c_ushort), # 0x001A 191 | ("i_blocks_lo", ctypes.c_uint), # 0x001C 192 | ("i_flags", ctypes.c_uint), # 0x0020 193 | ("osd1", ctypes.c_uint), # 0x0024 194 | ("i_block", ctypes.c_uint * 15), # 0x0028 195 | ("i_generation", ctypes.c_uint), # 0x0064 196 | ("i_file_acl_lo", ctypes.c_uint), # 0x0068 197 | ("i_size_hi", ctypes.c_uint), # 0x006C, Originally named i_size_high 198 | ("i_obso_faddr", ctypes.c_uint), # 0x0070 199 | ("i_osd2_blocks_high", ctypes.c_ushort), # 0x0074, Originally named i_osd2.linux2.l_i_blocks_high 200 | ("i_file_acl_hi", ctypes.c_ushort), # 0x0076, Originally named i_osd2.linux2.l_i_file_acl_high 201 | ("i_uid_hi", ctypes.c_ushort), # 0x0078, Originally named i_osd2.linux2.l_i_uid_high 202 | ("i_gid_hi", ctypes.c_ushort), # 0x007A, Originally named i_osd2.linux2.l_i_gid_high 203 | ("i_osd2_checksum_lo", ctypes.c_ushort), # 0x007C, Originally named i_osd2.linux2.l_i_checksum_lo 204 | ("i_osd2_reserved", ctypes.c_ushort), # 0x007E, Originally named i_osd2.linux2.l_i_reserved 205 | ("i_extra_isize", ctypes.c_ushort), # 0x0080 206 | ("i_checksum_hi", ctypes.c_ushort), # 0x0082 207 | ("i_ctime_extra", ctypes.c_uint), # 0x0084 208 | ("i_mtime_extra", ctypes.c_uint), # 0x0088 209 | ("i_atime_extra", ctypes.c_uint), # 0x008C 210 | ("i_crtime", ctypes.c_uint), # 0x0090 211 | ("i_crtime_extra", ctypes.c_uint), # 0x0094 212 | ("i_version_hi", ctypes.c_uint), # 0x0098 213 | ("i_projid", ctypes.c_uint), # 0x009C 214 | ] 215 | 216 | 217 | class ext4_superblock(ext4_struct): 218 | EXT2_DESC_SIZE = 0x20 # Default value for s_desc_size, if INCOMPAT_64BIT is not set (NEEDS CONFIRMATION) 219 | 220 | # s_feature_incompat 221 | INCOMPAT_64BIT = 0x80 # Uses 64-bit features (e.g. 
*_hi structure fields in ext4_group_descriptor) 222 | INCOMPAT_FILETYPE = 0x2 # Directory entries record file type (instead of inode flags) 223 | _fields_ = [ 224 | ("s_inodes_count", ctypes.c_uint), # 0x0000 225 | ("s_blocks_count_lo", ctypes.c_uint), # 0x0004 226 | ("s_r_blocks_count_lo", ctypes.c_uint), # 0x0008 227 | ("s_free_blocks_count_lo", ctypes.c_uint), # 0x000C 228 | ("s_free_inodes_count", ctypes.c_uint), # 0x0010 229 | ("s_first_data_block", ctypes.c_uint), # 0x0014 230 | ("s_log_block_size", ctypes.c_uint), # 0x0018 231 | ("s_log_cluster_size", ctypes.c_uint), # 0x001C 232 | ("s_blocks_per_group", ctypes.c_uint), # 0x0020 233 | ("s_clusters_per_group", ctypes.c_uint), # 0x0024 234 | ("s_inodes_per_group", ctypes.c_uint), # 0x0028 235 | ("s_mtime", ctypes.c_uint), # 0x002C 236 | ("s_wtime", ctypes.c_uint), # 0x0030 237 | ("s_mnt_count", ctypes.c_ushort), # 0x0034 238 | ("s_max_mnt_count", ctypes.c_ushort), # 0x0036 239 | ("s_magic", ctypes.c_ushort), # 0x0038, Must be 0xEF53 240 | ("s_state", ctypes.c_ushort), # 0x003A 241 | ("s_errors", ctypes.c_ushort), # 0x003C 242 | ("s_minor_rev_level", ctypes.c_ushort), # 0x003E 243 | ("s_lastcheck", ctypes.c_uint), # 0x0040 244 | ("s_checkinterval", ctypes.c_uint), # 0x0044 245 | ("s_creator_os", ctypes.c_uint), # 0x0048 246 | ("s_rev_level", ctypes.c_uint), # 0x004C 247 | ("s_def_resuid", ctypes.c_ushort), # 0x0050 248 | ("s_def_resgid", ctypes.c_ushort), # 0x0052 249 | ("s_first_ino", ctypes.c_uint), # 0x0054 250 | ("s_inode_size", ctypes.c_ushort), # 0x0058 251 | ("s_block_group_nr", ctypes.c_ushort), # 0x005A 252 | ("s_feature_compat", ctypes.c_uint), # 0x005C 253 | ("s_feature_incompat", ctypes.c_uint), # 0x0060 254 | ("s_feature_ro_compat", ctypes.c_uint), # 0x0064 255 | ("s_uuid", ctypes.c_ubyte * 16), # 0x0068 256 | ("s_volume_name", ctypes.c_char * 16), # 0x0078 257 | ("s_last_mounted", ctypes.c_char * 64), # 0x0088 258 | ("s_algorithm_usage_bitmap", ctypes.c_uint), # 0x00C8 259 | ("s_prealloc_blocks", ctypes.c_ubyte), # 0x00CC 260 | ("s_prealloc_dir_blocks", ctypes.c_ubyte), # 0x00CD 261 | ("s_reserved_gdt_blocks", ctypes.c_ushort), # 0x00CE 262 | ("s_journal_uuid", ctypes.c_ubyte * 16), # 0x00D0 263 | ("s_journal_inum", ctypes.c_uint), # 0x00E0 264 | ("s_journal_dev", ctypes.c_uint), # 0x00E4 265 | ("s_last_orphan", ctypes.c_uint), # 0x00E8 266 | ("s_hash_seed", ctypes.c_uint * 4), # 0x00EC 267 | ("s_def_hash_version", ctypes.c_ubyte), # 0x00FC 268 | ("s_jnl_backup_type", ctypes.c_ubyte), # 0x00FD 269 | ("s_desc_size", ctypes.c_ushort), # 0x00FE 270 | ("s_default_mount_opts", ctypes.c_uint), # 0x0100 271 | ("s_first_meta_bg", ctypes.c_uint), # 0x0104 272 | ("s_mkfs_time", ctypes.c_uint), # 0x0108 273 | ("s_jnl_blocks", ctypes.c_uint * 17), # 0x010C 274 | 275 | # 64-bit fields 276 | ("s_blocks_count_hi", ctypes.c_uint), # 0x0150 277 | ("s_r_blocks_count_hi", ctypes.c_uint), # 0x0154 278 | ("s_free_blocks_count_hi", ctypes.c_uint), # 0x0158 279 | ("s_min_extra_isize", ctypes.c_ushort), # 0x015C 280 | ("s_want_extra_isize", ctypes.c_ushort), # 0x015E 281 | ("s_flags", ctypes.c_uint), # 0x0160 282 | ("s_raid_stride", ctypes.c_ushort), # 0x0164 283 | ("s_mmp_interval", ctypes.c_ushort), # 0x0166 284 | ("s_mmp_block", ctypes.c_ulonglong), # 0x0168 285 | ("s_raid_stripe_width", ctypes.c_uint), # 0x0170 286 | ("s_log_groups_per_flex", ctypes.c_ubyte), # 0x0174 287 | ("s_checksum_type", ctypes.c_ubyte), # 0x0175 288 | ("s_reserved_pad", ctypes.c_ushort), # 0x0176 289 | ("s_kbytes_written", ctypes.c_ulonglong), # 0x0178 290 | 
("s_snapshot_inum", ctypes.c_uint), # 0x0180 291 | ("s_snapshot_id", ctypes.c_uint), # 0x0184 292 | ("s_snapshot_r_blocks_count", ctypes.c_ulonglong), # 0x0188 293 | ("s_snapshot_list", ctypes.c_uint), # 0x0190 294 | ("s_error_count", ctypes.c_uint), # 0x0194 295 | ("s_first_error_time", ctypes.c_uint), # 0x0198 296 | ("s_first_error_ino", ctypes.c_uint), # 0x019C 297 | ("s_first_error_block", ctypes.c_ulonglong), # 0x01A0 298 | ("s_first_error_func", ctypes.c_ubyte * 32), # 0x01A8 299 | ("s_first_error_line", ctypes.c_uint), # 0x01C8 300 | ("s_last_error_time", ctypes.c_uint), # 0x01CC 301 | ("s_last_error_ino", ctypes.c_uint), # 0x01D0 302 | ("s_last_error_line", ctypes.c_uint), # 0x01D4 303 | ("s_last_error_block", ctypes.c_ulonglong), # 0x01D8 304 | ("s_last_error_func", ctypes.c_ubyte * 32), # 0x01E0 305 | ("s_mount_opts", ctypes.c_ubyte * 64), # 0x0200 306 | ("s_usr_quota_inum", ctypes.c_uint), # 0x0240 307 | ("s_grp_quota_inum", ctypes.c_uint), # 0x0244 308 | ("s_overhead_blocks", ctypes.c_uint), # 0x0248 309 | ("s_backup_bgs", ctypes.c_uint * 2), # 0x024C 310 | ("s_encrypt_algos", ctypes.c_ubyte * 4), # 0x0254 311 | ("s_encrypt_pw_salt", ctypes.c_ubyte * 16), # 0x0258 312 | ("s_lpf_ino", ctypes.c_uint), # 0x0268 313 | ("s_prj_quota_inum", ctypes.c_uint), # 0x026C 314 | ("s_checksum_seed", ctypes.c_uint), # 0x0270 315 | ("s_reserved", ctypes.c_uint * 98), # 0x0274 316 | ("s_checksum", ctypes.c_uint) # 0x03FC 317 | ] 318 | 319 | def _from_buffer_copy(raw, platform64=True): 320 | struct = ext4_superblock.from_buffer_copy(raw) 321 | 322 | if not platform64: 323 | struct.s_blocks_count_hi = 0 324 | struct.s_r_blocks_count_hi = 0 325 | struct.s_free_blocks_count_hi = 0 326 | struct.s_min_extra_isize = 0 327 | struct.s_want_extra_isize = 0 328 | struct.s_flags = 0 329 | struct.s_raid_stride = 0 330 | struct.s_mmp_interval = 0 331 | struct.s_mmp_block = 0 332 | struct.s_raid_stripe_width = 0 333 | struct.s_log_groups_per_flex = 0 334 | struct.s_checksum_type = 0 335 | struct.s_reserved_pad = 0 336 | struct.s_kbytes_written = 0 337 | struct.s_snapshot_inum = 0 338 | struct.s_snapshot_id = 0 339 | struct.s_snapshot_r_blocks_count = 0 340 | struct.s_snapshot_list = 0 341 | struct.s_error_count = 0 342 | struct.s_first_error_time = 0 343 | struct.s_first_error_ino = 0 344 | struct.s_first_error_block = 0 345 | struct.s_first_error_func = 0 346 | struct.s_first_error_line = 0 347 | struct.s_last_error_time = 0 348 | struct.s_last_error_ino = 0 349 | struct.s_last_error_line = 0 350 | struct.s_last_error_block = 0 351 | struct.s_last_error_func = 0 352 | struct.s_mount_opts = 0 353 | struct.s_usr_quota_inum = 0 354 | struct.s_grp_quota_inum = 0 355 | struct.s_overhead_blocks = 0 356 | struct.s_backup_bgs = 0 357 | struct.s_encrypt_algos = 0 358 | struct.s_encrypt_pw_salt = 0 359 | struct.s_lpf_ino = 0 360 | struct.s_prj_quota_inum = 0 361 | struct.s_checksum_seed = 0 362 | struct.s_reserved = 0 363 | struct.s_checksum = 0 364 | 365 | if (struct.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) == 0: 366 | struct.s_desc_size = ext4_superblock.EXT2_DESC_SIZE 367 | 368 | return struct 369 | 370 | 371 | class ext4_xattr_entry(ext4_struct): 372 | _fields_ = [ 373 | ("e_name_len", ctypes.c_ubyte), # 0x00 374 | ("e_name_index", ctypes.c_ubyte), # 0x01 375 | ("e_value_offs", ctypes.c_ushort), # 0x02 376 | ("e_value_inum", ctypes.c_uint), # 0x04 377 | ("e_value_size", ctypes.c_uint), # 0x08 378 | ("e_hash", ctypes.c_uint) # 0x0C 379 | # Variable length field "e_name" missing at 0x10 380 | ] 381 | 382 
| def _from_buffer_copy(raw, offset=0, platform64=True): 383 | struct = ext4_xattr_entry.from_buffer_copy(raw, offset) 384 | struct.e_name = raw[offset + 0x10: offset + 0x10 + struct.e_name_len] 385 | return struct 386 | 387 | @property 388 | def _size(self): return 4 * ((ctypes.sizeof(type(self)) + self.e_name_len + 3) // 4) # 4-byte alignment 389 | 390 | 391 | class ext4_xattr_header(ext4_struct): 392 | _fields_ = [ 393 | ("h_magic", ctypes.c_uint), # 0x0, Must be 0xEA020000 394 | ("h_refcount", ctypes.c_uint), # 0x4 395 | ("h_blocks", ctypes.c_uint), # 0x8 396 | ("h_hash", ctypes.c_uint), # 0xC 397 | ("h_checksum", ctypes.c_uint), # 0x10 398 | ("h_reserved", ctypes.c_uint * 3), # 0x14 399 | ] 400 | 401 | 402 | class ext4_xattr_ibody_header(ext4_struct): 403 | _fields_ = [ 404 | ("h_magic", ctypes.c_uint) # 0x0, Must be 0xEA020000 405 | ] 406 | 407 | 408 | class InodeType: 409 | UNKNOWN = 0x0 # Unknown file type 410 | FILE = 0x1 # Regular file 411 | DIRECTORY = 0x2 # Directory 412 | CHARACTER_DEVICE = 0x3 # Character device 413 | BLOCK_DEVICE = 0x4 # Block device 414 | FIFO = 0x5 # FIFO 415 | SOCKET = 0x6 # Socket 416 | SYMBOLIC_LINK = 0x7 # Symbolic link 417 | CHECKSUM = 0xDE # Checksum entry; not really a file type, but a type of directory entry 418 | 419 | 420 | # ----------------------------- HIGH LEVEL ------------------------------ 421 | 422 | class MappingEntry: 423 | def __init__(self, file_block_idx, disk_block_idx, block_count=1): 424 | self.file_block_idx = file_block_idx 425 | self.disk_block_idx = disk_block_idx 426 | self.block_count = block_count 427 | 428 | def __iter__(self): 429 | yield self.file_block_idx 430 | yield self.disk_block_idx 431 | yield self.block_count 432 | 433 | def __repr__(self): 434 | return "{type:s}({file_block_idx!r:s}, {disk_block_idx!r:s}, {blocK_count!r:s})".format( 435 | blocK_count=self.block_count, 436 | disk_block_idx=self.disk_block_idx, 437 | file_block_idx=self.file_block_idx, 438 | type=type(self).__name__ 439 | ) 440 | 441 | def copy(self): 442 | return MappingEntry(self.file_block_idx, self.disk_block_idx, self.block_count) 443 | 444 | def create_mapping(*entries): 445 | file_block_idx = 0 446 | result = [None] * len(entries) 447 | 448 | for i, entry in enumerate(entries): 449 | disk_block_idx, block_count = entry 450 | result[i] = MappingEntry(file_block_idx, disk_block_idx, block_count) 451 | file_block_idx += block_count 452 | 453 | return result 454 | 455 | def optimize(entries): 456 | entries.sort(key=lambda entry: entry.file_block_idx) 457 | 458 | idx = 0 459 | while idx < len(entries): 460 | while idx + 1 < len(entries) \ 461 | and entries[idx].file_block_idx + entries[idx].block_count == entries[idx + 1].file_block_idx \ 462 | and entries[idx].disk_block_idx + entries[idx].block_count == entries[idx + 1].disk_block_idx: 463 | tmp = entries.pop(idx + 1) 464 | entries[idx].block_count += tmp.block_count 465 | 466 | idx += 1 467 | 468 | class Volume: 469 | ROOT_INODE = 2 470 | 471 | def __init__(self, stream, offset=0, ignore_flags=False, ignore_magic=False): 472 | self.ignore_flags = ignore_flags 473 | self.ignore_magic = ignore_magic 474 | self.offset = offset 475 | self.platform64 = True # Initial value needed for Volume.read_struct 476 | self.stream = stream 477 | 478 | # Superblock 479 | self.superblock = self.read_struct(ext4_superblock, 0x400) 480 | self.platform64 = (self.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) != 0 481 | 482 | if not ignore_magic and self.superblock.s_magic != 0xEF53: 483 | raise 
MagicError("Invalid magic value in superblock: 0x{magic:04X} (expected 0xEF53)".format( 484 | magic=self.superblock.s_magic)) 485 | 486 | # Group descriptors 487 | self.group_descriptors = [None] * (self.superblock.s_inodes_count // self.superblock.s_inodes_per_group) 488 | 489 | group_desc_table_offset = (0x400 // self.block_size + 1) * self.block_size # First block after superblock 490 | for group_desc_idx in range(len(self.group_descriptors)): 491 | group_desc_offset = group_desc_table_offset + group_desc_idx * self.superblock.s_desc_size 492 | self.group_descriptors[group_desc_idx] = self.read_struct(ext4_group_descriptor, group_desc_offset) 493 | 494 | def __repr__(self): 495 | return "{type_name:s}(volume_name = {volume_name!r:s}, uuid = {uuid!r:s}, last_mounted = {last_mounted!r:s})".format( 496 | last_mounted=self.superblock.s_last_mounted, 497 | type_name=type(self).__name__, 498 | uuid=self.uuid, 499 | volume_name=self.superblock.s_volume_name 500 | ) 501 | 502 | @property 503 | def block_size(self): 504 | return 1 << (10 + self.superblock.s_log_block_size) 505 | 506 | def get_inode(self, inode_idx, file_type=InodeType.UNKNOWN): 507 | group_idx, inode_table_entry_idx = self.get_inode_group(inode_idx) 508 | 509 | inode_table_offset = self.group_descriptors[group_idx].bg_inode_table * self.block_size 510 | inode_offset = inode_table_offset + inode_table_entry_idx * self.superblock.s_inode_size 511 | 512 | return Inode(self, inode_offset, inode_idx, file_type) 513 | 514 | def get_inode_group(self, inode_idx): 515 | group_idx = (inode_idx - 1) // self.superblock.s_inodes_per_group 516 | inode_table_entry_idx = (inode_idx - 1) % self.superblock.s_inodes_per_group 517 | return (group_idx, inode_table_entry_idx) 518 | 519 | def read(self, offset, byte_len): 520 | if self.offset + offset != self.stream.tell(): 521 | self.stream.seek(self.offset + offset, io.SEEK_SET) 522 | 523 | return self.stream.read(byte_len) 524 | 525 | def read_struct(self, structure, offset, platform64=None): 526 | raw = self.read(offset, ctypes.sizeof(structure)) 527 | 528 | if hasattr(structure, "_from_buffer_copy"): 529 | return structure._from_buffer_copy(raw, platform64=platform64 if platform64 != None else self.platform64) 530 | else: 531 | return structure.from_buffer_copy(raw) 532 | 533 | @property 534 | def root(self): 535 | return self.get_inode(Volume.ROOT_INODE, InodeType.DIRECTORY) 536 | 537 | @property 538 | def uuid(self): 539 | uuid = self.superblock.s_uuid 540 | uuid = [uuid[:4], uuid[4: 6], uuid[6: 8], uuid[8: 10], uuid[10:]] 541 | return "-".join("".join("{0:02X}".format(c) for c in part) for part in uuid) 542 | 543 | 544 | class Inode: 545 | def __init__(self, volume, offset, inode_idx, file_type=InodeType.UNKNOWN): 546 | self.inode_idx = inode_idx 547 | self.offset = offset 548 | self.volume = volume 549 | 550 | self.file_type = file_type 551 | self.inode = volume.read_struct(ext4_inode, offset) 552 | 553 | def __len__(self): 554 | return self.inode.i_size 555 | 556 | def __repr__(self): 557 | if self.inode_idx != None: 558 | return "{type_name:s}(inode_idx = {inode!r:s}, offset = 0x{offset:X}, volume_uuid = {uuid!r:s})".format( 559 | inode=self.inode_idx, 560 | offset=self.offset, 561 | type_name=type(self).__name__, 562 | uuid=self.volume.uuid 563 | ) 564 | else: 565 | return "{type_name:s}(offset = 0x{offset:X}, volume_uuid = {uuid!r:s})".format( 566 | offset=self.offset, 567 | type_name=type(self).__name__, 568 | uuid=self.volume.uuid 569 | ) 570 | 571 | def _parse_xattrs(self, raw_data, 
offset, prefix_override={}):
572 |         prefixes = {
573 |             0: "",
574 |             1: "user.",
575 |             2: "system.posix_acl_access",
576 |             3: "system.posix_acl_default",
577 |             4: "trusted.",
578 |             6: "security.",
579 |             7: "system.",
580 |             8: "system.richacl"
581 |         }
582 |         prefixes.update(prefix_override)
583 | 
584 |         # Iterator over ext4_xattr_entry structures
585 |         i = 0
586 |         while i < len(raw_data):
587 |             xattr_entry = ext4_xattr_entry._from_buffer_copy(raw_data, i, platform64=self.volume.platform64)
588 | 
589 |             if (
590 |                 xattr_entry.e_name_len | xattr_entry.e_name_index | xattr_entry.e_value_offs | xattr_entry.e_value_inum) == 0:
591 |                 # End of ext4_xattr_entry list
592 |                 break
593 | 
594 |             if not xattr_entry.e_name_index in prefixes:
595 |                 raise Ext4Error("Unknown attribute prefix {prefix:d} in inode {inode:d}".format(
596 |                     inode=self.inode_idx,
597 |                     prefix=xattr_entry.e_name_index
598 |                 ))
599 | 
600 |             xattr_name = prefixes[xattr_entry.e_name_index] + xattr_entry.e_name.decode("iso-8859-2")
601 | 
602 |             if xattr_entry.e_value_inum != 0:
603 |                 # external xattr
604 |                 xattr_inode = self.volume.get_inode(xattr_entry.e_value_inum, InodeType.FILE)
605 | 
606 |                 if not self.volume.ignore_flags and (xattr_inode.inode.i_flags & ext4_inode.EXT4_EA_INODE_FL) == 0:
607 |                     raise Ext4Error(
608 |                         "Inode {value_inode:d} associated with the extended attribute {xattr_name!r:s} of inode {inode:d} is not marked as large extended attribute value.".format(
609 |                             inode=self.inode_idx,
610 |                             value_inode=xattr_inode.inode_idx,
611 |                             xattr_name=xattr_name
612 |                         ))
613 | 
614 |                 # TODO Use xattr_entry.e_value_size or xattr_inode.inode.i_size?
615 |                 xattr_value = xattr_inode.open_read().read()
616 |             else:
617 |                 # internal xattr
618 |                 xattr_value = raw_data[
619 |                     xattr_entry.e_value_offs + offset: xattr_entry.e_value_offs + offset + xattr_entry.e_value_size]
620 | 
621 |             yield (xattr_name, xattr_value)
622 | 
623 |             i += xattr_entry._size
624 | 
625 |     def directory_entry_comparator(dir_a, dir_b):
626 |         file_name_a, _, file_type_a = dir_a
627 |         file_name_b, _, file_type_b = dir_b
628 | 
629 |         if file_type_a == InodeType.DIRECTORY == file_type_b or file_type_a != InodeType.DIRECTORY != file_type_b:
630 |             tmp = wcscmp(file_name_a.lower(), file_name_b.lower())
631 |             return tmp if tmp != 0 else wcscmp(file_name_a, file_name_b)
632 |         else:
633 |             return -1 if file_type_a == InodeType.DIRECTORY else 1
634 | 
635 |     directory_entry_key = functools.cmp_to_key(directory_entry_comparator)
636 | 
637 |     def get_inode(self, *relative_path, decode_name=None):
638 |         if not self.is_dir:
639 |             raise Ext4Error("Inode {inode:d} is not a directory.".format(inode=self.inode_idx))
640 | 
641 |         current_inode = self
642 | 
643 |         for i, part in enumerate(relative_path):
644 |             if not self.volume.ignore_flags and not current_inode.is_dir:
645 |                 current_path = "/".join(relative_path[:i])
646 |                 raise Ext4Error("{current_path!r:s} (Inode {inode:d}) is not a directory.".format(
647 |                     current_path=current_path,
648 |                     inode=current_inode.inode_idx
649 |                 ))
650 | 
651 |             file_name, inode_idx, file_type = next(
652 |                 filter(lambda entry: entry[0] == part, current_inode.open_dir(decode_name)), (None, None, None))
653 | 
654 |             if inode_idx == None:
655 |                 current_path = "/".join(relative_path[:i])
656 |                 raise FileNotFoundError("{part!r:s} not found in {current_path!r:s} (Inode {inode:d}).".format(
657 |                     current_path=current_path,
658 |                     inode=current_inode.inode_idx,
659 |                     part=part
660 |                 ))
661 | 
662 |             current_inode = current_inode.volume.get_inode(inode_idx, file_type)
663 | 
664 |         return current_inode
665 | 
666
| @property 667 | def is_dir(self): 668 | if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0: 669 | return (self.inode.i_mode & ext4_inode.S_IFDIR) != 0 670 | else: 671 | return self.file_type == InodeType.DIRECTORY 672 | 673 | @property 674 | def is_file(self): 675 | if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0: 676 | return (self.inode.i_mode & ext4_inode.S_IFREG) != 0 677 | else: 678 | return self.file_type == InodeType.FILE 679 | 680 | @property 681 | def is_symlink(self): 682 | if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0: 683 | return (self.inode.i_mode & ext4_inode.S_IFLNK) != 0 684 | else: 685 | return self.file_type == InodeType.SYMBOLIC_LINK 686 | 687 | @property 688 | def is_in_use(self): 689 | group_idx, bitmap_bit = self.volume.get_inode_group(self.inode_idx) 690 | 691 | inode_usage_bitmap_offset = self.volume.group_descriptors[group_idx].bg_inode_bitmap * self.volume.block_size 692 | inode_usage_byte = self.volume.read(inode_usage_bitmap_offset + bitmap_bit // 8, 1)[0] 693 | 694 | return ((inode_usage_byte >> (7 - bitmap_bit % 8)) & 1) != 0 695 | 696 | @property 697 | def mode_str(self): 698 | special_flag = lambda letter, execute, special: { 699 | (False, False): "-", 700 | (False, True): letter.upper(), 701 | (True, False): "x", 702 | (True, True): letter.lower() 703 | }[(execute, special)] 704 | 705 | try: 706 | if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0: 707 | device_type = { 708 | ext4_inode.S_IFIFO: "p", 709 | ext4_inode.S_IFCHR: "c", 710 | ext4_inode.S_IFDIR: "d", 711 | ext4_inode.S_IFBLK: "b", 712 | ext4_inode.S_IFREG: "-", 713 | ext4_inode.S_IFLNK: "l", 714 | ext4_inode.S_IFSOCK: "s", 715 | }[self.inode.i_mode & 0xF000] 716 | else: 717 | device_type = { 718 | InodeType.FILE: "-", 719 | InodeType.DIRECTORY: "d", 720 | InodeType.CHARACTER_DEVICE: "c", 721 | InodeType.BLOCK_DEVICE: "b", 722 | InodeType.FIFO: "p", 723 | InodeType.SOCKET: "s", 724 | InodeType.SYMBOLIC_LINK: "l" 725 | }[self.file_type] 726 | except KeyError: 727 | device_type = "?" 
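            # Illustrative values: i_mode = 0x81A4 (S_IFREG | 0644) renders
            # as "-rw-r--r--", and i_mode = 0x89ED (S_IFREG | S_ISUID | 0755)
            # renders as "-rwsr-xr-x", mirroring `ls -l`.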
728 | 
729 |         return "".join([
730 |             device_type,
731 | 
732 |             "r" if (self.inode.i_mode & ext4_inode.S_IRUSR) != 0 else "-",
733 |             "w" if (self.inode.i_mode & ext4_inode.S_IWUSR) != 0 else "-",
734 |             special_flag("s", (self.inode.i_mode & ext4_inode.S_IXUSR) != 0,
735 |                          (self.inode.i_mode & ext4_inode.S_ISUID) != 0),
736 | 
737 |             "r" if (self.inode.i_mode & ext4_inode.S_IRGRP) != 0 else "-",
738 |             "w" if (self.inode.i_mode & ext4_inode.S_IWGRP) != 0 else "-",
739 |             special_flag("s", (self.inode.i_mode & ext4_inode.S_IXGRP) != 0,
740 |                          (self.inode.i_mode & ext4_inode.S_ISGID) != 0),
741 | 
742 |             "r" if (self.inode.i_mode & ext4_inode.S_IROTH) != 0 else "-",
743 |             "w" if (self.inode.i_mode & ext4_inode.S_IWOTH) != 0 else "-",
744 |             special_flag("t", (self.inode.i_mode & ext4_inode.S_IXOTH) != 0,
745 |                          (self.inode.i_mode & ext4_inode.S_ISVTX) != 0),
746 |         ])
747 | 
748 |     def open_dir(self, decode_name=None):
749 |         # Parse args
750 |         if decode_name == None:
751 |             decode_name = lambda raw: raw.decode("utf8")
752 | 
753 |         if not self.volume.ignore_flags and not self.is_dir:
754 |             raise Ext4Error("Inode ({inode:d}) is not a directory.".format(inode=self.inode_idx))
755 | 
756 |         # Hash trees are compatible with linear arrays
757 |         if (self.inode.i_flags & ext4_inode.EXT4_INDEX_FL) != 0:
758 |             raise NotImplementedError("Hash trees are not implemented yet.")
759 | 
760 |         # Read raw directory content
761 |         raw_data = self.open_read().read()
762 |         offset = 0
763 | 
764 |         while offset < len(raw_data):
765 |             dirent = ext4_dir_entry_2._from_buffer_copy(raw_data, offset, platform64=self.volume.platform64)
766 | 
767 |             if dirent.file_type != InodeType.CHECKSUM:
768 |                 yield (decode_name(dirent.name), dirent.inode, dirent.file_type)
769 | 
770 |             offset += dirent.rec_len
771 | 
772 |     def open_read(self):
773 |         if (self.inode.i_flags & ext4_inode.EXT4_EXTENTS_FL) != 0:
774 |             # Obtain mapping from extents
775 |             mapping = [] # List of MappingEntry instances
776 | 
777 |             nodes = queue.Queue()
778 |             nodes.put_nowait(self.offset + ext4_inode.i_block.offset)
779 | 
780 |             while nodes.qsize() != 0:
781 |                 header_offset = nodes.get_nowait()
782 |                 header = self.volume.read_struct(ext4_extent_header, header_offset)
783 | 
784 |                 if not self.volume.ignore_magic and header.eh_magic != 0xF30A:
785 |                     raise MagicError(
786 |                         "Invalid magic value in extent header at offset 0x{header_offset:X} of inode {inode:d}: 0x{header_magic:04X} (expected 0xF30A)".format(
787 |                             header_magic=header.eh_magic,
788 |                             header_offset=header_offset,
789 |                             inode=self.inode_idx
790 |                         ))
791 | 
792 |                 if header.eh_depth != 0:
793 |                     indices = self.volume.read_struct(ext4_extent_idx * header.eh_entries,
794 |                                                       header_offset + ctypes.sizeof(ext4_extent_header))
795 |                     for idx in indices: nodes.put_nowait(idx.ei_leaf * self.volume.block_size)
796 |                 else:
797 |                     extents = self.volume.read_struct(ext4_extent * header.eh_entries,
798 |                                                       header_offset + ctypes.sizeof(ext4_extent_header))
799 |                     for extent in extents:
800 |                         mapping.append(MappingEntry(extent.ee_block, extent.ee_start, extent.ee_len))
801 | 
802 |             MappingEntry.optimize(mapping)
803 |             return BlockReader(self.volume, len(self), mapping)
804 |         else:
805 |             # Inode uses inline data
806 |             i_block = self.volume.read(self.offset + ext4_inode.i_block.offset, ext4_inode.i_block.size)
807 |             return io.BytesIO(i_block[:self.inode.i_size])
808 | 
809 |     @property
810 |     def size_readable(self):
811 |         if self.inode.i_size < 1024:
812 |             return "{0:d} bytes".format(self.inode.i_size) if self.inode.i_size != 1 else "1 byte"
813 |         else:
814 |             units = ["KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]
815 |             unit_idx = min(int(math.log(self.inode.i_size, 1024)), len(units))
816 | 
817 |             return "{size:.2f} {unit:s}".format(
818 |                 size=self.inode.i_size / (1024 ** unit_idx),
819 |                 unit=units[unit_idx - 1]
820 |             )
821 | 
822 |     def xattrs(self, check_inline=True, check_block=True, force_inline=False, prefix_override={}):
823 |         # Inline xattrs
824 |         inline_data_offset = self.offset + ext4_inode.EXT2_GOOD_OLD_INODE_SIZE + self.inode.i_extra_isize
825 |         inline_data_length = self.offset + self.volume.superblock.s_inode_size - inline_data_offset
826 | 
827 |         if check_inline and inline_data_length > ctypes.sizeof(ext4_xattr_ibody_header):
828 |             inline_data = self.volume.read(inline_data_offset, inline_data_length)
829 |             xattrs_header = ext4_xattr_ibody_header.from_buffer_copy(inline_data)
830 | 
831 |             # TODO Find way to detect inline xattrs without checking the h_magic field to enable error detection with the h_magic field.
832 |             if force_inline or xattrs_header.h_magic == 0xEA020000:
833 |                 offset = 4 * ((ctypes.sizeof(
834 |                     ext4_xattr_ibody_header) + 3) // 4) # The ext4_xattr_entry following the header is aligned on a 4-byte boundary
835 |                 for xattr_name, xattr_value in self._parse_xattrs(inline_data[offset:], 0,
836 |                                                                   prefix_override=prefix_override):
837 |                     yield (xattr_name, xattr_value)
838 | 
839 |         # xattr block(s)
840 |         if check_block and self.inode.i_file_acl != 0:
841 |             xattrs_block_start = self.inode.i_file_acl * self.volume.block_size
842 |             xattrs_block = self.volume.read(xattrs_block_start, self.volume.block_size)
843 | 
844 |             xattrs_header = ext4_xattr_header.from_buffer_copy(xattrs_block)
845 |             if not self.volume.ignore_magic and xattrs_header.h_magic != 0xEA020000:
846 |                 raise MagicError(
847 |                     "Invalid magic value in xattrs block header at offset 0x{xattrs_block_start:X} of inode {inode:d}: 0x{xattrs_header:08X} (expected 0xEA020000)".format(
848 |                         inode=self.inode_idx,
849 |                         xattrs_block_start=xattrs_block_start,
850 |                         xattrs_header=xattrs_header.h_magic
851 |                     ))
852 | 
853 |             if xattrs_header.h_blocks != 1:
854 |                 raise Ext4Error(
855 |                     "Invalid number of xattr blocks at offset 0x{xattrs_block_start:X} of inode {inode:d}: {xattrs_header:d} (expected 1)".format(
856 |                         inode=self.inode_idx,
857 |                         xattrs_header=xattrs_header.h_blocks,
858 |                         xattrs_block_start=xattrs_block_start
859 |                     ))
860 | 
861 |             offset = 4 * ((ctypes.sizeof(
862 |                 ext4_xattr_header) + 3) // 4) # The ext4_xattr_entry following the header is aligned on a 4-byte boundary
863 |             for xattr_name, xattr_value in self._parse_xattrs(xattrs_block[offset:], -offset,
864 |                                                               prefix_override=prefix_override):
865 |                 yield (xattr_name, xattr_value)
866 | 
867 | 
868 | class BlockReader:
869 |     # OSError
870 |     EINVAL = 22
871 | 
872 |     def __init__(self, volume, byte_size, block_map):
873 |         self.byte_size = byte_size
874 |         self.volume = volume
875 | 
876 |         self.cursor = 0
877 | 
878 |         block_map = list(map(MappingEntry.copy, block_map))
879 | 
880 |         # Optimize mapping (stitch together)
881 |         MappingEntry.optimize(block_map)
882 |         self.block_map = block_map
883 | 
884 |     def __repr__(self):
885 |         return "{type_name:s}(byte_size = {size!r:s}, block_map = {block_map!r:s}, volume_uuid = {uuid!r:s})".format(
886 |             block_map=self.block_map,
887 |             size=self.byte_size,
888 |             type_name=type(self).__name__,
889 |             uuid=self.volume.uuid
890 |         )
891 | 
892 |     def get_block_mapping(self, file_block_idx):
893 |         disk_block_idx = None
894 | 
895 |         # Find disk block
896 |         for entry in self.block_map:
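            # Each MappingEntry covers file blocks [file_block_idx,
            # file_block_idx + block_count); a hit translates the requested
            # file block to the disk block at the same offset within the
            # entry. File blocks with no mapping fall through and read back
            # as zero-filled blocks (see read_block below).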
897 |             if entry.file_block_idx <= file_block_idx < entry.file_block_idx + entry.block_count:
898 |                 block_diff = file_block_idx - entry.file_block_idx
899 |                 disk_block_idx = entry.disk_block_idx + block_diff
900 |                 break
901 | 
902 |         return disk_block_idx
903 | 
904 |     def read(self, byte_len=-1):
905 |         # Parse args
906 |         if byte_len < -1: raise ValueError("byte_len must be non-negative or -1")
907 | 
908 |         bytes_remaining = self.byte_size - self.cursor
909 |         byte_len = bytes_remaining if byte_len == -1 else max(0, min(byte_len, bytes_remaining))
910 | 
911 |         if byte_len == 0: return b""
912 | 
913 |         # Reading blocks
914 |         start_block_idx = self.cursor // self.volume.block_size
915 |         end_block_idx = (self.cursor + byte_len - 1) // self.volume.block_size
916 |         end_of_stream_check = byte_len
917 | 
918 |         blocks = [self.read_block(i) for i in range(start_block_idx, end_block_idx + 1)]
919 | 
920 |         start_offset = self.cursor % self.volume.block_size
921 |         if start_offset != 0: blocks[0] = blocks[0][start_offset:]
922 |         byte_len = (byte_len + start_offset - self.volume.block_size - 1) % self.volume.block_size + 1
923 |         blocks[-1] = blocks[-1][:byte_len]
924 | 
925 |         result = b"".join(blocks)
926 | 
927 |         # Check read
928 |         if len(result) != end_of_stream_check:
929 |             raise EndOfStreamError(
930 |                 "The volume's underlying stream ended {0:d} bytes before EOF.".format(byte_len - len(result)))
931 | 
932 |         self.cursor += len(result)
933 |         return result
934 | 
935 |     def read_block(self, file_block_idx):
936 |         disk_block_idx = self.get_block_mapping(file_block_idx)
937 | 
938 |         if disk_block_idx != None:
939 |             return self.volume.read(disk_block_idx * self.volume.block_size, self.volume.block_size)
940 |         else:
941 |             return bytes([0] * self.volume.block_size)
942 | 
943 |     def seek(self, seek, seek_mode=io.SEEK_SET):
944 |         if seek_mode == io.SEEK_CUR:
945 |             seek += self.cursor
946 |         elif seek_mode == io.SEEK_END:
947 |             seek += self.byte_size
948 |         # elif seek_mode == io.SEEK_SET:
949 |         #     seek += 0
950 | 
951 |         if seek < 0:
952 |             raise OSError(BlockReader.EINVAL, "Invalid argument") # Exception behavior copied from IOBase.seek
953 | 
954 |         self.cursor = seek
955 |         return seek
956 | 
957 |     def tell(self):
958 |         return self.cursor
959 | 
--------------------------------------------------------------------------------
/bin/linux/img2sdat.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | #====================================================
4 | #          FILE: img2sdat.py
5 | #       AUTHORS: xpirt - luxi78 - howellzhu
6 | #          DATE: 2018-05-25 12:19:12 CEST
7 | #====================================================
8 | 
9 | from __future__ import print_function
10 | 
11 | import sys, os, errno, tempfile
12 | import common, blockimgdiff, sparse_img
13 | 
14 | def main(INPUT_IMAGE, OUTDIR='.', VERSION=None, PREFIX='system'):
15 |     global input
16 | 
17 |     __version__ = '1.7'
18 | 
19 |     if sys.hexversion < 0x02070000:
20 |         print("Python 2.7 or newer is required.", file=sys.stderr)
21 |         try:
22 |             input = raw_input
23 |         except NameError: pass
24 |         input('Press ENTER to exit...')
25 |         sys.exit(1)
26 |     else:
27 |         print('img2sdat binary - version: %s\n' % __version__)
28 | 
29 |     if not os.path.isdir(OUTDIR):
30 |         os.makedirs(OUTDIR)
31 | 
32 |     OUTDIR = OUTDIR + '/' + PREFIX
33 | 
34 |     if not VERSION:
35 |         VERSION = 4
36 |         while True:
37 |             print('''    1. Android Lollipop 5.0
38 |     2. Android Lollipop 5.1
39 |     3. Android Marshmallow 6.0
40 |     4. Android Nougat 7.0/7.1/8.0/8.1
41 | ''')
42 |             try:
43 |                 input = raw_input
44 |             except NameError: pass
45 |             item = input('Choose system version: ')
46 |             if item == '1':
47 |                 VERSION = 1
48 |                 break
49 |             elif item == '2':
50 |                 VERSION = 2
51 |                 break
52 |             elif item == '3':
53 |                 VERSION = 3
54 |                 break
55 |             elif item == '4':
56 |                 VERSION = 4
57 |                 break
58 |             else:
59 |                 return
60 | 
61 |     # Get sparse image
62 |     image = sparse_img.SparseImage(INPUT_IMAGE, tempfile.mkstemp()[1], '0')
63 | 
64 |     # Generate output files
65 |     b = blockimgdiff.BlockImageDiff(image, None, VERSION)
66 |     b.Compute(OUTDIR)
67 | 
68 |     print('Done! Output files: %s' % os.path.dirname(OUTDIR))
69 |     return
70 | 
71 | if __name__ == '__main__':
72 |     import argparse
73 | 
74 |     parser = argparse.ArgumentParser(description='Visit xda thread for more information.')
75 |     parser.add_argument('image', help='input system image')
76 |     parser.add_argument('-o', '--outdir', help='output directory (current directory by default)')
77 |     parser.add_argument('-v', '--version', help='transfer list version number, will be asked by default - more info on xda thread')
78 |     parser.add_argument('-p', '--prefix', help='name of image (prefix.new.dat)')
79 | 
80 |     args = parser.parse_args()
81 | 
82 |     INPUT_IMAGE = args.image
83 | 
84 |     if args.outdir:
85 |         OUTDIR = args.outdir
86 |     else:
87 |         OUTDIR = '.'
88 | 
89 |     if args.version:
90 |         VERSION = int(args.version)
91 |     else:
92 |         VERSION = None
93 | 
94 |     if args.prefix:
95 |         PREFIX = args.prefix
96 |     else:
97 |         PREFIX = 'system'
98 | 
99 |     main(INPUT_IMAGE, OUTDIR, VERSION, PREFIX)
100 | 
--------------------------------------------------------------------------------
/bin/linux/imgextractor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import struct
4 | import traceback
5 | import shutil
6 | 
7 | EXT4_HEADER_MAGIC = 0xED26FF3A
8 | EXT4_SPARSE_HEADER_LEN = 28
9 | EXT4_CHUNK_HEADER_SIZE = 12
10 | 
11 | 
12 | class ext4_file_header(object):
13 |     def __init__(self, buf):
14 |         (self.magic,
15 |          self.major,
16 |          self.minor,
17 |          self.file_header_size,
18 |          self.chunk_header_size,
19 |          self.block_size,
20 |          self.total_blocks,
21 |          self.total_chunks,
22 |          self.crc32) = struct.unpack('<I4H4I', buf)
23 | 
24 | 
25 | class ext4_chunk_header(object):
26 |     def __init__(self, buf):
27 |         (self.type,
28 |          self.reserved1,
29 |          self.chunk_size,
30 |          self.total_size) = struct.unpack('<2H2I', buf)
31 | 
32 | 
33 | class Extractor(object):
34 |     def __init__(self):
35 |         self.FileName = ''
36 |         self.BASE_DIR = ''
37 |         self.EXTRACT_DIR = ''
38 |         self.OUTPUT_IMAGE_FILE = ''
39 |         self.DIR = ''
40 |         self.fsconfig = []
41 |         self.context = []
42 |         self.symlinks = []
43 |         self.sign_offset = 0
44 | 
45 |     def __remove(self, path):
46 |         if os.path.isfile(path):
47 |             os.remove(path)
48 |         elif os.path.isdir(path):
49 |             shutil.rmtree(path)
50 | 
51 |     def __file_name(self, name):
52 |         # "system.img" -> "system"
53 |         return name.split('.')[0]
54 | 
55 |     def __appendf(self, msg, log_file):
56 |         # append one line to a report file (fs_config, file_contexts,
57 |         # _size.txt)
58 |         with open(log_file, 'a', encoding='utf-8') as out:
59 |             out.write(str(msg) + '\n')
60 | 
61 |     def __getperm(self, arg):
62 |         # Convert a mode string from Inode.mode_str into the octal
63 |         # "<special><owner><group><other>" form used by fs_config
64 |         # entries: r/w/x add 4/2/1 to the digit of their triplet,
65 |         # while s/S and t/T set the setuid/setgid/sticky bits in the
66 |         # leading special digit, e.g. "-rwxr-xr-x" -> "0755" and
67 |         # "-rwsr-xr-x" -> "4755".
68 |         #
69 |         # Only 9- or 10-character permission strings (with or without
70 |         # the leading file-type character) are meaningful here.
71 |         if len(arg) > 10:
72 |             return
73 |         if len(arg) > 8:
74 |             arg = arg[1:]
75 |         oor, ow, ox, gr, gw, gx, wr, ww, wx = list(arg)
76 |         o, g, w, s = 0, 0, 0, 0
77 |         if oor == 'r': o += 4
78 |         if ow == 'w': o += 2
79 |         if ox == 'x': o += 1
80 |         if ox == 'S': s += 4
81 |         if ox == 's': s += 4; o += 1
82 |         if gr == 'r': g += 4
83 |         if gw == 'w': g += 2
84 |         if gx == 'x': g += 1
85 |         if gx == 'S': s += 2
86 |         if gx == 's': s += 2; g += 1
87 |         if wr == 'r': w += 4
88 |         if ww == 'w': w += 2
89 |         if wx == 'x': w += 1
90 |         if wx == 'T': s += 1
91 |         if wx == 't': s += 1; w += 1
92 |         return str(s) + str(o) + str(g) + str(w)
93 | 
94 |     def __ext4extractor(self):
95 |         import ext4, string, struct
96 |         #fs_config_file = self.FileName + '_statfile.txt'
97 |         contexts = self.BASE_DIR + self.FileName + "_file_contexts" #08.05.18
98 |         fs_config_file = self.FileName + '_fs_config'
99 |         def scan_dir(root_inode, root_path=""):
100 |             for entry_name, entry_inode_idx, entry_type in root_inode.open_dir():
101 |                 if entry_name in ['.', '..', 'lost+found'] or entry_name.endswith(' (2)'):
102 |                     continue
103 |                 entry_inode = root_inode.volume.get_inode(entry_inode_idx, entry_type)
104 |                 entry_inode_path = root_path + '/' + entry_name
105 |                 mode = self.__getperm(entry_inode.mode_str)
106 |                 uid = entry_inode.inode.i_uid
107 |
gid = entry_inode.inode.i_gid 108 | con = '' 109 | cap = '' 110 | for i in list(entry_inode.xattrs()): 111 | if i[0] == 'security.selinux': 112 | con = i[1].decode('utf8')[:-1] 113 | elif i[0] == 'security.capability': 114 | cap = ' capabilities=' + str(hex(struct.unpack("sx', index.encode('utf-8')) 208 | out.write(tmp + struct.pack('xx')) 209 | os.system('attrib +s "%s"' % target.replace('/', os.sep)) 210 | if not all(c in string.printable for c in link_target): 211 | pass 212 | if entry_inode_path[1:] == entry_name or link_target[1:] == entry_name: 213 | self.symlinks.append('%s %s' % (link_target, entry_inode_path[1:])) 214 | else: 215 | self.symlinks.append('%s %s' % (link_target, self.DIR + entry_inode_path)) 216 | except: 217 | try: 218 | link_target_block = int.from_bytes(entry_inode.open_read().read(), "little") 219 | link_target = root_inode.volume.read(link_target_block * root_inode.volume.block_size, entry_inode.inode.i_size).decode("utf8") 220 | target = self.EXTRACT_DIR + entry_inode_path 221 | if link_target and all(c in string.printable for c in link_target): 222 | if cap == '' and con == '': 223 | self.fsconfig.append('%s %s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode, link_target)) 224 | else: 225 | if cap == '': 226 | self.fsconfig.append('%s %s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode, link_target)) 227 | if con != 'u:object_r:' + self.FileName + '_file:s0':#11.05.18 228 | self.context.append('/%s %s' % (self.DIR + entry_inode_path, con)) 229 | else: 230 | if con == '': 231 | self.fsconfig.append('%s %s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode + cap, link_target)) 232 | else: 233 | self.fsconfig.append('%s %s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode + cap, link_target)) 234 | if con != 'u:object_r:' + self.FileName + '_file:s0':#11.05.18 235 | self.context.append('/%s %s' % (self.DIR + entry_inode_path, con)) 236 | if os.name == 'posix': 237 | os.symlink(link_target, target) 238 | if os.name == 'nt': 239 | with open(target.replace('/', os.sep), 'wb') as out: 240 | tmp = bytes.fromhex('213C73796D6C696E6B3EFFFE') 241 | for index in list(link_target): 242 | tmp = tmp + struct.pack('>sx', index.encode('utf-8')) 243 | out.write(tmp + struct.pack('xx')) 244 | os.system('attrib +s %s' % target.replace('/', os.sep)) 245 | else: 246 | pass 247 | except: 248 | pass 249 | 250 | f = open(self.EXTRACT_DIR + '_size.txt', 'tw', encoding='utf-8') 251 | self.__appendf(os.path.getsize(self.OUTPUT_IMAGE_FILE), self.EXTRACT_DIR + '_size.txt') 252 | f.close() 253 | with open(self.OUTPUT_IMAGE_FILE, 'rb') as file: 254 | root = ext4.Volume(file).root 255 | dirlist = [] 256 | for file_name, inode_idx, file_type in root.open_dir(): 257 | dirlist.append(file_name) 258 | dirr = self.__file_name(os.path.basename(self.OUTPUT_IMAGE_FILE).split('.')[0]) #11.05.18 259 | setattr(self, 'DIR', dirr) 260 | if dirr == 'system': 261 | self.fsconfig = [dirr +'/lost+found 0 0 0700']#11.05.18 262 | elif dirr == 'vendor': 263 | self.fsconfig = [dirr +'/lost+found 0 0 0700']#11.05.18 264 | self.context = ['/' + dirr + '(/.*)? 
u:object_r:' + dirr + '_file:s0']#11.05.18
265 |         scan_dir(root)
266 |         self.fsconfig.sort()
267 |         self.__appendf('\n'.join(self.fsconfig), self.BASE_DIR + os.sep + fs_config_file)
268 |         if self.context:#11.05.18
269 |             self.context.sort()#11.05.18
270 |             self.__appendf('\n'.join(self.context), contexts)#11.05.18
271 | 
272 |     def __converSimgToImg(self, target):
273 |         with open(target, "rb") as img_file:
274 |             if self.sign_offset > 0:
275 |                 img_file.seek(self.sign_offset, 0)
276 |             header = ext4_file_header(img_file.read(28))
277 |             total_chunks = header.total_chunks
278 |             if header.file_header_size > EXT4_SPARSE_HEADER_LEN:
279 |                 img_file.seek(header.file_header_size - EXT4_SPARSE_HEADER_LEN, 1)
280 |             with open(target.replace(".img", ".raw.img"), "wb") as raw_img_file:
281 |                 sector_base = 82528
282 |                 output_len = 0
283 |                 while total_chunks > 0:
284 |                     chunk_header = ext4_chunk_header(img_file.read(EXT4_CHUNK_HEADER_SIZE))
285 |                     sector_size = (chunk_header.chunk_size * header.block_size) >> 9
286 |                     chunk_data_size = chunk_header.total_size - header.chunk_header_size
287 |                     if chunk_header.type == 0xCAC1: # CHUNK_TYPE_RAW
288 |                         if header.chunk_header_size > EXT4_CHUNK_HEADER_SIZE:
289 |                             img_file.seek(header.chunk_header_size - EXT4_CHUNK_HEADER_SIZE, 1)
290 |                         data = img_file.read(chunk_data_size)
291 |                         len_data = len(data)
292 |                         if len_data == (sector_size << 9):
293 |                             raw_img_file.write(data)
294 |                             output_len += len_data
295 |                             sector_base += sector_size
296 |                     else:
297 |                         if chunk_header.type == 0xCAC2: # CHUNK_TYPE_FILL
298 |                             if header.chunk_header_size > EXT4_CHUNK_HEADER_SIZE:
299 |                                 img_file.seek(header.chunk_header_size - EXT4_CHUNK_HEADER_SIZE, 1)
300 |                             data = img_file.read(chunk_data_size)
301 |                             len_data = sector_size << 9
302 |                             raw_img_file.write(struct.pack("B", 0) * len_data)
303 |                             output_len += len(data)
304 |                             sector_base += sector_size
305 |                         else:
306 |                             if chunk_header.type == 0xCAC3: # CHUNK_TYPE_DONT_CARE
307 |                                 if header.chunk_header_size > EXT4_CHUNK_HEADER_SIZE:
308 |                                     img_file.seek(header.chunk_header_size - EXT4_CHUNK_HEADER_SIZE, 1)
309 |                                 data = img_file.read(chunk_data_size)
310 |                                 len_data = sector_size << 9
311 |                                 raw_img_file.write(struct.pack("B", 0) * len_data)
312 |                                 output_len += len(data)
313 |                                 sector_base += sector_size
314 |                             else:
315 |                                 len_data = sector_size << 9
316 |                                 raw_img_file.write(struct.pack("B", 0) * len_data)
317 |                                 sector_base += sector_size
318 |                     total_chunks -= 1
319 |         # if os.path.exists(target):
320 |         #     self.__remove(target)
321 |         #     os.rename(target.replace(".img", ".raw.img"), target)
322 | 
323 |         self.OUTPUT_IMAGE_FILE = target.replace(".img", ".raw.img")
324 | 
325 |     def checkSignOffset(self, file):
326 |         import mmap
327 |         mm = mmap.mmap(file.fileno(), 52428800, access=mmap.ACCESS_READ) # 52428800=50Mb
328 |         offset = mm.find(struct.pack('<I', EXT4_HEADER_MAGIC))
329 |         mm.close()
330 |         return offset
331 | 
332 |     def __getTypeTarget(self, target):
333 |         with open(target, 'rb') as img_file:
334 |             self.sign_offset = self.checkSignOffset(img_file)
335 | 
336 |             if self.sign_offset > 0:
337 |                 img_file.seek(self.sign_offset, 0)
338 |             header = ext4_file_header(img_file.read(28))
339 |             if header.magic != EXT4_HEADER_MAGIC:
340 |                 return 'img'
341 |             else:
342 |                 return 'simg'
343 | 
344 |     def main(self, target, output_dir):
345 |         self.BASE_DIR = (os.path.realpath(os.path.dirname(target)) + os.sep)
346 |         self.EXTRACT_DIR = os.path.realpath(os.path.dirname(output_dir)) + os.sep + self.__file_name(os.path.basename(output_dir)) #output_dir
347 |         self.OUTPUT_IMAGE_FILE = self.BASE_DIR + os.path.basename(target)
348 |         self.FileName = self.__file_name(os.path.basename(target))
349 |         target_type = self.__getTypeTarget(target)
350 |         if target_type == 'simg':
351 |             print("Convert %s to %s" % (os.path.basename(target), os.path.basename(target).replace(".img", ".raw.img")))
352 |             self.__converSimgToImg(target)
353 |             print("Extraction from %s to %s" % (os.path.basename(target), os.path.basename(self.EXTRACT_DIR)))
354 |             self.__ext4extractor()
355 |         if target_type == 'img':
356 |             print("Extraction from %s to %s" % (os.path.basename(target), os.path.basename(self.EXTRACT_DIR)))
357 |             self.__ext4extractor()
358 | 
359 | 
360 | if __name__ == '__main__':
361 |     if sys.argv.__len__() == 3:
362 |         Extractor().main(sys.argv[1], sys.argv[2])
363 |     else:
364 |         if sys.argv.__len__() == 2:
365 |             Extractor().main(sys.argv[1], os.path.realpath(os.path.dirname(sys.argv[1])) + os.sep + os.path.basename(sys.argv[1]).split('.')[0])
366 |         else:
367 |             print("Must be at least 1 argument...")
368 |             sys.exit(1)
369 | 
--------------------------------------------------------------------------------
/bin/linux/magiskboot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/magiskboot
--------------------------------------------------------------------------------
/bin/linux/make_ext4fs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/make_ext4fs
--------------------------------------------------------------------------------
/bin/linux/rangelib.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2014 The Android Open Source Project
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #      http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | from __future__ import print_function
16 | import heapq
17 | import itertools
18 | 
19 | __all__ = ["RangeSet"]
20 | 
21 | class RangeSet(object):
22 |     """A RangeSet represents a set of nonoverlapping ranges on the
23 |     integers (ie, a set of integers, but efficient when the set contains
24 |     lots of runs)."""
25 | 
26 |     def __init__(self, data=None):
27 |         self.monotonic = False
28 |         if isinstance(data, str):
29 |             self._parse_internal(data)
30 |         elif data:
31 |             assert len(data) % 2 == 0
32 |             self.data = tuple(self._remove_pairs(data))
33 |             self.monotonic = all(x < y for x, y in zip(self.data, self.data[1:]))
34 |         else:
35 |             self.data = ()
36 | 
37 |     def __iter__(self):
38 |         for i in range(0, len(self.data), 2):
39 |             yield self.data[i:i+2]
40 | 
41 |     def __eq__(self, other):
42 |         return self.data == other.data
43 | 
44 |     def __ne__(self, other):
45 |         return self.data != other.data
46 | 
47 |     def __nonzero__(self):
48 |         return bool(self.data)
49 |     __bool__ = __nonzero__ # Python 3 truthiness hook
50 | 
51 |     def __str__(self):
52 |         if not self.data:
53 |             return "empty"
54 |         else:
55 |             return self.to_string()
56 | 
57 |     def __repr__(self):
58 |         return '<RangeSet("' + self.to_string() + '")>'
59 | 
60 |     @classmethod
61 |     def parse(cls, text):
62 |         """Parse a text string consisting of a space-separated list of
63 |         blocks and ranges, eg "10-20 30 35-40". Ranges are interpreted to
64 |         include both their ends (so the above example represents 18
65 |         individual blocks). Returns a RangeSet object.
66 | 
67 |         If the input has all its blocks in increasing order, then returned
68 |         RangeSet will have an extra attribute 'monotonic' that is set to
69 |         True. For example the input "10-20 30" is monotonic, but the input
70 |         "15-20 30 10-14" is not, even though they represent the same set
71 |         of blocks (and the two RangeSets will compare equal with ==).
72 |         """
73 |         return cls(text)
74 | 
75 |     def _parse_internal(self, text):
76 |         data = []
77 |         last = -1
78 |         monotonic = True
79 |         for p in text.split():
80 |             if "-" in p:
81 |                 s, e = (int(x) for x in p.split("-"))
82 |                 data.append(s)
83 |                 data.append(e+1)
84 |                 if last <= s <= e:
85 |                     last = e
86 |                 else:
87 |                     monotonic = False
88 |             else:
89 |                 s = int(p)
90 |                 data.append(s)
91 |                 data.append(s+1)
92 |                 if last <= s:
93 |                     last = s+1
94 |                 else:
95 |                     monotonic = False
96 |         data.sort()
97 |         self.data = tuple(self._remove_pairs(data))
98 |         self.monotonic = monotonic
99 | 
100 |     @staticmethod
101 |     def _remove_pairs(source):
102 |         """Remove consecutive duplicate items to simplify the result.
103 | 
104 |         [1, 2, 2, 5, 5, 10] will become [1, 10]."""
105 |         last = None
106 |         for i in source:
107 |             if i == last:
108 |                 last = None
109 |             else:
110 |                 if last is not None:
111 |                     yield last
112 |                 last = i
113 |         if last is not None:
114 |             yield last
115 | 
116 |     def to_string(self):
117 |         out = []
118 |         for i in range(0, len(self.data), 2):
119 |             s, e = self.data[i:i+2]
120 |             if e == s+1:
121 |                 out.append(str(s))
122 |             else:
123 |                 out.append(str(s) + "-" + str(e-1))
124 |         return " ".join(out)
125 | 
126 |     def to_string_raw(self):
127 |         assert self.data
128 |         return str(len(self.data)) + "," + ",".join(str(i) for i in self.data)
129 | 
130 |     def union(self, other):
131 |         """Return a new RangeSet representing the union of this RangeSet
132 |         with the argument.
133 | 
134 |         >>> RangeSet("10-19 30-34").union(RangeSet("18-29"))
135 |         <RangeSet("10-34")>
136 |         >>> RangeSet("10-19 30-34").union(RangeSet("22 32"))
137 |         <RangeSet("10-19 22 30-34")>
138 |         """
139 |         out = []
140 |         z = 0
141 |         for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
142 |                                 zip(other.data, itertools.cycle((+1, -1)))):
143 |             if (z == 0 and d == 1) or (z == 1 and d == -1):
144 |                 out.append(p)
145 |             z += d
146 |         return RangeSet(data=out)
147 | 
148 |     def intersect(self, other):
149 |         """Return a new RangeSet representing the intersection of this
150 |         RangeSet with the argument.
151 | 
152 |         >>> RangeSet("10-19 30-34").intersect(RangeSet("18-32"))
153 |         <RangeSet("18-19 30-32")>
154 |         >>> RangeSet("10-19 30-34").intersect(RangeSet("22-28"))
155 |         <RangeSet("")>
156 |         """
157 |         out = []
158 |         z = 0
159 |         for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
160 |                                 zip(other.data, itertools.cycle((+1, -1)))):
161 |             if (z == 1 and d == 1) or (z == 2 and d == -1):
162 |                 out.append(p)
163 |             z += d
164 |         return RangeSet(data=out)
165 | 
166 |     def subtract(self, other):
167 |         """Return a new RangeSet representing subtracting the argument
168 |         from this RangeSet.
169 | 
170 |         >>> RangeSet("10-19 30-34").subtract(RangeSet("18-32"))
171 |         <RangeSet("10-17 33-34")>
172 |         >>> RangeSet("10-19 30-34").subtract(RangeSet("22-28"))
173 |         <RangeSet("10-19 30-34")>
174 |         """
175 | 
176 |         out = []
177 |         z = 0
178 |         for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
179 |                                 zip(other.data, itertools.cycle((-1, +1)))):
180 |             if (z == 0 and d == 1) or (z == 1 and d == -1):
181 |                 out.append(p)
182 |             z += d
183 |         return RangeSet(data=out)
184 | 
185 |     def overlaps(self, other):
186 |         """Returns true if the argument has a nonempty overlap with this
187 |         RangeSet.
188 | 
189 |         >>> RangeSet("10-19 30-34").overlaps(RangeSet("18-32"))
190 |         True
191 |         >>> RangeSet("10-19 30-34").overlaps(RangeSet("22-28"))
192 |         False
193 |         """
194 | 
195 |         # This is like intersect, but we can stop as soon as we discover the
196 |         # output is going to be nonempty.
197 |         z = 0
198 |         for _, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
199 |                                 zip(other.data, itertools.cycle((+1, -1)))):
200 |             if (z == 1 and d == 1) or (z == 2 and d == -1):
201 |                 return True
202 |             z += d
203 |         return False
204 | 
205 |     def size(self):
206 |         """Returns the total size of the RangeSet (ie, how many integers
207 |         are in the set).
208 | 
209 |         >>> RangeSet("10-19 30-34").size()
210 |         15
211 |         """
212 | 
213 |         total = 0
214 |         for i, p in enumerate(self.data):
215 |             if i % 2:
216 |                 total += p
217 |             else:
218 |                 total -= p
219 |         return total
220 | 
221 |     def map_within(self, other):
222 |         """'other' should be a subset of 'self'. Returns a RangeSet
223 |         representing what 'other' would get translated to if the integers
224 |         of 'self' were translated down to be contiguous starting at zero.
225 | 
226 |         >>> RangeSet("0-9").map_within(RangeSet("3-4"))
227 |         <RangeSet("3-4")>
228 |         >>> RangeSet("10-19").map_within(RangeSet("13-14"))
229 |         <RangeSet("3-4")>
230 |         >>> RangeSet("10-19 30-39").map_within(RangeSet("17-19 30-32"))
231 |         <RangeSet("7-12")>
232 |         >>> RangeSet("10-19 30-39").map_within(RangeSet("12-13 17-19 30-32"))
233 |         <RangeSet("2-3 7-12")>
234 |         """
235 | 
236 |         out = []
237 |         offset = 0
238 |         start = None
239 |         for p, d in heapq.merge(zip(self.data, itertools.cycle((-5, +5))),
240 |                                 zip(other.data, itertools.cycle((-1, +1)))):
241 |             if d == -5:
242 |                 start = p
243 |             elif d == +5:
244 |                 offset += p-start
245 |                 start = None
246 |             else:
247 |                 out.append(offset + p - start)
248 |         return RangeSet(data=out)
249 | 
250 |     def extend(self, n):
251 |         """Extend the RangeSet by 'n' blocks.
252 | 
253 |         The lower bound is guaranteed to be non-negative.
254 | 
255 |         >>> RangeSet("0-9").extend(1)
256 |         <RangeSet("0-10")>
257 |         >>> RangeSet("10-19").extend(15)
258 |         <RangeSet("0-34")>
259 |         >>> RangeSet("10-19 30-39").extend(4)
260 |         <RangeSet("6-23 26-43")>
261 |         >>> RangeSet("10-19 30-39").extend(10)
262 |         <RangeSet("0-49")>
263 |         """
264 |         out = self
265 |         for i in range(0, len(self.data), 2):
266 |             s, e = self.data[i:i+2]
267 |             s1 = max(0, s - n)
268 |             e1 = e + n
269 |             out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
270 |         return out
271 | 
272 |     def first(self, n):
273 |         """Return the RangeSet that contains at most the first 'n' integers.
274 | 
275 |         >>> RangeSet("0-9").first(1)
276 |         <RangeSet("0")>
277 |         >>> RangeSet("10-19").first(5)
278 |         <RangeSet("10-14")>
279 |         >>> RangeSet("10-19").first(15)
280 |         <RangeSet("10-19")>
281 |         >>> RangeSet("10-19 30-39").first(3)
282 |         <RangeSet("10-12")>
283 |         >>> RangeSet("10-19 30-39").first(15)
284 |         <RangeSet("10-19 30-34")>
285 |         >>> RangeSet("10-19 30-39").first(30)
286 |         <RangeSet("10-19 30-39")>
287 |         >>> RangeSet("0-9").first(0)
288 |         <RangeSet("")>
289 |         """
290 | 
291 |         if self.size() <= n:
292 |             return self
293 | 
294 |         out = []
295 |         for s, e in self:
296 |             if e - s >= n:
297 |                 out += (s, s+n)
298 |                 break
299 |             else:
300 |                 out += (s, e)
301 |                 n -= e - s
302 |         return RangeSet(data=out)
303 | 
304 | 
305 | if __name__ == "__main__":
306 |     import doctest
307 |     doctest.testmod()
308 | 
--------------------------------------------------------------------------------
/bin/linux/rimg2sdat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wahyu6070/Jancox-tool-linux/629346ff6559cd0e595f1b103f0dde6a62569284/bin/linux/rimg2sdat
--------------------------------------------------------------------------------
/bin/linux/sdat2img.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | #====================================================
4 | #          FILE: sdat2img.py
5 | #       AUTHORS: xpirt - luxi78 - howellzhu
6 | #          DATE: 2017-01-04 2:01:45 CEST
7 | #====================================================
8 | 
9 | import sys, os, errno
10 | 
11 | __version__ = '1.0'
12 | 
13 | if sys.hexversion < 0x02070000:
14 |     print >> sys.stderr, "Python 2.7 or newer is required."
15 |     try:
16 |         input = raw_input
17 |     except NameError: pass
18 |     input('Press ENTER to exit...')
19 |     sys.exit(1)
20 | else:
21 |     print('sdat2img binary - version: %s\n' % __version__)
22 | 
23 | try:
24 |     TRANSFER_LIST_FILE = str(sys.argv[1])
25 |     NEW_DATA_FILE = str(sys.argv[2])
26 | except IndexError:
27 |     print('\nUsage: sdat2img.py <transfer_list> <system_new_file> [system_img]\n')
28 |     print('    <transfer_list>: transfer list file')
29 |     print('    <system_new_file>: system new dat file')
30 |     print('    [system_img]: output system image\n\n')
31 |     print('Visit xda thread for more information.\n')
32 |     try:
33 |         input = raw_input
34 |     except NameError: pass
35 |     input('Press ENTER to exit...')
36 |     sys.exit()
37 | 
38 | try:
39 |     OUTPUT_IMAGE_FILE = str(sys.argv[3])
40 | except IndexError:
41 |     OUTPUT_IMAGE_FILE = 'system.img'
42 | 
43 | BLOCK_SIZE = 4096
44 | 
45 | def rangeset(src):
46 |     src_set = src.split(',')
47 |     num_set = [int(item) for item in src_set]
48 |     if len(num_set) != num_set[0]+1:
49 |         print('Error on parsing following data to rangeset:\n%s' % src)
50 |         sys.exit(1)
51 | 
52 |     return tuple ([ (num_set[i], num_set[i+1]) for i in range(1, len(num_set), 2) ])
53 | 
54 | def parse_transfer_list_file(path):
55 |     trans_list = open(TRANSFER_LIST_FILE, 'r')
56 | 
57 |     # First line in transfer list is the version number
58 |     version = int(trans_list.readline())
59 | 
60 |     # Second line in transfer list is the total number of blocks we expect to write
61 |     new_blocks = int(trans_list.readline())
62 | 
63 |     if version >= 2:
64 |         # Third line is how many stash entries are needed simultaneously
65 |         trans_list.readline()
66 |         # Fourth line is the maximum number of blocks that will be stashed simultaneously
67 |         trans_list.readline()
68 | 
69 |     # Subsequent lines are all individual transfer commands
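    # For example, a (hypothetical) version-4 transfer.list might begin:
    #
    #     4
    #     294602
    #     0
    #     0
    #     new 2,0,294602
    #
    # i.e. version 4, 294602 blocks to write, no stash lines in use, and a
    # single "new" command whose rangeset "2,0,294602" (a count followed by
    # begin/end pairs) covers blocks 0..294601, which is exactly the shape
    # rangeset() above parses.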
70 |     commands = []
71 |     for line in trans_list:
72 |         line = line.split(' ')
73 |         cmd = line[0]
74 |         if cmd in ['erase', 'new', 'zero']:
75 |             commands.append([cmd, rangeset(line[1])])
76 |         else:
77 |             # Skip lines starting with numbers, they are not commands anyway
78 |             if not cmd[0].isdigit():
79 |                 print('Command "%s" is not valid.' % cmd)
80 |                 trans_list.close()
81 |                 sys.exit(1)
82 | 
83 |     trans_list.close()
84 |     return version, new_blocks, commands
85 | 
86 | def main(argv):
87 |     version, new_blocks, commands = parse_transfer_list_file(TRANSFER_LIST_FILE)
88 | 
89 |     if version == 1:
90 |         print('Android Lollipop 5.0 detected!\n')
91 |     elif version == 2:
92 |         print('Android Lollipop 5.1 detected!\n')
93 |     elif version == 3:
94 |         print('Android Marshmallow 6.x detected!\n')
95 |     elif version == 4:
96 |         print('Android Nougat 7.x / Oreo 8.x detected!\n')
97 |     else:
98 |         print('Unknown Android version!\n')
99 | 
100 |     # Don't clobber existing files to avoid accidental data loss
101 |     try:
102 |         output_img = open(OUTPUT_IMAGE_FILE, 'xb') # 'x' mode raises EEXIST instead of silently overwriting
103 |     except IOError as e:
104 |         if e.errno == errno.EEXIST:
105 |             print('Error: the output file "{}" already exists'.format(e.filename))
106 |             print('Remove it, rename it, or choose a different file name.')
107 |             sys.exit(e.errno)
108 |         else:
109 |             raise
110 | 
111 |     new_data_file = open(NEW_DATA_FILE, 'rb')
112 |     all_block_sets = [i for command in commands for i in command[1]]
113 |     max_file_size = max(pair[1] for pair in all_block_sets)*BLOCK_SIZE
114 | 
115 |     for command in commands:
116 |         if command[0] == 'new':
117 |             for block in command[1]:
118 |                 begin = block[0]
119 |                 end = block[1]
120 |                 block_count = end - begin
121 |                 print('Copying {} blocks into position {}...'.format(block_count, begin))
122 | 
123 |                 # Position output file
124 |                 output_img.seek(begin*BLOCK_SIZE)
125 | 
126 |                 # Copy one block at a time
127 |                 while(block_count > 0):
128 |                     output_img.write(new_data_file.read(BLOCK_SIZE))
129 |                     block_count -= 1
130 |         else:
131 |             print('Skipping command %s...' % command[0])
132 | 
133 |     # Make file larger if necessary
134 |     if(output_img.tell() < max_file_size):
135 |         output_img.truncate(max_file_size)
136 | 
137 |     output_img.close()
138 |     new_data_file.close()
139 |     print('Done! Output image: %s' % os.path.realpath(output_img.name))
140 | 
141 | if __name__ == '__main__':
142 |     main(sys.argv)
143 | 
--------------------------------------------------------------------------------
/bin/linux/sparse_img.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2014 The Android Open Source Project
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #      http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | import bisect
16 | import os
17 | import sys
18 | import struct
19 | from hashlib import sha1
20 | 
21 | import rangelib
22 | 
23 | 
24 | class SparseImage(object):
25 |     """Wraps a sparse image file into an image object.
26 | 
27 |     Wraps a sparse image file (and optional file map and clobbered_blocks) into
28 |     an image object suitable for passing to BlockImageDiff. file_map contains
29 |     the mapping between files and their blocks.
class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should be always written to the target regardless of the
  old contents (i.e. copying instead of patching). clobbered_blocks should be
  in the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True):
    self.simg_f = f = open(simg_fn, mode)

    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    # [... truncated in this dump: header validation, the chunk walk that
    # builds care_map/offset_map, and the beginning of the range-reading
    # generator that the fragment below belongs to ...]

        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (ie, those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    if sys.version_info[:2] >= (3, 0):
      reference = bytes('\0' * self.blocksize, encoding="UTF-8")
    else:
      reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map. So
    # the whole system image will be treated as a single file. But for some
    # unknown bug, the updater will be killed due to OOM when writing back the
    # patched image to flash (observed on lenok-userdebug MEA49). Prior to
    # getting a real fix, we evenly divide the non-zero blocks into smaller
    # groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

        if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
          nonzero_groups.append(nonzero_blocks)
          # Clear the list.
          nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks
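
  # The synthetic "__ZERO", "__NONZERO-%d" and "__COPY" entries built above
  # sit in file_map alongside real file names; blockimgdiff.py reads them
  # back from file_map when it plans block transfers.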
  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}
--------------------------------------------------------------------------------
/bin/linux/utility.sh:
--------------------------------------------------------------------------------
#
# Jancox-tool
# By wahyu6070
#

edit=./editor
rominfo=$edit/rom.info

# functions
print(){
    echo "$1"
}
getprop() { grep "$1" "$2" | cut -d "=" -f 2; }

# Detect system-as-root ROMs, whose files sit one level deeper.
if [ -d $edit/system/system ]; then
    system=system/system
    systemroot=true
else
    system=system
    systemroot=false
fi

case $1 in
    rom-info)
        # grep -q only sets an exit status, so test it directly rather than
        # wrapping it in [ ].
        if grep -q secure=0 $edit/vendor/default.prop; then dmverity=true
        elif grep -q forceencrypt $edit/vendor/etc/fstab.qcom; then dmverity=true
        elif grep -q forcefdeorfbe $edit/vendor/etc/fstab.qcom; then dmverity=true
        elif grep -q fileencryption $edit/vendor/etc/fstab.qcom; then dmverity=true
        elif grep -q .dmverity=true $edit/vendor/etc/fstab.qcom; then dmverity=true
        #elif [ -f $edit/$system/recovery-from-boot.p ]; then dmverity=true
        else
            dmverity=false
        fi
        echo "- Android Version = $(getprop ro.build.version.release $edit/$system/build.prop) "
        echo "- Name ROM = $(getprop ro.build.display.id $edit/$system/build.prop) "
        echo "- Device = $(getprop ro.product.vendor.device $edit/vendor/build.prop) "
        echo "- System as-root = $systemroot "
        echo "- Dm-verity = $dmverity "
        ;;
esac
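
# Usage (as called from repack.sh and unpack.sh once ./editor is populated):
#   ./bin/linux/utility.sh rom-info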
--------------------------------------------------------------------------------
/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#jancox-tool
#by wahyu6070

# Fall back to sudo only when ./editor exists but is not ours to modify.
if [ ! -O editor ] && [ -d editor ] && [ ! -w editor ]; then sudo=sudo; fi

if [ -z "$1" ]; then
    echo " Jancox-Tool-Linux"
    echo " "
    echo "- Cleaning..."
    $sudo rm -rf editor new_rom.zip >/dev/null
    $sudo rm -rf ./bin/tmp 2>/dev/null
    echo "- Done"
    sleep 1s
fi
--------------------------------------------------------------------------------
/credits.txt:
--------------------------------------------------------------------------------
- Jamflux SUR windows : https://github.com/jamflux/SUR
- magiskboot : https://github.com/topjohnwu/Magisk
- make_ext4fs : https://github.com/superr/make_ext4fs
- busybox : https://busybox.net/
- busybox : https://github.com/Magisk-Modules-Repo/busybox-ndk
- 7za : https://www.7-zip.org/
- brotli : https://github.com/google/brotli
- img2sdat/sdat2img : https://github.com/xpirt/img2sdat
- python : based on Termux

jancox-tool :
linux : https://github.com/Wahyu6070/Jancox-tool-linux
android : https://github.com/Wahyu6070/Jancox-tool-android
windows :
youtube (guide) : https://www.youtube.com/c/wahyu6070
--------------------------------------------------------------------------------
/repack.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#jancox tool
#by wahyu6070

#util functions
. ./bin/linux/utility.sh

#
localdir=`cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd`
out=./output
tmp=./bin/tmp
bin=./bin/linux
prop=./bin/jancox.prop
clear
if [ ! -d $edit/system ]; then echo " Please Unpack !"; sleep 3s; exit; fi
echo " Jancox Tool by wahyu6070"
echo " Repack "
echo " "
$bin/utility.sh rom-info
echo " "
[ ! -d $tmp ] && mkdir $tmp
if [ -d $edit/system ]; then
    echo "- Repack system"
    # Image size = used space in bytes plus 5% headroom.
    size1=`du -sk $edit/system | awk '{$1*=1024;$1=int($1*1.05);printf $1}'`
    $bin/make_ext4fs -s -L system -T 2009110000 -S $edit/system_file_contexts -C $edit/system_fs_config -l $size1 -a system $tmp/system.img $edit/system/ > /dev/null
fi

if [ -d $edit/vendor ]; then
    echo "- Repack vendor"
    size2=`du -sk $edit/vendor | awk '{$1*=1024;$1=int($1*1.05);printf $1}'`
    $bin/make_ext4fs -s -L vendor -T 2009110000 -S $edit/vendor_file_contexts -C $edit/vendor_fs_config -l $size2 -a vendor $tmp/vendor.img $edit/vendor/ > /dev/null
fi

if [ -f $tmp/system.img ]; then
    echo "- Repack system.img"
    [ -f $tmp/system.new.dat ] && rm -rf $tmp/system.new.dat
    python3 $bin/img2sdat.py $tmp/system.img -o $tmp -v 4 > /dev/null
    [ -f $tmp/system.img ] && rm -rf $tmp/system.img
fi

if [ -f $tmp/vendor.img ]; then
    echo "- Repack vendor.img"
    [ -f $tmp/vendor.new.dat ] && rm -rf $tmp/vendor.new.dat
    python3 $bin/img2sdat.py $tmp/vendor.img -o $tmp -v 4 -p vendor > /dev/null
    [ -f $tmp/vendor.img ] && rm -rf $tmp/vendor.img
fi

#brotli level from bin/jancox.prop (-j removes the input, -w 24 sets the window)
brlvl=$(getprop brotli.level bin/jancox.prop)
#
if [ -f $tmp/system.new.dat ]; then
    echo "- Repack system.new.dat"
    [ -f $tmp/system.new.dat.br ] && rm -rf $tmp/system.new.dat.br
    $bin/brotli -$brlvl -j -w 24 $tmp/system.new.dat -o $tmp/system.new.dat.br
fi

if [ -f $tmp/vendor.new.dat ]; then
    echo "- Repack vendor.new.dat"
    [ -f $tmp/vendor.new.dat.br ] && rm -rf $tmp/vendor.new.dat.br
    $bin/brotli -$brlvl -j -w 24 $tmp/vendor.new.dat -o $tmp/vendor.new.dat.br
fi
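
# magiskboot repack reads kernel/ramdisk.cpio/etc. from the current
# directory, which is why the unpacked boot pieces are copied here first and
# the resulting new-boot.img is moved into $tmp afterwards.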
if [ -d $edit/boot ] && [ -f $edit/boot.img ]; then
    echo "- Repack boot"
    [ -f $edit/boot/kernel ] && cp -f $edit/boot/kernel ./
    [ -f $edit/boot/kernel_dtb ] && cp -f $edit/boot/kernel_dtb ./
    [ -f $edit/boot/ramdisk.cpio ] && cp -f $edit/boot/ramdisk.cpio ./
    [ -f $edit/boot/second ] && cp -f $edit/boot/second ./
    $bin/magiskboot repack $edit/boot.img >/dev/null 2>/dev/null
    sleep 1s
    [ -f new-boot.img ] && mv -f ./new-boot.img $tmp/boot.img
    rm -rf kernel kernel_dtb ramdisk.cpio second >/dev/null 2>/dev/null
fi

[ -d $edit/META-INF ] && cp -a $edit/META-INF $tmp/
[ -d $edit/install ] && cp -a $edit/install $tmp/
[ -d $edit/system2 ] && cp -a $edit/system2 $tmp/system
[ -d $edit/firmware-update ] && cp -a $edit/firmware-update $tmp/
[ -f $edit/compatibility.zip ] && cp -f $edit/compatibility.zip $tmp/
[ -f $edit/compatibility_no_nfc.zip ] && cp -f $edit/compatibility_no_nfc.zip $tmp/

# Pin every file to the configured date so repacked zips are reproducible.
datefile=$(getprop date.file.rom ./bin/jancox.prop)
touch -cd "$datefile 15:00:00" $tmp/*
touch -cd "$datefile 15:00:00" $tmp/firmware-update/*
touch -cd "$datefile 15:00:00" $tmp/META-INF/com/android/*
touch -cd "$datefile 15:00:00" $tmp/META-INF/com/google/android/*


if [ -d $tmp/META-INF ]; then
    echo "- Zipping"
    [ -f ./new_rom.zip ] && rm -rf ./new_rom.zip
    $bin/7za a -tzip new_rom.zip $tmp/* >/dev/null 2>/dev/null
fi


if [ -f ./new_rom.zip ]; then
    [ -d $tmp ] && rm -rf $tmp
    echo "- Repack done"
else
    echo "- Repack error"
fi
--------------------------------------------------------------------------------
/unpack.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#jancox-tool
#by wahyu6070

# Cache sudo credentials up front.
sudo echo

#
#util_functions

. ./bin/linux/utility.sh

#

clear
echo " Jancox Tool by wahyu6070"
echo " "
echo " Unpack"
echo " "
localdir=`cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd`
bin=./bin/linux
sdat2img=$bin/sdat2img.py
img=$bin/imgextractor.py
p7za=$bin/7za
brotli=$bin/brotli
edit=./editor
bb=./bin/linux/busybox
tmp=./bin/tmp

[ -d ./editor ] && sudo rm -rf editor; mkdir editor;
[ -d ./bin/tmp ] && sudo rm -rf ./bin/tmp; mkdir bin/tmp;
[ ! -d $tmp ] && mkdir $tmp;


chmod -R 777 $bin
if [ -f ./input.zip ]; then
    input=./input.zip
elif [ -f ./rom.zip ]; then
    input=./rom.zip
else
    input="$(zenity --title "Pick your ROM" --file-selection 2>/dev/null)"
fi

sleep 1s
if [ $(whoami) == root ]; then
    echo -n "Username Your PC : "
    read username
else
    username=$(whoami)
fi
echo "- Using input from $input "
if [ -f "$input" ]; then
    echo "- Extracting $input ..."
    $bb unzip -o "$input" -d $tmp >/dev/null
else
    echo "- File zip not found"
    exit
fi
if [ -f $tmp/system.new.dat.br ]; then
    echo "- Unpack system.new.dat.br..."
    $brotli -d $tmp/system.new.dat.br
    sudo rm -rf $tmp/system.new.dat.br
fi
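# brotli -d writes the decompressed copy next to the input minus the .br
# suffix, so system.new.dat.br above (and vendor.new.dat.br below) become
# the .new.dat files that sdat2img consumes.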
if [ -f $tmp/vendor.new.dat.br ]; then
    echo "- Unpack vendor.new.dat.br..."
    $brotli -d $tmp/vendor.new.dat.br
    sudo rm -rf $tmp/vendor.new.dat.br
fi

if [ -f $tmp/system.new.dat ]; then
    echo "- Unpack system.new.dat..."
    python3 $sdat2img $tmp/system.transfer.list $tmp/system.new.dat $tmp/system.img > /dev/null
    sudo rm -rf $tmp/system.transfer.list $tmp/system.new.dat $tmp/system.patch.dat
fi

if [ -f $tmp/vendor.new.dat ]; then
    echo "- Unpack vendor.new.dat..."
    python3 $sdat2img $tmp/vendor.transfer.list $tmp/vendor.new.dat $tmp/vendor.img > /dev/null
    sudo rm -rf $tmp/vendor.transfer.list $tmp/vendor.new.dat $tmp/vendor.patch.dat
fi

if [ -f $tmp/system.img ]; then
    echo "- Unpack system.img..."
    sudo python3 $img $tmp/system.img $edit/system > /dev/null
    sudo rm -rf $tmp/system.img
fi
if [ -f $tmp/vendor.img ]; then
    echo "- Unpack vendor.img..."
    sudo python3 $img $tmp/vendor.img $edit/vendor > /dev/null
    sudo rm -rf $tmp/vendor.img
fi
if [ -f $tmp/boot.img ]; then
    echo "- Unpack boot.img"
    # magiskboot unpack drops the boot pieces into the current directory;
    # collect them under $edit/boot.
    $bin/magiskboot unpack $tmp/boot.img 2>/dev/null
    [ ! -d $edit/boot ] && mkdir $edit/boot
    [ -f ramdisk.cpio ] && mv ramdisk.cpio $edit/boot/
    [ -f kernel ] && mv kernel $edit/boot/
    [ -f kernel_dtb ] && mv kernel_dtb $edit/boot/
    [ -f header ] && mv header $edit/boot.info
    [ -f second ] && mv second $edit/boot/
fi


echo "- Set permissions by $username..."
sudo chown -R $username:$username $edit 2>/dev/null
sudo chown -R $username:$username $tmp 2>/dev/null
[ -f $tmp/system_file_contexts ] && mv -f $tmp/system_file_contexts $edit/
[ -f $tmp/vendor_file_contexts ] && mv -f $tmp/vendor_file_contexts $edit/
[ -f $tmp/system_fs_config ] && mv -f $tmp/system_fs_config $edit/
[ -f $tmp/vendor_fs_config ] && mv -f $tmp/vendor_fs_config $edit/
[ -f $tmp/boot.img ] && mv -f $tmp/boot.img $edit/
[ -f $tmp/compatibility.zip ] && mv -f $tmp/compatibility.zip $edit/
[ -f $tmp/compatibility_no_nfc.zip ] && mv -f $tmp/compatibility_no_nfc.zip $edit/
[ -d $tmp/install ] && mv -f $tmp/install $edit/
[ -d $tmp/firmware-update ] && mv -f $tmp/firmware-update $edit/
[ -d $tmp/META-INF ] && mv -f $tmp/META-INF $edit/
[ -d $tmp/system ] && mv -f $tmp/system $edit/system2

sleep 2s
if [ -f $edit/$system/build.prop ]; then
    sudo rm -rf $tmp >/dev/null 2>/dev/null
    echo "- Unpack done"
    echo " "
    #$bin/utility.sh rom-info | tee -a $edit/rom-info
    . ./bin/linux/utility.sh rom-info
else
    echo "- Unpack done"
fi
sleep 1s
--------------------------------------------------------------------------------