├── README.md
├── add_img_extension_output.sh
├── bspatch
├── extract.py
├── lib64
│   ├── libbase.so
│   ├── libbrillo.so
│   ├── libc++.so
│   ├── libchrome.so
│   ├── libevent-host.so
│   ├── liblog.so
│   └── libprotobuf-cpp-lite.so
├── puffin
├── remove_img_extension_old.sh
└── update_payload
    ├── __init__.py
    ├── applier.py
    ├── checker.py
    ├── checker_unittest.py
    ├── common.py
    ├── error.py
    ├── format_utils.py
    ├── format_utils_unittest.py
    ├── histogram.py
    ├── histogram_unittest.py
    ├── payload-test-key.pem
    ├── payload-test-key.pub
    ├── payload.py
    ├── test_utils.py
    ├── update-payload-key.pub.pem
    └── update_metadata_pb2.py

/README.md:
--------------------------------------------------------------------------------
1 | ## System requirements
2 | 
3 | - Python 3, pip
4 | - Google protobuf for Python: `pip3 install protobuf`
5 | 
6 | ### Full OTA
7 | 
8 | - `LD_LIBRARY_PATH=./lib64/ ./extract.py --output_dir output/ payload.bin`
9 | - This extracts the images contained in payload.bin into the output/ directory.
10 | 
11 | ### Incremental OTA
12 | 
13 | - Copy the original images (from a full OTA, or dumped from a device) into the old/ folder, named by partition without a file extension (e.g. boot, system)
14 | - `LD_LIBRARY_PATH=./lib64/ ./extract.py --output_dir output/ --old_dir old/ payload.bin`
15 | 
16 | NOTE: incremental updates work in this fork, but the hash checks are turned off, so there is no error checking.
17 | Make sure the images in old/ come from exactly the ROM the incremental OTA was built to patch. The original project
18 | never worked for incremental updates because the hash of the incremental update and the prior full ROM always differed. Works as of 8/2021, tested on Op8T.
19 | 
--------------------------------------------------------------------------------
/add_img_extension_output.sh:
--------------------------------------------------------------------------------
1 | cd output
2 | find . -type f -exec mv '{}' '{}'.img \;
3 | cd ..
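# A hedged alternative (not part of the original script): skip files that
# already end in .img, so re-running the script doesn't stack extensions:
#   find . -type f ! -name '*.img' -exec mv '{}' '{}'.img \;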
4 | -------------------------------------------------------------------------------- /bspatch: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/bspatch -------------------------------------------------------------------------------- /extract.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import errno 5 | import os 6 | 7 | import update_payload 8 | from update_payload import applier 9 | 10 | 11 | def list_content(payload_file_name): 12 | with open(payload_file_name, 'rb') as payload_file: 13 | payload = update_payload.Payload(payload_file) 14 | payload.Init() 15 | 16 | for part in payload.manifest.partitions: 17 | print("{} ({} bytes)".format(part.partition_name, 18 | part.new_partition_info.size)) 19 | 20 | 21 | def extract(payload_file_name, output_dir="output", old_dir="old", partition_names=None): 22 | try: 23 | os.makedirs(output_dir) 24 | except OSError as e: 25 | if e.errno != errno.EEXIST: 26 | raise 27 | 28 | with open(payload_file_name, 'rb') as payload_file: 29 | payload = update_payload.Payload(payload_file) 30 | payload.Init() 31 | 32 | helper = applier.PayloadApplier(payload) 33 | for part in payload.manifest.partitions: 34 | if partition_names and part.partition_name not in partition_names: 35 | continue 36 | print("Extracting {}".format(part.partition_name)) 37 | output_file = os.path.join(output_dir, part.partition_name) 38 | if payload.IsDelta(): 39 | old_file = os.path.join(old_dir, part.partition_name) 40 | helper._ApplyToPartition( 41 | part.operations, part.partition_name, 42 | 'install_operations', output_file, 43 | part.new_partition_info, old_file, part.old_partition_info) 44 | else: 45 | helper._ApplyToPartition( 46 | part.operations, part.partition_name, 47 | 'install_operations', output_file, 48 | part.new_partition_info) 49 | 50 | if __name__ == '__main__': 51 | parser = argparse.ArgumentParser() 52 | parser.add_argument("payload", metavar="payload.bin", 53 | help="Path to the payload.bin") 54 | parser.add_argument("--output_dir", default="output", 55 | help="Output directory") 56 | parser.add_argument("--old_dir", default="old", 57 | help="Old directory") 58 | parser.add_argument("--partitions", type=str, nargs='+', 59 | help="Name of the partitions to extract") 60 | parser.add_argument("--list_partitions", action="store_true", 61 | help="List the partitions included in the payload.bin") 62 | 63 | args = parser.parse_args() 64 | if args.list_partitions: 65 | list_content(args.payload) 66 | else: 67 | extract(args.payload, args.output_dir, args.old_dir, args.partitions) 68 | -------------------------------------------------------------------------------- /lib64/libbase.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/libbase.so -------------------------------------------------------------------------------- /lib64/libbrillo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/libbrillo.so -------------------------------------------------------------------------------- /lib64/libc++.so: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/libc++.so -------------------------------------------------------------------------------- /lib64/libchrome.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/libchrome.so -------------------------------------------------------------------------------- /lib64/libevent-host.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/libevent-host.so -------------------------------------------------------------------------------- /lib64/liblog.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/liblog.so -------------------------------------------------------------------------------- /lib64/libprotobuf-cpp-lite.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/lib64/libprotobuf-cpp-lite.so -------------------------------------------------------------------------------- /puffin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrslezak/update_payload_extractor/21333635c1e021b754cb75fc6ae5efc85f9675a7/puffin -------------------------------------------------------------------------------- /remove_img_extension_old.sh: -------------------------------------------------------------------------------- 1 | cd old 2 | for i in ./*.img; do mv -i "$i" "${i%.img}"; done 3 | cd .. 4 | -------------------------------------------------------------------------------- /update_payload/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Library for processing, verifying and applying Chrome OS update payloads.""" 18 | 19 | # Just raise the interface classes to the root namespace. 
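#
# A minimal usage sketch (illustrative; mirrors what extract.py in this repo
# does, assuming a payload.bin in the working directory):
#
#   from update_payload import Payload
#   with open('payload.bin', 'rb') as f:
#     payload = Payload(f)
#     payload.Init()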
20 | from __future__ import absolute_import 21 | 22 | from update_payload.checker import CHECKS_TO_DISABLE 23 | from update_payload.error import PayloadError 24 | from update_payload.payload import Payload 25 | -------------------------------------------------------------------------------- /update_payload/applier.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Applying a Chrome OS update payload. 18 | 19 | This module is used internally by the main Payload class for applying an update 20 | payload. The interface for invoking the applier is as follows: 21 | 22 | applier = PayloadApplier(payload) 23 | applier.Run(...) 24 | 25 | """ 26 | 27 | from __future__ import absolute_import 28 | from __future__ import print_function 29 | 30 | import array 31 | import bz2 32 | import hashlib 33 | # Not everywhere we can have the lzma library so we ignore it if we didn't have 34 | # it because it is not going to be used. For example, 'cros flash' uses 35 | # devserver code which eventually loads this file, but the lzma library is not 36 | # included in the client test devices, and it is not necessary to do so. But 37 | # lzma is not used in 'cros flash' so it should be fine. Python 3.x include 38 | # lzma, but for backward compatibility with Python 2.7, backports-lzma is 39 | # needed. 40 | try: 41 | import lzma 42 | except ImportError: 43 | try: 44 | from backports import lzma 45 | except ImportError: 46 | pass 47 | import os 48 | import subprocess 49 | import sys 50 | import tempfile 51 | 52 | from update_payload import common 53 | from update_payload.error import PayloadError 54 | 55 | # 56 | # Helper functions. 57 | # 58 | def _VerifySha256(file_obj, expected_hash, name, length=-1): 59 | """Verifies the SHA256 hash of a file. 60 | 61 | Args: 62 | file_obj: file object to read 63 | expected_hash: the hash digest we expect to be getting 64 | name: name string of this hash, for error reporting 65 | length: precise length of data to verify (optional) 66 | 67 | Raises: 68 | PayloadError if computed hash doesn't match expected one, or if fails to 69 | read the specified length of data. 
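
  Example (illustrative; 'expected_digest' and 'part_size' are assumed to be
  taken from the payload's partition info):
    with open('boot', 'rb') as f:
      _VerifySha256(f, expected_digest, 'new boot', length=part_size)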
70 | """ 71 | 72 | hasher = hashlib.sha256() 73 | block_length = 1024 * 1024 74 | max_length = length if length >= 0 else sys.maxsize 75 | 76 | while max_length > 0: 77 | read_length = min(max_length, block_length) 78 | data = file_obj.read(read_length) 79 | if not data: 80 | break 81 | max_length -= len(data) 82 | hasher.update(data) 83 | 84 | if length >= 0 and max_length > 0: 85 | raise PayloadError( 86 | 'insufficient data (%d instead of %d) when verifying %s' % 87 | (length - max_length, length, name)) 88 | 89 | actual_hash = hasher.digest() 90 | if actual_hash != expected_hash: 91 | raise PayloadError('%s hash (%s) not as expected (%s)' % 92 | (name, common.FormatSha256(actual_hash), 93 | common.FormatSha256(expected_hash))) 94 | 95 | 96 | def _ReadExtents(file_obj, extents, block_size, max_length=-1): 97 | """Reads data from file as defined by extent sequence. 98 | 99 | This tries to be efficient by not copying data as it is read in chunks. 100 | 101 | Args: 102 | file_obj: file object 103 | extents: sequence of block extents (offset and length) 104 | block_size: size of each block 105 | max_length: maximum length to read (optional) 106 | 107 | Returns: 108 | A character array containing the concatenated read data. 109 | """ 110 | data = array.array('B') 111 | if max_length < 0: 112 | max_length = sys.maxsize 113 | for ex in extents: 114 | if max_length == 0: 115 | break 116 | read_length = min(max_length, ex.num_blocks * block_size) 117 | 118 | file_obj.seek(ex.start_block * block_size) 119 | data.fromfile(file_obj, read_length) 120 | 121 | max_length -= read_length 122 | 123 | return data 124 | 125 | 126 | def _WriteExtents(file_obj, data, extents, block_size, base_name): 127 | """Writes data to file as defined by extent sequence. 128 | 129 | This tries to be efficient by not copy data as it is written in chunks. 130 | 131 | Args: 132 | file_obj: file object 133 | data: data to write 134 | extents: sequence of block extents (offset and length) 135 | block_size: size of each block 136 | base_name: name string of extent sequence for error reporting 137 | 138 | Raises: 139 | PayloadError when things don't add up. 140 | """ 141 | data_offset = 0 142 | data_length = len(data) 143 | for ex, ex_name in common.ExtentIter(extents, base_name): 144 | if not data_length: 145 | raise PayloadError('%s: more write extents than data' % ex_name) 146 | write_length = min(data_length, ex.num_blocks * block_size) 147 | file_obj.seek(ex.start_block * block_size) 148 | file_obj.write(data[data_offset:(data_offset + write_length)]) 149 | 150 | data_offset += write_length 151 | data_length -= write_length 152 | 153 | if data_length: 154 | raise PayloadError('%s: more data than write extents' % base_name) 155 | 156 | 157 | def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1): 158 | """Translates an extent sequence into a bspatch-compatible string argument. 159 | 160 | Args: 161 | extents: sequence of block extents (offset and length) 162 | block_size: size of each block 163 | base_name: name string of extent sequence for error reporting 164 | data_length: the actual total length of the data in bytes (optional) 165 | 166 | Returns: 167 | A tuple consisting of (i) a string of the form 168 | "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed 169 | for filling the last extent, (iii) the length of the padding (zero means no 170 | padding is needed and the extents cover the full length of data). 
171 | 172 | Raises: 173 | PayloadError if data_length is too short or too long. 174 | """ 175 | arg = '' 176 | pad_off = pad_len = 0 177 | if data_length < 0: 178 | data_length = sys.maxsize 179 | for ex, ex_name in common.ExtentIter(extents, base_name): 180 | if not data_length: 181 | raise PayloadError('%s: more extents than total data length' % ex_name) 182 | 183 | start_byte = ex.start_block * block_size 184 | num_bytes = ex.num_blocks * block_size 185 | if data_length < num_bytes: 186 | # We're only padding a real extent. 187 | pad_off = start_byte + data_length 188 | pad_len = num_bytes - data_length 189 | num_bytes = data_length 190 | 191 | arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes) 192 | data_length -= num_bytes 193 | 194 | if data_length: 195 | raise PayloadError('%s: extents not covering full data length' % base_name) 196 | 197 | return arg, pad_off, pad_len 198 | 199 | 200 | # 201 | # Payload application. 202 | # 203 | class PayloadApplier(object): 204 | """Applying an update payload. 205 | 206 | This is a short-lived object whose purpose is to isolate the logic used for 207 | applying an update payload. 208 | """ 209 | 210 | def __init__(self, payload, bsdiff_in_place=True, bspatch_path="./bspatch", 211 | puffpatch_path="./puffin", truncate_to_expected_size=True): 212 | """Initialize the applier. 213 | 214 | Args: 215 | payload: the payload object to check 216 | bsdiff_in_place: whether to perform BSDIFF operation in-place (optional) 217 | bspatch_path: path to the bspatch binary (optional) 218 | puffpatch_path: path to the puffpatch binary (optional) 219 | truncate_to_expected_size: whether to truncate the resulting partitions 220 | to their expected sizes, as specified in the 221 | payload (optional) 222 | """ 223 | assert payload.is_init, 'uninitialized update payload' 224 | self.payload = payload 225 | self.block_size = payload.manifest.block_size 226 | self.minor_version = payload.manifest.minor_version 227 | self.bsdiff_in_place = bsdiff_in_place 228 | self.bspatch_path = bspatch_path or 'bspatch' 229 | self.puffpatch_path = puffpatch_path or 'puffin' 230 | self.truncate_to_expected_size = truncate_to_expected_size 231 | 232 | def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): 233 | """Applies a REPLACE{,_BZ,_XZ} operation. 234 | 235 | Args: 236 | op: the operation object 237 | op_name: name string for error reporting 238 | out_data: the data to be written 239 | part_file: the partition file object 240 | part_size: the size of the partition 241 | 242 | Raises: 243 | PayloadError if something goes wrong. 244 | """ 245 | block_size = self.block_size 246 | data_length = len(out_data) 247 | 248 | # Decompress data if needed. 249 | if op.type == common.OpType.REPLACE_BZ: 250 | out_data = bz2.decompress(out_data) 251 | data_length = len(out_data) 252 | elif op.type == common.OpType.REPLACE_XZ: 253 | # pylint: disable=no-member 254 | out_data = lzma.decompress(out_data) 255 | data_length = len(out_data) 256 | 257 | # Write data to blocks specified in dst extents. 258 | data_start = 0 259 | for ex, ex_name in common.ExtentIter(op.dst_extents, 260 | '%s.dst_extents' % op_name): 261 | start_block = ex.start_block 262 | num_blocks = ex.num_blocks 263 | count = num_blocks * block_size 264 | 265 | data_end = data_start + count 266 | 267 | # Make sure we're not running past partition boundary. 
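      # (Illustrative note: the last byte this extent touches is
      # (start_block + num_blocks) * block_size - 1, so any product greater
      # than part_size would write past the end of the partition.)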
268 |       if (start_block + num_blocks) * block_size > part_size:
269 |         raise PayloadError(
270 |             '%s: extent (%s) exceeds partition size (%d)' %
271 |             (ex_name, common.FormatExtent(ex, block_size),
272 |              part_size))
273 | 
274 |       # Make sure that we have enough data to write.
275 |       if data_end >= data_length + block_size:
276 |         raise PayloadError(
277 |             '%s: more dst blocks than data (even with padding)' % op_name)
278 | 
279 |       # Pad with zeros if necessary.
280 |       if data_end > data_length:
281 |         padding = data_end - data_length
282 |         out_data += b'\0' * padding
283 | 
284 |       self.payload.payload_file.seek(start_block * block_size)
285 |       part_file.seek(start_block * block_size)
286 |       part_file.write(out_data[data_start:data_end])
287 | 
288 |       data_start += count
289 | 
290 |     # Make sure we wrote all data.
291 |     if data_start < data_length:
292 |       raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
293 |                          (op_name, data_start, data_length))
294 | 
295 |   def _ApplyZeroOperation(self, op, op_name, part_file):
296 |     """Applies a ZERO operation.
297 | 
298 |     Args:
299 |       op: the operation object
300 |       op_name: name string for error reporting
301 |       part_file: the partition file object
302 | 
303 |     Raises:
304 |       PayloadError if something goes wrong.
305 |     """
306 |     block_size = self.block_size
307 |     base_name = '%s.dst_extents' % op_name
308 | 
309 |     # Iterate over the extents and write zero.
310 |     # pylint: disable=unused-variable
311 |     for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
312 |       part_file.seek(ex.start_block * block_size)
313 |       part_file.write(b'\0' * (ex.num_blocks * block_size))
314 | 
315 |   def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
316 |                                 new_part_file):
317 |     """Applies a SOURCE_COPY operation.
318 | 
319 |     Args:
320 |       op: the operation object
321 |       op_name: name string for error reporting
322 |       old_part_file: the old partition file object
323 |       new_part_file: the new partition file object
324 | 
325 |     Raises:
326 |       PayloadError if something goes wrong.
327 |     """
328 |     if not old_part_file:
329 |       raise PayloadError(
330 |           '%s: no source partition file provided for operation type (%d)' %
331 |           (op_name, op.type))
332 | 
333 |     block_size = self.block_size
334 | 
335 |     # Gather input raw data from src extents.
336 |     in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
337 | 
338 |     # Dump extracted data to dst extents.
339 |     _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
340 |                   '%s.dst_extents' % op_name)
341 | 
342 |   def _BytesInExtents(self, extents, base_name):
343 |     """Counts the length of extents in bytes.
344 | 
345 |     Args:
346 |       extents: The list of Extents.
347 |       base_name: For error reporting.
348 | 
349 |     Returns:
350 |       The number of bytes in extents.
351 |     """
352 | 
353 |     length = 0
354 |     # pylint: disable=unused-variable
355 |     for ex, ex_name in common.ExtentIter(extents, base_name):
356 |       length += ex.num_blocks * self.block_size
357 |     return length
358 | 
359 |   def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
360 |                           new_part_file):
361 |     """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.
362 | 
363 |     Args:
364 |       op: the operation object
365 |       op_name: name string for error reporting
366 |       patch_data: the binary patch content
367 |       old_part_file: the source partition file object
368 |       new_part_file: the target partition file object
369 | 
370 |     Raises:
371 |       PayloadError if something goes wrong.
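
    A sketch of the resulting tool invocation (illustrative, for the
    extents-aware path below):
      ./bspatch <old_part> <new_part> <patch> "off:len,..." "off:len,..."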
372 | """ 373 | if not old_part_file: 374 | raise PayloadError( 375 | '%s: no source partition file provided for operation type (%d)' % 376 | (op_name, op.type)) 377 | 378 | block_size = self.block_size 379 | 380 | # Dump patch data to file. 381 | with tempfile.NamedTemporaryFile(delete=False) as patch_file: 382 | patch_file_name = patch_file.name 383 | patch_file.write(patch_data) 384 | 385 | if (hasattr(new_part_file, 'fileno') and 386 | ((not old_part_file) or hasattr(old_part_file, 'fileno'))): 387 | # Construct input and output extents argument for bspatch. 388 | 389 | in_extents_arg, _, _ = _ExtentsToBspatchArg( 390 | op.src_extents, block_size, '%s.src_extents' % op_name, 391 | data_length=op.src_length if op.src_length else 392 | self._BytesInExtents(op.src_extents, "%s.src_extents")) 393 | out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg( 394 | op.dst_extents, block_size, '%s.dst_extents' % op_name, 395 | data_length=op.dst_length if op.dst_length else 396 | self._BytesInExtents(op.dst_extents, "%s.dst_extents")) 397 | 398 | new_file_name = new_part_file.name 399 | # Diff from source partition. 400 | old_file_name = old_part_file.name 401 | 402 | # In python3, file descriptors(fd) are not passed to child processes by 403 | # default. To pass the fds to the child processes, we need to set the flag 404 | # 'inheritable' in the fds and make the subprocess calls with the argument 405 | # close_fds set to False. 406 | if sys.version_info.major >= 3: 407 | os.set_inheritable(new_part_file.fileno(), True) 408 | os.set_inheritable(old_part_file.fileno(), True) 409 | 410 | if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): 411 | # Invoke bspatch on partition file with extents args. 412 | bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name, 413 | patch_file_name, in_extents_arg, out_extents_arg] 414 | subprocess.check_call(bspatch_cmd, close_fds=False) 415 | elif op.type == common.OpType.PUFFDIFF: 416 | # Invoke puffpatch on partition file with extents args. 417 | puffpatch_cmd = [self.puffpatch_path, 418 | "--operation=puffpatch", 419 | "--src_file=%s" % old_file_name, 420 | "--dst_file=%s" % new_file_name, 421 | "--patch_file=%s" % patch_file_name, 422 | "--src_extents=%s" % in_extents_arg, 423 | "--dst_extents=%s" % out_extents_arg] 424 | subprocess.check_call(puffpatch_cmd, close_fds=False) 425 | else: 426 | raise PayloadError("Unknown operation %s" % op.type) 427 | 428 | # Pad with zeros past the total output length. 429 | if pad_len: 430 | new_part_file.seek(pad_off) 431 | new_part_file.write(b'\0' * pad_len) 432 | else: 433 | # Gather input raw data and write to a temp file. 434 | input_part_file = old_part_file if old_part_file else new_part_file 435 | in_data = _ReadExtents(input_part_file, op.src_extents, block_size, 436 | max_length=op.src_length if op.src_length else 437 | self._BytesInExtents(op.src_extents, 438 | "%s.src_extents")) 439 | with tempfile.NamedTemporaryFile(delete=False) as in_file: 440 | in_file_name = in_file.name 441 | in_file.write(in_data) 442 | 443 | # Allocate temporary output file. 444 | with tempfile.NamedTemporaryFile(delete=False) as out_file: 445 | out_file_name = out_file.name 446 | 447 | if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): 448 | # Invoke bspatch. 449 | bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name, 450 | patch_file_name] 451 | subprocess.check_call(bspatch_cmd) 452 | elif op.type == common.OpType.PUFFDIFF: 453 | # Invoke puffpatch. 
454 | puffpatch_cmd = [self.puffpatch_path, 455 | "--operation=puffpatch", 456 | "--src_file=%s" % in_file_name, 457 | "--dst_file=%s" % out_file_name, 458 | "--patch_file=%s" % patch_file_name] 459 | subprocess.check_call(puffpatch_cmd) 460 | else: 461 | raise PayloadError("Unknown operation %s" % op.type) 462 | 463 | # Read output. 464 | with open(out_file_name, 'rb') as out_file: 465 | out_data = out_file.read() 466 | if len(out_data) != op.dst_length: 467 | raise PayloadError( 468 | '%s: actual patched data length (%d) not as expected (%d)' % 469 | (op_name, len(out_data), op.dst_length)) 470 | 471 | # Write output back to partition, with padding. 472 | unaligned_out_len = len(out_data) % block_size 473 | if unaligned_out_len: 474 | out_data += b'\0' * (block_size - unaligned_out_len) 475 | _WriteExtents(new_part_file, out_data, op.dst_extents, block_size, 476 | '%s.dst_extents' % op_name) 477 | 478 | # Delete input/output files. 479 | os.remove(in_file_name) 480 | os.remove(out_file_name) 481 | 482 | # Delete patch file. 483 | os.remove(patch_file_name) 484 | 485 | def _ApplyOperations(self, operations, base_name, old_part_file, 486 | new_part_file, part_size): 487 | """Applies a sequence of update operations to a partition. 488 | 489 | Args: 490 | operations: the sequence of operations 491 | base_name: the name of the operation sequence 492 | old_part_file: the old partition file object, open for reading/writing 493 | new_part_file: the new partition file object, open for reading/writing 494 | part_size: the partition size 495 | 496 | Raises: 497 | PayloadError if anything goes wrong while processing the payload. 498 | """ 499 | for op, op_name in common.OperationIter(operations, base_name): 500 | # Read data blob. 501 | data = self.payload.ReadDataBlob(op.data_offset, op.data_length) 502 | 503 | if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, 504 | common.OpType.REPLACE_XZ): 505 | self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size) 506 | elif op.type == common.OpType.ZERO: 507 | self._ApplyZeroOperation(op, op_name, new_part_file) 508 | elif op.type == common.OpType.SOURCE_COPY: 509 | self._ApplySourceCopyOperation(op, op_name, old_part_file, 510 | new_part_file) 511 | elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF, 512 | common.OpType.BROTLI_BSDIFF): 513 | self._ApplyDiffOperation(op, op_name, data, old_part_file, 514 | new_part_file) 515 | else: 516 | raise PayloadError('%s: unknown operation type (%d)' % 517 | (op_name, op.type)) 518 | 519 | def _ApplyToPartition(self, operations, part_name, base_name, 520 | new_part_file_name, new_part_info, 521 | old_part_file_name=None, old_part_info=None): 522 | """Applies an update to a partition. 523 | 524 | Args: 525 | operations: the sequence of update operations to apply 526 | part_name: the name of the partition, for error reporting 527 | base_name: the name of the operation sequence 528 | new_part_file_name: file name to write partition data to 529 | new_part_info: size and expected hash of dest partition 530 | old_part_file_name: file name of source partition (optional) 531 | old_part_info: size and expected hash of source partition (optional) 532 | 533 | Raises: 534 | PayloadError if anything goes wrong with the update. 535 | """ 536 | # Do we have a source partition? 537 | if old_part_file_name: 538 | # Verify the source partition. 
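      # Note: the verification below is deliberately commented out in this
      # fork (see README): incremental payloads would otherwise abort because
      # the hash of the provided old image does not match old_part_info.hash.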
539 | # with open(old_part_file_name, 'rb') as old_part_file: 540 | # _VerifySha256(old_part_file, old_part_info.hash, 541 | # 'old ' + part_name, length=old_part_info.size) 542 | new_part_file_mode = 'r+b' 543 | open(new_part_file_name, 'w').close() 544 | 545 | else: 546 | # We need to create/truncate the dst partition file. 547 | new_part_file_mode = 'w+b' 548 | 549 | # Apply operations. 550 | with open(new_part_file_name, new_part_file_mode) as new_part_file: 551 | old_part_file = (open(old_part_file_name, 'r+b') 552 | if old_part_file_name else None) 553 | try: 554 | self._ApplyOperations(operations, base_name, old_part_file, 555 | new_part_file, new_part_info.size) 556 | finally: 557 | if old_part_file: 558 | old_part_file.close() 559 | 560 | # Truncate the result, if so instructed. 561 | if self.truncate_to_expected_size: 562 | new_part_file.seek(0, 2) 563 | if new_part_file.tell() > new_part_info.size: 564 | new_part_file.seek(new_part_info.size) 565 | new_part_file.truncate() 566 | 567 | # Verify the resulting partition. 568 | # with open(new_part_file_name, 'rb') as new_part_file: 569 | # _VerifySha256(new_part_file, new_part_info.hash, 570 | # 'new ' + part_name, length=new_part_info.size) 571 | 572 | def Run(self, new_parts, old_parts=None): 573 | """Applier entry point, invoking all update operations. 574 | 575 | Args: 576 | new_parts: map of partition name to dest partition file 577 | old_parts: map of partition name to source partition file (optional) 578 | 579 | Raises: 580 | PayloadError if payload application failed. 581 | """ 582 | if old_parts is None: 583 | old_parts = {} 584 | 585 | self.payload.ResetFile() 586 | 587 | new_part_info = {} 588 | old_part_info = {} 589 | install_operations = [] 590 | 591 | manifest = self.payload.manifest 592 | for part in manifest.partitions: 593 | name = part.partition_name 594 | new_part_info[name] = part.new_partition_info 595 | old_part_info[name] = part.old_partition_info 596 | install_operations.append((name, part.operations)) 597 | 598 | part_names = set(new_part_info.keys()) # Equivalently, old_part_info.keys() 599 | 600 | # Make sure the arguments are sane and match the payload. 601 | new_part_names = set(new_parts.keys()) 602 | if new_part_names != part_names: 603 | raise PayloadError('missing dst partition(s) %s' % 604 | ', '.join(part_names - new_part_names)) 605 | 606 | old_part_names = set(old_parts.keys()) 607 | if part_names - old_part_names: 608 | if self.payload.IsDelta(): 609 | raise PayloadError('trying to apply a delta update without src ' 610 | 'partition(s) %s' % 611 | ', '.join(part_names - old_part_names)) 612 | elif old_part_names == part_names: 613 | if self.payload.IsFull(): 614 | raise PayloadError('trying to apply a full update onto src partitions') 615 | else: 616 | raise PayloadError('not all src partitions provided') 617 | 618 | for name, operations in install_operations: 619 | # Apply update to partition. 620 | self._ApplyToPartition( 621 | operations, name, '%s_install_operations' % name, new_parts[name], 622 | new_part_info[name], old_parts.get(name, None), old_part_info[name]) 623 | -------------------------------------------------------------------------------- /update_payload/checker.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Verifying the integrity of a Chrome OS update payload. 18 | 19 | This module is used internally by the main Payload class for verifying the 20 | integrity of an update payload. The interface for invoking the checks is as 21 | follows: 22 | 23 | checker = PayloadChecker(payload) 24 | checker.Run(...) 25 | """ 26 | 27 | from __future__ import absolute_import 28 | from __future__ import print_function 29 | 30 | import array 31 | import base64 32 | import collections 33 | import hashlib 34 | import itertools 35 | import os 36 | import subprocess 37 | 38 | from six.moves import range 39 | 40 | from update_payload import common 41 | from update_payload import error 42 | from update_payload import format_utils 43 | from update_payload import histogram 44 | from update_payload import update_metadata_pb2 45 | 46 | # 47 | # Constants. 48 | # 49 | 50 | _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block' 51 | _CHECK_PAYLOAD_SIG = 'payload-sig' 52 | CHECKS_TO_DISABLE = ( 53 | _CHECK_MOVE_SAME_SRC_DST_BLOCK, 54 | _CHECK_PAYLOAD_SIG, 55 | ) 56 | 57 | _TYPE_FULL = 'full' 58 | _TYPE_DELTA = 'delta' 59 | 60 | _DEFAULT_BLOCK_SIZE = 4096 61 | 62 | _DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem' 63 | _DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__), 64 | _DEFAULT_PUBKEY_BASE_NAME) 65 | 66 | # Supported minor version map to payload types allowed to be using them. 67 | _SUPPORTED_MINOR_VERSIONS = { 68 | 0: (_TYPE_FULL,), 69 | 2: (_TYPE_DELTA,), 70 | 3: (_TYPE_DELTA,), 71 | 4: (_TYPE_DELTA,), 72 | 5: (_TYPE_DELTA,), 73 | 6: (_TYPE_DELTA,), 74 | } 75 | 76 | 77 | # 78 | # Helper functions. 79 | # 80 | 81 | def _IsPowerOfTwo(val): 82 | """Returns True iff val is a power of two.""" 83 | return val > 0 and (val & (val - 1)) == 0 84 | 85 | 86 | def _AddFormat(format_func, value): 87 | """Adds a custom formatted representation to ordinary string representation. 88 | 89 | Args: 90 | format_func: A value formatter. 91 | value: Value to be formatted and returned. 92 | 93 | Returns: 94 | A string 'x (y)' where x = str(value) and y = format_func(value). 95 | """ 96 | ret = str(value) 97 | formatted_str = format_func(value) 98 | if formatted_str: 99 | ret += ' (%s)' % formatted_str 100 | return ret 101 | 102 | 103 | def _AddHumanReadableSize(size): 104 | """Adds a human readable representation to a byte size value.""" 105 | return _AddFormat(format_utils.BytesToHumanReadable, size) 106 | 107 | 108 | # 109 | # Payload report generator. 110 | # 111 | 112 | class _PayloadReport(object): 113 | """A payload report generator. 114 | 115 | A report is essentially a sequence of nodes, which represent data points. It 116 | is initialized to have a "global", untitled section. A node may be a 117 | sub-report itself. 118 | """ 119 | 120 | # Report nodes: Field, sub-report, section. 121 | class Node(object): 122 | """A report node interface.""" 123 | 124 | @staticmethod 125 | def _Indent(indent, line): 126 | """Indents a line by a given indentation amount. 127 | 128 | Args: 129 | indent: The indentation amount. 
130 | line: The line content (string). 131 | 132 | Returns: 133 | The properly indented line (string). 134 | """ 135 | return '%*s%s' % (indent, '', line) 136 | 137 | def GenerateLines(self, base_indent, sub_indent, curr_section): 138 | """Generates the report lines for this node. 139 | 140 | Args: 141 | base_indent: Base indentation for each line. 142 | sub_indent: Additional indentation for sub-nodes. 143 | curr_section: The current report section object. 144 | 145 | Returns: 146 | A pair consisting of a list of properly indented report lines and a new 147 | current section object. 148 | """ 149 | raise NotImplementedError 150 | 151 | class FieldNode(Node): 152 | """A field report node, representing a (name, value) pair.""" 153 | 154 | def __init__(self, name, value, linebreak, indent): 155 | super(_PayloadReport.FieldNode, self).__init__() 156 | self.name = name 157 | self.value = value 158 | self.linebreak = linebreak 159 | self.indent = indent 160 | 161 | def GenerateLines(self, base_indent, sub_indent, curr_section): 162 | """Generates a properly formatted 'name : value' entry.""" 163 | report_output = '' 164 | if self.name: 165 | report_output += self.name.ljust(curr_section.max_field_name_len) + ' :' 166 | value_lines = str(self.value).splitlines() 167 | if self.linebreak and self.name: 168 | report_output += '\n' + '\n'.join( 169 | ['%*s%s' % (self.indent, '', line) for line in value_lines]) 170 | else: 171 | if self.name: 172 | report_output += ' ' 173 | report_output += '%*s' % (self.indent, '') 174 | cont_line_indent = len(report_output) 175 | indented_value_lines = [value_lines[0]] 176 | indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line) 177 | for line in value_lines[1:]]) 178 | report_output += '\n'.join(indented_value_lines) 179 | 180 | report_lines = [self._Indent(base_indent, line + '\n') 181 | for line in report_output.split('\n')] 182 | return report_lines, curr_section 183 | 184 | class SubReportNode(Node): 185 | """A sub-report node, representing a nested report.""" 186 | 187 | def __init__(self, title, report): 188 | super(_PayloadReport.SubReportNode, self).__init__() 189 | self.title = title 190 | self.report = report 191 | 192 | def GenerateLines(self, base_indent, sub_indent, curr_section): 193 | """Recurse with indentation.""" 194 | report_lines = [self._Indent(base_indent, self.title + ' =>\n')] 195 | report_lines.extend(self.report.GenerateLines(base_indent + sub_indent, 196 | sub_indent)) 197 | return report_lines, curr_section 198 | 199 | class SectionNode(Node): 200 | """A section header node.""" 201 | 202 | def __init__(self, title=None): 203 | super(_PayloadReport.SectionNode, self).__init__() 204 | self.title = title 205 | self.max_field_name_len = 0 206 | 207 | def GenerateLines(self, base_indent, sub_indent, curr_section): 208 | """Dump a title line, return self as the (new) current section.""" 209 | report_lines = [] 210 | if self.title: 211 | report_lines.append(self._Indent(base_indent, 212 | '=== %s ===\n' % self.title)) 213 | return report_lines, self 214 | 215 | def __init__(self): 216 | self.report = [] 217 | self.last_section = self.global_section = self.SectionNode() 218 | self.is_finalized = False 219 | 220 | def GenerateLines(self, base_indent, sub_indent): 221 | """Generates the lines in the report, properly indented. 222 | 223 | Args: 224 | base_indent: The indentation used for root-level report lines. 225 | sub_indent: The indentation offset used for sub-reports. 226 | 227 | Returns: 228 | A list of indented report lines. 
229 | """ 230 | report_lines = [] 231 | curr_section = self.global_section 232 | for node in self.report: 233 | node_report_lines, curr_section = node.GenerateLines( 234 | base_indent, sub_indent, curr_section) 235 | report_lines.extend(node_report_lines) 236 | 237 | return report_lines 238 | 239 | def Dump(self, out_file, base_indent=0, sub_indent=2): 240 | """Dumps the report to a file. 241 | 242 | Args: 243 | out_file: File object to output the content to. 244 | base_indent: Base indentation for report lines. 245 | sub_indent: Added indentation for sub-reports. 246 | """ 247 | report_lines = self.GenerateLines(base_indent, sub_indent) 248 | if report_lines and not self.is_finalized: 249 | report_lines.append('(incomplete report)\n') 250 | 251 | for line in report_lines: 252 | out_file.write(line) 253 | 254 | def AddField(self, name, value, linebreak=False, indent=0): 255 | """Adds a field/value pair to the payload report. 256 | 257 | Args: 258 | name: The field's name. 259 | value: The field's value. 260 | linebreak: Whether the value should be printed on a new line. 261 | indent: Amount of extra indent for each line of the value. 262 | """ 263 | assert not self.is_finalized 264 | if name and self.last_section.max_field_name_len < len(name): 265 | self.last_section.max_field_name_len = len(name) 266 | self.report.append(self.FieldNode(name, value, linebreak, indent)) 267 | 268 | def AddSubReport(self, title): 269 | """Adds and returns a sub-report with a title.""" 270 | assert not self.is_finalized 271 | sub_report = self.SubReportNode(title, type(self)()) 272 | self.report.append(sub_report) 273 | return sub_report.report 274 | 275 | def AddSection(self, title): 276 | """Adds a new section title.""" 277 | assert not self.is_finalized 278 | self.last_section = self.SectionNode(title) 279 | self.report.append(self.last_section) 280 | 281 | def Finalize(self): 282 | """Seals the report, marking it as complete.""" 283 | self.is_finalized = True 284 | 285 | 286 | # 287 | # Payload verification. 288 | # 289 | 290 | class PayloadChecker(object): 291 | """Checking the integrity of an update payload. 292 | 293 | This is a short-lived object whose purpose is to isolate the logic used for 294 | verifying the integrity of an update payload. 295 | """ 296 | 297 | def __init__(self, payload, assert_type=None, block_size=0, 298 | allow_unhashed=False, disabled_tests=()): 299 | """Initialize the checker. 300 | 301 | Args: 302 | payload: The payload object to check. 303 | assert_type: Assert that payload is either 'full' or 'delta' (optional). 304 | block_size: Expected filesystem / payload block size (optional). 305 | allow_unhashed: Allow operations with unhashed data blobs. 306 | disabled_tests: Sequence of tests to disable. 307 | """ 308 | if not payload.is_init: 309 | raise ValueError('Uninitialized update payload.') 310 | 311 | # Set checker configuration. 312 | self.payload = payload 313 | self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE 314 | if not _IsPowerOfTwo(self.block_size): 315 | raise error.PayloadError( 316 | 'Expected block (%d) size is not a power of two.' % self.block_size) 317 | if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA): 318 | raise error.PayloadError('Invalid assert_type value (%r).' % 319 | assert_type) 320 | self.payload_type = assert_type 321 | self.allow_unhashed = allow_unhashed 322 | 323 | # Disable specific tests. 
324 | self.check_move_same_src_dst_block = ( 325 | _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests) 326 | self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests 327 | 328 | # Reset state; these will be assigned when the manifest is checked. 329 | self.sigs_offset = 0 330 | self.sigs_size = 0 331 | self.old_part_info = {} 332 | self.new_part_info = {} 333 | self.new_fs_sizes = collections.defaultdict(int) 334 | self.old_fs_sizes = collections.defaultdict(int) 335 | self.minor_version = None 336 | self.major_version = None 337 | 338 | @staticmethod 339 | def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str, 340 | msg_name=None, linebreak=False, indent=0): 341 | """Adds an element from a protobuf message to the payload report. 342 | 343 | Checks to see whether a message contains a given element, and if so adds 344 | the element value to the provided report. A missing mandatory element 345 | causes an exception to be raised. 346 | 347 | Args: 348 | msg: The message containing the element. 349 | name: The name of the element. 350 | report: A report object to add the element name/value to. 351 | is_mandatory: Whether or not this element must be present. 352 | is_submsg: Whether this element is itself a message. 353 | convert: A function for converting the element value for reporting. 354 | msg_name: The name of the message object (for error reporting). 355 | linebreak: Whether the value report should induce a line break. 356 | indent: Amount of indent used for reporting the value. 357 | 358 | Returns: 359 | A pair consisting of the element value and the generated sub-report for 360 | it (if the element is a sub-message, None otherwise). If the element is 361 | missing, returns (None, None). 362 | 363 | Raises: 364 | error.PayloadError if a mandatory element is missing. 365 | """ 366 | element_result = collections.namedtuple('element_result', ['msg', 'report']) 367 | 368 | if not msg.HasField(name): 369 | if is_mandatory: 370 | raise error.PayloadError('%smissing mandatory %s %r.' % 371 | (msg_name + ' ' if msg_name else '', 372 | 'sub-message' if is_submsg else 'field', 373 | name)) 374 | return element_result(None, None) 375 | 376 | value = getattr(msg, name) 377 | if is_submsg: 378 | return element_result(value, report and report.AddSubReport(name)) 379 | else: 380 | if report: 381 | report.AddField(name, convert(value), linebreak=linebreak, 382 | indent=indent) 383 | return element_result(value, None) 384 | 385 | @staticmethod 386 | def _CheckRepeatedElemNotPresent(msg, field_name, msg_name): 387 | """Checks that a repeated element is not specified in the message. 388 | 389 | Args: 390 | msg: The message containing the element. 391 | field_name: The name of the element. 392 | msg_name: The name of the message object (for error reporting). 393 | 394 | Raises: 395 | error.PayloadError if the repeated element is present or non-empty. 396 | """ 397 | if getattr(msg, field_name, None): 398 | raise error.PayloadError('%sfield %r not empty.' % 399 | (msg_name + ' ' if msg_name else '', field_name)) 400 | 401 | @staticmethod 402 | def _CheckElemNotPresent(msg, field_name, msg_name): 403 | """Checks that an element is not specified in the message. 404 | 405 | Args: 406 | msg: The message containing the element. 407 | field_name: The name of the element. 408 | msg_name: The name of the message object (for error reporting). 409 | 410 | Raises: 411 | error.PayloadError if the repeated element is present. 
412 | """ 413 | if msg.HasField(field_name): 414 | raise error.PayloadError('%sfield %r exists.' % 415 | (msg_name + ' ' if msg_name else '', field_name)) 416 | 417 | @staticmethod 418 | def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str, 419 | linebreak=False, indent=0): 420 | """Adds a mandatory field; returning first component from _CheckElem.""" 421 | return PayloadChecker._CheckElem(msg, field_name, report, True, False, 422 | convert=convert, msg_name=msg_name, 423 | linebreak=linebreak, indent=indent)[0] 424 | 425 | @staticmethod 426 | def _CheckOptionalField(msg, field_name, report, convert=str, 427 | linebreak=False, indent=0): 428 | """Adds an optional field; returning first component from _CheckElem.""" 429 | return PayloadChecker._CheckElem(msg, field_name, report, False, False, 430 | convert=convert, linebreak=linebreak, 431 | indent=indent)[0] 432 | 433 | @staticmethod 434 | def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name): 435 | """Adds a mandatory sub-message; wrapper for _CheckElem.""" 436 | return PayloadChecker._CheckElem(msg, submsg_name, report, True, True, 437 | msg_name) 438 | 439 | @staticmethod 440 | def _CheckOptionalSubMsg(msg, submsg_name, report): 441 | """Adds an optional sub-message; wrapper for _CheckElem.""" 442 | return PayloadChecker._CheckElem(msg, submsg_name, report, False, True) 443 | 444 | @staticmethod 445 | def _CheckPresentIff(val1, val2, name1, name2, obj_name): 446 | """Checks that val1 is None iff val2 is None. 447 | 448 | Args: 449 | val1: first value to be compared. 450 | val2: second value to be compared. 451 | name1: name of object holding the first value. 452 | name2: name of object holding the second value. 453 | obj_name: Name of the object containing these values. 454 | 455 | Raises: 456 | error.PayloadError if assertion does not hold. 457 | """ 458 | if None in (val1, val2) and val1 is not val2: 459 | present, missing = (name1, name2) if val2 is None else (name2, name1) 460 | raise error.PayloadError('%r present without %r%s.' % 461 | (present, missing, 462 | ' in ' + obj_name if obj_name else '')) 463 | 464 | @staticmethod 465 | def _CheckPresentIffMany(vals, name, obj_name): 466 | """Checks that a set of vals and names imply every other element. 467 | 468 | Args: 469 | vals: The set of values to be compared. 470 | name: The name of the objects holding the corresponding value. 471 | obj_name: Name of the object containing these values. 472 | 473 | Raises: 474 | error.PayloadError if assertion does not hold. 475 | """ 476 | if any(vals) and not all(vals): 477 | raise error.PayloadError('%r is not present in all values%s.' % 478 | (name, ' in ' + obj_name if obj_name else '')) 479 | 480 | @staticmethod 481 | def _Run(cmd, send_data=None): 482 | """Runs a subprocess, returns its output. 483 | 484 | Args: 485 | cmd: Sequence of command-line argument for invoking the subprocess. 486 | send_data: Data to feed to the process via its stdin. 487 | 488 | Returns: 489 | A tuple containing the stdout and stderr output of the process. 490 | """ 491 | run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE, 492 | stdout=subprocess.PIPE) 493 | try: 494 | result = run_process.communicate(input=send_data) 495 | finally: 496 | exit_code = run_process.wait() 497 | 498 | if exit_code: 499 | raise RuntimeError('Subprocess %r failed with code %r.' 
% 500 | (cmd, exit_code)) 501 | 502 | return result 503 | 504 | @staticmethod 505 | def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name): 506 | """Verifies an actual hash against a signed one. 507 | 508 | Args: 509 | sig_data: The raw signature data. 510 | pubkey_file_name: Public key used for verifying signature. 511 | actual_hash: The actual hash digest. 512 | sig_name: Signature name for error reporting. 513 | 514 | Raises: 515 | error.PayloadError if signature could not be verified. 516 | """ 517 | if len(sig_data) != 256: 518 | raise error.PayloadError( 519 | '%s: signature size (%d) not as expected (256).' % 520 | (sig_name, len(sig_data))) 521 | signed_data, _ = PayloadChecker._Run( 522 | ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name], 523 | send_data=sig_data) 524 | 525 | if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32: 526 | raise error.PayloadError('%s: unexpected signed data length (%d).' % 527 | (sig_name, len(signed_data))) 528 | 529 | if not signed_data.startswith(common.SIG_ASN1_HEADER): 530 | raise error.PayloadError('%s: not containing standard ASN.1 prefix.' % 531 | sig_name) 532 | 533 | signed_hash = signed_data[len(common.SIG_ASN1_HEADER):] 534 | if signed_hash != actual_hash: 535 | raise error.PayloadError( 536 | '%s: signed hash (%s) different from actual (%s).' % 537 | (sig_name, common.FormatSha256(signed_hash), 538 | common.FormatSha256(actual_hash))) 539 | 540 | @staticmethod 541 | def _CheckBlocksFitLength(length, num_blocks, block_size, length_name, 542 | block_name=None): 543 | """Checks that a given length fits given block space. 544 | 545 | This ensures that the number of blocks allocated is appropriate for the 546 | length of the data residing in these blocks. 547 | 548 | Args: 549 | length: The actual length of the data. 550 | num_blocks: The number of blocks allocated for it. 551 | block_size: The size of each block in bytes. 552 | length_name: Name of length (used for error reporting). 553 | block_name: Name of block (used for error reporting). 554 | 555 | Raises: 556 | error.PayloadError if the aforementioned invariant is not satisfied. 557 | """ 558 | # Check: length <= num_blocks * block_size. 559 | if length > num_blocks * block_size: 560 | raise error.PayloadError( 561 | '%s (%d) > num %sblocks (%d) * block_size (%d).' % 562 | (length_name, length, block_name or '', num_blocks, block_size)) 563 | 564 | # Check: length > (num_blocks - 1) * block_size. 565 | if length <= (num_blocks - 1) * block_size: 566 | raise error.PayloadError( 567 | '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' % 568 | (length_name, length, block_name or '', num_blocks - 1, block_size)) 569 | 570 | def _CheckManifestMinorVersion(self, report): 571 | """Checks the payload manifest minor_version field. 572 | 573 | Args: 574 | report: The report object to add to. 575 | 576 | Raises: 577 | error.PayloadError if any of the checks fail. 578 | """ 579 | self.minor_version = self._CheckOptionalField(self.payload.manifest, 580 | 'minor_version', report) 581 | if self.minor_version in _SUPPORTED_MINOR_VERSIONS: 582 | if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]: 583 | raise error.PayloadError( 584 | 'Minor version %d not compatible with payload type %s.' 
% 585 | (self.minor_version, self.payload_type)) 586 | elif self.minor_version is None: 587 | raise error.PayloadError('Minor version is not set.') 588 | else: 589 | raise error.PayloadError('Unsupported minor version: %d' % 590 | self.minor_version) 591 | 592 | def _CheckManifest(self, report, part_sizes=None): 593 | """Checks the payload manifest. 594 | 595 | Args: 596 | report: A report object to add to. 597 | part_sizes: Map of partition label to partition size in bytes. 598 | 599 | Returns: 600 | A tuple consisting of the partition block size used during the update 601 | (integer), the signatures block offset and size. 602 | 603 | Raises: 604 | error.PayloadError if any of the checks fail. 605 | """ 606 | self.major_version = self.payload.header.version 607 | 608 | part_sizes = part_sizes or collections.defaultdict(int) 609 | manifest = self.payload.manifest 610 | report.AddSection('manifest') 611 | 612 | # Check: block_size must exist and match the expected value. 613 | actual_block_size = self._CheckMandatoryField(manifest, 'block_size', 614 | report, 'manifest') 615 | if actual_block_size != self.block_size: 616 | raise error.PayloadError('Block_size (%d) not as expected (%d).' % 617 | (actual_block_size, self.block_size)) 618 | 619 | # Check: signatures_offset <==> signatures_size. 620 | self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset', 621 | report) 622 | self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size', 623 | report) 624 | self._CheckPresentIff(self.sigs_offset, self.sigs_size, 625 | 'signatures_offset', 'signatures_size', 'manifest') 626 | 627 | for part in manifest.partitions: 628 | name = part.partition_name 629 | self.old_part_info[name] = self._CheckOptionalSubMsg( 630 | part, 'old_partition_info', report) 631 | self.new_part_info[name] = self._CheckMandatorySubMsg( 632 | part, 'new_partition_info', report, 'manifest.partitions') 633 | 634 | # Check: Old-style partition infos should not be specified. 635 | for _, part in common.CROS_PARTITIONS: 636 | self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest') 637 | self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest') 638 | 639 | # Check: If old_partition_info is specified anywhere, it must be 640 | # specified everywhere. 641 | old_part_msgs = [part.msg for part in self.old_part_info.values() if part] 642 | self._CheckPresentIffMany(old_part_msgs, 'old_partition_info', 643 | 'manifest.partitions') 644 | 645 | is_delta = any(part and part.msg for part in self.old_part_info.values()) 646 | if is_delta: 647 | # Assert/mark delta payload. 648 | if self.payload_type == _TYPE_FULL: 649 | raise error.PayloadError( 650 | 'Apparent full payload contains old_{kernel,rootfs}_info.') 651 | self.payload_type = _TYPE_DELTA 652 | 653 | for part, (msg, part_report) in self.old_part_info.items(): 654 | # Check: {size, hash} present in old_{kernel,rootfs}_info. 655 | field = 'old_%s_info' % part 656 | self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', 657 | part_report, field) 658 | self._CheckMandatoryField(msg, 'hash', part_report, field, 659 | convert=common.FormatSha256) 660 | 661 | # Check: old_{kernel,rootfs} size must fit in respective partition. 662 | if self.old_fs_sizes[part] > part_sizes[part] > 0: 663 | raise error.PayloadError( 664 | 'Old %s content (%d) exceed partition size (%d).' % 665 | (part, self.old_fs_sizes[part], part_sizes[part])) 666 | else: 667 | # Assert/mark full payload. 
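      # (A manifest with no old_partition_info on any partition is treated as
      # a full payload; see the is_delta computation above.)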
668 | if self.payload_type == _TYPE_DELTA: 669 | raise error.PayloadError( 670 | 'Apparent delta payload missing old_{kernel,rootfs}_info.') 671 | self.payload_type = _TYPE_FULL 672 | 673 | # Check: new_{kernel,rootfs}_info present; contains {size, hash}. 674 | for part, (msg, part_report) in self.new_part_info.items(): 675 | field = 'new_%s_info' % part 676 | self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', 677 | part_report, field) 678 | self._CheckMandatoryField(msg, 'hash', part_report, field, 679 | convert=common.FormatSha256) 680 | 681 | # Check: new_{kernel,rootfs} size must fit in respective partition. 682 | if self.new_fs_sizes[part] > part_sizes[part] > 0: 683 | raise error.PayloadError( 684 | 'New %s content (%d) exceed partition size (%d).' % 685 | (part, self.new_fs_sizes[part], part_sizes[part])) 686 | 687 | # Check: minor_version makes sense for the payload type. This check should 688 | # run after the payload type has been set. 689 | self._CheckManifestMinorVersion(report) 690 | 691 | def _CheckLength(self, length, total_blocks, op_name, length_name): 692 | """Checks whether a length matches the space designated in extents. 693 | 694 | Args: 695 | length: The total length of the data. 696 | total_blocks: The total number of blocks in extents. 697 | op_name: Operation name (for error reporting). 698 | length_name: Length name (for error reporting). 699 | 700 | Raises: 701 | error.PayloadError is there a problem with the length. 702 | """ 703 | # Check: length is non-zero. 704 | if length == 0: 705 | raise error.PayloadError('%s: %s is zero.' % (op_name, length_name)) 706 | 707 | # Check that length matches number of blocks. 708 | self._CheckBlocksFitLength(length, total_blocks, self.block_size, 709 | '%s: %s' % (op_name, length_name)) 710 | 711 | def _CheckExtents(self, extents, usable_size, block_counters, name): 712 | """Checks a sequence of extents. 713 | 714 | Args: 715 | extents: The sequence of extents to check. 716 | usable_size: The usable size of the partition to which the extents apply. 717 | block_counters: Array of counters corresponding to the number of blocks. 718 | name: The name of the extent block. 719 | 720 | Returns: 721 | The total number of blocks in the extents. 722 | 723 | Raises: 724 | error.PayloadError if any of the entailed checks fails. 725 | """ 726 | total_num_blocks = 0 727 | for ex, ex_name in common.ExtentIter(extents, name): 728 | # Check: Mandatory fields. 729 | start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block', 730 | None, ex_name) 731 | num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None, 732 | ex_name) 733 | end_block = start_block + num_blocks 734 | 735 | # Check: num_blocks > 0. 736 | if num_blocks == 0: 737 | raise error.PayloadError('%s: extent length is zero.' % ex_name) 738 | 739 | # Check: Make sure we're within the partition limit. 740 | if usable_size and end_block * self.block_size > usable_size: 741 | raise error.PayloadError( 742 | '%s: extent (%s) exceeds usable partition size (%d).' % 743 | (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) 744 | 745 | # Record block usage. 746 | for i in range(start_block, end_block): 747 | block_counters[i] += 1 748 | 749 | total_num_blocks += num_blocks 750 | 751 | return total_num_blocks 752 | 753 | def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name): 754 | """Specific checks for REPLACE/REPLACE_BZ/REPLACE_XZ operations. 755 | 756 | Args: 757 | op: The operation object from the manifest. 
758 | data_length: The length of the data blob associated with the operation. 759 | total_dst_blocks: Total number of blocks in dst_extents. 760 | op_name: Operation name for error reporting. 761 | 762 | Raises: 763 | error.PayloadError if any check fails. 764 | """ 765 | # Check: total_dst_blocks is not a floating point. 766 | if isinstance(total_dst_blocks, float): 767 | raise error.PayloadError('%s: contains invalid data type of ' 768 | 'total_dst_blocks.' % op_name) 769 | 770 | # Check: Does not contain src extents. 771 | if op.src_extents: 772 | raise error.PayloadError('%s: contains src_extents.' % op_name) 773 | 774 | # Check: Contains data. 775 | if data_length is None: 776 | raise error.PayloadError('%s: missing data_{offset,length}.' % op_name) 777 | 778 | if op.type == common.OpType.REPLACE: 779 | PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks, 780 | self.block_size, 781 | op_name + '.data_length', 'dst') 782 | else: 783 | # Check: data_length must be smaller than the allotted dst blocks. 784 | if data_length >= total_dst_blocks * self.block_size: 785 | raise error.PayloadError( 786 | '%s: data_length (%d) must be less than allotted dst block ' 787 | 'space (%d * %d).' % 788 | (op_name, data_length, total_dst_blocks, self.block_size)) 789 | 790 | def _CheckZeroOperation(self, op, op_name): 791 | """Specific checks for ZERO operations. 792 | 793 | Args: 794 | op: The operation object from the manifest. 795 | op_name: Operation name for error reporting. 796 | 797 | Raises: 798 | error.PayloadError if any check fails. 799 | """ 800 | # Check: Does not contain src extents, data_length and data_offset. 801 | if op.src_extents: 802 | raise error.PayloadError('%s: contains src_extents.' % op_name) 803 | if op.data_length: 804 | raise error.PayloadError('%s: contains data_length.' % op_name) 805 | if op.data_offset: 806 | raise error.PayloadError('%s: contains data_offset.' % op_name) 807 | 808 | def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): 809 | """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF 810 | operations. 811 | 812 | Args: 813 | op: The operation. 814 | data_length: The length of the data blob associated with the operation. 815 | total_dst_blocks: Total number of blocks in dst_extents. 816 | op_name: Operation name for error reporting. 817 | 818 | Raises: 819 | error.PayloadError if any check fails. 820 | """ 821 | # Check: data_{offset,length} present. 822 | if data_length is None: 823 | raise error.PayloadError('%s: missing data_{offset,length}.' % op_name) 824 | 825 | # Check: data_length is strictly smaller than the allotted dst blocks. 826 | if data_length >= total_dst_blocks * self.block_size: 827 | raise error.PayloadError( 828 | '%s: data_length (%d) must be smaller than allotted dst space ' 829 | '(%d * %d = %d).' % 830 | (op_name, data_length, total_dst_blocks, self.block_size, 831 | total_dst_blocks * self.block_size)) 832 | 833 | # Check the existence of src_length and dst_length for legacy bsdiffs. 834 | if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3: 835 | if not op.HasField('src_length') or not op.HasField('dst_length'): 836 | raise error.PayloadError('%s: require {src,dst}_length.' % op_name) 837 | else: 838 | if op.HasField('src_length') or op.HasField('dst_length'): 839 | raise error.PayloadError('%s: unneeded {src,dst}_length.' 
% op_name) 840 | 841 | def _CheckSourceCopyOperation(self, data_offset, total_src_blocks, 842 | total_dst_blocks, op_name): 843 | """Specific checks for SOURCE_COPY. 844 | 845 | Args: 846 | data_offset: The offset of a data blob for the operation. 847 | total_src_blocks: Total number of blocks in src_extents. 848 | total_dst_blocks: Total number of blocks in dst_extents. 849 | op_name: Operation name for error reporting. 850 | 851 | Raises: 852 | error.PayloadError if any check fails. 853 | """ 854 | # Check: No data_{offset,length}. 855 | if data_offset is not None: 856 | raise error.PayloadError('%s: contains data_{offset,length}.' % op_name) 857 | 858 | # Check: total_src_blocks == total_dst_blocks. 859 | if total_src_blocks != total_dst_blocks: 860 | raise error.PayloadError( 861 | '%s: total src blocks (%d) != total dst blocks (%d).' % 862 | (op_name, total_src_blocks, total_dst_blocks)) 863 | 864 | def _CheckAnySourceOperation(self, op, total_src_blocks, op_name): 865 | """Specific checks for SOURCE_* operations. 866 | 867 | Args: 868 | op: The operation object from the manifest. 869 | total_src_blocks: Total number of blocks in src_extents. 870 | op_name: Operation name for error reporting. 871 | 872 | Raises: 873 | error.PayloadError if any check fails. 874 | """ 875 | # Check: total_src_blocks != 0. 876 | if total_src_blocks == 0: 877 | raise error.PayloadError('%s: no src blocks in a source op.' % op_name) 878 | 879 | # Check: src_sha256_hash present in minor version >= 3. 880 | if self.minor_version >= 3 and op.src_sha256_hash is None: 881 | raise error.PayloadError('%s: source hash missing.' % op_name) 882 | 883 | def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters, 884 | old_usable_size, new_usable_size, prev_data_offset, 885 | blob_hash_counts): 886 | """Checks a single update operation. 887 | 888 | Args: 889 | op: The operation object. 890 | op_name: Operation name string for error reporting. 891 | old_block_counters: Arrays of block read counters. 892 | new_block_counters: Arrays of block write counters. 893 | old_usable_size: The overall usable size for src data in bytes. 894 | new_usable_size: The overall usable size for dst data in bytes. 895 | prev_data_offset: Offset of last used data bytes. 896 | blob_hash_counts: Counters for hashed/unhashed blobs. 897 | 898 | Returns: 899 | The amount of data blob associated with the operation. 900 | 901 | Raises: 902 | error.PayloadError if any check has failed. 903 | """ 904 | # Check extents. 905 | total_src_blocks = self._CheckExtents( 906 | op.src_extents, old_usable_size, old_block_counters, 907 | op_name + '.src_extents') 908 | total_dst_blocks = self._CheckExtents( 909 | op.dst_extents, new_usable_size, new_block_counters, 910 | op_name + '.dst_extents') 911 | 912 | # Check: data_offset present <==> data_length present. 913 | data_offset = self._CheckOptionalField(op, 'data_offset', None) 914 | data_length = self._CheckOptionalField(op, 'data_length', None) 915 | self._CheckPresentIff(data_offset, data_length, 'data_offset', 916 | 'data_length', op_name) 917 | 918 | # Check: At least one dst_extent. 919 | if not op.dst_extents: 920 | raise error.PayloadError('%s: dst_extents is empty.' % op_name) 921 | 922 | # Check {src,dst}_length, if present. 
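    # --- Editor's aside (illustrative) ---
    # _CheckLength (above) ties a declared byte length to the extent math via
    # _CheckBlocksFitLength; assuming that helper requires the length to
    # occupy exactly the allotted blocks, with a 4096-byte block size:
    #   src_length = 12288, total_src_blocks = 3  -> OK (12288 == 3 * 4096)
    #   src_length = 12289, total_src_blocks = 3  -> PayloadError (needs 4)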
923 | if op.HasField('src_length'): 924 | self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length') 925 | if op.HasField('dst_length'): 926 | self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length') 927 | 928 | if op.HasField('data_sha256_hash'): 929 | blob_hash_counts['hashed'] += 1 930 | 931 | # Check: Operation carries data. 932 | if data_offset is None: 933 | raise error.PayloadError( 934 | '%s: data_sha256_hash present but no data_{offset,length}.' % 935 | op_name) 936 | 937 | # Check: Hash verifies correctly. 938 | actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset, 939 | data_length)) 940 | if op.data_sha256_hash != actual_hash.digest(): 941 | raise error.PayloadError( 942 | '%s: data_sha256_hash (%s) does not match actual hash (%s).' % 943 | (op_name, common.FormatSha256(op.data_sha256_hash), 944 | common.FormatSha256(actual_hash.digest()))) 945 | elif data_offset is not None: 946 | if self.allow_unhashed: 947 | blob_hash_counts['unhashed'] += 1 948 | else: 949 | raise error.PayloadError('%s: unhashed operation not allowed.' % 950 | op_name) 951 | 952 | if data_offset is not None: 953 | # Check: Contiguous use of data section. 954 | if data_offset != prev_data_offset: 955 | raise error.PayloadError( 956 | '%s: data offset (%d) not matching amount used so far (%d).' % 957 | (op_name, data_offset, prev_data_offset)) 958 | 959 | # Type-specific checks. 960 | if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, 961 | common.OpType.REPLACE_XZ): 962 | self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) 963 | elif op.type == common.OpType.ZERO and self.minor_version >= 4: 964 | self._CheckZeroOperation(op, op_name) 965 | elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2: 966 | self._CheckSourceCopyOperation(data_offset, total_src_blocks, 967 | total_dst_blocks, op_name) 968 | self._CheckAnySourceOperation(op, total_src_blocks, op_name) 969 | elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2: 970 | self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) 971 | self._CheckAnySourceOperation(op, total_src_blocks, op_name) 972 | elif op.type == common.OpType.BROTLI_BSDIFF and self.minor_version >= 4: 973 | self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) 974 | self._CheckAnySourceOperation(op, total_src_blocks, op_name) 975 | elif op.type == common.OpType.PUFFDIFF and self.minor_version >= 5: 976 | self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) 977 | self._CheckAnySourceOperation(op, total_src_blocks, op_name) 978 | else: 979 | raise error.PayloadError( 980 | 'Operation %s (type %d) not allowed in minor version %d' % 981 | (op_name, op.type, self.minor_version)) 982 | return data_length if data_length is not None else 0 983 | 984 | def _SizeToNumBlocks(self, size): 985 | """Returns the number of blocks needed to contain a given byte size.""" 986 | return (size + self.block_size - 1) // self.block_size 987 | 988 | def _AllocBlockCounters(self, total_size): 989 | """Returns a freshly initialized array of block counters. 990 | 991 | Note that the generated array is not portable as is due to byte-ordering 992 | issues, hence it should not be serialized. 993 | 994 | Args: 995 | total_size: The total block size in bytes. 996 | 997 | Returns: 998 | An array of unsigned short elements initialized to zero, one for each of 999 | the blocks necessary for containing the partition. 
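    For example (illustrative): with the common 4096-byte block size, a
    total_size of 10240 bytes yields (10240 + 4095) // 4096 = 3 counters,
    per _SizeToNumBlocks() above.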
1000 | """ 1001 | return array.array('H', 1002 | itertools.repeat(0, self._SizeToNumBlocks(total_size))) 1003 | 1004 | def _CheckOperations(self, operations, report, base_name, old_fs_size, 1005 | new_fs_size, old_usable_size, new_usable_size, 1006 | prev_data_offset): 1007 | """Checks a sequence of update operations. 1008 | 1009 | Args: 1010 | operations: The sequence of operations to check. 1011 | report: The report object to add to. 1012 | base_name: The name of the operation block. 1013 | old_fs_size: The old filesystem size in bytes. 1014 | new_fs_size: The new filesystem size in bytes. 1015 | old_usable_size: The overall usable size of the old partition in bytes. 1016 | new_usable_size: The overall usable size of the new partition in bytes. 1017 | prev_data_offset: Offset of last used data bytes. 1018 | 1019 | Returns: 1020 | The total data blob size used. 1021 | 1022 | Raises: 1023 | error.PayloadError if any of the checks fails. 1024 | """ 1025 | # The total size of data blobs used by operations scanned thus far. 1026 | total_data_used = 0 1027 | # Counts of specific operation types. 1028 | op_counts = { 1029 | common.OpType.REPLACE: 0, 1030 | common.OpType.REPLACE_BZ: 0, 1031 | common.OpType.REPLACE_XZ: 0, 1032 | common.OpType.ZERO: 0, 1033 | common.OpType.SOURCE_COPY: 0, 1034 | common.OpType.SOURCE_BSDIFF: 0, 1035 | common.OpType.PUFFDIFF: 0, 1036 | common.OpType.BROTLI_BSDIFF: 0, 1037 | } 1038 | # Total blob sizes for each operation type. 1039 | op_blob_totals = { 1040 | common.OpType.REPLACE: 0, 1041 | common.OpType.REPLACE_BZ: 0, 1042 | common.OpType.REPLACE_XZ: 0, 1043 | # SOURCE_COPY operations don't have blobs. 1044 | common.OpType.SOURCE_BSDIFF: 0, 1045 | common.OpType.PUFFDIFF: 0, 1046 | common.OpType.BROTLI_BSDIFF: 0, 1047 | } 1048 | # Counts of hashed vs unhashed operations. 1049 | blob_hash_counts = { 1050 | 'hashed': 0, 1051 | 'unhashed': 0, 1052 | } 1053 | 1054 | # Allocate old and new block counters. 1055 | old_block_counters = (self._AllocBlockCounters(old_usable_size) 1056 | if old_fs_size else None) 1057 | new_block_counters = self._AllocBlockCounters(new_usable_size) 1058 | 1059 | # Process and verify each operation. 1060 | op_num = 0 1061 | for op, op_name in common.OperationIter(operations, base_name): 1062 | op_num += 1 1063 | 1064 | # Check: Type is valid. 1065 | if op.type not in op_counts: 1066 | raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type)) 1067 | op_counts[op.type] += 1 1068 | 1069 | curr_data_used = self._CheckOperation( 1070 | op, op_name, old_block_counters, new_block_counters, 1071 | old_usable_size, new_usable_size, 1072 | prev_data_offset + total_data_used, blob_hash_counts) 1073 | if curr_data_used: 1074 | op_blob_totals[op.type] += curr_data_used 1075 | total_data_used += curr_data_used 1076 | 1077 | # Report totals and breakdown statistics. 1078 | report.AddField('total operations', op_num) 1079 | report.AddField( 1080 | None, 1081 | histogram.Histogram.FromCountDict(op_counts, 1082 | key_names=common.OpType.NAMES), 1083 | indent=1) 1084 | report.AddField('total blobs', sum(blob_hash_counts.values())) 1085 | report.AddField(None, 1086 | histogram.Histogram.FromCountDict(blob_hash_counts), 1087 | indent=1) 1088 | report.AddField('total blob size', _AddHumanReadableSize(total_data_used)) 1089 | report.AddField( 1090 | None, 1091 | histogram.Histogram.FromCountDict(op_blob_totals, 1092 | formatter=_AddHumanReadableSize, 1093 | key_names=common.OpType.NAMES), 1094 | indent=1) 1095 | 1096 | # Report read/write histograms. 
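    # --- Editor's aside (illustrative) ---
    # Each slot in new_block_counters records how many operations wrote that
    # block, and Histogram.FromKeyList buckets identical counts: counters of
    # [1, 1, 1, 2, 0] become {0: 1, 1: 3, 2: 1}. The full-payload check below
    # then demands that every block was written exactly once, i.e. that the
    # histogram keys collapse to [1].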
1097 | if old_block_counters: 1098 | report.AddField('block read hist', 1099 | histogram.Histogram.FromKeyList(old_block_counters), 1100 | linebreak=True, indent=1) 1101 | 1102 | new_write_hist = histogram.Histogram.FromKeyList( 1103 | new_block_counters[:self._SizeToNumBlocks(new_fs_size)]) 1104 | report.AddField('block write hist', new_write_hist, linebreak=True, 1105 | indent=1) 1106 | 1107 | # Check: Full update must write each dst block once. 1108 | if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]: 1109 | raise error.PayloadError( 1110 | '%s: not all blocks written exactly once during full update.' % 1111 | base_name) 1112 | 1113 | return total_data_used 1114 | 1115 | def _CheckSignatures(self, report, pubkey_file_name): 1116 | """Checks a payload's signature block.""" 1117 | sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size) 1118 | sigs = update_metadata_pb2.Signatures() 1119 | sigs.ParseFromString(sigs_raw) 1120 | report.AddSection('signatures') 1121 | 1122 | # Check: At least one signature present. 1123 | if not sigs.signatures: 1124 | raise error.PayloadError('Signature block is empty.') 1125 | 1126 | # Check that we don't have the signature operation blob at the end (used to 1127 | # be for major version 1). 1128 | last_partition = self.payload.manifest.partitions[-1] 1129 | if last_partition.operations: 1130 | last_op = last_partition.operations[-1] 1131 | # Check: signatures_{offset,size} must match the last (fake) operation. 1132 | if (last_op.type == common.OpType.REPLACE and 1133 | last_op.data_offset == self.sigs_offset and 1134 | last_op.data_length == self.sigs_size): 1135 | raise error.PayloadError('It seems like the last operation is the ' 1136 | 'signature blob. This is an invalid payload.') 1137 | 1138 | # Compute the checksum of all data up to signature blob. 1139 | # TODO(garnold) we're re-reading the whole data section into a string 1140 | # just to compute the checksum; instead, we could do it incrementally as 1141 | # we read the blobs one-by-one, under the assumption that we're reading 1142 | # them in order (which currently holds). This should be reconsidered. 1143 | payload_hasher = self.payload.manifest_hasher.copy() 1144 | common.Read(self.payload.payload_file, self.sigs_offset, 1145 | offset=self.payload.data_offset, hasher=payload_hasher) 1146 | 1147 | for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'): 1148 | sig_report = report.AddSubReport(sig_name) 1149 | 1150 | # Check: Signature contains mandatory fields. 1151 | self._CheckMandatoryField(sig, 'version', sig_report, sig_name) 1152 | self._CheckMandatoryField(sig, 'data', None, sig_name) 1153 | sig_report.AddField('data len', len(sig.data)) 1154 | 1155 | # Check: Signatures pertains to actual payload hash. 1156 | if sig.version == 1: 1157 | self._CheckSha256Signature(sig.data, pubkey_file_name, 1158 | payload_hasher.digest(), sig_name) 1159 | else: 1160 | raise error.PayloadError('Unknown signature version (%d).' % 1161 | sig.version) 1162 | 1163 | def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, 1164 | part_sizes=None, report_out_file=None): 1165 | """Checker entry point, invoking all checks. 1166 | 1167 | Args: 1168 | pubkey_file_name: Public key used for signature verification. 1169 | metadata_sig_file: Metadata signature, if verification is desired. 1170 | metadata_size: Metadata size, if verification is desired. 
      part_sizes: Mapping of partition label to size in bytes (default: infer
        based on payload type and version or filesystem).
      report_out_file: File object to dump the report to.

    Raises:
      error.PayloadError if payload verification failed.
    """
    if not pubkey_file_name:
      pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME

    report = _PayloadReport()

    # Get payload file size.
    self.payload.payload_file.seek(0, 2)
    payload_file_size = self.payload.payload_file.tell()
    self.payload.ResetFile()

    try:
      # Check metadata_size (if provided).
      if metadata_size and self.payload.metadata_size != metadata_size:
        raise error.PayloadError('Invalid payload metadata size in payload '
                                 '(%d) vs given (%d).' %
                                 (self.payload.metadata_size, metadata_size))

      # Check metadata signature (if provided).
      if metadata_sig_file:
        metadata_sig = base64.b64decode(metadata_sig_file.read())
        self._CheckSha256Signature(metadata_sig, pubkey_file_name,
                                   self.payload.manifest_hasher.digest(),
                                   'metadata signature')

      # Part 1: Check the file header.
      report.AddSection('header')
      # Check: Payload version is valid.
      if self.payload.header.version not in (1, 2):
        raise error.PayloadError('Unknown payload version (%d).' %
                                 self.payload.header.version)
      report.AddField('version', self.payload.header.version)
      report.AddField('manifest len', self.payload.header.manifest_len)

      # Part 2: Check the manifest.
      self._CheckManifest(report, part_sizes)
      assert self.payload_type, 'payload type should be known by now'

      # Make sure deprecated values are not present in the payload.
      for field in ('install_operations', 'kernel_install_operations'):
        self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
                                          'manifest')
      for field in ('old_kernel_info', 'old_rootfs_info',
                    'new_kernel_info', 'new_rootfs_info'):
        self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')

      # Part 3: Examine partition operations.
      total_blob_size = 0
      for part, operations in ((p.partition_name, p.operations)
                               for p in self.payload.manifest.partitions):
        report.AddSection('%s operations' % part)

        new_fs_usable_size = self.new_fs_sizes[part]
        old_fs_usable_size = self.old_fs_sizes[part]

        if part_sizes is not None and part_sizes.get(part, None):
          new_fs_usable_size = old_fs_usable_size = part_sizes[part]

        # TODO(chromium:243559) only default to the filesystem size if no
        # explicit size provided *and* the partition size is not embedded in
        # the payload; see issue for more details.
        total_blob_size += self._CheckOperations(
            operations, report, '%s_install_operations' % part,
            self.old_fs_sizes[part], self.new_fs_sizes[part],
            old_fs_usable_size, new_fs_usable_size, total_blob_size)

      # Check: Operations data reach the end of the payload file.
      used_payload_size = self.payload.data_offset + total_blob_size
      # Major versions 2 and higher have a signature at the end, so it should
      # be considered in the total size of the image.
      if self.sigs_size:
        used_payload_size += self.sigs_size

      if used_payload_size != payload_file_size:
        raise error.PayloadError(
            'Used payload size (%d) different from actual file size (%d).'
% 1252 | (used_payload_size, payload_file_size)) 1253 | 1254 | # Part 4: Handle payload signatures message. 1255 | if self.check_payload_sig and self.sigs_size: 1256 | self._CheckSignatures(report, pubkey_file_name) 1257 | 1258 | # Part 5: Summary. 1259 | report.AddSection('summary') 1260 | report.AddField('update type', self.payload_type) 1261 | 1262 | report.Finalize() 1263 | finally: 1264 | if report_out_file: 1265 | report.Dump(report_out_file) 1266 | -------------------------------------------------------------------------------- /update_payload/common.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Utilities for update payload processing.""" 18 | 19 | from __future__ import absolute_import 20 | from __future__ import print_function 21 | 22 | import base64 23 | 24 | from update_payload import update_metadata_pb2 25 | from update_payload.error import PayloadError 26 | 27 | 28 | # 29 | # Constants. 30 | # 31 | SIG_ASN1_HEADER = ( 32 | b'\x30\x31\x30\x0d\x06\x09\x60\x86' 33 | b'\x48\x01\x65\x03\x04\x02\x01\x05' 34 | b'\x00\x04\x20' 35 | ) 36 | 37 | BRILLO_MAJOR_PAYLOAD_VERSION = 2 38 | 39 | SOURCE_MINOR_PAYLOAD_VERSION = 2 40 | OPSRCHASH_MINOR_PAYLOAD_VERSION = 3 41 | BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4 42 | PUFFDIFF_MINOR_PAYLOAD_VERSION = 5 43 | 44 | KERNEL = 'kernel' 45 | ROOTFS = 'root' 46 | # Tuple of (name in system, name in protobuf). 47 | CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs')) 48 | 49 | 50 | # 51 | # Payload operation types. 52 | # 53 | class OpType(object): 54 | """Container for operation type constants.""" 55 | _CLASS = update_metadata_pb2.InstallOperation 56 | REPLACE = _CLASS.REPLACE 57 | REPLACE_BZ = _CLASS.REPLACE_BZ 58 | SOURCE_COPY = _CLASS.SOURCE_COPY 59 | SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF 60 | ZERO = _CLASS.ZERO 61 | DISCARD = _CLASS.DISCARD 62 | REPLACE_XZ = _CLASS.REPLACE_XZ 63 | PUFFDIFF = _CLASS.PUFFDIFF 64 | BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF 65 | ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO, 66 | DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF) 67 | NAMES = { 68 | REPLACE: 'REPLACE', 69 | REPLACE_BZ: 'REPLACE_BZ', 70 | SOURCE_COPY: 'SOURCE_COPY', 71 | SOURCE_BSDIFF: 'SOURCE_BSDIFF', 72 | ZERO: 'ZERO', 73 | DISCARD: 'DISCARD', 74 | REPLACE_XZ: 'REPLACE_XZ', 75 | PUFFDIFF: 'PUFFDIFF', 76 | BROTLI_BSDIFF: 'BROTLI_BSDIFF', 77 | } 78 | 79 | def __init__(self): 80 | pass 81 | 82 | 83 | # 84 | # Checked and hashed reading of data. 85 | # 86 | def IntPackingFmtStr(size, is_unsigned): 87 | """Returns an integer format string for use by the struct module. 88 | 89 | Args: 90 | size: the integer size in bytes (2, 4 or 8) 91 | is_unsigned: whether it is signed or not 92 | 93 | Returns: 94 | A format string for packing/unpacking integer values; assumes network byte 95 | order (big-endian). 
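  For example (illustrative): size=4 with is_unsigned=True yields '!I', a
  big-endian unsigned 32-bit integer format for struct.pack/unpack.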

  Raises:
    PayloadError if something is wrong with the arguments.
  """
  # Determine the base conversion format.
  if size == 2:
    fmt = 'h'
  elif size == 4:
    fmt = 'i'
  elif size == 8:
    fmt = 'q'
  else:
    raise PayloadError('unsupported numeric field size (%s)' % size)

  # Signed or unsigned?
  if is_unsigned:
    fmt = fmt.upper()

  # Make it network byte order (big-endian).
  fmt = '!' + fmt

  return fmt


def Read(file_obj, length, offset=None, hasher=None):
  """Reads binary data from a file.

  Args:
    file_obj: an open file object
    length: the length of the data to read
    offset: an offset to seek to prior to reading; this is an absolute offset
            from either the beginning (non-negative) or end (negative) of the
            file. (optional)
    hasher: a hashing object to pass the read data through (optional)

  Returns:
    A string containing the read data.

  Raises:
    PayloadError if a read error occurred or not enough data was read.
  """
  if offset is not None:
    if offset >= 0:
      file_obj.seek(offset)
    else:
      file_obj.seek(offset, 2)

  try:
    data = file_obj.read(length)
  except IOError as e:
    raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))

  if len(data) != length:
    raise PayloadError(
        'reading from file (%s) too short (%d instead of %d bytes)' %
        (file_obj.name, len(data), length))

  if hasher:
    hasher.update(data)

  return data


#
# Formatting functions.
#
def FormatExtent(ex, block_size=0):
  end_block = ex.start_block + ex.num_blocks
  if block_size:
    return '%d->%d * %d' % (ex.start_block, end_block, block_size)
  return '%d->%d' % (ex.start_block, end_block)


def FormatSha256(digest):
  """Returns a canonical string representation of a SHA256 digest."""
  return base64.b64encode(digest).decode('utf-8')


#
# Useful iterators.
#
def _ObjNameIter(items, base_name, reverse=False, name_format_func=None):
  """A generic (item, name) tuple iterator.

  Args:
    items: the sequence of objects to iterate on
    base_name: the base name for all objects
    reverse: whether iteration should be in reverse order
    name_format_func: a function to apply to the name string

  Yields:
    An iterator whose i-th invocation returns (items[i], name), where name ==
    base_name + '[i]' (with a formatting function optionally applied to it).
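  For example (illustrative): two operations iterated with base_name 'op' are
  yielded as (op0, 'op[1]') and (op1, 'op[2]'); note that indexing is 1-based.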
189 | """ 190 | idx, inc = (len(items), -1) if reverse else (1, 1) 191 | if reverse: 192 | items = reversed(items) 193 | for item in items: 194 | item_name = '%s[%d]' % (base_name, idx) 195 | if name_format_func: 196 | item_name = name_format_func(item, item_name) 197 | yield (item, item_name) 198 | idx += inc 199 | 200 | 201 | def _OperationNameFormatter(op, op_name): 202 | return '%s(%s)' % (op_name, OpType.NAMES.get(op.type, '?')) 203 | 204 | 205 | def OperationIter(operations, base_name, reverse=False): 206 | """An (item, name) iterator for update operations.""" 207 | return _ObjNameIter(operations, base_name, reverse=reverse, 208 | name_format_func=_OperationNameFormatter) 209 | 210 | 211 | def ExtentIter(extents, base_name, reverse=False): 212 | """An (item, name) iterator for operation extents.""" 213 | return _ObjNameIter(extents, base_name, reverse=reverse) 214 | 215 | 216 | def SignatureIter(sigs, base_name, reverse=False): 217 | """An (item, name) iterator for signatures.""" 218 | return _ObjNameIter(sigs, base_name, reverse=reverse) 219 | -------------------------------------------------------------------------------- /update_payload/error.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Payload handling errors.""" 18 | 19 | 20 | class PayloadError(Exception): 21 | """An update payload general processing error.""" 22 | -------------------------------------------------------------------------------- /update_payload/format_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Various formatting functions.""" 18 | 19 | from __future__ import division 20 | 21 | 22 | def NumToPercent(num, total, min_precision=1, max_precision=5): 23 | """Returns the percentage (string) of |num| out of |total|. 24 | 25 | If the percentage includes a fraction, it will be computed down to the least 26 | precision that yields a non-zero and ranging between |min_precision| and 27 | |max_precision|. Values are always rounded down. All arithmetic operations 28 | are integer built-ins. 
  Examples (using default precision):

    (1, 1) => 100%
    (3, 10) => 30%
    (3, 9) => 33.3%
    (3, 900) => 0.3%
    (3, 9000000) => 0.00003%
    (3, 900000000) => 0%
    (5, 2) => 250%

  Args:
    num: the value of the part
    total: the value of the whole
    min_precision: minimum precision for fractional percentage
    max_precision: maximum precision for fractional percentage
  Returns:
    Percentage string, or None if percent cannot be computed (i.e. total is
    zero).

  """
  if total == 0:
    return None

  percent = 0
  precision = min(min_precision, max_precision)
  factor = 10 ** precision
  while precision <= max_precision:
    percent = num * 100 * factor // total
    if percent:
      break
    factor *= 10
    precision += 1

  whole, frac = divmod(percent, factor)
  while frac and not frac % 10:
    frac //= 10  # Floor division keeps frac an int, as the docstring promises.
    precision -= 1

  return '%d%s%%' % (whole, '.%0*d' % (precision, frac) if frac else '')


def BytesToHumanReadable(size, precision=1, decimal=False):
  """Returns a human readable representation of a given |size|.

  The returned string includes unit notations in either binary (KiB, MiB, etc)
  or decimal (kB, MB, etc), based on the value of |decimal|. The chosen unit is
  the largest that yields a whole (or mixed) number. It may contain up to
  |precision| fractional digits. Values are always rounded down. Largest unit
  is an exabyte. All arithmetic operations are integer built-ins. Examples
  (using default precision and binary units):

    4096 => 4 KiB
    5000 => 4.8 KiB
    500000 => 488.2 KiB
    5000000 => 4.7 MiB

  Args:
    size: the size in bytes
    precision: the number of digits past the decimal point
    decimal: whether to compute/present decimal or binary units
  Returns:
    Readable size string, or None if no conversion is applicable (i.e. size is
    less than the smallest unit).

  """
  constants = (
      (('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'), 1024),
      (('kB', 'MB', 'GB', 'TB', 'PB', 'EB'), 1000)
  )
  suffixes, base = constants[decimal]
  exp, magnitude = 0, 1
  while exp < len(suffixes):
    next_magnitude = magnitude * base
    if size < next_magnitude:
      break
    exp += 1
    magnitude = next_magnitude

  if exp != 0:
    whole = size // magnitude
    frac = (size % magnitude) * (10 ** precision) // magnitude
    while frac and not frac % 10:
      frac //= 10  # Floor division, for the same reason as in NumToPercent.
    return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])
-------------------------------------------------------------------------------- /update_payload/format_utils_unittest.py: --------------------------------------------------------------------------------
#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
16 | # 17 | 18 | """Unit tests for format_utils.py.""" 19 | 20 | # Disable check for function names to avoid errors based on old code 21 | # pylint: disable-msg=invalid-name 22 | 23 | from __future__ import absolute_import 24 | 25 | import unittest 26 | 27 | from update_payload import format_utils 28 | 29 | 30 | class NumToPercentTest(unittest.TestCase): 31 | """ Tests number conversion to percentage format.""" 32 | def testHundredPercent(self): 33 | self.assertEqual(format_utils.NumToPercent(1, 1), '100%') 34 | 35 | def testOverHundredPercent(self): 36 | self.assertEqual(format_utils.NumToPercent(5, 2), '250%') 37 | 38 | def testWholePercent(self): 39 | self.assertEqual(format_utils.NumToPercent(3, 10), '30%') 40 | 41 | def testDefaultMinPrecision(self): 42 | self.assertEqual(format_utils.NumToPercent(3, 9), '33.3%') 43 | self.assertEqual(format_utils.NumToPercent(3, 900), '0.3%') 44 | 45 | def testDefaultMaxPrecision(self): 46 | self.assertEqual(format_utils.NumToPercent(3, 9000000), '0.00003%') 47 | self.assertEqual(format_utils.NumToPercent(3, 90000000), '0%') 48 | 49 | def testCustomMinPrecision(self): 50 | self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=3), 51 | '33.333%') 52 | self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=0), 53 | '33%') 54 | 55 | def testCustomMaxPrecision(self): 56 | self.assertEqual(format_utils.NumToPercent(3, 900, max_precision=1), 57 | '0.3%') 58 | self.assertEqual(format_utils.NumToPercent(3, 9000, max_precision=1), 59 | '0%') 60 | 61 | 62 | class BytesToHumanReadableTest(unittest.TestCase): 63 | """ Tests number conversion to human readable format.""" 64 | def testBaseTwo(self): 65 | self.assertEqual(format_utils.BytesToHumanReadable(0x1000), '4 KiB') 66 | self.assertEqual(format_utils.BytesToHumanReadable(0x400000), '4 MiB') 67 | self.assertEqual(format_utils.BytesToHumanReadable(0x100000000), '4 GiB') 68 | self.assertEqual(format_utils.BytesToHumanReadable(0x40000000000), '4 TiB') 69 | 70 | def testDecimal(self): 71 | self.assertEqual(format_utils.BytesToHumanReadable(5000, decimal=True), 72 | '5 kB') 73 | self.assertEqual(format_utils.BytesToHumanReadable(5000000, decimal=True), 74 | '5 MB') 75 | self.assertEqual(format_utils.BytesToHumanReadable(5000000000, 76 | decimal=True), 77 | '5 GB') 78 | 79 | def testDefaultPrecision(self): 80 | self.assertEqual(format_utils.BytesToHumanReadable(5000), '4.8 KiB') 81 | self.assertEqual(format_utils.BytesToHumanReadable(500000), '488.2 KiB') 82 | self.assertEqual(format_utils.BytesToHumanReadable(5000000), '4.7 MiB') 83 | 84 | def testCustomPrecision(self): 85 | self.assertEqual(format_utils.BytesToHumanReadable(5000, precision=3), 86 | '4.882 KiB') 87 | self.assertEqual(format_utils.BytesToHumanReadable(500000, precision=0), 88 | '488 KiB') 89 | self.assertEqual(format_utils.BytesToHumanReadable(5000000, precision=5), 90 | '4.76837 MiB') 91 | 92 | 93 | if __name__ == '__main__': 94 | unittest.main() 95 | -------------------------------------------------------------------------------- /update_payload/histogram.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Histogram generation tools.""" 18 | 19 | from __future__ import absolute_import 20 | from __future__ import division 21 | 22 | from collections import defaultdict 23 | 24 | from update_payload import format_utils 25 | 26 | 27 | class Histogram(object): 28 | """A histogram generating object. 29 | 30 | This object serves the sole purpose of formatting (key, val) pairs as an 31 | ASCII histogram, including bars and percentage markers, and taking care of 32 | label alignment, scaling, etc. In addition to the standard __init__ 33 | interface, two static methods are provided for conveniently converting data 34 | in different formats into a histogram. Histogram generation is exported via 35 | its __str__ method, and looks as follows: 36 | 37 | Yes |################ | 5 (83.3%) 38 | No |### | 1 (16.6%) 39 | 40 | TODO(garnold) we may want to add actual methods for adding data or tweaking 41 | the output layout and formatting. For now, though, this is fine. 42 | 43 | """ 44 | 45 | def __init__(self, data, scale=20, formatter=None): 46 | """Initialize a histogram object. 47 | 48 | Args: 49 | data: list of (key, count) pairs constituting the histogram 50 | scale: number of characters used to indicate 100% 51 | formatter: function used for formatting raw histogram values 52 | 53 | """ 54 | self.data = data 55 | self.scale = scale 56 | self.formatter = formatter or str 57 | self.max_key_len = max([len(str(key)) for key, count in self.data]) 58 | self.total = sum([count for key, count in self.data]) 59 | 60 | @staticmethod 61 | def FromCountDict(count_dict, scale=20, formatter=None, key_names=None): 62 | """Takes a dictionary of counts and returns a histogram object. 63 | 64 | This simply converts a mapping from names to counts into a list of (key, 65 | count) pairs, optionally translating keys into name strings, then 66 | generating and returning a histogram for them. This is a useful convenience 67 | call for clients that update a dictionary of counters as they (say) scan a 68 | data stream. 69 | 70 | Args: 71 | count_dict: dictionary mapping keys to occurrence counts 72 | scale: number of characters used to indicate 100% 73 | formatter: function used for formatting raw histogram values 74 | key_names: dictionary mapping keys to name strings 75 | Returns: 76 | A histogram object based on the given data. 77 | 78 | """ 79 | namer = None 80 | if key_names: 81 | namer = lambda key: key_names[key] 82 | else: 83 | namer = lambda key: key 84 | 85 | hist = [(namer(key), count) for key, count in count_dict.items()] 86 | return Histogram(hist, scale, formatter) 87 | 88 | @staticmethod 89 | def FromKeyList(key_list, scale=20, formatter=None, key_names=None): 90 | """Takes a list of (possibly recurring) keys and returns a histogram object. 91 | 92 | This converts the list into a dictionary of counters, then uses 93 | FromCountDict() to generate the actual histogram. For example: 94 | 95 | ['a', 'a', 'b', 'a', 'b'] --> {'a': 3, 'b': 2} --> ... 
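    (Illustrative: rendered at the default scale of 20, 'a' then gets a bar
    of 3 * 20 // 5 = 12 hash marks and 'b' a bar of 2 * 20 // 5 = 8.)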
96 | 97 | Args: 98 | key_list: list of (possibly recurring) keys 99 | scale: number of characters used to indicate 100% 100 | formatter: function used for formatting raw histogram values 101 | key_names: dictionary mapping keys to name strings 102 | Returns: 103 | A histogram object based on the given data. 104 | 105 | """ 106 | count_dict = defaultdict(int) # Unset items default to zero 107 | for key in key_list: 108 | count_dict[key] += 1 109 | return Histogram.FromCountDict(count_dict, scale, formatter, key_names) 110 | 111 | def __str__(self): 112 | hist_lines = [] 113 | hist_bar = '|' 114 | for key, count in self.data: 115 | if self.total: 116 | bar_len = count * self.scale // self.total 117 | hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale) 118 | 119 | line = '%s %s %s' % ( 120 | str(key).ljust(self.max_key_len), 121 | hist_bar, 122 | self.formatter(count)) 123 | percent_str = format_utils.NumToPercent(count, self.total) 124 | if percent_str: 125 | line += ' (%s)' % percent_str 126 | hist_lines.append(line) 127 | 128 | return '\n'.join(hist_lines) 129 | 130 | def GetKeys(self): 131 | """Returns the keys of the histogram.""" 132 | return [key for key, _ in self.data] 133 | -------------------------------------------------------------------------------- /update_payload/histogram_unittest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright (C) 2013 The Android Open Source Project 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | 18 | """Unit tests for histogram.py.""" 19 | 20 | # Disable check for function names to avoid errors based on old code 21 | # pylint: disable-msg=invalid-name 22 | 23 | from __future__ import absolute_import 24 | 25 | import unittest 26 | 27 | from update_payload import format_utils 28 | from update_payload import histogram 29 | 30 | 31 | class HistogramTest(unittest.TestCase): 32 | """ Tests histogram""" 33 | 34 | @staticmethod 35 | def AddHumanReadableSize(size): 36 | fmt = format_utils.BytesToHumanReadable(size) 37 | return '%s (%s)' % (size, fmt) if fmt else str(size) 38 | 39 | def CompareToExpectedDefault(self, actual_str): 40 | expected_str = ( 41 | 'Yes |################ | 5 (83.3%)\n' 42 | 'No |### | 1 (16.6%)' 43 | ) 44 | self.assertEqual(actual_str, expected_str) 45 | 46 | def testExampleHistogram(self): 47 | self.CompareToExpectedDefault(str(histogram.Histogram( 48 | [('Yes', 5), ('No', 1)]))) 49 | 50 | def testFromCountDict(self): 51 | self.CompareToExpectedDefault(str(histogram.Histogram.FromCountDict( 52 | {'Yes': 5, 'No': 1}))) 53 | 54 | def testFromKeyList(self): 55 | self.CompareToExpectedDefault(str(histogram.Histogram.FromKeyList( 56 | ['Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes']))) 57 | 58 | def testCustomScale(self): 59 | expected_str = ( 60 | 'Yes |#### | 5 (83.3%)\n' 61 | 'No | | 1 (16.6%)' 62 | ) 63 | actual_str = str(histogram.Histogram([('Yes', 5), ('No', 1)], scale=5)) 64 | self.assertEqual(actual_str, expected_str) 65 | 66 | def testCustomFormatter(self): 67 | expected_str = ( 68 | 'Yes |################ | 5000 (4.8 KiB) (83.3%)\n' 69 | 'No |### | 1000 (16.6%)' 70 | ) 71 | actual_str = str(histogram.Histogram( 72 | [('Yes', 5000), ('No', 1000)], formatter=self.AddHumanReadableSize)) 73 | self.assertEqual(actual_str, expected_str) 74 | 75 | 76 | if __name__ == '__main__': 77 | unittest.main() 78 | -------------------------------------------------------------------------------- /update_payload/payload-test-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpQIBAAKCAQEAvtGHtqO21Uhy2wGz9fluIpIUR8G7dZoCZhZukGkm4mlfgL71 3 | xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3HCkCOurZLpi2L5Ver6qrxKFh6WBVZ 4 | 0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+RazfrsXhd4cy3dBMxouGwH7R7QQXTFCo 5 | Cc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP0bfPwH9cAXuMjHXiZatim0tF+ivp 6 | kM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c0mNmBNFaV54cHEUW2SlNIiRun7L0 7 | 1nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5cQIDAQABAoIBADmE2X7hbJxwAUcp 8 | BUExFdTP6dMTf9lcOjrhqiRXvgPjtYkOhvD+rsdWq/cf2zhiKibTdEEzUMr+BM3N 9 | r7eyntvlR+DaUIVgF1pjigvryVPbD837aZ5NftRv194PC5FInttq1Dsf0ZEz8p8X 10 | uS/xg1+ggG1SUK/yOSJkLpNZ5xelbclQJ9bnJST8PR8XbEieA83xt5M2DcooPzq0 11 | /99m/daA5hmSWs6n8sFrIZDQxDhLyyW4J72jjoNTE87eCpwK855yXMelpEPDZNQi 12 | nB3x5Y/bGbl81PInqL2q14lekrVYdYZ7bOBVlsmyvz6f1e4OOE1aaAM+w6ArA4az 13 | 6elZQE0CgYEA4GOU6BBu9jLqFdqV9jIkWsgz5ZWINz8PLJPtZzk5I9KO1m+GAUy2 14 | h/1IGGR6qRQR49hMtq4C0lUifxquq0xivzJ87U9oxKC9yEeTxkmDe5csVHsnAtqT 15 | xRgVM7Ysrut5NLU1zm0q3jBmkDu7d99LvscM/3n7eJ6RiYpnA54O6I8CgYEA2bNA 16 | 34PTvxBS2deRoxKQNlVU14FtirE+q0+k0wcE85wr7wIMpR13al8T1TpE8J1yvvZM 17 | 92HMGFGfYNDB46b8VfJ5AxEUFwdruec6sTVVfkMZMOqM/A08yiaLzQ1exDxNwaja 18 | fLuG5FAVRD/2g7fLBcsmosyNgcgNr1XA8Q/nvf8CgYEAwaSOg7py19rWcqehlMZu 19 | 4z00tCNYWzz7LmA2l0clzYlPJTU3MvXt6+ujhRFpXXJpgfRPN7Nx0ewQihoPtNqF 20 | uTSr5OwLoOyK+0Tx/UPByS2L3xgscWUJ8yQ2X9sOMqIZhmf/mDZTsU2ZpU03GlrE 21 | dk43JF4zq0NEm6qp/dAwU3cCgYEAvECl+KKmmLIk8vvWlI2Y52Mi2rixYR2kc7+L 22 | 
aHDJd1+1HhlHlgDFItbU765Trz5322phZArN0rnCeJYNFC9yRWBIBL7gAIoKPdgW 23 | iOb15xlez04EXHGV/7kVa1wEdu0u0CiTxwjivMwDl+E36u8kQP5LirwYIgI800H0 24 | doCqhUECgYEAjvA38OS7hy56Q4LQtmHFBuRIn4E5SrIGMwNIH6TGbEKQix3ajTCQ 25 | 0fSoLDGTkU6dH+T4v0WheveN2a2Kofqm0UQx5V2rfnY/Ut1fAAWgL/lsHLDnzPUZ 26 | bvTOANl8TbT49xAfNXTaGWe7F7nYz+bK0UDif1tJNDLQw7USD5I8lbQ= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /update_payload/payload-test-key.pub: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvtGHtqO21Uhy2wGz9flu 3 | IpIUR8G7dZoCZhZukGkm4mlfgL71xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3H 4 | CkCOurZLpi2L5Ver6qrxKFh6WBVZ0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+Razf 5 | rsXhd4cy3dBMxouGwH7R7QQXTFCoCc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP 6 | 0bfPwH9cAXuMjHXiZatim0tF+ivpkM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c 7 | 0mNmBNFaV54cHEUW2SlNIiRun7L01nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5 8 | cQIDAQAB 9 | -----END PUBLIC KEY----- 10 | -------------------------------------------------------------------------------- /update_payload/payload.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Tools for reading, verifying and applying Chrome OS update payloads.""" 18 | 19 | from __future__ import absolute_import 20 | from __future__ import print_function 21 | 22 | import hashlib 23 | import struct 24 | 25 | from update_payload import applier 26 | from update_payload import checker 27 | from update_payload import common 28 | from update_payload import update_metadata_pb2 29 | from update_payload.error import PayloadError 30 | 31 | 32 | # 33 | # Helper functions. 34 | # 35 | def _ReadInt(file_obj, size, is_unsigned, hasher=None): 36 | """Reads a binary-encoded integer from a file. 37 | 38 | It will do the correct conversion based on the reported size and whether or 39 | not a signed number is expected. Assumes a network (big-endian) byte 40 | ordering. 41 | 42 | Args: 43 | file_obj: a file object 44 | size: the integer size in bytes (2, 4 or 8) 45 | is_unsigned: whether it is signed or not 46 | hasher: an optional hasher to pass the value through 47 | 48 | Returns: 49 | An "unpacked" (Python) integer value. 50 | 51 | Raises: 52 | PayloadError if an read error occurred. 53 | """ 54 | return struct.unpack(common.IntPackingFmtStr(size, is_unsigned), 55 | common.Read(file_obj, size, hasher=hasher))[0] 56 | 57 | 58 | # 59 | # Update payload. 60 | # 61 | class Payload(object): 62 | """Chrome OS update payload processor.""" 63 | 64 | class _PayloadHeader(object): 65 | """Update payload header struct.""" 66 | 67 | # Header constants; sizes are in bytes. 
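    # --- Editor's aside (illustrative) ---
    # On disk the header is laid out as:
    #   magic                   4 bytes  (b'CrAU')
    #   version                 8 bytes  (big-endian unsigned)
    #   manifest_len            8 bytes  (big-endian unsigned)
    #   metadata_signature_len  4 bytes  (major version 2 only)
    # so header.size comes to 20 bytes for version 1 payloads and 24 bytes
    # for version 2 (see ReadFromPayload below).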
68 | _MAGIC = b'CrAU' 69 | _VERSION_SIZE = 8 70 | _MANIFEST_LEN_SIZE = 8 71 | _METADATA_SIGNATURE_LEN_SIZE = 4 72 | 73 | def __init__(self): 74 | self.version = None 75 | self.manifest_len = None 76 | self.metadata_signature_len = None 77 | self.size = None 78 | 79 | def ReadFromPayload(self, payload_file, hasher=None): 80 | """Reads the payload header from a file. 81 | 82 | Reads the payload header from the |payload_file| and updates the |hasher| 83 | if one is passed. The parsed header is stored in the _PayloadHeader 84 | instance attributes. 85 | 86 | Args: 87 | payload_file: a file object 88 | hasher: an optional hasher to pass the value through 89 | 90 | Returns: 91 | None. 92 | 93 | Raises: 94 | PayloadError if a read error occurred or the header is invalid. 95 | """ 96 | # Verify magic 97 | magic = common.Read(payload_file, len(self._MAGIC), hasher=hasher) 98 | if magic != self._MAGIC: 99 | raise PayloadError('invalid payload magic: %s' % magic) 100 | 101 | self.version = _ReadInt(payload_file, self._VERSION_SIZE, True, 102 | hasher=hasher) 103 | self.manifest_len = _ReadInt(payload_file, self._MANIFEST_LEN_SIZE, True, 104 | hasher=hasher) 105 | self.size = (len(self._MAGIC) + self._VERSION_SIZE + 106 | self._MANIFEST_LEN_SIZE) 107 | self.metadata_signature_len = 0 108 | 109 | if self.version == common.BRILLO_MAJOR_PAYLOAD_VERSION: 110 | self.size += self._METADATA_SIGNATURE_LEN_SIZE 111 | self.metadata_signature_len = _ReadInt( 112 | payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True, 113 | hasher=hasher) 114 | 115 | def __init__(self, payload_file, payload_file_offset=0): 116 | """Initialize the payload object. 117 | 118 | Args: 119 | payload_file: update payload file object open for reading 120 | payload_file_offset: the offset of the actual payload 121 | """ 122 | self.payload_file = payload_file 123 | self.payload_file_offset = payload_file_offset 124 | self.manifest_hasher = None 125 | self.is_init = False 126 | self.header = None 127 | self.manifest = None 128 | self.data_offset = None 129 | self.metadata_signature = None 130 | self.metadata_size = None 131 | 132 | def _ReadHeader(self): 133 | """Reads and returns the payload header. 134 | 135 | Returns: 136 | A payload header object. 137 | 138 | Raises: 139 | PayloadError if a read error occurred. 140 | """ 141 | header = self._PayloadHeader() 142 | header.ReadFromPayload(self.payload_file, self.manifest_hasher) 143 | return header 144 | 145 | def _ReadManifest(self): 146 | """Reads and returns the payload manifest. 147 | 148 | Returns: 149 | A string containing the payload manifest in binary form. 150 | 151 | Raises: 152 | PayloadError if a read error occurred. 153 | """ 154 | if not self.header: 155 | raise PayloadError('payload header not present') 156 | 157 | return common.Read(self.payload_file, self.header.manifest_len, 158 | hasher=self.manifest_hasher) 159 | 160 | def _ReadMetadataSignature(self): 161 | """Reads and returns the metadata signatures. 162 | 163 | Returns: 164 | A string containing the metadata signatures protobuf in binary form or 165 | an empty string if no metadata signature found in the payload. 166 | 167 | Raises: 168 | PayloadError if a read error occurred. 
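    The signature blob, when present, sits immediately after the manifest,
    at offset payload_file_offset + header.size + header.manifest_len and
    spanning header.metadata_signature_len bytes (see the Read call below).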
169 | """ 170 | if not self.header: 171 | raise PayloadError('payload header not present') 172 | 173 | return common.Read( 174 | self.payload_file, self.header.metadata_signature_len, 175 | offset=self.payload_file_offset + self.header.size + 176 | self.header.manifest_len) 177 | 178 | def ReadDataBlob(self, offset, length): 179 | """Reads and returns a single data blob from the update payload. 180 | 181 | Args: 182 | offset: offset to the beginning of the blob from the end of the manifest 183 | length: the blob's length 184 | 185 | Returns: 186 | A string containing the raw blob data. 187 | 188 | Raises: 189 | PayloadError if a read error occurred. 190 | """ 191 | return common.Read(self.payload_file, length, 192 | offset=self.payload_file_offset + self.data_offset + 193 | offset) 194 | 195 | def Init(self): 196 | """Initializes the payload object. 197 | 198 | This is a prerequisite for any other public API call. 199 | 200 | Raises: 201 | PayloadError if object already initialized or fails to initialize 202 | correctly. 203 | """ 204 | if self.is_init: 205 | raise PayloadError('payload object already initialized') 206 | 207 | self.manifest_hasher = hashlib.sha256() 208 | 209 | # Read the file header. 210 | self.payload_file.seek(self.payload_file_offset) 211 | self.header = self._ReadHeader() 212 | 213 | # Read the manifest. 214 | manifest_raw = self._ReadManifest() 215 | self.manifest = update_metadata_pb2.DeltaArchiveManifest() 216 | self.manifest.ParseFromString(manifest_raw) 217 | 218 | # Read the metadata signature (if any). 219 | metadata_signature_raw = self._ReadMetadataSignature() 220 | if metadata_signature_raw: 221 | self.metadata_signature = update_metadata_pb2.Signatures() 222 | self.metadata_signature.ParseFromString(metadata_signature_raw) 223 | 224 | self.metadata_size = self.header.size + self.header.manifest_len 225 | self.data_offset = self.metadata_size + self.header.metadata_signature_len 226 | 227 | self.is_init = True 228 | 229 | def Describe(self): 230 | """Emits the payload embedded description data to standard output.""" 231 | def _DescribeImageInfo(description, image_info): 232 | """Display info about the image.""" 233 | def _DisplayIndentedValue(name, value): 234 | print(' {:<14} {}'.format(name+':', value)) 235 | 236 | print('%s:' % description) 237 | _DisplayIndentedValue('Channel', image_info.channel) 238 | _DisplayIndentedValue('Board', image_info.board) 239 | _DisplayIndentedValue('Version', image_info.version) 240 | _DisplayIndentedValue('Key', image_info.key) 241 | 242 | if image_info.build_channel != image_info.channel: 243 | _DisplayIndentedValue('Build channel', image_info.build_channel) 244 | 245 | if image_info.build_version != image_info.version: 246 | _DisplayIndentedValue('Build version', image_info.build_version) 247 | 248 | if self.manifest.HasField('old_image_info'): 249 | _DescribeImageInfo('Old Image', self.manifest.old_image_info) 250 | 251 | if self.manifest.HasField('new_image_info'): 252 | _DescribeImageInfo('New Image', self.manifest.new_image_info) 253 | 254 | def _AssertInit(self): 255 | """Raises an exception if the object was not initialized.""" 256 | if not self.is_init: 257 | raise PayloadError('payload object not initialized') 258 | 259 | def ResetFile(self): 260 | """Resets the offset of the payload file to right past the manifest.""" 261 | self.payload_file.seek(self.payload_file_offset + self.data_offset) 262 | 263 | def IsDelta(self): 264 | """Returns True iff the payload appears to be a delta.""" 265 | self._AssertInit() 266 
| return (any(partition.HasField('old_partition_info') 267 | for partition in self.manifest.partitions)) 268 | 269 | def IsFull(self): 270 | """Returns True iff the payload appears to be a full.""" 271 | return not self.IsDelta() 272 | 273 | def Check(self, pubkey_file_name=None, metadata_sig_file=None, 274 | metadata_size=0, report_out_file=None, assert_type=None, 275 | block_size=0, part_sizes=None, allow_unhashed=False, 276 | disabled_tests=()): 277 | """Checks the payload integrity. 278 | 279 | Args: 280 | pubkey_file_name: public key used for signature verification 281 | metadata_sig_file: metadata signature, if verification is desired 282 | metadata_size: metadata size, if verification is desired 283 | report_out_file: file object to dump the report to 284 | assert_type: assert that payload is either 'full' or 'delta' 285 | block_size: expected filesystem / payload block size 286 | part_sizes: map of partition label to (physical) size in bytes 287 | allow_unhashed: allow unhashed operation blobs 288 | disabled_tests: list of tests to disable 289 | 290 | Raises: 291 | PayloadError if payload verification failed. 292 | """ 293 | self._AssertInit() 294 | 295 | # Create a short-lived payload checker object and run it. 296 | helper = checker.PayloadChecker( 297 | self, assert_type=assert_type, block_size=block_size, 298 | allow_unhashed=allow_unhashed, disabled_tests=disabled_tests) 299 | helper.Run(pubkey_file_name=pubkey_file_name, 300 | metadata_sig_file=metadata_sig_file, 301 | metadata_size=metadata_size, 302 | part_sizes=part_sizes, 303 | report_out_file=report_out_file) 304 | 305 | def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True, 306 | bspatch_path=None, puffpatch_path=None, 307 | truncate_to_expected_size=True): 308 | """Applies the update payload. 309 | 310 | Args: 311 | new_parts: map of partition name to dest partition file 312 | old_parts: map of partition name to partition file (optional) 313 | bsdiff_in_place: whether to perform BSDIFF operations in-place (optional) 314 | bspatch_path: path to the bspatch binary (optional) 315 | puffpatch_path: path to the puffpatch binary (optional) 316 | truncate_to_expected_size: whether to truncate the resulting partitions 317 | to their expected sizes, as specified in the 318 | payload (optional) 319 | 320 | Raises: 321 | PayloadError if payload application failed. 322 | """ 323 | self._AssertInit() 324 | 325 | # Create a short-lived payload applier object and run it. 326 | helper = applier.PayloadApplier( 327 | self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path, 328 | puffpatch_path=puffpatch_path, 329 | truncate_to_expected_size=truncate_to_expected_size) 330 | helper.Run(new_parts, old_parts=old_parts) 331 | -------------------------------------------------------------------------------- /update_payload/test_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2013 The Android Open Source Project 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | """Utilities for unit testing.""" 18 | 19 | from __future__ import absolute_import 20 | from __future__ import print_function 21 | 22 | import io 23 | import hashlib 24 | import os 25 | import struct 26 | import subprocess 27 | 28 | from update_payload import common 29 | from update_payload import payload 30 | from update_payload import update_metadata_pb2 31 | 32 | 33 | class TestError(Exception): 34 | """An error during testing of update payload code.""" 35 | 36 | 37 | # Private/public RSA keys used for testing. 38 | _PRIVKEY_FILE_NAME = os.path.join(os.path.dirname(__file__), 39 | 'payload-test-key.pem') 40 | _PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__), 41 | 'payload-test-key.pub') 42 | 43 | 44 | def KiB(count): 45 | return count << 10 46 | 47 | 48 | def MiB(count): 49 | return count << 20 50 | 51 | 52 | def GiB(count): 53 | return count << 30 54 | 55 | 56 | def _WriteInt(file_obj, size, is_unsigned, val): 57 | """Writes a binary-encoded integer to a file. 58 | 59 | It will do the correct conversion based on the reported size and whether or 60 | not a signed number is expected. Assumes a network (big-endian) byte 61 | ordering. 62 | 63 | Args: 64 | file_obj: a file object 65 | size: the integer size in bytes (2, 4 or 8) 66 | is_unsigned: True if the value is unsigned, False if it is signed 67 | val: integer value to encode 68 | 69 | Raises: 70 | PayloadError if a write error occurred. 71 | """ 72 | try: 73 | file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val)) 74 | except IOError as e: 75 | raise payload.PayloadError('error writing to file (%s): %s' % 76 | (file_obj.name, e)) 77 | 78 | 79 | def _SetMsgField(msg, field_name, val): 80 | """Sets or clears a field in a protobuf message.""" 81 | if val is None: 82 | msg.ClearField(field_name) 83 | else: 84 | setattr(msg, field_name, val) 85 | 86 | 87 | def SignSha256(data, privkey_file_name): 88 | """Signs the data's SHA256 hash with an RSA private key. 89 | 90 | Args: 91 | data: the data whose SHA256 hash we want to sign 92 | privkey_file_name: private key used for signing data 93 | 94 | Returns: 95 | The signature string, prepended with an ASN1 header. 96 | 97 | Raises: 98 | TestError if something goes wrong. 99 | """ 100 | data_sha256_hash = common.SIG_ASN1_HEADER + hashlib.sha256(data).digest() 101 | sign_cmd = ['openssl', 'rsautl', '-sign', '-inkey', privkey_file_name] 102 | try: 103 | sign_process = subprocess.Popen(sign_cmd, stdin=subprocess.PIPE, 104 | stdout=subprocess.PIPE) 105 | sig, _ = sign_process.communicate(input=data_sha256_hash) 106 | except Exception as e: 107 | raise TestError('signing subprocess failed: %s' % e) 108 | 109 | return sig 110 | 111 | 112 | class SignaturesGenerator(object): 113 | """Generates a payload signatures data block.""" 114 | 115 | def __init__(self): 116 | self.sigs = update_metadata_pb2.Signatures() 117 | 118 | def AddSig(self, version, data): 119 | """Adds a signature to the signature sequence. 
120 | 121 | Args: 122 | version: signature version (None means do not assign) 123 | data: signature binary data (None means do not assign) 124 | """ 125 | sig = self.sigs.signatures.add() 126 | if version is not None: 127 | sig.version = version 128 | if data is not None: 129 | sig.data = data 130 | 131 | def ToBinary(self): 132 | """Returns the binary representation of the signature block.""" 133 | return self.sigs.SerializeToString() 134 | 135 | 136 | class PayloadGenerator(object): 137 | """Generates an update payload allowing low-level control. 138 | 139 | Attributes: 140 | manifest: the protobuf containing the payload manifest 141 | version: the payload version identifier 142 | block_size: the block size pertaining to update operations 143 | 144 | """ 145 | 146 | def __init__(self, version=1): 147 | self.manifest = update_metadata_pb2.DeltaArchiveManifest() 148 | self.version = version 149 | self.block_size = 0 150 | 151 | @staticmethod 152 | def _WriteExtent(ex, val): 153 | """Returns an Extent message.""" 154 | start_block, num_blocks = val 155 | _SetMsgField(ex, 'start_block', start_block) 156 | _SetMsgField(ex, 'num_blocks', num_blocks) 157 | 158 | @staticmethod 159 | def _AddValuesToRepeatedField(repeated_field, values, write_func): 160 | """Adds values to a repeated message field.""" 161 | if values: 162 | for val in values: 163 | new_item = repeated_field.add() 164 | write_func(new_item, val) 165 | 166 | @staticmethod 167 | def _AddExtents(extents_field, values): 168 | """Adds extents to an extents field.""" 169 | PayloadGenerator._AddValuesToRepeatedField( 170 | extents_field, values, PayloadGenerator._WriteExtent) 171 | 172 | def SetBlockSize(self, block_size): 173 | """Sets the payload's block size.""" 174 | self.block_size = block_size 175 | _SetMsgField(self.manifest, 'block_size', block_size) 176 | 177 | def SetPartInfo(self, part_name, is_new, part_size, part_hash): 178 | """Set the partition info entry. 179 | 180 | Args: 181 | part_name: The name of the partition. 182 | is_new: Whether to set old (False) or new (True) info. 183 | part_size: The partition size (in fact, filesystem size). 184 | part_hash: The partition hash. 
185 | """ 186 | partition = next((x for x in self.manifest.partitions 187 | if x.partition_name == part_name), None) 188 | if partition is None: 189 | partition = self.manifest.partitions.add() 190 | partition.partition_name = part_name 191 | 192 | part_info = (partition.new_partition_info if is_new 193 | else partition.old_partition_info) 194 | _SetMsgField(part_info, 'size', part_size) 195 | _SetMsgField(part_info, 'hash', part_hash) 196 | 197 | def AddOperation(self, part_name, op_type, data_offset=None, 198 | data_length=None, src_extents=None, src_length=None, 199 | dst_extents=None, dst_length=None, data_sha256_hash=None): 200 | """Adds an InstallOperation entry.""" 201 | partition = next((x for x in self.manifest.partitions 202 | if x.partition_name == part_name), None) 203 | if partition is None: 204 | partition = self.manifest.partitions.add() 205 | partition.partition_name = part_name 206 | 207 | operations = partition.operations 208 | op = operations.add() 209 | op.type = op_type 210 | 211 | _SetMsgField(op, 'data_offset', data_offset) 212 | _SetMsgField(op, 'data_length', data_length) 213 | 214 | self._AddExtents(op.src_extents, src_extents) 215 | _SetMsgField(op, 'src_length', src_length) 216 | 217 | self._AddExtents(op.dst_extents, dst_extents) 218 | _SetMsgField(op, 'dst_length', dst_length) 219 | 220 | _SetMsgField(op, 'data_sha256_hash', data_sha256_hash) 221 | 222 | def SetSignatures(self, sigs_offset, sigs_size): 223 | """Set the payload's signature block descriptors.""" 224 | _SetMsgField(self.manifest, 'signatures_offset', sigs_offset) 225 | _SetMsgField(self.manifest, 'signatures_size', sigs_size) 226 | 227 | def SetMinorVersion(self, minor_version): 228 | """Set the payload's minor version field.""" 229 | _SetMsgField(self.manifest, 'minor_version', minor_version) 230 | 231 | def _WriteHeaderToFile(self, file_obj, manifest_len): 232 | """Writes a payload heaer to a file.""" 233 | # We need to access protected members in Payload for writing the header. 234 | # pylint: disable=W0212 235 | file_obj.write(payload.Payload._PayloadHeader._MAGIC) 236 | _WriteInt(file_obj, payload.Payload._PayloadHeader._VERSION_SIZE, True, 237 | self.version) 238 | _WriteInt(file_obj, payload.Payload._PayloadHeader._MANIFEST_LEN_SIZE, True, 239 | manifest_len) 240 | 241 | def WriteToFile(self, file_obj, manifest_len=-1, data_blobs=None, 242 | sigs_data=None, padding=None): 243 | """Writes the payload content to a file. 244 | 245 | Args: 246 | file_obj: a file object open for writing 247 | manifest_len: manifest len to dump (otherwise computed automatically) 248 | data_blobs: a list of data blobs to be concatenated to the payload 249 | sigs_data: a binary Signatures message to be concatenated to the payload 250 | padding: stuff to dump past the normal data blobs provided (optional) 251 | """ 252 | manifest = self.manifest.SerializeToString() 253 | if manifest_len < 0: 254 | manifest_len = len(manifest) 255 | self._WriteHeaderToFile(file_obj, manifest_len) 256 | file_obj.write(manifest) 257 | if data_blobs: 258 | for data_blob in data_blobs: 259 | file_obj.write(data_blob) 260 | if sigs_data: 261 | file_obj.write(sigs_data) 262 | if padding: 263 | file_obj.write(padding) 264 | 265 | 266 | class EnhancedPayloadGenerator(PayloadGenerator): 267 | """Payload generator with automatic handling of data blobs. 
268 | 269 | Attributes: 270 | data_blobs: a list of blobs, in the order they were added 271 | curr_offset: the currently consumed offset of blobs added to the payload 272 | """ 273 | 274 | def __init__(self): 275 | super(EnhancedPayloadGenerator, self).__init__() 276 | self.data_blobs = [] 277 | self.curr_offset = 0 278 | 279 | def AddData(self, data_blob): 280 | """Adds a (possibly orphan) data blob.""" 281 | data_length = len(data_blob) 282 | data_offset = self.curr_offset 283 | self.curr_offset += data_length 284 | self.data_blobs.append(data_blob) 285 | return data_length, data_offset 286 | 287 | def AddOperationWithData(self, part_name, op_type, src_extents=None, 288 | src_length=None, dst_extents=None, dst_length=None, 289 | data_blob=None, do_hash_data_blob=True): 290 | """Adds an install operation and associated data blob. 291 | 292 | This takes care of obtaining a hash of the data blob (if so instructed) 293 | and appending it to the internally maintained list of blobs, including the 294 | necessary offset/length accounting. 295 | 296 | Args: 297 | part_name: The name of the partition (e.g. kernel or root). 298 | op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ. 299 | src_extents: list of (start, length) pairs indicating src block ranges 300 | src_length: size of the src data in bytes (needed for diff operations) 301 | dst_extents: list of (start, length) pairs indicating dst block ranges 302 | dst_length: size of the dst data in bytes (needed for diff operations) 303 | data_blob: a data blob associated with this operation 304 | do_hash_data_blob: whether or not to compute and add a data blob hash 305 | """ 306 | data_offset = data_length = data_sha256_hash = None 307 | if data_blob is not None: 308 | if do_hash_data_blob: 309 | data_sha256_hash = hashlib.sha256(data_blob).digest() 310 | data_length, data_offset = self.AddData(data_blob) 311 | 312 | self.AddOperation(part_name, op_type, data_offset=data_offset, 313 | data_length=data_length, src_extents=src_extents, 314 | src_length=src_length, dst_extents=dst_extents, 315 | dst_length=dst_length, data_sha256_hash=data_sha256_hash) 316 | 317 | def WriteToFileWithData(self, file_obj, sigs_data=None, 318 | privkey_file_name=None, padding=None): 319 | """Writes the payload content to a file, optionally signing the content. 320 | 321 | Args: 322 | file_obj: a file object open for writing 323 | sigs_data: signatures blob to be appended to the payload (optional; 324 | payload signature fields assumed to be preset by the caller) 325 | privkey_file_name: key used for signing the payload (optional; used only 326 | if explicit signatures blob not provided) 327 | padding: stuff to dump past the normal data blobs provided (optional) 328 | 329 | Raises: 330 | TestError: if arguments are inconsistent or something goes wrong. 331 | """ 332 | sigs_len = len(sigs_data) if sigs_data else 0 333 | 334 | # Do we need to generate a genuine signatures blob? 335 | do_generate_sigs_data = sigs_data is None and privkey_file_name 336 | 337 | if do_generate_sigs_data: 338 | # First, sign some arbitrary data to obtain the size of a signature blob. 339 | fake_sig = SignSha256(b'fake-payload-data', privkey_file_name) 340 | fake_sigs_gen = SignaturesGenerator() 341 | fake_sigs_gen.AddSig(1, fake_sig) 342 | sigs_len = len(fake_sigs_gen.ToBinary()) 343 | 344 | # Update the payload with proper signature attributes. 345 | self.SetSignatures(self.curr_offset, sigs_len) 346 | 347 | if do_generate_sigs_data: 348 | # Once all payload fields are updated, dump and sign it. 
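| # Note: the signature computed below covers the payload exactly as dumped at this point (header, manifest and data blobs, without the signatures blob itself), which is why signatures_offset/size had to be fixed in the manifest first.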
349 | temp_payload_file = io.BytesIO() 350 | self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs) 351 | sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name) 352 | sigs_gen = SignaturesGenerator() 353 | sigs_gen.AddSig(1, sig) 354 | sigs_data = sigs_gen.ToBinary() 355 | assert len(sigs_data) == sigs_len, 'signature blob lengths mismatch' 356 | 357 | # Dump the whole thing, complete with data and signature blob, to a file. 358 | self.WriteToFile(file_obj, data_blobs=self.data_blobs, sigs_data=sigs_data, 359 | padding=padding) 360 | -------------------------------------------------------------------------------- /update_payload/update-payload-key.pub.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1Bg9BnjWhX3jJyECeXqF 3 | O28nkYTF1NHWLlFHgzAGg+ysva22BL3S5LlsNejnYVg/xzx3izvAQyOF3I1TJVOy 4 | 2fH1DoZOWyKuckMyUrFQbO6OV1VIvPUPKckHadWcXSsHj2lBdDPH9xRDEBsXeztf 5 | nAGBD8GlAyTU7iH+Bf+xzyK9k4BmITf4Nx4xWhRZ6gm2Fc2SEP3x5N5fohkLv5ZP 6 | kFr0fj5wUK+0XF95rkGFBLIq2XACS3dmxMFToFl1HMM1HonUg9TAH+3dVH93zue1 7 | y81mkTuGnNX+zYya5ov2kD8zW1V10iTOSJfOlho5T8FpKbG37o3yYcUiyMHKO1Iv 8 | PQIDAQAB 9 | -----END PUBLIC KEY----- 10 | -------------------------------------------------------------------------------- /update_payload/update_metadata_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: update_metadata.proto 4 | 5 | from google.protobuf import descriptor as _descriptor 6 | from google.protobuf import message as _message 7 | from google.protobuf import reflection as _reflection 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor.FileDescriptor( 17 | name='update_metadata.proto', 18 | package='chromeos_update_engine', 19 | syntax='proto2', 20 | serialized_options=b'H\003', 21 | serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t 
\x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xdb\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x42\x02H\x03' 22 | ) 23 | 24 | 25 | 26 | _INSTALLOPERATION_TYPE = _descriptor.EnumDescriptor( 27 | name='Type', 28 | full_name='chromeos_update_engine.InstallOperation.Type', 29 | filename=None, 30 | file=DESCRIPTOR, 31 | values=[ 32 | _descriptor.EnumValueDescriptor( 33 | name='REPLACE', index=0, number=0, 34 | serialized_options=None, 35 | type=None), 36 | _descriptor.EnumValueDescriptor( 37 | name='REPLACE_BZ', index=1, number=1, 38 | serialized_options=None, 39 | type=None), 40 | _descriptor.EnumValueDescriptor( 41 | name='MOVE', index=2, number=2, 42 | serialized_options=None, 43 | type=None), 44 | _descriptor.EnumValueDescriptor( 45 | name='BSDIFF', index=3, number=3, 46 | serialized_options=None, 47 | type=None), 48 | 
_descriptor.EnumValueDescriptor( 49 | name='SOURCE_COPY', index=4, number=4, 50 | serialized_options=None, 51 | type=None), 52 | _descriptor.EnumValueDescriptor( 53 | name='SOURCE_BSDIFF', index=5, number=5, 54 | serialized_options=None, 55 | type=None), 56 | _descriptor.EnumValueDescriptor( 57 | name='REPLACE_XZ', index=6, number=8, 58 | serialized_options=None, 59 | type=None), 60 | _descriptor.EnumValueDescriptor( 61 | name='ZERO', index=7, number=6, 62 | serialized_options=None, 63 | type=None), 64 | _descriptor.EnumValueDescriptor( 65 | name='DISCARD', index=8, number=7, 66 | serialized_options=None, 67 | type=None), 68 | _descriptor.EnumValueDescriptor( 69 | name='BROTLI_BSDIFF', index=9, number=10, 70 | serialized_options=None, 71 | type=None), 72 | _descriptor.EnumValueDescriptor( 73 | name='PUFFDIFF', index=10, number=9, 74 | serialized_options=None, 75 | type=None), 76 | ], 77 | containing_type=None, 78 | serialized_options=None, 79 | serialized_start=712, 80 | serialized_end=877, 81 | ) 82 | _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) 83 | 84 | 85 | _EXTENT = _descriptor.Descriptor( 86 | name='Extent', 87 | full_name='chromeos_update_engine.Extent', 88 | filename=None, 89 | file=DESCRIPTOR, 90 | containing_type=None, 91 | fields=[ 92 | _descriptor.FieldDescriptor( 93 | name='start_block', full_name='chromeos_update_engine.Extent.start_block', index=0, 94 | number=1, type=4, cpp_type=4, label=1, 95 | has_default_value=False, default_value=0, 96 | message_type=None, enum_type=None, containing_type=None, 97 | is_extension=False, extension_scope=None, 98 | serialized_options=None, file=DESCRIPTOR), 99 | _descriptor.FieldDescriptor( 100 | name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1, 101 | number=2, type=4, cpp_type=4, label=1, 102 | has_default_value=False, default_value=0, 103 | message_type=None, enum_type=None, containing_type=None, 104 | is_extension=False, extension_scope=None, 105 | serialized_options=None, file=DESCRIPTOR), 106 | ], 107 | extensions=[ 108 | ], 109 | nested_types=[], 110 | enum_types=[ 111 | ], 112 | serialized_options=None, 113 | is_extendable=False, 114 | syntax='proto2', 115 | extension_ranges=[], 116 | oneofs=[ 117 | ], 118 | serialized_start=49, 119 | serialized_end=98, 120 | ) 121 | 122 | 123 | _SIGNATURES_SIGNATURE = _descriptor.Descriptor( 124 | name='Signature', 125 | full_name='chromeos_update_engine.Signatures.Signature', 126 | filename=None, 127 | file=DESCRIPTOR, 128 | containing_type=None, 129 | fields=[ 130 | _descriptor.FieldDescriptor( 131 | name='version', full_name='chromeos_update_engine.Signatures.Signature.version', index=0, 132 | number=1, type=13, cpp_type=3, label=1, 133 | has_default_value=False, default_value=0, 134 | message_type=None, enum_type=None, containing_type=None, 135 | is_extension=False, extension_scope=None, 136 | serialized_options=None, file=DESCRIPTOR), 137 | _descriptor.FieldDescriptor( 138 | name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1, 139 | number=2, type=12, cpp_type=9, label=1, 140 | has_default_value=False, default_value=b"", 141 | message_type=None, enum_type=None, containing_type=None, 142 | is_extension=False, extension_scope=None, 143 | serialized_options=None, file=DESCRIPTOR), 144 | ], 145 | extensions=[ 146 | ], 147 | nested_types=[], 148 | enum_types=[ 149 | ], 150 | serialized_options=None, 151 | is_extendable=False, 152 | syntax='proto2', 153 | extension_ranges=[], 154 | oneofs=[ 155 | ], 156 | serialized_start=180, 
157 | serialized_end=222, 158 | ) 159 | 160 | _SIGNATURES = _descriptor.Descriptor( 161 | name='Signatures', 162 | full_name='chromeos_update_engine.Signatures', 163 | filename=None, 164 | file=DESCRIPTOR, 165 | containing_type=None, 166 | fields=[ 167 | _descriptor.FieldDescriptor( 168 | name='signatures', full_name='chromeos_update_engine.Signatures.signatures', index=0, 169 | number=1, type=11, cpp_type=10, label=3, 170 | has_default_value=False, default_value=[], 171 | message_type=None, enum_type=None, containing_type=None, 172 | is_extension=False, extension_scope=None, 173 | serialized_options=None, file=DESCRIPTOR), 174 | ], 175 | extensions=[ 176 | ], 177 | nested_types=[_SIGNATURES_SIGNATURE, ], 178 | enum_types=[ 179 | ], 180 | serialized_options=None, 181 | is_extendable=False, 182 | syntax='proto2', 183 | extension_ranges=[], 184 | oneofs=[ 185 | ], 186 | serialized_start=100, 187 | serialized_end=222, 188 | ) 189 | 190 | 191 | _PARTITIONINFO = _descriptor.Descriptor( 192 | name='PartitionInfo', 193 | full_name='chromeos_update_engine.PartitionInfo', 194 | filename=None, 195 | file=DESCRIPTOR, 196 | containing_type=None, 197 | fields=[ 198 | _descriptor.FieldDescriptor( 199 | name='size', full_name='chromeos_update_engine.PartitionInfo.size', index=0, 200 | number=1, type=4, cpp_type=4, label=1, 201 | has_default_value=False, default_value=0, 202 | message_type=None, enum_type=None, containing_type=None, 203 | is_extension=False, extension_scope=None, 204 | serialized_options=None, file=DESCRIPTOR), 205 | _descriptor.FieldDescriptor( 206 | name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1, 207 | number=2, type=12, cpp_type=9, label=1, 208 | has_default_value=False, default_value=b"", 209 | message_type=None, enum_type=None, containing_type=None, 210 | is_extension=False, extension_scope=None, 211 | serialized_options=None, file=DESCRIPTOR), 212 | ], 213 | extensions=[ 214 | ], 215 | nested_types=[], 216 | enum_types=[ 217 | ], 218 | serialized_options=None, 219 | is_extendable=False, 220 | syntax='proto2', 221 | extension_ranges=[], 222 | oneofs=[ 223 | ], 224 | serialized_start=224, 225 | serialized_end=267, 226 | ) 227 | 228 | 229 | _IMAGEINFO = _descriptor.Descriptor( 230 | name='ImageInfo', 231 | full_name='chromeos_update_engine.ImageInfo', 232 | filename=None, 233 | file=DESCRIPTOR, 234 | containing_type=None, 235 | fields=[ 236 | _descriptor.FieldDescriptor( 237 | name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0, 238 | number=1, type=9, cpp_type=9, label=1, 239 | has_default_value=False, default_value=b"".decode('utf-8'), 240 | message_type=None, enum_type=None, containing_type=None, 241 | is_extension=False, extension_scope=None, 242 | serialized_options=None, file=DESCRIPTOR), 243 | _descriptor.FieldDescriptor( 244 | name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1, 245 | number=2, type=9, cpp_type=9, label=1, 246 | has_default_value=False, default_value=b"".decode('utf-8'), 247 | message_type=None, enum_type=None, containing_type=None, 248 | is_extension=False, extension_scope=None, 249 | serialized_options=None, file=DESCRIPTOR), 250 | _descriptor.FieldDescriptor( 251 | name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2, 252 | number=3, type=9, cpp_type=9, label=1, 253 | has_default_value=False, default_value=b"".decode('utf-8'), 254 | message_type=None, enum_type=None, containing_type=None, 255 | is_extension=False, extension_scope=None, 256 | serialized_options=None, 
file=DESCRIPTOR), 257 | _descriptor.FieldDescriptor( 258 | name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3, 259 | number=4, type=9, cpp_type=9, label=1, 260 | has_default_value=False, default_value=b"".decode('utf-8'), 261 | message_type=None, enum_type=None, containing_type=None, 262 | is_extension=False, extension_scope=None, 263 | serialized_options=None, file=DESCRIPTOR), 264 | _descriptor.FieldDescriptor( 265 | name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4, 266 | number=5, type=9, cpp_type=9, label=1, 267 | has_default_value=False, default_value=b"".decode('utf-8'), 268 | message_type=None, enum_type=None, containing_type=None, 269 | is_extension=False, extension_scope=None, 270 | serialized_options=None, file=DESCRIPTOR), 271 | _descriptor.FieldDescriptor( 272 | name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5, 273 | number=6, type=9, cpp_type=9, label=1, 274 | has_default_value=False, default_value=b"".decode('utf-8'), 275 | message_type=None, enum_type=None, containing_type=None, 276 | is_extension=False, extension_scope=None, 277 | serialized_options=None, file=DESCRIPTOR), 278 | ], 279 | extensions=[ 280 | ], 281 | nested_types=[], 282 | enum_types=[ 283 | ], 284 | serialized_options=None, 285 | is_extendable=False, 286 | syntax='proto2', 287 | extension_ranges=[], 288 | oneofs=[ 289 | ], 290 | serialized_start=269, 291 | serialized_end=388, 292 | ) 293 | 294 | 295 | _INSTALLOPERATION = _descriptor.Descriptor( 296 | name='InstallOperation', 297 | full_name='chromeos_update_engine.InstallOperation', 298 | filename=None, 299 | file=DESCRIPTOR, 300 | containing_type=None, 301 | fields=[ 302 | _descriptor.FieldDescriptor( 303 | name='type', full_name='chromeos_update_engine.InstallOperation.type', index=0, 304 | number=1, type=14, cpp_type=8, label=2, 305 | has_default_value=False, default_value=0, 306 | message_type=None, enum_type=None, containing_type=None, 307 | is_extension=False, extension_scope=None, 308 | serialized_options=None, file=DESCRIPTOR), 309 | _descriptor.FieldDescriptor( 310 | name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1, 311 | number=2, type=13, cpp_type=3, label=1, 312 | has_default_value=False, default_value=0, 313 | message_type=None, enum_type=None, containing_type=None, 314 | is_extension=False, extension_scope=None, 315 | serialized_options=None, file=DESCRIPTOR), 316 | _descriptor.FieldDescriptor( 317 | name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2, 318 | number=3, type=13, cpp_type=3, label=1, 319 | has_default_value=False, default_value=0, 320 | message_type=None, enum_type=None, containing_type=None, 321 | is_extension=False, extension_scope=None, 322 | serialized_options=None, file=DESCRIPTOR), 323 | _descriptor.FieldDescriptor( 324 | name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3, 325 | number=4, type=11, cpp_type=10, label=3, 326 | has_default_value=False, default_value=[], 327 | message_type=None, enum_type=None, containing_type=None, 328 | is_extension=False, extension_scope=None, 329 | serialized_options=None, file=DESCRIPTOR), 330 | _descriptor.FieldDescriptor( 331 | name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4, 332 | number=5, type=4, cpp_type=4, label=1, 333 | has_default_value=False, default_value=0, 334 | message_type=None, enum_type=None, 
containing_type=None, 335 | is_extension=False, extension_scope=None, 336 | serialized_options=None, file=DESCRIPTOR), 337 | _descriptor.FieldDescriptor( 338 | name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5, 339 | number=6, type=11, cpp_type=10, label=3, 340 | has_default_value=False, default_value=[], 341 | message_type=None, enum_type=None, containing_type=None, 342 | is_extension=False, extension_scope=None, 343 | serialized_options=None, file=DESCRIPTOR), 344 | _descriptor.FieldDescriptor( 345 | name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6, 346 | number=7, type=4, cpp_type=4, label=1, 347 | has_default_value=False, default_value=0, 348 | message_type=None, enum_type=None, containing_type=None, 349 | is_extension=False, extension_scope=None, 350 | serialized_options=None, file=DESCRIPTOR), 351 | _descriptor.FieldDescriptor( 352 | name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7, 353 | number=8, type=12, cpp_type=9, label=1, 354 | has_default_value=False, default_value=b"", 355 | message_type=None, enum_type=None, containing_type=None, 356 | is_extension=False, extension_scope=None, 357 | serialized_options=None, file=DESCRIPTOR), 358 | _descriptor.FieldDescriptor( 359 | name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8, 360 | number=9, type=12, cpp_type=9, label=1, 361 | has_default_value=False, default_value=b"", 362 | message_type=None, enum_type=None, containing_type=None, 363 | is_extension=False, extension_scope=None, 364 | serialized_options=None, file=DESCRIPTOR), 365 | ], 366 | extensions=[ 367 | ], 368 | nested_types=[], 369 | enum_types=[ 370 | _INSTALLOPERATION_TYPE, 371 | ], 372 | serialized_options=None, 373 | is_extendable=False, 374 | syntax='proto2', 375 | extension_ranges=[], 376 | oneofs=[ 377 | ], 378 | serialized_start=391, 379 | serialized_end=877, 380 | ) 381 | 382 | 383 | _PARTITIONUPDATE = _descriptor.Descriptor( 384 | name='PartitionUpdate', 385 | full_name='chromeos_update_engine.PartitionUpdate', 386 | filename=None, 387 | file=DESCRIPTOR, 388 | containing_type=None, 389 | fields=[ 390 | _descriptor.FieldDescriptor( 391 | name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0, 392 | number=1, type=9, cpp_type=9, label=2, 393 | has_default_value=False, default_value=b"".decode('utf-8'), 394 | message_type=None, enum_type=None, containing_type=None, 395 | is_extension=False, extension_scope=None, 396 | serialized_options=None, file=DESCRIPTOR), 397 | _descriptor.FieldDescriptor( 398 | name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1, 399 | number=2, type=8, cpp_type=7, label=1, 400 | has_default_value=False, default_value=False, 401 | message_type=None, enum_type=None, containing_type=None, 402 | is_extension=False, extension_scope=None, 403 | serialized_options=None, file=DESCRIPTOR), 404 | _descriptor.FieldDescriptor( 405 | name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2, 406 | number=3, type=9, cpp_type=9, label=1, 407 | has_default_value=False, default_value=b"".decode('utf-8'), 408 | message_type=None, enum_type=None, containing_type=None, 409 | is_extension=False, extension_scope=None, 410 | serialized_options=None, file=DESCRIPTOR), 411 | _descriptor.FieldDescriptor( 412 | name='filesystem_type', 
full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3, 413 | number=4, type=9, cpp_type=9, label=1, 414 | has_default_value=False, default_value=b"".decode('utf-8'), 415 | message_type=None, enum_type=None, containing_type=None, 416 | is_extension=False, extension_scope=None, 417 | serialized_options=None, file=DESCRIPTOR), 418 | _descriptor.FieldDescriptor( 419 | name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4, 420 | number=5, type=11, cpp_type=10, label=3, 421 | has_default_value=False, default_value=[], 422 | message_type=None, enum_type=None, containing_type=None, 423 | is_extension=False, extension_scope=None, 424 | serialized_options=None, file=DESCRIPTOR), 425 | _descriptor.FieldDescriptor( 426 | name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5, 427 | number=6, type=11, cpp_type=10, label=1, 428 | has_default_value=False, default_value=None, 429 | message_type=None, enum_type=None, containing_type=None, 430 | is_extension=False, extension_scope=None, 431 | serialized_options=None, file=DESCRIPTOR), 432 | _descriptor.FieldDescriptor( 433 | name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6, 434 | number=7, type=11, cpp_type=10, label=1, 435 | has_default_value=False, default_value=None, 436 | message_type=None, enum_type=None, containing_type=None, 437 | is_extension=False, extension_scope=None, 438 | serialized_options=None, file=DESCRIPTOR), 439 | _descriptor.FieldDescriptor( 440 | name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7, 441 | number=8, type=11, cpp_type=10, label=3, 442 | has_default_value=False, default_value=[], 443 | message_type=None, enum_type=None, containing_type=None, 444 | is_extension=False, extension_scope=None, 445 | serialized_options=None, file=DESCRIPTOR), 446 | _descriptor.FieldDescriptor( 447 | name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8, 448 | number=9, type=8, cpp_type=7, label=1, 449 | has_default_value=False, default_value=False, 450 | message_type=None, enum_type=None, containing_type=None, 451 | is_extension=False, extension_scope=None, 452 | serialized_options=None, file=DESCRIPTOR), 453 | _descriptor.FieldDescriptor( 454 | name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9, 455 | number=10, type=11, cpp_type=10, label=1, 456 | has_default_value=False, default_value=None, 457 | message_type=None, enum_type=None, containing_type=None, 458 | is_extension=False, extension_scope=None, 459 | serialized_options=None, file=DESCRIPTOR), 460 | _descriptor.FieldDescriptor( 461 | name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10, 462 | number=11, type=11, cpp_type=10, label=1, 463 | has_default_value=False, default_value=None, 464 | message_type=None, enum_type=None, containing_type=None, 465 | is_extension=False, extension_scope=None, 466 | serialized_options=None, file=DESCRIPTOR), 467 | _descriptor.FieldDescriptor( 468 | name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11, 469 | number=12, type=9, cpp_type=9, label=1, 470 | has_default_value=False, default_value=b"".decode('utf-8'), 471 | message_type=None, enum_type=None, containing_type=None, 472 | is_extension=False, 
extension_scope=None, 473 | serialized_options=None, file=DESCRIPTOR), 474 | _descriptor.FieldDescriptor( 475 | name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12, 476 | number=13, type=12, cpp_type=9, label=1, 477 | has_default_value=False, default_value=b"", 478 | message_type=None, enum_type=None, containing_type=None, 479 | is_extension=False, extension_scope=None, 480 | serialized_options=None, file=DESCRIPTOR), 481 | _descriptor.FieldDescriptor( 482 | name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13, 483 | number=14, type=11, cpp_type=10, label=1, 484 | has_default_value=False, default_value=None, 485 | message_type=None, enum_type=None, containing_type=None, 486 | is_extension=False, extension_scope=None, 487 | serialized_options=None, file=DESCRIPTOR), 488 | _descriptor.FieldDescriptor( 489 | name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14, 490 | number=15, type=11, cpp_type=10, label=1, 491 | has_default_value=False, default_value=None, 492 | message_type=None, enum_type=None, containing_type=None, 493 | is_extension=False, extension_scope=None, 494 | serialized_options=None, file=DESCRIPTOR), 495 | _descriptor.FieldDescriptor( 496 | name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15, 497 | number=16, type=13, cpp_type=3, label=1, 498 | has_default_value=True, default_value=2, 499 | message_type=None, enum_type=None, containing_type=None, 500 | is_extension=False, extension_scope=None, 501 | serialized_options=None, file=DESCRIPTOR), 502 | ], 503 | extensions=[ 504 | ], 505 | nested_types=[], 506 | enum_types=[ 507 | ], 508 | serialized_options=None, 509 | is_extendable=False, 510 | syntax='proto2', 511 | extension_ranges=[], 512 | oneofs=[ 513 | ], 514 | serialized_start=880, 515 | serialized_end=1607, 516 | ) 517 | 518 | 519 | _DYNAMICPARTITIONGROUP = _descriptor.Descriptor( 520 | name='DynamicPartitionGroup', 521 | full_name='chromeos_update_engine.DynamicPartitionGroup', 522 | filename=None, 523 | file=DESCRIPTOR, 524 | containing_type=None, 525 | fields=[ 526 | _descriptor.FieldDescriptor( 527 | name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0, 528 | number=1, type=9, cpp_type=9, label=2, 529 | has_default_value=False, default_value=b"".decode('utf-8'), 530 | message_type=None, enum_type=None, containing_type=None, 531 | is_extension=False, extension_scope=None, 532 | serialized_options=None, file=DESCRIPTOR), 533 | _descriptor.FieldDescriptor( 534 | name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1, 535 | number=2, type=4, cpp_type=4, label=1, 536 | has_default_value=False, default_value=0, 537 | message_type=None, enum_type=None, containing_type=None, 538 | is_extension=False, extension_scope=None, 539 | serialized_options=None, file=DESCRIPTOR), 540 | _descriptor.FieldDescriptor( 541 | name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2, 542 | number=3, type=9, cpp_type=9, label=3, 543 | has_default_value=False, default_value=[], 544 | message_type=None, enum_type=None, containing_type=None, 545 | is_extension=False, extension_scope=None, 546 | serialized_options=None, file=DESCRIPTOR), 547 | ], 548 | extensions=[ 549 | ], 550 | nested_types=[], 551 | enum_types=[ 552 | ], 553 | serialized_options=None, 554 | is_extendable=False, 555 | syntax='proto2', 556 | 
extension_ranges=[], 557 | oneofs=[ 558 | ], 559 | serialized_start=1609, 560 | serialized_end=1685, 561 | ) 562 | 563 | 564 | _DYNAMICPARTITIONMETADATA = _descriptor.Descriptor( 565 | name='DynamicPartitionMetadata', 566 | full_name='chromeos_update_engine.DynamicPartitionMetadata', 567 | filename=None, 568 | file=DESCRIPTOR, 569 | containing_type=None, 570 | fields=[ 571 | _descriptor.FieldDescriptor( 572 | name='groups', full_name='chromeos_update_engine.DynamicPartitionMetadata.groups', index=0, 573 | number=1, type=11, cpp_type=10, label=3, 574 | has_default_value=False, default_value=[], 575 | message_type=None, enum_type=None, containing_type=None, 576 | is_extension=False, extension_scope=None, 577 | serialized_options=None, file=DESCRIPTOR), 578 | ], 579 | extensions=[ 580 | ], 581 | nested_types=[], 582 | enum_types=[ 583 | ], 584 | serialized_options=None, 585 | is_extendable=False, 586 | syntax='proto2', 587 | extension_ranges=[], 588 | oneofs=[ 589 | ], 590 | serialized_start=1687, 591 | serialized_end=1776, 592 | ) 593 | 594 | 595 | _DELTAARCHIVEMANIFEST = _descriptor.Descriptor( 596 | name='DeltaArchiveManifest', 597 | full_name='chromeos_update_engine.DeltaArchiveManifest', 598 | filename=None, 599 | file=DESCRIPTOR, 600 | containing_type=None, 601 | fields=[ 602 | _descriptor.FieldDescriptor( 603 | name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0, 604 | number=1, type=11, cpp_type=10, label=3, 605 | has_default_value=False, default_value=[], 606 | message_type=None, enum_type=None, containing_type=None, 607 | is_extension=False, extension_scope=None, 608 | serialized_options=None, file=DESCRIPTOR), 609 | _descriptor.FieldDescriptor( 610 | name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, 611 | number=2, type=11, cpp_type=10, label=3, 612 | has_default_value=False, default_value=[], 613 | message_type=None, enum_type=None, containing_type=None, 614 | is_extension=False, extension_scope=None, 615 | serialized_options=None, file=DESCRIPTOR), 616 | _descriptor.FieldDescriptor( 617 | name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, 618 | number=3, type=13, cpp_type=3, label=1, 619 | has_default_value=True, default_value=4096, 620 | message_type=None, enum_type=None, containing_type=None, 621 | is_extension=False, extension_scope=None, 622 | serialized_options=None, file=DESCRIPTOR), 623 | _descriptor.FieldDescriptor( 624 | name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3, 625 | number=4, type=4, cpp_type=4, label=1, 626 | has_default_value=False, default_value=0, 627 | message_type=None, enum_type=None, containing_type=None, 628 | is_extension=False, extension_scope=None, 629 | serialized_options=None, file=DESCRIPTOR), 630 | _descriptor.FieldDescriptor( 631 | name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4, 632 | number=5, type=4, cpp_type=4, label=1, 633 | has_default_value=False, default_value=0, 634 | message_type=None, enum_type=None, containing_type=None, 635 | is_extension=False, extension_scope=None, 636 | serialized_options=None, file=DESCRIPTOR), 637 | _descriptor.FieldDescriptor( 638 | name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5, 639 | number=6, type=11, cpp_type=10, label=1, 640 | has_default_value=False, 
default_value=None, 641 | message_type=None, enum_type=None, containing_type=None, 642 | is_extension=False, extension_scope=None, 643 | serialized_options=None, file=DESCRIPTOR), 644 | _descriptor.FieldDescriptor( 645 | name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, 646 | number=7, type=11, cpp_type=10, label=1, 647 | has_default_value=False, default_value=None, 648 | message_type=None, enum_type=None, containing_type=None, 649 | is_extension=False, extension_scope=None, 650 | serialized_options=None, file=DESCRIPTOR), 651 | _descriptor.FieldDescriptor( 652 | name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7, 653 | number=8, type=11, cpp_type=10, label=1, 654 | has_default_value=False, default_value=None, 655 | message_type=None, enum_type=None, containing_type=None, 656 | is_extension=False, extension_scope=None, 657 | serialized_options=None, file=DESCRIPTOR), 658 | _descriptor.FieldDescriptor( 659 | name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, 660 | number=9, type=11, cpp_type=10, label=1, 661 | has_default_value=False, default_value=None, 662 | message_type=None, enum_type=None, containing_type=None, 663 | is_extension=False, extension_scope=None, 664 | serialized_options=None, file=DESCRIPTOR), 665 | _descriptor.FieldDescriptor( 666 | name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, 667 | number=10, type=11, cpp_type=10, label=1, 668 | has_default_value=False, default_value=None, 669 | message_type=None, enum_type=None, containing_type=None, 670 | is_extension=False, extension_scope=None, 671 | serialized_options=None, file=DESCRIPTOR), 672 | _descriptor.FieldDescriptor( 673 | name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10, 674 | number=11, type=11, cpp_type=10, label=1, 675 | has_default_value=False, default_value=None, 676 | message_type=None, enum_type=None, containing_type=None, 677 | is_extension=False, extension_scope=None, 678 | serialized_options=None, file=DESCRIPTOR), 679 | _descriptor.FieldDescriptor( 680 | name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11, 681 | number=12, type=13, cpp_type=3, label=1, 682 | has_default_value=True, default_value=0, 683 | message_type=None, enum_type=None, containing_type=None, 684 | is_extension=False, extension_scope=None, 685 | serialized_options=None, file=DESCRIPTOR), 686 | _descriptor.FieldDescriptor( 687 | name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12, 688 | number=13, type=11, cpp_type=10, label=3, 689 | has_default_value=False, default_value=[], 690 | message_type=None, enum_type=None, containing_type=None, 691 | is_extension=False, extension_scope=None, 692 | serialized_options=None, file=DESCRIPTOR), 693 | _descriptor.FieldDescriptor( 694 | name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13, 695 | number=14, type=3, cpp_type=2, label=1, 696 | has_default_value=False, default_value=0, 697 | message_type=None, enum_type=None, containing_type=None, 698 | is_extension=False, extension_scope=None, 699 | serialized_options=None, file=DESCRIPTOR), 700 | ], 701 | extensions=[ 702 | ], 703 | nested_types=[], 704 | enum_types=[ 705 | ], 706 | serialized_options=None, 707 | is_extendable=False, 708 | 
syntax='proto2', 709 | extension_ranges=[], 710 | oneofs=[ 711 | ], 712 | serialized_start=1779, 713 | serialized_end=2510, 714 | ) 715 | 716 | _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES 717 | _SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE 718 | _INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE 719 | _INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT 720 | _INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT 721 | _INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION 722 | _PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE 723 | _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO 724 | _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO 725 | _PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION 726 | _PARTITIONUPDATE.fields_by_name['hash_tree_data_extent'].message_type = _EXTENT 727 | _PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT 728 | _PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT 729 | _PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT 730 | _DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP 731 | _DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION 732 | _DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION 733 | _DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO 734 | _DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO 735 | _DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO 736 | _DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO 737 | _DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO 738 | _DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO 739 | _DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE 740 | DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT 741 | DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES 742 | DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO 743 | DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO 744 | DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION 745 | DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE 746 | DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP 747 | DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA 748 | DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST 749 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 750 | 751 | Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), { 752 | 'DESCRIPTOR' : _EXTENT, 753 | '__module__' : 'update_metadata_pb2' 754 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent) 755 | }) 756 | _sym_db.RegisterMessage(Extent) 757 | 758 | Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), { 759 | 760 | 'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), { 761 | 'DESCRIPTOR' : _SIGNATURES_SIGNATURE, 762 | '__module__' : 'update_metadata_pb2' 763 | # 
@@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature) 764 | }) 765 | , 766 | 'DESCRIPTOR' : _SIGNATURES, 767 | '__module__' : 'update_metadata_pb2' 768 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures) 769 | }) 770 | _sym_db.RegisterMessage(Signatures) 771 | _sym_db.RegisterMessage(Signatures.Signature) 772 | 773 | PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), { 774 | 'DESCRIPTOR' : _PARTITIONINFO, 775 | '__module__' : 'update_metadata_pb2' 776 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo) 777 | }) 778 | _sym_db.RegisterMessage(PartitionInfo) 779 | 780 | ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), { 781 | 'DESCRIPTOR' : _IMAGEINFO, 782 | '__module__' : 'update_metadata_pb2' 783 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo) 784 | }) 785 | _sym_db.RegisterMessage(ImageInfo) 786 | 787 | InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), { 788 | 'DESCRIPTOR' : _INSTALLOPERATION, 789 | '__module__' : 'update_metadata_pb2' 790 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation) 791 | }) 792 | _sym_db.RegisterMessage(InstallOperation) 793 | 794 | PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), { 795 | 'DESCRIPTOR' : _PARTITIONUPDATE, 796 | '__module__' : 'update_metadata_pb2' 797 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate) 798 | }) 799 | _sym_db.RegisterMessage(PartitionUpdate) 800 | 801 | DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), { 802 | 'DESCRIPTOR' : _DYNAMICPARTITIONGROUP, 803 | '__module__' : 'update_metadata_pb2' 804 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup) 805 | }) 806 | _sym_db.RegisterMessage(DynamicPartitionGroup) 807 | 808 | DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), { 809 | 'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA, 810 | '__module__' : 'update_metadata_pb2' 811 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata) 812 | }) 813 | _sym_db.RegisterMessage(DynamicPartitionMetadata) 814 | 815 | DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), { 816 | 'DESCRIPTOR' : _DELTAARCHIVEMANIFEST, 817 | '__module__' : 'update_metadata_pb2' 818 | # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest) 819 | }) 820 | _sym_db.RegisterMessage(DeltaArchiveManifest) 821 | 822 | 823 | DESCRIPTOR._options = None 824 | # @@protoc_insertion_point(module_scope) 825 | --------------------------------------------------------------------------------
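For orientation, here is a minimal sketch of driving the Payload API documented above (Init, Describe, IsDelta, Apply); the payload path and partition maps are illustrative assumptions, not files shipped with the repository:

    import update_payload

    with open('payload.bin', 'rb') as f:  # hypothetical input path
        payload = update_payload.Payload(f)
        payload.Init()      # parses header, manifest and metadata signature; required first
        payload.Describe()  # prints embedded old/new image info, if present
        if payload.IsDelta():
            # Delta payloads patch source images, so a source partition map is needed.
            payload.Apply({'boot': 'output/boot'}, old_parts={'boot': 'old/boot'})
        else:
            payload.Apply({'boot': 'output/boot'})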