#!/usr/bin/env python

# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Validate a given (signed) target_files.zip.

It performs the following checks to assert the integrity of the input zip.

 - It verifies the file consistency between the ones in IMAGES/system.img (read
   via IMAGES/system.map) and the ones under the unpacked folder of SYSTEM/.
   The same check also applies to the vendor image if present.

 - It verifies the install-recovery script consistency, by comparing the
   checksums in the script against the ones of IMAGES/{boot,recovery}.img.

 - It verifies the signed Verified Boot related images, for both Verified
   Boot 1.0 and 2.0 (aka AVB).
"""

import argparse
import filecmp
import logging
import os.path
import re
import zipfile

from hashlib import sha1
from common import IsSparseImage

import common
import rangelib


def _ReadFile(file_name, unpacked_name, round_up=False):
  """Constructs and returns a File object. Rounds up its size if needed."""
  assert os.path.exists(unpacked_name)
  with open(unpacked_name, 'rb') as f:
    file_data = f.read()
  file_size = len(file_data)
  if round_up:
    file_size_rounded_up = common.RoundUpTo4K(file_size)
    file_data += b'\0' * (file_size_rounded_up - file_size)
  return common.File(file_name, file_data)


def ValidateFileAgainstSha1(input_tmp, file_name, file_path, expected_sha1):
  """Checks that the file has the expected SHA-1."""

  logging.info('Validating the SHA-1 of %s', file_name)
  unpacked_name = os.path.join(input_tmp, file_path)
  assert os.path.exists(unpacked_name)
  actual_sha1 = _ReadFile(file_name, unpacked_name, False).sha1
  assert actual_sha1 == expected_sha1, \
      'SHA-1 mismatches for {}. actual {}, expected {}'.format(
          file_name, actual_sha1, expected_sha1)


def ValidateFileConsistency(input_zip, input_tmp, info_dict):
  """Compares the files from image files and unpacked folders."""

  def CheckAllFiles(which):
    logging.info('Checking %s image.', which)
    path = os.path.join(input_tmp, "IMAGES", which + ".img")
    if not IsSparseImage(path):
      logging.info("%s is a non-sparse image", which)
      image = common.GetNonSparseImage(which, input_tmp)
    else:
      logging.info("%s is a sparse image", which)
      # Allow having shared blocks when loading the sparse image, because
      # allowing that doesn't affect the checks below (we will have all the
      # blocks on file, unless they are skipped due to the holes).
      image = common.GetSparseImage(which, input_tmp, input_zip, True)
    prefix = '/' + which
    for entry in image.file_map:
      # Skip entries like '__NONZERO-0'.
      if not entry.startswith(prefix):
        continue

      # Read the blocks where the file resides. Note that they will contain
      # the bytes past the file length, which is expected to be padded with
      # '\0's.
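      # As an illustration (sizes are made up): a 10000-byte file occupies
      # three 4096-byte blocks (12288 bytes), so the unpacked copy read by
      # _ReadFile() below is padded with 2288 trailing '\0' bytes before
      # hashing, to match what is stored in the image blocks.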
      ranges = image.file_map[entry]

      # Use the original RangeSet if applicable, which includes the shared
      # blocks. This needs to happen before checking the monotonicity flag.
      if ranges.extra.get('uses_shared_blocks'):
        file_ranges = ranges.extra['uses_shared_blocks']
      else:
        file_ranges = ranges

      incomplete = file_ranges.extra.get('incomplete', False)
      if incomplete:
        logging.warning('Skipping %s that has incomplete block list', entry)
        continue

      # If the file has non-monotonic ranges, read each range in order.
      if not file_ranges.monotonic:
        h = sha1()
        for file_range in file_ranges.extra['text_str'].split(' '):
          for data in image.ReadRangeSet(rangelib.RangeSet(file_range)):
            h.update(data)
        blocks_sha1 = h.hexdigest()
      else:
        blocks_sha1 = image.RangeSha1(file_ranges)

      # The filename under the unpacked directory, such as SYSTEM/bin/sh.
      unpacked_name = os.path.join(
          input_tmp, which.upper(), entry[(len(prefix) + 1):])
      unpacked_file = _ReadFile(entry, unpacked_name, True)
      file_sha1 = unpacked_file.sha1
      assert blocks_sha1 == file_sha1, \
          'file: %s, range: %s, blocks_sha1: %s, file_sha1: %s' % (
              entry, file_ranges, blocks_sha1, file_sha1)

  logging.info('Validating file consistency.')

  # TODO(b/79617342): Validate non-sparse images.
  if info_dict.get('extfs_sparse_flag') != '-s':
    logging.warning('Skipped due to target using non-sparse images')
    return

  # Verify IMAGES/system.img if applicable.
  # Some targets, e.g., gki_arm64, gki_x86_64, etc., are system.img-less.
  if 'IMAGES/system.img' in input_zip.namelist():
    CheckAllFiles('system')

  # Verify IMAGES/vendor.img if applicable.
  if 'VENDOR/' in input_zip.namelist():
    CheckAllFiles('vendor')

  # Not checking IMAGES/system_other.img since it doesn't have the map file.


def ValidateInstallRecoveryScript(input_tmp, info_dict):
  """Validates the SHA-1 embedded in install-recovery.sh.

  install-recovery.sh is written in common.py and has the following format:

  1. full recovery:
  ...
  if ! applypatch --check type:device:size:sha1; then
    applypatch --flash /vendor/etc/recovery.img \\
        type:device:size:sha1 && \\
  ...

  2. recovery from boot:
  ...
  if ! applypatch --check type:recovery_device:recovery_size:recovery_sha1; then
    applypatch [--bonus bonus_args] \\
        --patch /vendor/recovery-from-boot.p \\
        --source type:boot_device:boot_size:boot_sha1 \\
        --target type:recovery_device:recovery_size:recovery_sha1 && \\
  ...

  For full recovery, we want to calculate the SHA-1 of /vendor/etc/recovery.img
  and compare it against the one embedded in the script. For recovery from
  boot, we want to check the SHA-1 for both recovery.img and boot.img under
  IMAGES/.
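
  As an illustration (the device path, size and SHA-1 below are made up), a
  check line such as

    if ! applypatch --check EMMC:/dev/block/recovery:1048576:5f9c..62e3; then

  is parsed into the partition string EMMC:/dev/block/recovery:1048576:5f9c..62e3,
  whose last field is the SHA-1 that ValidateFileAgainstSha1() compares against
  the corresponding image.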
  """

  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    script_path = 'VENDOR/bin/install-recovery.sh'
    recovery_img = 'VENDOR/etc/recovery.img'
  else:
    script_path = 'SYSTEM/vendor/bin/install-recovery.sh'
    recovery_img = 'SYSTEM/vendor/etc/recovery.img'

  if not os.path.exists(os.path.join(input_tmp, script_path)):
    logging.info('%s does not exist in input_tmp', script_path)
    return

  logging.info('Checking %s', script_path)
  with open(os.path.join(input_tmp, script_path), 'r') as script:
    lines = script.read().strip().split('\n')
  assert len(lines) >= 10
  check_cmd = re.search(r'if ! applypatch --check (\w+:.+:\w+:\w+);',
                        lines[1].strip())
  check_partition = check_cmd.group(1)
  assert len(check_partition.split(':')) == 4

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  if full_recovery_image:
    assert len(lines) == 10, "Invalid line count: {}".format(lines)

    # Expect something like "EMMC:/dev/block/recovery:28:5f9c..62e3".
    target = re.search(r'--target (.+) &&', lines[4].strip())
    assert target is not None, \
        "Failed to parse target line \"{}\"".format(lines[4])
    flash_partition = target.group(1)

    # Check that we have the same recovery target in the check and flash
    # commands.
    assert check_partition == flash_partition, \
        "Mismatching targets: {} vs {}".format(
            check_partition, flash_partition)

    # Validate the SHA-1 of the recovery image.
    recovery_sha1 = flash_partition.split(':')[3]
    ValidateFileAgainstSha1(
        input_tmp, 'recovery.img', recovery_img, recovery_sha1)
  else:
    assert len(lines) == 11, "Invalid line count: {}".format(lines)

    # --source boot_type:boot_device:boot_size:boot_sha1
    source = re.search(r'--source (\w+:.+:\w+:\w+) \\', lines[4].strip())
    assert source is not None, \
        "Failed to parse source line \"{}\"".format(lines[4])

    source_partition = source.group(1)
    source_info = source_partition.split(':')
    assert len(source_info) == 4, \
        "Invalid source partition: {}".format(source_partition)
    ValidateFileAgainstSha1(input_tmp, file_name='boot.img',
                            file_path='IMAGES/boot.img',
                            expected_sha1=source_info[3])

    # --target recovery_type:recovery_device:recovery_size:recovery_sha1
    target = re.search(r'--target (\w+:.+:\w+:\w+) && \\', lines[5].strip())
    assert target is not None, \
        "Failed to parse target line \"{}\"".format(lines[5])
    target_partition = target.group(1)

    # Check that we have the same recovery target in the check and patch
    # commands.
    assert check_partition == target_partition, \
        "Mismatching targets: {} vs {}".format(
            check_partition, target_partition)

    recovery_info = target_partition.split(':')
    assert len(recovery_info) == 4, \
        "Invalid target partition: {}".format(target_partition)
    ValidateFileAgainstSha1(input_tmp, file_name='recovery.img',
                            file_path='IMAGES/recovery.img',
                            expected_sha1=recovery_info[3])

  logging.info('Done checking %s', script_path)


# Symlink files in `src` to `dst`, if the files do not
# already exist in the `dst` directory.
def symlinkIfNotExists(src, dst):
  if not os.path.isdir(src):
    return
  for filename in os.listdir(src):
    if os.path.exists(os.path.join(dst, filename)):
      continue
    os.symlink(os.path.join(src, filename), os.path.join(dst, filename))


def ValidatePartitionFingerprints(input_tmp, info_dict):
  build_info = common.BuildInfo(info_dict)
  # Expected format:
  # Prop: com.android.build.vendor.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
  # Prop: com.android.build.vendor_boot.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
  p = re.compile(
      r"Prop: com.android.build.(?P<partition>\w+).fingerprint -> '(?P<fingerprint>[\w\/:\.-]+)'")
  for vbmeta_partition in ["vbmeta", "vbmeta_system"]:
    image = os.path.join(input_tmp, "IMAGES", vbmeta_partition + ".img")
    if not os.path.exists(image):
      assert vbmeta_partition != "vbmeta", \
          "{} is a required partition for AVB.".format(vbmeta_partition)
      logging.info("vb partition %s not present, skipping", vbmeta_partition)
      continue

    output = common.RunAndCheckOutput(
        [info_dict["avb_avbtool"], "info_image", "--image", image])
    matches = p.findall(output)
    for (partition, fingerprint) in matches:
      actual_fingerprint = build_info.GetPartitionFingerprint(partition)
      if actual_fingerprint is None:
        logging.warning(
            "Failed to get fingerprint for partition %s", partition)
        continue
      assert fingerprint == actual_fingerprint, \
          "Fingerprint mismatch for partition {}, expected: {} actual: {}".format(
              partition, fingerprint, actual_fingerprint)


def ValidateVerifiedBootImages(input_tmp, info_dict, options):
  """Validates the Verified Boot related images.

  For Verified Boot 1.0, it verifies the signatures of the bootable images
  (boot/recovery etc), as well as the dm-verity metadata in system images
  (system/vendor/product). For Verified Boot 2.0, it calls avbtool to verify
  vbmeta.img, which in turn verifies all the descriptors listed in vbmeta.

  Args:
    input_tmp: The top-level directory of unpacked target-files.zip.
    info_dict: The loaded info dict.
    options: A dict that contains the user-supplied public keys to be used for
        image verification. In particular, 'verity_key' is used to verify the
        bootable images in VB 1.0, and the vbmeta image in VB 2.0, where
        applicable. 'verity_key_mincrypt' will be used to verify the system
        images in VB 1.0.

  Raises:
    AssertionError: On any verification failure.
  """
  # See bug 159299583.
  # After commit 5277d1015, some images (e.g. acpio.img and tos.img) are no
  # longer copied from RADIO to the IMAGES folder. But avbtool assumes that
  # images are in the IMAGES folder. So we symlink them.
  symlinkIfNotExists(os.path.join(input_tmp, "RADIO"),
                     os.path.join(input_tmp, "IMAGES"))
  # Verified Boot 1.0 (images signed with boot_signer and verity_signer).
  if info_dict.get('boot_signer') == 'true':
    logging.info('Verifying Verified Boot images...')

    # Verify the boot/recovery images (signed with boot_signer), against the
    # given X.509 encoded pubkey (or falling back to the one in the info_dict
    # if none given).
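    # For illustration only (the paths below are hypothetical), the
    # verification command assembled in the loop below has the shape:
    #   boot_signer -verify IMAGES/boot.img -certificate verity.x509.pem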
    verity_key = options['verity_key']
    if verity_key is None:
      verity_key = info_dict['verity_key'] + '.x509.pem'
    for image in ('boot.img', 'recovery.img', 'recovery-two-step.img'):
      if image == 'recovery-two-step.img':
        image_path = os.path.join(input_tmp, 'OTA', image)
      else:
        image_path = os.path.join(input_tmp, 'IMAGES', image)
      if not os.path.exists(image_path):
        continue

      cmd = ['boot_signer', '-verify', image_path, '-certificate', verity_key]
      proc = common.Run(cmd)
      stdoutdata, _ = proc.communicate()
      assert proc.returncode == 0, \
          'Failed to verify {} with boot_signer:\n{}'.format(image, stdoutdata)
      logging.info(
          'Verified %s with boot_signer (key: %s):\n%s', image, verity_key,
          stdoutdata.rstrip())

  # Verify the verity signed system images in Verified Boot 1.0. Note that we
  # are not using 'elif' here, since 'boot_signer' and 'verity' are not bundled
  # in VB 1.0.
  if info_dict.get('verity') == 'true':
    # First verify that the verity key is built into the root image (regardless
    # of system-as-root).
    verity_key_mincrypt = os.path.join(input_tmp, 'ROOT', 'verity_key')
    assert os.path.exists(verity_key_mincrypt), 'Missing verity_key'

    # Verify that /verity_key matches the one given via command line, if any.
    if options['verity_key_mincrypt'] is None:
      logging.warning(
          'Skipped checking the content of /verity_key, as the key file was '
          'not provided. Use --verity_key_mincrypt to specify.')
    else:
      expected_key = options['verity_key_mincrypt']
      assert filecmp.cmp(expected_key, verity_key_mincrypt, shallow=False), \
          "Mismatching mincrypt verity key files"
      logging.info('Verified the content of /verity_key')

    # For devices with a separate ramdisk (i.e. non-system-as-root), there must
    # be a copy in the ramdisk.
    if info_dict.get("system_root_image") != "true":
      verity_key_ramdisk = os.path.join(
          input_tmp, 'BOOT', 'RAMDISK', 'verity_key')
      assert os.path.exists(
          verity_key_ramdisk), 'Missing verity_key in ramdisk'

      assert filecmp.cmp(
          verity_key_mincrypt, verity_key_ramdisk, shallow=False), \
          'Mismatching verity_key files in root and ramdisk'
      logging.info('Verified the content of /verity_key in ramdisk')

    # Then verify the verity signed system/vendor/product images, against the
    # verity pubkey in mincrypt format.
    for image in ('system.img', 'vendor.img', 'product.img'):
      image_path = os.path.join(input_tmp, 'IMAGES', image)

      # We are not checking whether the image is actually enabled via info_dict
      # (e.g. 'system_verity_block_device=...'), because it's most likely a bug
      # to skip signing some of the images in a signed target-files.zip while
      # having the top-level verity flag enabled.
      if not os.path.exists(image_path):
        continue

      cmd = ['verity_verifier', image_path, '-mincrypt', verity_key_mincrypt]
      proc = common.Run(cmd)
      stdoutdata, _ = proc.communicate()
      assert proc.returncode == 0, \
          'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
              image, verity_key_mincrypt, stdoutdata)
      logging.info(
          'Verified %s with verity_verifier (key: %s):\n%s', image,
          verity_key_mincrypt, stdoutdata.rstrip())

  # Handle the case of Verified Boot 2.0 (AVB).
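  # For illustration only (the key file and chained partition below are
  # hypothetical), the command assembled in this block has the shape:
  #   avbtool verify_image --image IMAGES/vbmeta.img --key vbmeta_key.pem \
  #       --follow_chain_partitions \
  #       --expected_chain_partition vbmeta_system:1:vbmeta_system.avbpubkey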
  if info_dict.get("avb_building_vbmeta_image") == "true":
    logging.info('Verifying Verified Boot 2.0 (AVB) images...')

    key = options['verity_key']
    if key is None:
      key = info_dict['avb_vbmeta_key_path']

    ValidatePartitionFingerprints(input_tmp, info_dict)

    # avbtool verifies all the images that have descriptors listed in vbmeta.
    # Using `--follow_chain_partitions` so it would additionally verify chained
    # vbmeta partitions (e.g. vbmeta_system).
    image = os.path.join(input_tmp, 'IMAGES', 'vbmeta.img')
    cmd = [info_dict['avb_avbtool'], 'verify_image', '--image', image,
           '--key', key, '--follow_chain_partitions']

    # Custom images.
    custom_partitions = info_dict.get(
        "avb_custom_images_partition_list", "").strip().split()

    # Append the args for chained partitions, if any.
    for partition in (common.AVB_PARTITIONS + common.AVB_VBMETA_PARTITIONS +
                      tuple(custom_partitions)):
      key_name = 'avb_' + partition + '_key_path'
      if info_dict.get(key_name) is not None:
        if info_dict.get('ab_update') != 'true' and partition == 'recovery':
          continue

        # Use the key file from the command line if specified; otherwise fall
        # back to the one in the info dict.
        key_file = options.get(key_name, info_dict[key_name])
        chained_partition_arg = common.GetAvbChainedPartitionArg(
            partition, info_dict, key_file)
        cmd.extend(['--expected_chain_partition', chained_partition_arg])

    # Handle a boot image with a non-default name, e.g. boot-5.4.img.
    boot_images = info_dict.get("boot_images")
    if boot_images:
      # We used the first boot image to generate the vbmeta. Rename the file
      # to boot.img so that avbtool can find it correctly.
      first_image_name = boot_images.split()[0]
      first_image_path = os.path.join(input_tmp, 'IMAGES', first_image_name)
      assert os.path.isfile(first_image_path)
      renamed_boot_image_path = os.path.join(input_tmp, 'IMAGES', 'boot.img')
      os.rename(first_image_path, renamed_boot_image_path)

    proc = common.Run(cmd)
    stdoutdata, _ = proc.communicate()
    assert proc.returncode == 0, \
        'Failed to verify {} with avbtool (key: {}):\n{}'.format(
            image, key, stdoutdata)

    logging.info(
        'Verified %s with avbtool (key: %s):\n%s', image, key,
        stdoutdata.rstrip())

    # avbtool verifies the recovery image for non-A/B devices.
    if (info_dict.get('ab_update') != 'true' and
        info_dict.get('no_recovery') != 'true'):
      image = os.path.join(input_tmp, 'IMAGES', 'recovery.img')
      key = info_dict['avb_recovery_key_path']
      cmd = [info_dict['avb_avbtool'], 'verify_image', '--image', image,
             '--key', key]
      proc = common.Run(cmd)
      stdoutdata, _ = proc.communicate()
      assert proc.returncode == 0, \
          'Failed to verify {} with avbtool (key: {}):\n{}'.format(
              image, key, stdoutdata)
      logging.info(
          'Verified %s with avbtool (key: %s):\n%s', image, key,
          stdoutdata.rstrip())


def CheckDataInconsistency(lines):
  """Parses build.prop style lines and returns the first key that is assigned
  two conflicting values, or None if there is no such key."""
  build_prop = {}
  for line in lines:
    if line.startswith("import") or line.startswith("#"):
      continue
    if "=" not in line:
      continue

    key, value = line.rstrip().split("=", 1)
    if key in build_prop:
      logging.info("Duplicated key found for %s", key)
      if value != build_prop[key]:
        logging.error("Key %s is defined twice with different values %s vs %s",
                      key, value, build_prop[key])
        return key
    build_prop[key] = value


def CheckBuildPropDuplicity(input_tmp):
  """Checks all build.prop files under directory input_tmp and raises a
  ValueError if any of them contains duplicate keys."""

  if not os.path.isdir(input_tmp):
    raise ValueError("Expect {} to be a directory".format(input_tmp))
  for name in os.listdir(input_tmp):
    if not name.isupper():
      continue
    for prop_file in ['build.prop', 'etc/build.prop']:
      path = os.path.join(input_tmp, name, prop_file)
      if not os.path.exists(path):
        continue
      logging.info("Checking %s", path)
      with open(path, 'r') as fp:
        dupKey = CheckDataInconsistency(fp.readlines())
      if dupKey:
        raise ValueError("{} contains duplicate keys for {}".format(
            path, dupKey))


def main():
  parser = argparse.ArgumentParser(
      description=__doc__,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument(
      'target_files',
      help='the input target_files.zip to be validated')
  parser.add_argument(
      '--verity_key',
      help='the verity public key to verify the bootable images (Verified '
           'Boot 1.0), or the vbmeta image (Verified Boot 2.0, aka AVB), '
           'where applicable')
  for partition in common.AVB_PARTITIONS + common.AVB_VBMETA_PARTITIONS:
    parser.add_argument(
        '--avb_' + partition + '_key_path',
        help='the public or private key in PEM format to verify the AVB '
             'chained partition of {}'.format(partition))
  parser.add_argument(
      '--verity_key_mincrypt',
      help='the verity public key in mincrypt format to verify the system '
           'images, if the target is using Verified Boot 1.0')
  args = parser.parse_args()

  # Unprovided args will have 'None' as the value.
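  # For example (the values below are illustrative), running with only
  # --verity_key set would produce something like:
  #   {'target_files': 'signed-target_files.zip',
  #    'verity_key': 'verity.x509.pem', 'verity_key_mincrypt': None,
  #    'avb_system_key_path': None, ...}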
  options = vars(args)

  logging_format = '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s'
  date_format = '%Y/%m/%d %H:%M:%S'
  logging.basicConfig(level=logging.INFO, format=logging_format,
                      datefmt=date_format)

  logging.info("Unzipping the input target_files.zip: %s", args.target_files)
  input_tmp = common.UnzipTemp(args.target_files)

  info_dict = common.LoadInfoDict(input_tmp)
  with zipfile.ZipFile(args.target_files, 'r', allowZip64=True) as input_zip:
    ValidateFileConsistency(input_zip, input_tmp, info_dict)

  CheckBuildPropDuplicity(input_tmp)

  ValidateInstallRecoveryScript(input_tmp, info_dict)

  ValidateVerifiedBootImages(input_tmp, info_dict, options)

  # TODO: Check if the OTA keys have been properly updated (the ones on /system,
  # in recovery image).

  logging.info("Done.")


if __name__ == '__main__':
  try:
    main()
  finally:
    common.Cleanup()
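
# Example invocation (file names are illustrative):
#   ./validate_target_files.py \
#       --verity_key verity.x509.pem \
#       --verity_key_mincrypt verity_key \
#       signed-target_files.zip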