1# Copyright (C) 2008 The Android Open Source Project 2# 3# Licensed under the Apache License, Version 2.0 (the "License"); 4# you may not use this file except in compliance with the License. 5# You may obtain a copy of the License at 6# 7# http://www.apache.org/licenses/LICENSE-2.0 8# 9# Unless required by applicable law or agreed to in writing, software 10# distributed under the License is distributed on an "AS IS" BASIS, 11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12# See the License for the specific language governing permissions and 13# limitations under the License. 14 15import copy 16import errno 17import getopt 18import getpass 19import imp 20import os 21import platform 22import re 23import shlex 24import shutil 25import subprocess 26import sys 27import tempfile 28import threading 29import time 30import zipfile 31 32import blockimgdiff 33import rangelib 34 35from hashlib import sha1 as sha1 36 37 38class Options(object): 39 def __init__(self): 40 platform_search_path = { 41 "linux2": "out/host/linux-x86", 42 "darwin": "out/host/darwin-x86", 43 } 44 45 self.search_path = platform_search_path.get(sys.platform, None) 46 self.signapk_path = "framework/signapk.jar" # Relative to search_path 47 self.extra_signapk_args = [] 48 self.java_path = "java" # Use the one on the path by default. 49 self.java_args = "-Xmx2048m" # JVM Args 50 self.public_key_suffix = ".x509.pem" 51 self.private_key_suffix = ".pk8" 52 # use otatools built boot_signer by default 53 self.boot_signer_path = "boot_signer" 54 self.boot_signer_args = [] 55 self.verity_signer_path = None 56 self.verity_signer_args = [] 57 self.verbose = False 58 self.tempfiles = [] 59 self.device_specific = None 60 self.extras = {} 61 self.info_dict = None 62 self.source_info_dict = None 63 self.target_info_dict = None 64 self.worker_threads = None 65 66 67OPTIONS = Options() 68 69 70# Values for "certificate" in apkcerts that mean special things. 
71SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") 72 73 74class ExternalError(RuntimeError): 75 pass 76 77 78def Run(args, **kwargs): 79 """Create and return a subprocess.Popen object, printing the command 80 line on the terminal if -v was specified.""" 81 if OPTIONS.verbose: 82 print " running: ", " ".join(args) 83 return subprocess.Popen(args, **kwargs) 84 85 86def CloseInheritedPipes(): 87 """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds 88 before doing other work.""" 89 if platform.system() != "Darwin": 90 return 91 for d in range(3, 1025): 92 try: 93 stat = os.fstat(d) 94 if stat is not None: 95 pipebit = stat[0] & 0x1000 96 if pipebit != 0: 97 os.close(d) 98 except OSError: 99 pass 100 101 102def LoadInfoDict(input_file): 103 """Read and parse the META/misc_info.txt key/value pairs from the 104 input target files and return a dict.""" 105 106 def read_helper(fn): 107 if isinstance(input_file, zipfile.ZipFile): 108 return input_file.read(fn) 109 else: 110 path = os.path.join(input_file, *fn.split("/")) 111 try: 112 with open(path) as f: 113 return f.read() 114 except IOError as e: 115 if e.errno == errno.ENOENT: 116 raise KeyError(fn) 117 d = {} 118 try: 119 d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n")) 120 except KeyError: 121 # ok if misc_info.txt doesn't exist 122 pass 123 124 # backwards compatibility: These values used to be in their own 125 # files. Look for them, in case we're processing an old 126 # target_files zip. 
127 128 if "mkyaffs2_extra_flags" not in d: 129 try: 130 d["mkyaffs2_extra_flags"] = read_helper( 131 "META/mkyaffs2-extra-flags.txt").strip() 132 except KeyError: 133 # ok if flags don't exist 134 pass 135 136 if "recovery_api_version" not in d: 137 try: 138 d["recovery_api_version"] = read_helper( 139 "META/recovery-api-version.txt").strip() 140 except KeyError: 141 raise ValueError("can't find recovery API version in input target-files") 142 143 if "tool_extensions" not in d: 144 try: 145 d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip() 146 except KeyError: 147 # ok if extensions don't exist 148 pass 149 150 if "fstab_version" not in d: 151 d["fstab_version"] = "1" 152 153 try: 154 data = read_helper("META/imagesizes.txt") 155 for line in data.split("\n"): 156 if not line: 157 continue 158 name, value = line.split(" ", 1) 159 if not value: 160 continue 161 if name == "blocksize": 162 d[name] = value 163 else: 164 d[name + "_size"] = value 165 except KeyError: 166 pass 167 168 def makeint(key): 169 if key in d: 170 d[key] = int(d[key], 0) 171 172 makeint("recovery_api_version") 173 makeint("blocksize") 174 makeint("system_size") 175 makeint("vendor_size") 176 makeint("userdata_size") 177 makeint("cache_size") 178 makeint("recovery_size") 179 makeint("boot_size") 180 makeint("fstab_version") 181 182 d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"]) 183 d["build.prop"] = LoadBuildProp(read_helper) 184 return d 185 186def LoadBuildProp(read_helper): 187 try: 188 data = read_helper("SYSTEM/build.prop") 189 except KeyError: 190 print "Warning: could not find SYSTEM/build.prop in %s" % zip 191 data = "" 192 return LoadDictionaryFromLines(data.split("\n")) 193 194def LoadDictionaryFromLines(lines): 195 d = {} 196 for line in lines: 197 line = line.strip() 198 if not line or line.startswith("#"): 199 continue 200 if "=" in line: 201 name, value = line.split("=", 1) 202 d[name] = value 203 return d 204 205def 
LoadRecoveryFSTab(read_helper, fstab_version): 206 class Partition(object): 207 def __init__(self, mount_point, fs_type, device, length, device2, context): 208 self.mount_point = mount_point 209 self.fs_type = fs_type 210 self.device = device 211 self.length = length 212 self.device2 = device2 213 self.context = context 214 215 try: 216 data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab") 217 except KeyError: 218 print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab" 219 data = "" 220 221 if fstab_version == 1: 222 d = {} 223 for line in data.split("\n"): 224 line = line.strip() 225 if not line or line.startswith("#"): 226 continue 227 pieces = line.split() 228 if not 3 <= len(pieces) <= 4: 229 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) 230 options = None 231 if len(pieces) >= 4: 232 if pieces[3].startswith("/"): 233 device2 = pieces[3] 234 if len(pieces) >= 5: 235 options = pieces[4] 236 else: 237 device2 = None 238 options = pieces[3] 239 else: 240 device2 = None 241 242 mount_point = pieces[0] 243 length = 0 244 if options: 245 options = options.split(",") 246 for i in options: 247 if i.startswith("length="): 248 length = int(i[7:]) 249 else: 250 print "%s: unknown option \"%s\"" % (mount_point, i) 251 252 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1], 253 device=pieces[2], length=length, 254 device2=device2) 255 256 elif fstab_version == 2: 257 d = {} 258 for line in data.split("\n"): 259 line = line.strip() 260 if not line or line.startswith("#"): 261 continue 262 # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags> 263 pieces = line.split() 264 if len(pieces) != 5: 265 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) 266 267 # Ignore entries that are managed by vold 268 options = pieces[4] 269 if "voldmanaged=" in options: 270 continue 271 272 # It's a good line, parse it 273 length = 0 274 options = options.split(",") 275 for i in options: 276 if 
i.startswith("length="): 277 length = int(i[7:]) 278 else: 279 # Ignore all unknown options in the unified fstab 280 continue 281 282 mount_flags = pieces[3] 283 # Honor the SELinux context if present. 284 context = None 285 for i in mount_flags.split(","): 286 if i.startswith("context="): 287 context = i 288 289 mount_point = pieces[1] 290 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2], 291 device=pieces[0], length=length, 292 device2=None, context=context) 293 294 else: 295 raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,)) 296 297 return d 298 299 300def DumpInfoDict(d): 301 for k, v in sorted(d.items()): 302 print "%-25s = (%s) %s" % (k, type(v).__name__, v) 303 304 305def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): 306 """Take a kernel, cmdline, and ramdisk directory from the input (in 307 'sourcedir'), and turn them into a boot image. Return the image 308 data, or None if sourcedir does not appear to contains files for 309 building the requested image.""" 310 311 if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or 312 not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)): 313 return None 314 315 if info_dict is None: 316 info_dict = OPTIONS.info_dict 317 318 ramdisk_img = tempfile.NamedTemporaryFile() 319 img = tempfile.NamedTemporaryFile() 320 321 if os.access(fs_config_file, os.F_OK): 322 cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")] 323 else: 324 cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")] 325 p1 = Run(cmd, stdout=subprocess.PIPE) 326 p2 = Run(["minigzip"], 327 stdin=p1.stdout, stdout=ramdisk_img.file.fileno()) 328 329 p2.wait() 330 p1.wait() 331 assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,) 332 assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,) 333 334 # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set 335 mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" 336 337 cmd = 
[mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")] 338 339 fn = os.path.join(sourcedir, "second") 340 if os.access(fn, os.F_OK): 341 cmd.append("--second") 342 cmd.append(fn) 343 344 fn = os.path.join(sourcedir, "cmdline") 345 if os.access(fn, os.F_OK): 346 cmd.append("--cmdline") 347 cmd.append(open(fn).read().rstrip("\n")) 348 349 fn = os.path.join(sourcedir, "base") 350 if os.access(fn, os.F_OK): 351 cmd.append("--base") 352 cmd.append(open(fn).read().rstrip("\n")) 353 354 fn = os.path.join(sourcedir, "pagesize") 355 if os.access(fn, os.F_OK): 356 cmd.append("--pagesize") 357 cmd.append(open(fn).read().rstrip("\n")) 358 359 args = info_dict.get("mkbootimg_args", None) 360 if args and args.strip(): 361 cmd.extend(shlex.split(args)) 362 363 img_unsigned = None 364 if info_dict.get("vboot", None): 365 img_unsigned = tempfile.NamedTemporaryFile() 366 cmd.extend(["--ramdisk", ramdisk_img.name, 367 "--output", img_unsigned.name]) 368 else: 369 cmd.extend(["--ramdisk", ramdisk_img.name, 370 "--output", img.name]) 371 372 p = Run(cmd, stdout=subprocess.PIPE) 373 p.communicate() 374 assert p.returncode == 0, "mkbootimg of %s image failed" % ( 375 os.path.basename(sourcedir),) 376 377 if (info_dict.get("boot_signer", None) == "true" and 378 info_dict.get("verity_key", None)): 379 path = "/" + os.path.basename(sourcedir).lower() 380 cmd = [OPTIONS.boot_signer_path] 381 cmd.extend(OPTIONS.boot_signer_args) 382 cmd.extend([path, img.name, 383 info_dict["verity_key"] + ".pk8", 384 info_dict["verity_key"] + ".x509.pem", img.name]) 385 p = Run(cmd, stdout=subprocess.PIPE) 386 p.communicate() 387 assert p.returncode == 0, "boot_signer of %s image failed" % path 388 389 # Sign the image if vboot is non-empty. 
390 elif info_dict.get("vboot", None): 391 path = "/" + os.path.basename(sourcedir).lower() 392 img_keyblock = tempfile.NamedTemporaryFile() 393 cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"], 394 img_unsigned.name, info_dict["vboot_key"] + ".vbpubk", 395 info_dict["vboot_key"] + ".vbprivk", 396 info_dict["vboot_subkey"] + ".vbprivk", 397 img_keyblock.name, 398 img.name] 399 p = Run(cmd, stdout=subprocess.PIPE) 400 p.communicate() 401 assert p.returncode == 0, "vboot_signer of %s image failed" % path 402 403 # Clean up the temp files. 404 img_unsigned.close() 405 img_keyblock.close() 406 407 img.seek(os.SEEK_SET, 0) 408 data = img.read() 409 410 ramdisk_img.close() 411 img.close() 412 413 return data 414 415 416def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir, 417 info_dict=None): 418 """Return a File object (with name 'name') with the desired bootable 419 image. Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 420 'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES, 421 otherwise construct it from the source files in 422 'unpack_dir'/'tree_subdir'.""" 423 424 prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name) 425 if os.path.exists(prebuilt_path): 426 print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,) 427 return File.FromLocalFile(name, prebuilt_path) 428 429 prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name) 430 if os.path.exists(prebuilt_path): 431 print "using prebuilt %s from IMAGES..." % (prebuilt_name,) 432 return File.FromLocalFile(name, prebuilt_path) 433 434 print "building image from target_files %s..." 
  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                            os.path.join(unpack_dir, fs_config),
                            info_dict)
  if data:
    return File(name, data)
  return None


def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  # Registered so Cleanup() removes the tree when the script finishes.
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    # Shell out to unzip; 'pattern' optionally restricts which entries
    # are extracted.
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
      cmd.append(pattern)
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (filename,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    # The returned ZipFile is opened on the first (main) archive.
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")


def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: does the key parse as an unencrypted PKCS#8 DER key?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: try decrypting with the empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Interactively collect passwords for the keys that need them; keys
  # without passwords map to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords


def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.
  """
539 """ 540 541 if align == 0 or align == 1: 542 align = None 543 544 if align: 545 temp = tempfile.NamedTemporaryFile() 546 sign_name = temp.name 547 else: 548 sign_name = output_name 549 550 cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar", 551 os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] 552 cmd.extend(OPTIONS.extra_signapk_args) 553 if whole_file: 554 cmd.append("-w") 555 cmd.extend([key + OPTIONS.public_key_suffix, 556 key + OPTIONS.private_key_suffix, 557 input_name, sign_name]) 558 559 p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) 560 if password is not None: 561 password += "\n" 562 p.communicate(password) 563 if p.returncode != 0: 564 raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,)) 565 566 if align: 567 p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name]) 568 p.communicate() 569 if p.returncode != 0: 570 raise ExternalError("zipalign failed: return code %s" % (p.returncode,)) 571 temp.close() 572 573 574def CheckSize(data, target, info_dict): 575 """Check the data string passed against the max size limit, if 576 any, for the given target. Raise exception if the data is too big. 
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    # Size limits are keyed by device basename, e.g. "system_size".
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print " WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print " ", msg


def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  certmap = {}
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
    if m:
      name, cert, privkey = m.groups()
      public_key_suffix_len = len(OPTIONS.public_key_suffix)
      private_key_suffix_len = len(OPTIONS.private_key_suffix)
      if cert in SPECIAL_CERT_STRINGS and not privkey:
        certmap[name] = cert
      elif (cert.endswith(OPTIONS.public_key_suffix) and
            privkey.endswith(OPTIONS.private_key_suffix) and
            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
        # Store the shared key basename (suffixes stripped).
        certmap[name] = cert[:-public_key_suffix_len]
      else:
        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap


COMMON_DOCSTRING = """
  -p (--path) <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v (--verbose)
      Show command lines being executed.

  -h (--help)
      Display this usage message and exit.
"""
def Usage(docstring):
  """Print the calling module's docstring followed by the common options."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING


def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags are handed to the caller's handler, if any.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args


def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn


def Cleanup():
  """Delete every temp file/dir registered in OPTIONS.tempfiles."""
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)


class PasswordManager(object):
  """Collects key passwords, either via $ANDROID_PW_FILE + $EDITOR or
  by interactive prompting."""

  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """
763 """ 764 765 current = self.ReadFile() 766 767 first = True 768 while True: 769 missing = [] 770 for i in items: 771 if i not in current or not current[i]: 772 missing.append(i) 773 # Are all the passwords already in the file? 774 if not missing: 775 return current 776 777 for i in missing: 778 current[i] = "" 779 780 if not first: 781 print "key file %s still missing some passwords." % (self.pwfile,) 782 answer = raw_input("try to edit again? [y]> ").strip() 783 if answer and answer[0] not in 'yY': 784 raise RuntimeError("key passwords unavailable") 785 first = False 786 787 current = self.UpdateAndReadFile(current) 788 789 def PromptResult(self, current): # pylint: disable=no-self-use 790 """Prompt the user to enter a value (password) for each key in 791 'current' whose value is fales. Returns a new dict with all the 792 values. 793 """ 794 result = {} 795 for k, v in sorted(current.iteritems()): 796 if v: 797 result[k] = v 798 else: 799 while True: 800 result[k] = getpass.getpass( 801 "Enter password for %s key> " % k).strip() 802 if result[k]: 803 break 804 return result 805 806 def UpdateAndReadFile(self, current): 807 if not self.editor or not self.pwfile: 808 return self.PromptResult(current) 809 810 f = open(self.pwfile, "w") 811 os.chmod(self.pwfile, 0o600) 812 f.write("# Enter key passwords between the [[[ ]]] brackets.\n") 813 f.write("# (Additional spaces are harmless.)\n\n") 814 815 first_line = None 816 sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()]) 817 for i, (_, k, v) in enumerate(sorted_list): 818 f.write("[[[ %s ]]] %s\n" % (v, k)) 819 if not v and first_line is None: 820 # position cursor on first line with no password. 
    f.close()

    # Open the editor positioned at the first missing entry, then
    # re-parse whatever the user saved.
    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse self.pwfile into a {key: password} dict (empty if unset
    or missing)."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like "[[[ password ]]] keyname".
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result


def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Write 'filename' into 'zip_file' with a fixed timestamp and the
  given permissions, working around Python 2.7's zip64 limit."""
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)
    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    # Restore the original mode/times and the zip64 limit.
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit


def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o644
  else:
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # Unix permission bits live in the high 16 bits of external_attr.
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit


def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 limit raised (see b/18015246)."""
  # http://b/18015246
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  # central directory.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit


class DeviceSpecificParams(object):
  # Lazily-loaded device-specific python module, shared by instances.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: import by basename, stripping a ".py" suffix.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")
  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")


class File(object):
  """An in-memory file: a name plus data, with size and sha1 precomputed."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
FromLocalFile(cls, name, diskname): 1033 f = open(diskname, "rb") 1034 data = f.read() 1035 f.close() 1036 return File(name, data) 1037 1038 def WriteToTemp(self): 1039 t = tempfile.NamedTemporaryFile() 1040 t.write(self.data) 1041 t.flush() 1042 return t 1043 1044 def AddToZip(self, z, compression=None): 1045 ZipWriteStr(z, self.name, self.data, compress_type=compression) 1046 1047DIFF_PROGRAM_BY_EXT = { 1048 ".gz" : "imgdiff", 1049 ".zip" : ["imgdiff", "-z"], 1050 ".jar" : ["imgdiff", "-z"], 1051 ".apk" : ["imgdiff", "-z"], 1052 ".img" : "imgdiff", 1053 } 1054 1055class Difference(object): 1056 def __init__(self, tf, sf, diff_program=None): 1057 self.tf = tf 1058 self.sf = sf 1059 self.patch = None 1060 self.diff_program = diff_program 1061 1062 def ComputePatch(self): 1063 """Compute the patch (as a string of data) needed to turn sf into 1064 tf. Returns the same tuple as GetPatch().""" 1065 1066 tf = self.tf 1067 sf = self.sf 1068 1069 if self.diff_program: 1070 diff_program = self.diff_program 1071 else: 1072 ext = os.path.splitext(tf.name)[1] 1073 diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff") 1074 1075 ttemp = tf.WriteToTemp() 1076 stemp = sf.WriteToTemp() 1077 1078 ext = os.path.splitext(tf.name)[1] 1079 1080 try: 1081 ptemp = tempfile.NamedTemporaryFile() 1082 if isinstance(diff_program, list): 1083 cmd = copy.copy(diff_program) 1084 else: 1085 cmd = [diff_program] 1086 cmd.append(stemp.name) 1087 cmd.append(ttemp.name) 1088 cmd.append(ptemp.name) 1089 p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 1090 err = [] 1091 def run(): 1092 _, e = p.communicate() 1093 if e: 1094 err.append(e) 1095 th = threading.Thread(target=run) 1096 th.start() 1097 th.join(timeout=300) # 5 mins 1098 if th.is_alive(): 1099 print "WARNING: diff command timed out" 1100 p.terminate() 1101 th.join(5) 1102 if th.is_alive(): 1103 p.kill() 1104 th.join() 1105 1106 if err or p.returncode != 0: 1107 print "WARNING: failure running %s:\n%s\n" % ( 1108 diff_program, 
"".join(err)) 1109 self.patch = None 1110 return None, None, None 1111 diff = ptemp.read() 1112 finally: 1113 ptemp.close() 1114 stemp.close() 1115 ttemp.close() 1116 1117 self.patch = diff 1118 return self.tf, self.sf, self.patch 1119 1120 1121 def GetPatch(self): 1122 """Return a tuple (target_file, source_file, patch_data). 1123 patch_data may be None if ComputePatch hasn't been called, or if 1124 computing the patch failed.""" 1125 return self.tf, self.sf, self.patch 1126 1127 1128def ComputeDifferences(diffs): 1129 """Call ComputePatch on all the Difference objects in 'diffs'.""" 1130 print len(diffs), "diffs to compute" 1131 1132 # Do the largest files first, to try and reduce the long-pole effect. 1133 by_size = [(i.tf.size, i) for i in diffs] 1134 by_size.sort(reverse=True) 1135 by_size = [i[1] for i in by_size] 1136 1137 lock = threading.Lock() 1138 diff_iter = iter(by_size) # accessed under lock 1139 1140 def worker(): 1141 try: 1142 lock.acquire() 1143 for d in diff_iter: 1144 lock.release() 1145 start = time.time() 1146 d.ComputePatch() 1147 dur = time.time() - start 1148 lock.acquire() 1149 1150 tf, sf, patch = d.GetPatch() 1151 if sf.name == tf.name: 1152 name = tf.name 1153 else: 1154 name = "%s (%s)" % (tf.name, sf.name) 1155 if patch is None: 1156 print "patching failed! %s" % (name,) 1157 else: 1158 print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % ( 1159 dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name) 1160 lock.release() 1161 except Exception as e: 1162 print e 1163 raise 1164 1165 # start worker threads; wait for them all to finish. 
1166 threads = [threading.Thread(target=worker) 1167 for i in range(OPTIONS.worker_threads)] 1168 for th in threads: 1169 th.start() 1170 while threads: 1171 threads.pop().join() 1172 1173 1174class BlockDifference(object): 1175 def __init__(self, partition, tgt, src=None, check_first_block=False, 1176 version=None): 1177 self.tgt = tgt 1178 self.src = src 1179 self.partition = partition 1180 self.check_first_block = check_first_block 1181 1182 # Due to http://b/20939131, check_first_block is disabled temporarily. 1183 assert not self.check_first_block 1184 1185 if version is None: 1186 version = 1 1187 if OPTIONS.info_dict: 1188 version = max( 1189 int(i) for i in 1190 OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) 1191 self.version = version 1192 1193 b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads, 1194 version=self.version) 1195 tmpdir = tempfile.mkdtemp() 1196 OPTIONS.tempfiles.append(tmpdir) 1197 self.path = os.path.join(tmpdir, partition) 1198 b.Compute(self.path) 1199 1200 if src is None: 1201 _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict) 1202 else: 1203 _, self.device = GetTypeAndDevice("/" + partition, 1204 OPTIONS.source_info_dict) 1205 1206 def WriteScript(self, script, output_zip, progress=None): 1207 if not self.src: 1208 # write the output unconditionally 1209 script.Print("Patching %s image unconditionally..." % (self.partition,)) 1210 else: 1211 script.Print("Patching %s image after verification." % (self.partition,)) 1212 1213 if progress: 1214 script.ShowProgress(progress, 0) 1215 self._WriteUpdate(script, output_zip) 1216 self._WritePostInstallVerifyScript(script) 1217 1218 def WriteVerifyScript(self, script): 1219 partition = self.partition 1220 if not self.src: 1221 script.Print("Image %s will be patched unconditionally." 
% (partition,)) 1222 else: 1223 ranges = self.src.care_map.subtract(self.src.clobbered_blocks) 1224 ranges_str = ranges.to_string_raw() 1225 if self.version >= 3: 1226 script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || ' 1227 'block_image_verify("%s", ' 1228 'package_extract_file("%s.transfer.list"), ' 1229 '"%s.new.dat", "%s.patch.dat")) then') % ( 1230 self.device, ranges_str, self.src.TotalSha1(), 1231 self.device, partition, partition, partition)) 1232 else: 1233 script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( 1234 self.device, ranges_str, self.src.TotalSha1())) 1235 script.Print('Verified %s image...' % (partition,)) 1236 script.AppendExtra('else') 1237 1238 # When generating incrementals for the system and vendor partitions, 1239 # explicitly check the first block (which contains the superblock) of 1240 # the partition to see if it's what we expect. If this check fails, 1241 # give an explicit log message about the partition having been 1242 # remounted R/W (the most likely explanation) and the need to flash to 1243 # get OTAs working again. 1244 if self.check_first_block: 1245 self._CheckFirstBlock(script) 1246 1247 # Abort the OTA update. Note that the incremental OTA cannot be applied 1248 # even if it may match the checksum of the target partition. 1249 # a) If version < 3, operations like move and erase will make changes 1250 # unconditionally and damage the partition. 1251 # b) If version >= 3, it won't even reach here. 1252 script.AppendExtra(('abort("%s partition has unexpected contents");\n' 1253 'endif;') % (partition,)) 1254 1255 def _WritePostInstallVerifyScript(self, script): 1256 partition = self.partition 1257 script.Print('Verifying the updated %s image...' % (partition,)) 1258 # Unlike pre-install verification, clobbered_blocks should not be ignored. 
1259 ranges = self.tgt.care_map 1260 ranges_str = ranges.to_string_raw() 1261 script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( 1262 self.device, ranges_str, 1263 self.tgt.TotalSha1(include_clobbered_blocks=True))) 1264 1265 # Bug: 20881595 1266 # Verify that extended blocks are really zeroed out. 1267 if self.tgt.extended: 1268 ranges_str = self.tgt.extended.to_string_raw() 1269 script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( 1270 self.device, ranges_str, 1271 self._HashZeroBlocks(self.tgt.extended.size()))) 1272 script.Print('Verified the updated %s image.' % (partition,)) 1273 script.AppendExtra( 1274 'else\n' 1275 ' abort("%s partition has unexpected non-zero contents after OTA ' 1276 'update");\n' 1277 'endif;' % (partition,)) 1278 else: 1279 script.Print('Verified the updated %s image.' % (partition,)) 1280 1281 script.AppendExtra( 1282 'else\n' 1283 ' abort("%s partition has unexpected contents after OTA update");\n' 1284 'endif;' % (partition,)) 1285 1286 def _WriteUpdate(self, script, output_zip): 1287 ZipWrite(output_zip, 1288 '{}.transfer.list'.format(self.path), 1289 '{}.transfer.list'.format(self.partition)) 1290 ZipWrite(output_zip, 1291 '{}.new.dat'.format(self.path), 1292 '{}.new.dat'.format(self.partition)) 1293 ZipWrite(output_zip, 1294 '{}.patch.dat'.format(self.path), 1295 '{}.patch.dat'.format(self.partition), 1296 compress_type=zipfile.ZIP_STORED) 1297 1298 call = ('block_image_update("{device}", ' 1299 'package_extract_file("{partition}.transfer.list"), ' 1300 '"{partition}.new.dat", "{partition}.patch.dat");\n'.format( 1301 device=self.device, partition=self.partition)) 1302 script.AppendExtra(script.WordWrap(call)) 1303 1304 def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use 1305 data = source.ReadRangeSet(ranges) 1306 ctx = sha1() 1307 1308 for p in data: 1309 ctx.update(p) 1310 1311 return ctx.hexdigest() 1312 1313 def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use 1314 
"""Return the hash value for all zero blocks.""" 1315 zero_block = '\x00' * 4096 1316 ctx = sha1() 1317 for _ in range(num_blocks): 1318 ctx.update(zero_block) 1319 1320 return ctx.hexdigest() 1321 1322 # TODO(tbao): Due to http://b/20939131, block 0 may be changed without 1323 # remounting R/W. Will change the checking to a finer-grained way to 1324 # mask off those bits. 1325 def _CheckFirstBlock(self, script): 1326 r = rangelib.RangeSet((0, 1)) 1327 srchash = self._HashBlocks(self.src, r) 1328 1329 script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || ' 1330 'abort("%s has been remounted R/W; ' 1331 'reflash device to reenable OTA updates");') 1332 % (self.device, r.to_string_raw(), srchash, 1333 self.device)) 1334 1335DataImage = blockimgdiff.DataImage 1336 1337 1338# map recovery.fstab's fs_types to mount/format "partition types" 1339PARTITION_TYPES = { 1340 "yaffs2": "MTD", 1341 "mtd": "MTD", 1342 "ext4": "EMMC", 1343 "emmc": "EMMC", 1344 "f2fs": "EMMC", 1345 "squashfs": "EMMC" 1346} 1347 1348def GetTypeAndDevice(mount_point, info): 1349 fstab = info["fstab"] 1350 if fstab: 1351 return (PARTITION_TYPES[fstab[mount_point].fs_type], 1352 fstab[mount_point].device) 1353 else: 1354 raise KeyError 1355 1356 1357def ParseCertificate(data): 1358 """Parse a PEM-format certificate.""" 1359 cert = [] 1360 save = False 1361 for line in data.split("\n"): 1362 if "--END CERTIFICATE--" in line: 1363 break 1364 if save: 1365 cert.append(line) 1366 if "--BEGIN CERTIFICATE--" in line: 1367 save = True 1368 cert = "".join(cert).decode('base64') 1369 return cert 1370 1371def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img, 1372 info_dict=None): 1373 """Generate a binary patch that creates the recovery image starting 1374 with the boot image. (Most of the space in these images is just the 1375 kernel, which is identical for the two, so the resulting patch 1376 should be efficient.) 
Add it to the output zip, along with a shell 1377 script that is run from init.rc on first boot to actually do the 1378 patching and install the new recovery image. 1379 1380 recovery_img and boot_img should be File objects for the 1381 corresponding images. info should be the dictionary returned by 1382 common.LoadInfoDict() on the input target_files. 1383 """ 1384 1385 if info_dict is None: 1386 info_dict = OPTIONS.info_dict 1387 1388 diff_program = ["imgdiff"] 1389 path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat") 1390 if os.path.exists(path): 1391 diff_program.append("-b") 1392 diff_program.append(path) 1393 bonus_args = "-b /system/etc/recovery-resource.dat" 1394 else: 1395 bonus_args = "" 1396 1397 d = Difference(recovery_img, boot_img, diff_program=diff_program) 1398 _, _, patch = d.ComputePatch() 1399 output_sink("recovery-from-boot.p", patch) 1400 1401 try: 1402 # The following GetTypeAndDevice()s need to use the path in the target 1403 # info_dict instead of source_info_dict. 1404 boot_type, boot_device = GetTypeAndDevice("/boot", info_dict) 1405 recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict) 1406 except KeyError: 1407 return 1408 1409 sh = """#!/system/bin/sh 1410if ! 
applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then 1411 applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed" 1412else 1413 log -t recovery "Recovery image already installed" 1414fi 1415""" % {'boot_size': boot_img.size, 1416 'boot_sha1': boot_img.sha1, 1417 'recovery_size': recovery_img.size, 1418 'recovery_sha1': recovery_img.sha1, 1419 'boot_type': boot_type, 1420 'boot_device': boot_device, 1421 'recovery_type': recovery_type, 1422 'recovery_device': recovery_device, 1423 'bonus_args': bonus_args} 1424 1425 # The install script location moved from /system/etc to /system/bin 1426 # in the L release. Parse the init.rc file to find out where the 1427 # target-files expects it to be, and put it there. 1428 sh_location = "etc/install-recovery.sh" 1429 try: 1430 with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f: 1431 for line in f: 1432 m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line) 1433 if m: 1434 sh_location = m.group(1) 1435 print "putting script in", sh_location 1436 break 1437 except (OSError, IOError) as e: 1438 print "failed to read init.rc: %s" % (e,) 1439 1440 output_sink(sh_location, sh) 1441