• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14
15from __future__ import print_function
16
17import copy
18import errno
19import getopt
20import getpass
21import gzip
22import imp
23import os
24import platform
25import re
26import shlex
27import shutil
28import subprocess
29import sys
30import tempfile
31import threading
32import time
33import zipfile
34
35import blockimgdiff
36
37from hashlib import sha1 as sha1
38
39
class Options(object):
  """Mutable bag of global settings shared by all releasetools scripts.

  A single instance is created below as the module-level OPTIONS and is
  updated by ParseOptions() from command-line flags.
  """

  def __init__(self):
    # Default location of the built host tools, keyed by sys.platform value.
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }

    # Directory searched for helper binaries/jars; None on other platforms.
    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    # Temp files/dirs registered for deletion (see UnzipTemp/MakeTempFile).
    self.tempfiles = []
    # Path to the device-specific releasetools extension module, if any.
    self.device_specific = None
    # Free-form key/value pairs from the -x/--extra flag.
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
71
72
# The single shared Options instance: mutated by ParseOptions() and read by
# every function in this module.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
78
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: failures updating the system partition.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: failures updating the vendor partition.
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build checks failing before applying the update.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
106
class ExternalError(RuntimeError):
  """Raised when an external tool (unzip, aapt, signapk.jar, ...) fails,
  or when a size/parse check in this module cannot be satisfied."""
  pass
109
110
def Run(args, verbose=None, **kwargs):
  """Spawn a subprocess and return its subprocess.Popen handle.

  Args:
    args: the command to run, as a list of strings.
    verbose: whether to echo the command line before running it;
        defaults to the global OPTIONS.verbose when None.
    **kwargs: forwarded unchanged to subprocess.Popen.
  """
  echo = OPTIONS.verbose if verbose is None else verbose
  if echo:
    print("  running: ", " ".join(args))
  return subprocess.Popen(args, **kwargs)
122
123
def CloseInheritedPipes():
  """Close leaked pipe file descriptors (fds 3..1024) inherited from gmake.

  Gmake on Mac OS leaks pipe fds into child processes; this is a no-op on
  every platform other than Darwin.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # 0x1000 is the S_IFIFO bit of st_mode: only close pipes.
      if st is not None and (st[0] & 0x1000):
        os.close(fd)
    except OSError:
      # fd not open, or already closed under us -- ignore, as before.
      pass
138
139
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an opened zipfile.ZipFile of the target-files, or the path
        to an unzipped target-files directory.
    input_dir: if not None, the path to the unzipped target-files directory;
        properties that point at build-time files are redirected into it
        (the repacking case).

  Raises:
    ValueError: if META/misc_info.txt is missing.
  """

  def read_helper(fn):
    # Read 'fn' from either the zip archive or the unzipped directory.
    # Raises KeyError when the entry does not exist.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # Fix: propagate unexpected I/O errors (e.g. EACCES) instead of
        # falling through and implicitly returning None, which made callers
        # crash later with a confusing AttributeError.
        raise

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("can't find META/misc_info.txt in input target-files")

  assert "recovery_api_version" in d
  assert "fstab_version" in d

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        print("Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,))
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print("Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,))
        del d["vendor_base_fs_file"]

  # META/imagesizes.txt is optional; missing file is not an error.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert a known-numeric property to int in place (base auto-detected).
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  system_root_image = d.get("system_root_image", None) == "true"
  if d.get("no_recovery", None) != "true":
    recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
        recovery_fstab_path, system_root_image)
  elif d.get("recovery_as_boot", None) == "true":
    # No separate recovery image, but the boot image carries the recovery
    # ramdisk; read the fstab from there instead.
    recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
        recovery_fstab_path, system_root_image)
  else:
    d["fstab"] = None

  d["build.prop"] = LoadBuildProp(read_helper)
  return d
257
258
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop (via read_helper) into a dict.

  Returns an empty dict, with a warning printed, when the file is missing.
  """
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # Fix: the original message interpolated the 'zip' *builtin* by mistake,
    # printing "... in <built-in function zip>".
    print("Warning: could not find SYSTEM/build.prop in input target-files")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
266
267
def LoadDictionaryFromLines(lines):
  """Parse "key=value" lines into a dict.

  Blank lines, '#' comments, and lines without '=' are skipped; the value
  keeps any embedded '=' characters intact.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
278
279
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  """Parse a version-2 recovery.fstab into a {mount_point: Partition} dict.

  Args:
    read_helper: callable returning the contents of a path inside the
        target-files; raises KeyError when the path is missing.
    fstab_version: must be 2 (the only supported format).
    recovery_fstab_path: location of the fstab within the target-files.
    system_root_image: if True, alias "/system" to the "/" entry.

  Raises:
    ValueError: on a malformed fstab line.
  """
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    print("Warning: could not find {}".format(recovery_fstab_path))
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length, context=context)

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    # Fix: use the "in" operator -- dict.has_key() was removed in Python 3.
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
342
343
def DumpInfoDict(d):
  """Print the info dict, one sorted "key = (type) value" line per entry."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
347
348
def AppendAVBSigningArgs(cmd, partition):
  """Append avbtool signing arguments for the given partition to cmd.

  Reads "avb_<partition>_key_path" / "avb_<partition>_algorithm" and
  "avb_salt" from OPTIONS.info_dict; appends nothing when unset.
  """
  prefix = "avb_" + partition
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = OPTIONS.info_dict.get(prefix + "_key_path")
  algorithm = OPTIONS.info_dict.get(prefix + "_algorithm")
  if key_path and algorithm:
    cmd += ["--key", key_path, "--algorithm", algorithm]
  salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if salt and partition != "vbmeta":
    cmd += ["--salt", salt]
359    cmd.extend(["--salt", avb_salt])
360
361
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Return the image data, or None if sourcedir does not appear to contains files
  for building the requested image.
  """

  def make_ramdisk():
    # mkbootfs packs RAMDISK/ (honoring fs_config_file when present), and
    # the output is piped through minigzip into a temp file.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # The following files are optional; pass each flag only when present.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Hard-code the path as "/boot" for two-step special recovery image (which
    # will be loaded into /boot during the two-step OTA).
    if two_step_image:
      path = "/boot"
    else:
      path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    # We have switched from the prebuilt futility binary to using the tool
    # (futility-host) built from the source. Override the setting in the old
    # TF.zip.
    futility = info_dict["futility"]
    if futility.startswith("prebuilts/"):
      futility = "futility-host"
    cmd = [info_dict["vboot_signer_cmd"], futility,
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # AVB: if enabled, calculate and add hash to boot.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
    part_size = info_dict["boot_size"]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name", "boot"]
    AppendAVBSigningArgs(cmd, "boot")
    args = info_dict.get("avb_boot_add_hash_footer_args")
    if args and args.strip():
      cmd.extend(shlex.split(args))
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
        os.path.basename(OPTIONS.input_tmp))

  # Rewind before reading the image back. Fix: the original called
  # img.seek(os.SEEK_SET, 0) with the arguments swapped; it only worked
  # because os.SEEK_SET happens to equal 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
519
520
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  # Prefer prebuilts, checking BOOTABLE_IMAGES first, then IMAGES.
  for subdir in ("BOOTABLE_IMAGES", "IMAGES"):
    prebuilt_path = os.path.join(unpack_dir, subdir, prebuilt_name)
    if os.path.exists(prebuilt_path):
      print("using prebuilt %s from %s..." % (prebuilt_name, subdir))
      return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % (tree_subdir,))

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  no_ramdisk = (info_dict.get("system_root_image") == "true" and
                prebuilt_name == "boot.img" and
                info_dict.get("recovery_as_boot") != "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, not no_ramdisk, two_step_image)
  return File(name, data) if data else None
558
559
def Gunzip(in_filename, out_filename):
  """Decompress the gzip file 'in_filename' into 'out_filename'."""
  with gzip.open(in_filename, "rb") as src:
    with open(out_filename, "wb") as dst:
      shutil.copyfileobj(src, dst)
565
566
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  # Register for deletion by Cleanup().
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # -o: overwrite without prompting; -q: quiet.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.extend(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if not m:
    unzip_to_dir(filename, tmp)
  else:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    # The returned ZipFile is of the main (first) archive.
    filename = m.group(1)

  return tmp, zipfile.ZipFile(filename, "r")
599
600
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []       # keys determined to need no password
  need_passwords = []     # keys the user must be prompted for
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First, try to read the key with no password at all (-nocrypt).
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Retry with an empty password; openssl's stderr distinguishes an
      # encrypted key from an unparseable one.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Prompt for the remaining keys; password-less keys map to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
648
649
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal
  number (API Level) or a codename.

  Raises:
    ExternalError: if aapt fails, or no sdkVersion line is found.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # Fix: stderr is not piped, so communicate() always returns None for it
  # and the old "if err:" check could never fire. Check the exit status.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
667
668
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using
  the provided map.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number -- must be a codename we can translate.
    if version not in codename_to_api_level_map:
      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                          % (version, codename_to_api_level_map))
    return codename_to_api_level_map[version]
685
686
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=None,
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion. Defaults to an empty map.

  Raises:
    ExternalError: if signapk.jar exits with a non-zero status.
  """
  # Fix: avoid a mutable default argument (dict()), which is evaluated once
  # and shared across all calls.
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
         OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The key password (if any) is fed to signapk.jar on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
734
735
def CheckSize(data, target, info_dict):
  """Check 'data' against the max size limit, if any, for 'target'.

  Raises ExternalError when the data occupies >= 99%% of the partition
  limit; prints a warning at >= 95%%, and a verbose note below that.
  Returns silently when no fstab entry or size limit applies.
  """
  # Strip a trailing ".img" so e.g. "system.img" maps to "/system".
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  fstab = info_dict["fstab"]
  if fstab:
    # The fstab names the userdata partition "/data".
    if mount_point == "/userdata":
      mount_point = "/data"
    partition = fstab[mount_point]
    fs_type = partition.fs_type
    # Strip any directory part of the device path; the basename keys the
    # "<device>_size" property.
    device = partition.device.rsplit("/", 1)[-1]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  if pct >= 95.0:
    print("\n  WARNING: %s\n" % (msg,))
  elif OPTIONS.verbose:
    print("  ", msg)
768
769
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a tuple with the following elements: (1) a dictionary that maps
  packages to certs (based on the "certificate" and "private_key" attributes
  in the file. (2) A string representing the extension of compressed APKs in
  the target files (e.g ".gz" ".bro").

  Raises:
    ValueError: on a malformed apkcerts.txt line, or when more than one
        compressed extension is in use.
  """
  certmap = {}
  compressed_extension = None

  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Filter out the ones that are not installed.
  installed_files = set()
  for name in tf_zip.namelist():
    basename = os.path.basename(name)
    if basename:
      installed_files.add(basename)

  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue
    m = re.match(r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
                 r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*)")?$',
                 line)
    if m:
      matches = m.groupdict()
      cert = matches["CERT"]
      privkey = matches["PRIVKEY"]
      name = matches["NAME"]
      this_compressed_extension = matches["COMPRESSED"]
      public_key_suffix_len = len(OPTIONS.public_key_suffix)
      private_key_suffix_len = len(OPTIONS.private_key_suffix)
      if cert in SPECIAL_CERT_STRINGS and not privkey:
        certmap[name] = cert
      elif (cert.endswith(OPTIONS.public_key_suffix) and
            privkey.endswith(OPTIONS.private_key_suffix) and
            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
        certmap[name] = cert[:-public_key_suffix_len]
      else:
        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
      if this_compressed_extension:
        # Only count the installed files.
        filename = name + '.' + this_compressed_extension
        if filename not in installed_files:
          continue
        # Make sure that all the values in the compression map have the same
        # extension. We don't support multiple compression methods in the same
        # system image.
        if compressed_extension:
          if this_compressed_extension != compressed_extension:
            # Fix: the original passed a tuple as a second argument to
            # ValueError instead of %-formatting it into the message.
            raise ValueError("multiple compressed extensions: %s vs %s" %
                             (compressed_extension, this_compressed_extension))
        else:
          compressed_extension = this_compressed_extension

  return (certmap, ("." + compressed_extension) if compressed_extension else None)
826
827
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""

def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  for text in (docstring.rstrip("\n"), COMMON_DOCSTRING):
    print(text)
851
852
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: mutates the global OPTIONS, and on -p/--path prepends
  <path>/bin to os.environ["PATH"]. Exits the process on bad flags (-h
  exits 0; a getopt error exits 2).
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags are offered to the caller's handler first.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  # Make binaries under the requested search path take precedence.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
922
923
def MakeTempFile(prefix='tmp', suffix=''):
  """Create a named temporary file, register it for deletion by
  Cleanup(), and return its path."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
931
932
def Cleanup():
  """Delete every path registered in OPTIONS.tempfiles (files or
  whole directories)."""
  for path in OPTIONS.tempfiles:
    remover = shutil.rmtree if os.path.isdir(path) else os.remove
    remover(path)
939
940
class PasswordManager(object):
  """Collects key passwords, preferring a user-editable password file
  ($ANDROID_PW_FILE opened in $EDITOR) and falling back to interactive
  getpass prompts."""

  def __init__(self):
    # Both are optional; without them GetPasswords() prompts interactively.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      # Keys that are absent or have an empty password.
      missing = [i for i in items if not current.get(i)]
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    # items() (not Python-2-only iteritems()) -- identical behavior here.
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, let the user edit it in
    $EDITOR, then return the re-parsed file contents.  Falls back to
    PromptResult() when no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    # 'with' guarantees the handle is closed even if a write fails.
    with open(self.pwfile, "w") as f:
      os.chmod(self.pwfile, 0o600)
      f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
      f.write("# (Additional spaces are harmless.)\n\n")

      first_line = None
      # Sort entries still needing a password (empty value) to the front.
      sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
      for i, (_, k, v) in enumerate(sorted_list):
        f.write("[[[  %s  ]]] %s\n" % (v, k))
        if not v and first_line is None:
          # position cursor on first line with no password.
          first_line = i + 4

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key_name: password} dict.
    Returns an empty dict if no file is configured; a missing file is
    tolerated (ENOENT is not an error)."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      with open(self.pwfile, "r") as f:
        for line in f:
          line = line.strip()
          if not line or line[0] == '#':
            continue
          m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
          if not m:
            print("failed to parse password file: ", line)
          else:
            result[m.group(2)] = m.group(1)
    except IOError as e:
      if e.errno != errno.ENOENT:
        print("error reading password file: ", str(e))
    return result
1041
1042
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add the file at 'filename' to 'zip_file' as 'arcname' with the
  given permissions and a fixed timestamp, so output is repeatable."""
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger
  # than 2GiB, so `zipfile.write()` must be used directly for those.
  # This mess can be avoided if we port to python3.
  old_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  compress_type = zip_file.compression if compress_type is None else compress_type
  arcname = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    fixed_ts = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (fixed_ts, fixed_ts))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    # Restore the on-disk file's mode/times and the global zip64 limit.
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = old_limit
1081
1082
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore ZIP64_LIMIT in a finally block so an exception from writestr()
  # can't leave the module-global mutated (ZipWrite() already does this).
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        # Default: regular file, rw-r--r--.
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1123
1124
def ZipClose(zip_file):
  """Close zip_file with the zip64 limit raised.

  http://b/18015246
  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit in a finally block so an exception from close()
  # can't leave the module-global mutated (consistent with ZipWrite()).
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1135
1136
class DeviceSpecificParams(object):
  """Loads the device-specific releasetools module (if configured) and
  dispatches the OTA hook callbacks to it."""

  # The loaded device-specific module; class-level so it is shared once set.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    # items() instead of Python-2-only iteritems(); identical behavior.
    for k, v in kwargs.items():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file path: strip any ".py" and import it from its directory.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print("loaded device-specific extensions from", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Deliberate best-effort: a missing module just disables the hooks.
        print("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Called when emitting assertions for an OTA verification package."""
    return self._DoCall("VerifyOTA_Assertions")
1221
class File(object):
  """An in-memory file: a name plus its data, sizes, and SHA-1."""

  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    # NOTE(review): a passed-in compress_size of 0 falls back to size
    # because of the `or`; if a zero compressed size is ever legitimate
    # this needs an `is None` check instead.
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File named 'name' from the contents of 'diskname'.

    Uses a context manager so the handle is closed even if read() fails.
    """
    with open(diskname, "rb") as f:
      data = f.read()
    return File(name, data)

  def WriteToTemp(self):
    """Write the data to a NamedTemporaryFile and return it; closing the
    returned object deletes the file."""
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    """Write the data to <d>/<self.name>."""
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    """Add this file to zip 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1249
# Maps a target file's extension to the diff tool (and flags) used to
# patch it; Difference.ComputePatch() falls back to "bsdiff" for
# extensions not listed here.
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1257
class Difference(object):
  """Computes a binary patch (via imgdiff or bsdiff) that turns a
  source File into a target File."""

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf                       # target File
    self.sf = sf                       # source File
    self.patch = None                  # patch bytes; set by ComputePatch()
    self.diff_program = diff_program   # optional override of the diff tool

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the tool from the target's extension; default to bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # Create ptemp before the try block: the finally clause references it,
    # so creating it inside would raise NameError if the creation failed.
    ptemp = tempfile.NamedTemporaryFile()
    try:
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        # Escalate: terminate, then kill if the process ignores it.
        print("WARNING: diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print("WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err)))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      # Closing the NamedTemporaryFiles also deletes them.
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1329
1330
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print(len(diffs), "diffs to compute")

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # The lock is held while pulling the next item from the shared
    # iterator and while printing, but released around the (slow)
    # ComputePatch() call so workers can diff in parallel.  Note the
    # acquire/release pairs straddle loop iterations deliberately.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print("patching failed!                                  %s" % (name,))
        else:
          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
      lock.release()
    except Exception as e:
      # Surface the error before re-raising so it isn't lost in a thread.
      print(e)
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1375
1376
class BlockDifference(object):
  """Computes a block-based update for one partition (via
  blockimgdiff.BlockImageDiff) and emits the edify script fragments
  that verify, patch, and re-verify that partition."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    # tgt/src are image objects; src=None means a full (non-incremental) OTA.
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      # Default to the highest transfer-list version the build declares.
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    tmpdir = tempfile.mkdtemp()
    # Register the directory so Cleanup() removes it later.
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # For incrementals, the device path is looked up in the source build's
    # fstab; otherwise in the target build's.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum number of bytes stashed at once while applying the update.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update commands (and post-install verification when
    OPTIONS.verify is set) for this partition into 'script'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit pre-install verification: check the source blocks (or, with
    touched_blocks_only, just the blocks this update reads) and attempt
    block recovery before aborting when verification fails."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        # Clobbered blocks are excluded; their contents are not stable.
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      # Passing block_image_verify also accepts a partially-updated source
      # (the update is resumable).
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                          self.device, ranges_str, expected_sha1,
                          self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    """Emit verification that runs after the update has been applied."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Pack the transfer list and data files into output_zip and emit
    the block_image_update command into the script."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce its size. Quailty 9
    # almost triples the compression time but doesn't further reduce the size too much.
    # For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      bro_cmd = ['bro', '--quality', '6',
                 '--input', '{}.new.dat'.format(self.path),
                 '--output', '{}.new.dat.br'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      p = Run(bro_cmd, stdout=subprocess.PIPE)
      p.communicate()
      assert p.returncode == 0,\
          'compression of {}.new.dat failed'.format(self.partition)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      # Already brotli-compressed: store, don't deflate again.
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the SHA-1 (hex) of the given block ranges read from source."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1626
1627
# Re-exported here so callers of this module don't need to import
# blockimgdiff directly.
DataImage = blockimgdiff.DataImage
1629
# map recovery.fstab's fs_types to mount/format "partition types";
# used by GetTypeAndDevice() when emitting script commands.
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1637
def GetTypeAndDevice(mount_point, info):
  """Look up mount_point in info["fstab"].

  Returns (partition_type, device) for the mount point, where
  partition_type comes from PARTITION_TYPES.

  Raises:
    KeyError: if there is no fstab, or no entry for mount_point.
  """
  fstab = info["fstab"]
  if not fstab:
    # Include the mount point in the exception instead of a bare
    # KeyError, so callers' logs say what lookup failed.
    raise KeyError(mount_point)
  entry = fstab[mount_point]
  return (PARTITION_TYPES[entry.fs_type], entry.device)
1645
1646
def ParseCertificate(data):
  """Parse a PEM-format certificate, returning the DER-encoded bytes
  between the BEGIN/END CERTIFICATE markers."""
  # Local import, matching the file's style (see ZipWrite's datetime).
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # str.decode('base64') exists only on Python 2; base64.b64decode gives
  # the identical result there and also works on Python 3.
  cert = base64.b64decode("".join(cert))
  return cert
1660
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info_dict should be the dictionary returned by
  common.LoadInfoDict() on the input target_files; it defaults to
  OPTIONS.info_dict.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"

  if full_recovery_image:
    # Ship the complete image; no on-device patching needed.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Shared resource data shrinks the patch; the same file must be
      # passed to applypatch on the device via bonus_args.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: skip emitting the script.
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.
  sh_location = "bin/install-recovery.sh"

  print("putting script in", sh_location)

  output_sink(sh_location, sh)
1740