# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
from hashlib import sha1, sha256

import images
import sparse_img
from blockimgdiff import BlockImageDiff

logger = logging.getLogger(__name__)


class Options(object):

  def __init__(self):
    # Set up the search path, in order to find framework/ and lib64/. When this
    # function runs, the user-supplied search path (`--path`) is not yet
    # available, so the value set here is the default, which may be overridden
    # by a command-line flag later.
    exec_path = sys.argv[0]
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # The logger hasn't been initialized yet at this point. Use print to
      # output warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.realpath(os.path.join(
        os.path.dirname(exec_path), '..'))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.aftl_server = None
    self.aftl_key_path = None
    self.aftl_manufacturer_key_path = None
    self.aftl_signer_helper = None
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None


OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img.
AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'recovery', 'system',
                  'system_ext', 'vendor', 'vendor_boot')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'system_ext', 'odm')


class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h'.
  """

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass


def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'INFO',
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

    # Increase the logging level for verbose mode.
    if OPTIONS.verbose:
      config = copy.deepcopy(config)
      config['handlers']['default']['level'] = 'INFO'

    if OPTIONS.logfile:
      config = copy.deepcopy(config)
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)
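
# A custom logging config can be supplied via the LOGGING_CONFIG environment
# variable; an illustrative invocation (the JSON must follow the
# logging.config.dictConfig schema):
#   LOGGING_CONFIG=/path/to/logging.json ./ota_from_target_files ...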


def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless the caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
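
  Example (illustrative; any simple command works):
    proc = Run(['ls', '-l'])
    stdout, _ = proc.communicate()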
219  """
220  if 'stdout' not in kwargs and 'stderr' not in kwargs:
221    kwargs['stdout'] = subprocess.PIPE
222    kwargs['stderr'] = subprocess.STDOUT
223  if 'universal_newlines' not in kwargs:
224    kwargs['universal_newlines'] = True
225  # Don't log any if caller explicitly says so.
226  if verbose != False:
227    logger.info("  Running: \"%s\"", " ".join(args))
228  return subprocess.Popen(args, **kwargs)
229
230
def RunAndWait(args, verbose=None, **kwargs):
  """Runs the given command waiting for it to complete.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless the caller specifies any of them.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  proc = Run(args, verbose=verbose, **kwargs)
  proc.wait()

  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {})".format(
            args, proc.returncode))


def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless the caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
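
  Example (illustrative):
    date_str = RunAndCheckOutput(['date', '+%Y-%m-%d']).strip()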
270  """
271  proc = Run(args, verbose=verbose, **kwargs)
272  output, _ = proc.communicate()
273  if output is None:
274    output = ""
275  # Don't log any if caller explicitly says so.
276  if verbose != False:
277    logger.info("%s", output.rstrip())
278  if proc.returncode != 0:
279    raise ExternalError(
280        "Failed to run command '{}' (exit code {}):\n{}".format(
281            args, proc.returncode, output))
282  return output
283
284
def RoundUpTo4K(value):
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)
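# For example, RoundUpTo4K(1) == 4096, RoundUpTo4K(4096) == 4096, and
# RoundUpTo4K(4097) == 8192.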


def CloseInheritedPipes():
  """GNU make on macOS leaks file descriptors (pipes). Close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  the commonly used properties such as fingerprint.

  There are two types of info dicts: a) build-time info dict, which is generated
  at build time (i.e. included in a target_files zip); b) OEM info dict that is
  specified at package generation time (via command line argument
  '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
  having "oem_fingerprint_properties" in build-time info dict), all the queries
  would be answered based on build-time info dict only. Otherwise if using
  OEM-specific properties, some of them will be calculated from two info dicts.

  Users can query properties similarly to using a dict() (e.g. info['fstab']),
  or query build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts; None
        if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  def __init__(self, info_dict, oem_dicts=None):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"

    # Skip _oem_props if oem_dicts is None, so that BuildInfo can be used in
    # sign_target_files_apks.
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_CARE_MAP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_CARE_MAP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    return self._is_ab

  @property
  def device(self):
    return self._device

  @property
  def fingerprint(self):
    return self._fingerprint

  @property
  def oem_props(self):
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition."""
    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the inquired build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the inquired ro.product.* build property"""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid
    if any(x not in default_source_order for x in source_order):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding error.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def GetOemProperty(self, key):
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self.GetPartitionBuildProp("ro.build.version.release", partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp("ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))
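
  # A build fingerprint follows the CDD format
  # brand/product/device:release/id/incremental:type/tags, e.g. (illustrative)
  # "google/coral/coral:11/RP1A.200720.009/6720564:user/release-keys".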

  def CalculateFingerprint(self):
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self.GetBuildProp("ro.build.version.release"),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))

  def WriteMountOemScript(self, script):
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)


def ReadFromInputFile(input_file, fn):
  """Reads the contents of fn from input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn).decode()
  else:
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path) as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Re-raise other I/O errors instead of silently returning None.
      raise


def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads the `META/misc_info.txt` file in the target_files input, does sanity
  checks and returns the parsed key/value pairs for the given build. It's
  usually called early when working on input target_files files, e.g. when
  generating OTAs, or signing builds. Note that the function may be called
  against an old target_files file (i.e. from past dessert releases). So the
  property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. It works fine with the build system. However,
  they are no longer available when (re)generating images from target_files zip.
  When `repacking` is True, redirect these properties to the actual files in the
  unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        target_files file. When doing repacking, `input_file` must be a dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
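
  Example (illustrative; `target_files` is a hypothetical zip path):
    with zipfile.ZipFile(target_files, 'r') as input_zip:
      info_dict = LoadInfoDict(input_zip)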
620  """
621  if repacking:
622    assert isinstance(input_file, str), \
623        "input_file must be a path str when doing repacking"
624
625  def read_helper(fn):
626    return ReadFromInputFile(input_file, fn)
627
628  try:
629    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
630  except KeyError:
631    raise ValueError("Failed to find META/misc_info.txt in input target-files")
632
633  if "recovery_api_version" not in d:
634    raise ValueError("Failed to find 'recovery_api_version'")
635  if "fstab_version" not in d:
636    raise ValueError("Failed to find 'fstab_version'")
637
638  if repacking:
639    # "selinux_fc" properties should point to the file_contexts files
640    # (file_contexts.bin) under META/.
641    for key in d:
642      if key.endswith("selinux_fc"):
643        fc_basename = os.path.basename(d[key])
644        fc_config = os.path.join(input_file, "META", fc_basename)
645        assert os.path.exists(fc_config)
646
647        d[key] = fc_config
648
649    # Similarly we need to redirect "root_dir", and "root_fs_config".
650    d["root_dir"] = os.path.join(input_file, "ROOT")
651    d["root_fs_config"] = os.path.join(
652        input_file, "META", "root_filesystem_config.txt")
653
654    # Redirect {partition}_base_fs_file for each of the named partitions.
655    for part_name in ["system", "vendor", "system_ext", "product", "odm"]:
656      key_name = part_name + "_base_fs_file"
657      if key_name not in d:
658        continue
659      basename = os.path.basename(d[key_name])
660      base_fs_file = os.path.join(input_file, "META", basename)
661      if os.path.exists(base_fs_file):
662        d[key_name] = base_fs_file
663      else:
664        logger.warning(
665            "Failed to find %s base fs file: %s", part_name, base_fs_file)
666        del d[key_name]
667
668  def makeint(key):
669    if key in d:
670      d[key] = int(d[key], 0)
671
672  makeint("recovery_api_version")
673  makeint("blocksize")
674  makeint("system_size")
675  makeint("vendor_size")
676  makeint("userdata_size")
677  makeint("cache_size")
678  makeint("recovery_size")
679  makeint("fstab_version")
680
681  boot_images = "boot.img"
682  if "boot_images" in d:
683    boot_images = d["boot_images"]
684  for b in boot_images.split():
685    makeint(b.replace(".img","_size"))
686
  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)

  # Tries to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_CARE_MAP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition)
  d["build.prop"] = d["system.build.prop"]

  # Set up the salt (based on fingerprint) that will be used when adding AVB
  # hash / hashtree footers.
  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d)
    for partition in PARTITIONS_WITH_CARE_MAP:
      fingerprint = build_info.GetPartitionFingerprint(partition)
      if fingerprint:
        # sha256 needs bytes; encode the fingerprint string first.
        d["avb_{}_salt".format(partition)] = sha256(
            fingerprint.encode()).hexdigest()

  return d


def LoadListFromFile(file_path):
  with open(file_path) as f:
    return f.read().splitlines()


def LoadDictionaryFromFile(file_path):
  lines = LoadListFromFile(file_path)
  return LoadDictionaryFromLines(lines)


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d
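# For example, LoadDictionaryFromLines(['a=1', '# comment', 'b=2']) returns
# {'a': '1', 'b': '2'}.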


class PartitionBuildProps(object):
  """The class holds the build prop of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop; and calculates alternative values of some specific build
  properties during runtime.

  Attributes:
    input_file: a zipped target-file or an unzipped target-file directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: A dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
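
  Example (illustrative; `input_zip` is a hypothetical target-files zip):
    props = PartitionBuildProps.FromInputFile(input_zip, 'system')
    brand = props.GetProp('ro.product.system.brand')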
750  """
751  def __init__(self, input_file, name, placeholder_values=None):
752    self.input_file = input_file
753    self.partition = name
754    self.props_allow_override = [props.format(name) for props in [
755        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
756    self.build_props = {}
757    self.prop_overrides = set()
758    self.placeholder_values = {}
759    if placeholder_values:
760      self.placeholder_values = copy.deepcopy(placeholder_values)
761
762  @staticmethod
763  def FromDictionary(name, build_props):
764    """Constructs an instance from a build prop dictionary."""
765
766    props = PartitionBuildProps("unknown", name)
767    props.build_props = build_props.copy()
768    return props
769
770  @staticmethod
771  def FromInputFile(input_file, name, placeholder_values=None):
772    """Loads the build.prop file and builds the attributes."""
773    data = ''
774    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
775                      '{}/build.prop'.format(name.upper())]:
776      try:
777        data = ReadFromInputFile(input_file, prop_file)
778        break
779      except KeyError:
780        logger.warning('Failed to read %s', prop_file)
781
782    props = PartitionBuildProps(input_file, name, placeholder_values)
783    props._LoadBuildProp(data)
784    return props
785
  def _LoadBuildProp(self, data):
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after overridden by import '
                           'statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or len(tokens) not in (2, 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      raise ValueError('Unrecognized import path {}'.format(line))

    # We only recognize a subset of the import statements that the init process
    # supports, and we can loosen the restriction based on how the dynamic
    # fingerprint is used in practice. The placeholder format should be
    # ${placeholder}, and its value should be provided by the caller through
    # placeholder_values.
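    # For example (illustrative), with placeholder_values of
    # {'ro.boot.product.device_name': 'sku1'}, the import path
    # '/odm/etc/${ro.boot.product.device_name}.prop' resolves to
    # '/odm/etc/sku1.prop'.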
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved placeholder in import path %s', import_path)
      return {}

    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}

  def GetProp(self, prop):
    return self.build_props.get(prop)


def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context,
                 slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    slotselect = False
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      elif i == "slotselect":
        slotselect = True
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length, context=context,
                               slotselect=slotselect)

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert '/system' not in d and '/' in d
    d["/system"] = d["/"]
  return d


def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
  """Finds the path to recovery fstab and loads its contents."""
  # recovery fstab is only meaningful when installing an update via recovery
  # (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA.
  if info_dict.get('ab_update') == 'true' and \
     info_dict.get("allow_non_ab") != "true":
    return None

  # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
  # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
  # cases, since it may load the info_dict from an old build (e.g. when
  # generating incremental OTAs from that build).
  system_root_image = info_dict.get('system_root_image') == 'true'
  if info_dict.get('no_recovery') != 'true':
    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
      if not os.path.exists(path):
        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path,
        system_root_image)

  if info_dict.get('recovery_as_boot') == 'true':
    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
      if not os.path.exists(path):
        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path,
        system_root_image)

  return None


def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)


def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from the
      partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.
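
  Example (illustrative): if the framework dict has dynamic_partition_list
  "system" and the vendor dict has "vendor odm", the merged value is
  "system vendor odm".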
974  """
975  merged_dict = {}
976  # Partition groups and group sizes are defined by the vendor dict because
977  # these values may vary for each board that uses a shared system image.
978  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
979  framework_dynamic_partition_list = framework_dict.get(
980      "dynamic_partition_list", "")
981  vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list", "")
982  merged_dict["dynamic_partition_list"] = ("%s %s" % (
983      framework_dynamic_partition_list, vendor_dynamic_partition_list)).strip()
984  for partition_group in merged_dict["super_partition_groups"].split(" "):
985    # Set the partition group's size using the value from the vendor dict.
986    key = "super_%s_group_size" % partition_group
987    if key not in vendor_dict:
988      raise ValueError("Vendor dict does not contain required key %s." % key)
989    merged_dict[key] = vendor_dict[key]
990
991    # Set the partition group's partition list using a concatenation of the
992    # framework and vendor partition lists.
993    key = "super_%s_partition_list" % partition_group
994    merged_dict[key] = (
995        "%s %s" %
996        (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
997
998  # Pick virtual ab related flags from vendor dict, if defined.
999  if "virtual_ab" in vendor_dict.keys():
1000     merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
1001  if "virtual_ab_retrofit" in vendor_dict.keys():
1002     merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
1003  return merged_dict
1004
1005
def AppendAVBSigningArgs(cmd, partition):
  """Append signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
  if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
    new_key_path = os.path.join(OPTIONS.search_path, key_path)
    if os.path.exists(new_key_path):
      key_path = new_key_path
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd.extend(["--key", key_path, "--algorithm", algorithm])
  avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd.extend(["--salt", avb_salt])


def GetAvbPartitionArg(partition, image, info_dict=None):
  """Returns the VBMeta arguments for partition.

  It sets up the VBMeta argument by including the partition descriptor from the
  given 'image', or by configuring the partition as a chained partition.

  Args:
    partition: The name of the partition (e.g. "system").
    image: The path to the partition image.
    info_dict: A dict returned by common.LoadInfoDict(). Will use
        OPTIONS.info_dict if None has been given.

  Returns:
    A list of VBMeta arguments.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Check if chain partition is used.
  key_path = info_dict.get("avb_" + partition + "_key_path")
  if not key_path:
    return ["--include_descriptors_from_image", image]

  # For a non-A/B device, we don't chain /recovery nor include its descriptor
  # into vbmeta.img. The recovery image will be configured on an independent
  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
  # See details at
  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
  if info_dict.get("ab_update") != "true" and partition == "recovery":
    return []

  # Otherwise chain the partition into vbmeta.
  chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
  return ["--chain_partition", chained_partition_arg]


def GetAvbChainedPartitionArg(partition, info_dict, key=None):
  """Constructs and returns the arg to build or verify a chained partition.

  Args:
    partition: The partition name.
    info_dict: The info dict to look up the key info and rollback index
        location.
    key: The key to be used for building or verifying the partition. Defaults to
        the key listed in info_dict.

  Returns:
    A string of form "partition:rollback_index_location:key" that can be used to
    build or verify vbmeta image.
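
    For example (illustrative): "vbmeta_system:1:/path/to/extracted_pubkey.bin".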
1072  """
1073  if key is None:
1074    key = info_dict["avb_" + partition + "_key_path"]
1075  if key and not os.path.exists(key) and OPTIONS.search_path:
1076    new_key_path = os.path.join(OPTIONS.search_path, key)
1077    if os.path.exists(new_key_path):
1078      key = new_key_path
1079  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
1080  rollback_index_location = info_dict[
1081      "avb_" + partition + "_rollback_index_location"]
1082  return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
1083
1084
def BuildVBMeta(image_path, partitions, name, needed_partitions):
  """Creates a VBMeta image.

  It generates the requested VBMeta image. The requested image could be for
  top-level or chained VBMeta image, which is determined based on the name.

  Args:
    image_path: The output path for the new VBMeta image.
    partitions: A dict that's keyed by partition names with image paths as
        values. Only valid partition names are accepted: those listed in
        common.AVB_PARTITIONS and custom partitions listed in
        OPTIONS.info_dict.get("avb_custom_images_partition_list").
    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
    needed_partitions: Partitions whose descriptors should be included into the
        generated VBMeta image.

  Raises:
    AssertionError: On invalid input args.
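
  Example (illustrative; `system_img_path` is a hypothetical image path):
    BuildVBMeta('vbmeta_system.img', {'system': system_img_path},
                'vbmeta_system', ['system'])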
1103  """
1104  avbtool = OPTIONS.info_dict["avb_avbtool"]
1105  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
1106  AppendAVBSigningArgs(cmd, name)
1107
1108  custom_partitions = OPTIONS.info_dict.get(
1109      "avb_custom_images_partition_list", "").strip().split()
1110
1111  for partition, path in partitions.items():
1112    if partition not in needed_partitions:
1113      continue
1114    assert (partition in AVB_PARTITIONS or
1115            partition in AVB_VBMETA_PARTITIONS or
1116            partition in custom_partitions), \
1117        'Unknown partition: {}'.format(partition)
1118    assert os.path.exists(path), \
1119        'Failed to find {} for {}'.format(path, partition)
1120    cmd.extend(GetAvbPartitionArg(partition, path))
1121
1122  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
1123  if args and args.strip():
1124    split_args = shlex.split(args)
1125    for index, arg in enumerate(split_args[:-1]):
1126      # Sanity check that the image file exists. Some images might be defined
1127      # as a path relative to source tree, which may not be available at the
1128      # same location when running this script (we have the input target_files
1129      # zip only). For such cases, we additionally scan other locations (e.g.
1130      # IMAGES/, RADIO/, etc) before bailing out.
1131      if arg == '--include_descriptors_from_image':
1132        image_path = split_args[index + 1]
1133        if os.path.exists(image_path):
1134          continue
1135        found = False
1136        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
1137          alt_path = os.path.join(
1138              OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
1139          if os.path.exists(alt_path):
1140            split_args[index + 1] = alt_path
1141            found = True
1142            break
1143        assert found, 'Failed to find {}'.format(image_path)
1144    cmd.extend(split_args)
1145
1146  RunAndCheckOutput(cmd)
1147
1148  if OPTIONS.aftl_server is not None:
1149    # Ensure the other AFTL parameters are set as well.
1150    assert OPTIONS.aftl_key_path is not None, 'No AFTL key provided.'
1151    assert OPTIONS.aftl_manufacturer_key_path is not None, 'No AFTL manufacturer key provided.'
1152    assert OPTIONS.aftl_signer_helper is not None, 'No AFTL signer helper provided.'
1153    # AFTL inclusion proof generation code will go here.
1154
def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False):
  ramdisk_img = tempfile.NamedTemporaryFile()

  if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file,
           os.path.join(sourcedir, "RAMDISK")]
  else:
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  if lz4_ramdisks:
    p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
             stdout=ramdisk_img.file.fileno())
  else:
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)

  return ramdisk_img


def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Return the image data, or None if sourcedir does not appear to contain files
  for building the requested image.
  """

1191  # "boot" or "recovery", without extension.
1192  partition_name = os.path.basename(sourcedir).lower()
1193
1194  if partition_name == "recovery":
1195    kernel = "kernel"
1196  else:
1197    kernel = image_name.replace("boot", "kernel")
1198    kernel = kernel.replace(".img","")
1199  if not os.access(os.path.join(sourcedir, kernel), os.F_OK):
1200    return None
1201
1202  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
1203    return None
1204
1205  if info_dict is None:
1206    info_dict = OPTIONS.info_dict
1207
1208  img = tempfile.NamedTemporaryFile()
1209
1210  if has_ramdisk:
1211    use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
1212    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, lz4_ramdisks=use_lz4)
1213
1214  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1215  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1216
1217  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, kernel)]
1218
1219  fn = os.path.join(sourcedir, "second")
1220  if os.access(fn, os.F_OK):
1221    cmd.append("--second")
1222    cmd.append(fn)
1223
1224  fn = os.path.join(sourcedir, "dtb")
1225  if os.access(fn, os.F_OK):
1226    cmd.append("--dtb")
1227    cmd.append(fn)
1228
1229  fn = os.path.join(sourcedir, "cmdline")
1230  if os.access(fn, os.F_OK):
1231    cmd.append("--cmdline")
1232    cmd.append(open(fn).read().rstrip("\n"))
1233
1234  fn = os.path.join(sourcedir, "base")
1235  if os.access(fn, os.F_OK):
1236    cmd.append("--base")
1237    cmd.append(open(fn).read().rstrip("\n"))
1238
1239  fn = os.path.join(sourcedir, "pagesize")
1240  if os.access(fn, os.F_OK):
1241    cmd.append("--pagesize")
1242    cmd.append(open(fn).read().rstrip("\n"))
1243
1244  if partition_name == "recovery":
1245    args = info_dict.get("recovery_mkbootimg_args")
1246    if not args:
1247      # Fall back to "mkbootimg_args" for recovery image
1248      # in case "recovery_mkbootimg_args" is not set.
1249      args = info_dict.get("mkbootimg_args")
1250  else:
1251    args = info_dict.get("mkbootimg_args")
1252  if args and args.strip():
1253    cmd.extend(shlex.split(args))
1254
1255  args = info_dict.get("mkbootimg_version_args")
1256  if args and args.strip():
1257    cmd.extend(shlex.split(args))
1258
1259  if has_ramdisk:
1260    cmd.extend(["--ramdisk", ramdisk_img.name])
1261
1262  img_unsigned = None
1263  if info_dict.get("vboot"):
1264    img_unsigned = tempfile.NamedTemporaryFile()
1265    cmd.extend(["--output", img_unsigned.name])
1266  else:
1267    cmd.extend(["--output", img.name])
1268
1269  if partition_name == "recovery":
1270    if info_dict.get("include_recovery_dtbo") == "true":
1271      fn = os.path.join(sourcedir, "recovery_dtbo")
1272      cmd.extend(["--recovery_dtbo", fn])
1273    if info_dict.get("include_recovery_acpio") == "true":
1274      fn = os.path.join(sourcedir, "recovery_acpio")
1275      cmd.extend(["--recovery_acpio", fn])
1276
1277  RunAndCheckOutput(cmd)
1278
  if (info_dict.get("boot_signer") == "true" and
      info_dict.get("verity_key")):
    # Hard-code the path as "/boot" for two-step special recovery image (which
    # will be loaded into /boot during the two-step OTA).
    if two_step_image:
      path = "/boot"
    else:
      path = "/" + partition_name
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    RunAndCheckOutput(cmd)

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot"):
    path = "/" + partition_name
    img_keyblock = tempfile.NamedTemporaryFile()
    # We have switched from the prebuilt futility binary to using the tool
    # (futility-host) built from the source. Override the setting in the old
    # TF.zip.
    futility = info_dict["futility"]
    if futility.startswith("prebuilts/"):
      futility = "futility-host"
    cmd = [info_dict["vboot_signer_cmd"], futility,
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    RunAndCheckOutput(cmd)

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    if partition_name == "recovery":
      part_size = info_dict["recovery_size"]
    else:
      part_size = info_dict[image_name.replace(".img", "_size")]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      cmd.extend(shlex.split(args))
    RunAndCheckOutput(cmd)

  # Rewind to the beginning before reading the image back (offset first,
  # then whence).
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data


def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(prebuilt_name,
                             os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk, two_step_image)
  if data:
    return File(name, data)
  return None


def _BuildVendorBootImage(sourcedir, info_dict=None):
  """Build a vendor boot image from the specified sourcedir.

  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
  turn them into a vendor boot image.

  Return the image data, or None if sourcedir does not appear to contain files
  for building the requested image.
  """

1391  if info_dict is None:
1392    info_dict = OPTIONS.info_dict
1393
1394  img = tempfile.NamedTemporaryFile()
1395
1396  use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
1397  ramdisk_img = _MakeRamdisk(sourcedir, lz4_ramdisks=use_lz4)
1398
1399  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1400  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1401
1402  cmd = [mkbootimg]
1403
1404  fn = os.path.join(sourcedir, "dtb")
1405  if os.access(fn, os.F_OK):
1406    cmd.append("--dtb")
1407    cmd.append(fn)
1408
1409  fn = os.path.join(sourcedir, "vendor_cmdline")
1410  if os.access(fn, os.F_OK):
1411    cmd.append("--vendor_cmdline")
1412    cmd.append(open(fn).read().rstrip("\n"))
1413
1414  fn = os.path.join(sourcedir, "base")
1415  if os.access(fn, os.F_OK):
1416    cmd.append("--base")
1417    cmd.append(open(fn).read().rstrip("\n"))
1418
1419  fn = os.path.join(sourcedir, "pagesize")
1420  if os.access(fn, os.F_OK):
1421    cmd.append("--pagesize")
1422    cmd.append(open(fn).read().rstrip("\n"))
1423
1424  args = info_dict.get("mkbootimg_args")
1425  if args and args.strip():
1426    cmd.extend(shlex.split(args))
1427
1428  args = info_dict.get("mkbootimg_version_args")
1429  if args and args.strip():
1430    cmd.extend(shlex.split(args))
1431
1432  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
1433  cmd.extend(["--vendor_boot", img.name])
1434
1435  RunAndCheckOutput(cmd)
1436
1437  # AVB: if enabled, calculate and add hash.
1438  if info_dict.get("avb_enable") == "true":
1439    avbtool = info_dict["avb_avbtool"]
1440    part_size = info_dict["vendor_boot_size"]
1441    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1442           "--partition_size", str(part_size), "--partition_name", "vendor_boot"]
1443    AppendAVBSigningArgs(cmd, "vendor_boot")
1444    args = info_dict.get("avb_vendor_boot_add_hash_footer_args")
1445    if args and args.strip():
1446      cmd.extend(shlex.split(args))
1447    RunAndCheckOutput(cmd)
1448
1449  img.seek(os.SEEK_SET, 0)
1450  data = img.read()
1451
1452  ramdisk_img.close()
1453  img.close()
1454
1455  return data


def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                       info_dict=None):
  """Return a File object with the desired vendor boot image.

  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  data = _BuildVendorBootImage(os.path.join(unpack_dir, tree_subdir), info_dict)
  if data:
    return File(name, data)
  return None
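

# A minimal usage sketch (not called anywhere in this module; the arcname and
# unpacked directory are illustrative assumptions, and OPTIONS.info_dict must
# already be loaded by the calling script). It mirrors how image-building
# scripts typically consume the File object returned above.
def _ExampleGetVendorBootImage(unpack_dir, output_zip):
  # Look up a prebuilt vendor_boot.img, or build one from VENDOR_BOOT/.
  vendor_boot = GetVendorBootImage(
      "IMAGES/vendor_boot.img", "vendor_boot.img", unpack_dir, "VENDOR_BOOT")
  if vendor_boot:
    # File.AddToZip() writes the raw image bytes into the output archive.
    vendor_boot.AddToZip(output_zip)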


def Gunzip(in_filename, out_filename):
  """Gunzips the given gzip compressed file to a given output file."""
  with gzip.open(in_filename, "rb") as in_file, \
       open(out_filename, "wb") as out_file:
    shutil.copyfileobj(in_file, out_file)


def UnzipToDir(filename, dirname, patterns=None):
  """Unzips the archive to the given directory.

  Args:
    filename: The name of the zip file to unzip.
    dirname: Where the unzipped files will land.
    patterns: Files to unzip from the archive. If omitted, will unzip the entire
        archive. Non-matching patterns will be filtered out. If there's no match
        after the filtering, no file will be unzipped.
  """
  cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
  if patterns is not None:
    # Filter out non-matching patterns. unzip will complain otherwise.
    with zipfile.ZipFile(filename) as input_zip:
      names = input_zip.namelist()
    filtered = [
        pattern for pattern in patterns if fnmatch.filter(names, pattern)]

    # There are no matching files. Don't unzip anything.
    if not filtered:
      return
    cmd.extend(filtered)

  RunAndCheckOutput(cmd)


def UnzipTemp(filename, pattern=None):
  """Unzips the given archive into a temporary directory and returns the name.

  Args:
    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
    a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

    pattern: Files to unzip from the archive. If omitted, will unzip the entire
    archive.

  Returns:
    The name of the temporary directory.
  """

  tmp = MakeTempDir(prefix="targetfiles-")
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    UnzipToDir(m.group(1), tmp, pattern)
    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
    filename = m.group(1)
  else:
    UnzipToDir(filename, tmp, pattern)

  return tmp
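

# Illustrative sketch only (the zip filenames are hypothetical). Note the
# "foo.zip+bar.zip" convention handled above: the second archive lands in
# BOOTABLE_IMAGES under the returned directory.
def _ExampleUnzipTemp():
  # Unzip only the entries we care about; everything else is skipped.
  tmp = UnzipTemp("target-files.zip+bootable-images.zip",
                  ["IMAGES/*", "META/*"])
  # tmp now contains IMAGES/ and META/ from target-files.zip, plus
  # tmp/BOOTABLE_IMAGES with the matching entries from bootable-images.zip.
  return tmp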


def GetUserImage(which, tmpdir, input_zip,
                 info_dict=None,
                 allow_shared_blocks=None,
                 hashtree_info_generator=None,
                 reset_file_map=False):
  """Returns an Image object suitable for passing to BlockImageDiff.

  This function loads the specified image from the given path. If the specified
  image is sparse, it also performs additional processing for OTA purposes. For
  example, it always adds block 0 to the clobbered blocks list. It also detects
  files that cannot be reconstructed from the block list, for which we should
  avoid applying imgdiff.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    info_dict: The dict to be looked up for relevant info.
    allow_shared_blocks: If image is sparse, whether having shared blocks is
        allowed. If None, it is looked up from info_dict.
    hashtree_info_generator: If present and image is sparse, generates the
        hashtree_info for this sparse image.
    reset_file_map: If True and image is sparse, reset file map before returning
        the image.
  Returns:
    An Image object. If it is a sparse image and reset_file_map is False, the
    image will have file_map info loaded.
  """
  if info_dict is None:
    info_dict = LoadInfoDict(input_zip)

  is_sparse = info_dict.get("extfs_sparse_flag")

  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
  # shared blocks (i.e. some blocks will show up in multiple files' block
  # list). We can only allocate such shared blocks to the first "owner", and
  # disable imgdiff for all later occurrences.
  if allow_shared_blocks is None:
    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"

  if is_sparse:
    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
                         hashtree_info_generator)
    if reset_file_map:
      img.ResetFileMap()
    return img
  else:
    return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
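

# Usage sketch (hypothetical arguments; the target-files zip is assumed to be
# already unzipped under tmpdir). This is roughly how an OTA generator obtains
# a partition image before diffing.
def _ExampleGetUserImage(tmpdir, input_zip):
  # info_dict is loaded lazily from the target-files zip itself.
  img = GetUserImage("system", tmpdir, input_zip,
                     allow_shared_blocks=None,  # look up ext4_share_dup_blocks
                     reset_file_map=False)      # keep file_map for imgdiff
  return img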


def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
  """Returns an Image object suitable for passing to BlockImageDiff.

  This function loads the specified non-sparse image from the given path.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.
  Returns:
    An Image object.
  """
  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)


def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
                   hashtree_info_generator=None):
  """Returns a SparseImage object suitable for passing to BlockImageDiff.

  This function loads the specified sparse image from the given path, and
  performs additional processing for OTA purposes. For example, it always adds
  block 0 to the clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for which we should avoid applying imgdiff.

  Args:
    which: The partition name, e.g. "system", "vendor".
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    allow_shared_blocks: Whether having shared blocks is allowed.
    hashtree_info_generator: If present, generates the hashtree_info for this
        sparse image.
  Returns:
    A SparseImage object, with file_map info loaded.
  """
  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  # In ext4 filesystems, block 0 might be changed even when mounted R/O. We add
  # it to clobbered_blocks so that it will be written to the target
  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
  clobbered_blocks = "0"

  image = sparse_img.SparseImage(
      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
      hashtree_info_generator=hashtree_info_generator)

  # block.map may contain fewer blocks, because mke2fs may skip allocating
  # blocks if they contain all zeros. We can't reconstruct such a file from its
  # block list. Tag such entries accordingly. (Bug: 65213616)
  for entry in image.file_map:
    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
    if not entry.startswith('/'):
      continue

    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
    # filename listed in system.map may contain an additional leading slash
    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
    # results.
    arcname = entry.replace(which, which.upper(), 1).lstrip('/')

    # Special handling for another case, where files not under /system
    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
    if which == 'system' and not arcname.startswith('SYSTEM'):
      arcname = 'ROOT/' + arcname

    assert arcname in input_zip.namelist(), \
        "Failed to find the ZIP entry for {}".format(entry)

    info = input_zip.getinfo(arcname)
    ranges = image.file_map[entry]

    # If a RangeSet has been tagged as using shared blocks while loading the
    # image, check the original block list to determine its completeness. Note
    # that the 'incomplete' flag would be tagged to the original RangeSet only.
    if ranges.extra.get('uses_shared_blocks'):
      ranges = ranges.extra['uses_shared_blocks']

    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
      ranges.extra['incomplete'] = True

  return image
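

# Worked example of the incompleteness check above (numbers are illustrative):
# a 10,000-byte file rounds up to RoundUpTo4K(10000) = 12288 bytes, i.e. three
# 4096-byte blocks. If its RangeSet only covers two blocks (8192 bytes), then
# 12288 > 8192 and the entry gets tagged 'incomplete', so imgdiff won't be
# applied to it.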


def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords))
  return key_passwords
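

# Usage sketch (the key basename is hypothetical; each name is expected to
# have matching .pk8/.x509.pem files per OPTIONS.private/public_key_suffix;
# encrypted keys trigger an interactive prompt):
#
#   passwords = GetKeyPasswords(["build/target/product/security/testkey"])
#   # => {"build/target/product/security/testkey": None} for a passwordless key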


def GetMinSdkVersion(apk_name):
  """Gets the minSdkVersion declared in the APK.

  It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
  This can be either a decimal number (API Level) or a codename.

  Args:
    apk_name: The APK filename.

  Returns:
    The parsed SDK version string.

  Raises:
    ExternalError: On failing to obtain the min SDK version.
  """
  proc = Run(
      ["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  stdoutdata, stderrdata = proc.communicate()
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
            proc.returncode, stdoutdata, stderrdata))

  for line in stdoutdata.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt2")


def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Returns the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using the
  provided map.

  Args:
    apk_name: The APK filename.

  Returns:
    The parsed SDK version number.

  Raises:
    ExternalError: On failing to get the min SDK version number.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number. Codename?
    if version in codename_to_api_level_map:
      return codename_to_api_level_map[version]
    else:
      raise ExternalError(
          "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
              version, codename_to_api_level_map))
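

# Usage sketch (the APK path and codename map are hypothetical; aapt2 must be
# on PATH). A codename-only minSdkVersion such as 'R' is translated via the
# supplied map:
#
#   api_level = GetMinSdkVersionInt("Example.apk", {"R": 30})
#   # "sdkVersion:'R'" in the aapt2 badging output would yield 30 here.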


def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=None, whole_file=False,
             extra_signapk_args=None):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.

  Caller may optionally specify extra args to be passed to SignApk, which
  defaults to OPTIONS.extra_signapk_args if omitted.
  """
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}
  if extra_signapk_args is None:
    extra_signapk_args = OPTIONS.extra_signapk_args

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
         extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  proc = Run(cmd, stdin=subprocess.PIPE)
  if password is not None:
    password += "\n"
  stdoutdata, _ = proc.communicate(password)
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run signapk.jar: return code {}:\n{}".format(
            proc.returncode, stdoutdata))
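

# Usage sketch (filenames and the key path are hypothetical; assumes
# OPTIONS.search_path points at otatools so signapk.jar can be found):
#
#   SignFile("unsigned.apk", "signed.apk",
#            "build/target/product/security/testkey", password=None,
#            min_api_level=23)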


def CheckSize(data, target, info_dict):
  """Checks the data string passed against the max size limit.

  For non-AVB images, raise an exception if the data is too big. Print a
  warning if the data is nearing the maximum size.

  For AVB images, the actual image size should be identical to the limit.

  Args:
    data: A string that contains all the data for the partition.
    target: The partition name. The ".img" suffix is optional.
    info_dict: The dict to be looked up for relevant info.
  """
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size")
  if not fs_type or not limit:
    return

  size = len(data)
  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
  # path.
  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
    if size != limit:
      raise ExternalError(
          "Mismatching image size for %s: expected %d actual %d" % (
              target, limit, size))
  else:
    pct = float(size) * 100.0 / limit
    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
    if pct >= 99.0:
      raise ExternalError(msg)
    elif pct >= 95.0:
      logger.warning("\n  WARNING: %s\n", msg)
    else:
      logger.info("  %s", msg)
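

# Worked example of the non-AVB threshold check above (numbers illustrative):
# with limit = 1073741824 bytes (1 GiB) and len(data) = 1030792151,
# pct = 1030792151 * 100.0 / 1073741824 ~= 96.00, which is >= 95.0 but < 99.0,
# so CheckSize() logs a warning instead of raising ExternalError.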


def ReadApkCerts(tf_zip):
  """Parses the APK certs info from a given target-files zip.

  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns
  a tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the file);
  (2) a string representing the extension of compressed APKs in the target
  files (e.g. ".gz", ".bro").

  Args:
    tf_zip: The input target_files ZipFile (already open).

  Returns:
    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
        the extension string of compressed APKs (e.g. ".gz"), or None if there
        are no compressed APKs.
  """
  certmap = {}
  compressed_extension = None

  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Filter out the ones that are not installed.
  installed_files = set()
  for name in tf_zip.namelist():
    basename = os.path.basename(name)
    if basename:
      installed_files.add(basename)

  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
    line = line.strip()
    if not line:
      continue
    m = re.match(
        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
        r'(\s+partition="(?P<PARTITION>.*?)")?$',
        line)
    if not m:
      continue

    matches = m.groupdict()
    cert = matches["CERT"]
    privkey = matches["PRIVKEY"]
    name = matches["NAME"]
    this_compressed_extension = matches["COMPRESSED"]

    public_key_suffix_len = len(OPTIONS.public_key_suffix)
    private_key_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
      certmap[name] = cert[:-public_key_suffix_len]
    else:
      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)

    if not this_compressed_extension:
      continue

    # Only count the installed files.
    filename = name + '.' + this_compressed_extension
    if filename not in installed_files:
      continue

    # Make sure that all the values in the compression map have the same
    # extension. We don't support multiple compression methods in the same
    # system image.
    if compressed_extension:
      if this_compressed_extension != compressed_extension:
        raise ValueError(
            "Multiple compressed extensions: {} vs {}".format(
                compressed_extension, this_compressed_extension))
    else:
      compressed_extension = this_compressed_extension

  return (certmap,
          ("." + compressed_extension) if compressed_extension else None)
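

# A sample META/apkcerts.txt line that the regex above accepts (all values are
# illustrative; the entry must be on a single line):
#
#   name="Example.apk" certificate="certs/app.x509.pem" private_key="certs/app.pk8" compressed="gz"
#
# With the default suffixes this maps "Example.apk" -> "certs/app" in certmap,
# and (if Example.apk.gz is actually installed) sets the returned compressed
# extension to ".gz".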


COMMON_DOCSTRING = """
Global options

  -p  (--path) <dir>
      Prepend <dir>/bin to the list of places to search for binaries run by this
      script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the Python module containing device-specific releasetools code.

  -x  (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific extension
      code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.

  --logfile <file>
      Put verbose logs to the specified file (regardless of the --verbose
      option).
"""


def Usage(docstring):
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)


def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra=", "logfile=", "aftl_server=", "aftl_key_path=",
         "aftl_manufacturer_key_path=", "aftl_signer_helper="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--android_jar_path",):
      OPTIONS.android_jar_path = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("--aftl_server",):
      OPTIONS.aftl_server = a
    elif o in ("--aftl_key_path",):
      OPTIONS.aftl_key_path = a
    elif o in ("--aftl_manufacturer_key_path",):
      OPTIONS.aftl_manufacturer_key_path = a
    elif o in ("--aftl_signer_helper",):
      OPTIONS.aftl_signer_helper = a
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    elif o in ("--logfile",):
      OPTIONS.logfile = a
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
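

# Usage sketch for callers defining their own flags (the --signing_helper flag
# is hypothetical; docstring is the calling module's docstring):
def _ExampleParseOptions(argv, docstring):
  extras = {}

  def option_handler(o, a):
    if o == "--signing_helper":
      extras["signing_helper"] = a
      return True   # handled
    return False    # let ParseOptions assert on unknown options

  args = ParseOptions(argv, docstring,
                      extra_long_opts=["signing_helper="],
                      extra_option_handler=option_handler)
  return args, extras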


def MakeTempFile(prefix='tmp', suffix=''):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn


def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir that will be cleaned up with a call to Cleanup().

  Returns:
    The absolute pathname of the new directory.
  """
  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(dir_name)
  return dir_name


def Cleanup():
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i, ignore_errors=True)
    else:
      os.remove(i)
  del OPTIONS.tempfiles[:]


class PasswordManager(object):
  def __init__(self):
    self.editor = os.getenv("EDITOR")
    self.pwfile = os.getenv("ANDROID_PW_FILE")

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        if sys.version_info[0] >= 3:
          raw_input = input  # pylint: disable=redefined-builtin
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])

    return self.ReadFile()

  def ReadFile(self):
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          logger.warning("Failed to parse password file: %s", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        logger.exception("Error reading password file:")
    return result
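

# The password file parsed above looks like this (passwords go between the
# brackets, key names after them; the key paths are illustrative):
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   [[[  hunter2  ]]] build/target/product/security/releasekey
#   [[[           ]]] build/target/product/security/platform
#
# An empty bracket pair leaves that key's password unset, which triggers
# another round of editing/prompting in GetPasswords().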


def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
    # intentional. zip stores datetimes in local time without a time zone
    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
    # in the zip archive.
    local_epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit


def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644
  else:
    zinfo = zinfo_or_arcname
    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for
    # such a case (since
    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
    # which seems to make more sense. Otherwise the entry will have 0o000 as the
    # permission bits. We follow the logic in Python 3 to get consistent
    # behavior between using the two versions.
    if not zinfo.external_attr:
      zinfo.external_attr = 0o600 << 16

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it takes priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
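

# Usage sketch (the output path is hypothetical). ZipWrite() is preferred for
# on-disk files; ZipWriteStr() only for small in-memory payloads, per the
# zip64 caveats above.
def _ExampleZipWrites(large_file):
  out = zipfile.ZipFile("out.zip", "w", compression=zipfile.ZIP_DEFLATED,
                        allowZip64=True)
  # Store an already-compressed image without recompressing it.
  ZipWrite(out, large_file, arcname="IMAGES/system.img",
           compress_type=zipfile.ZIP_STORED)
  # Small string payloads are fine for ZipWriteStr().
  ZipWriteStr(out, "META/misc_info.txt", "ota_override_device=auto\n",
              perms=0o644)
  ZipClose(out)   # guards ZIP64_LIMIT around close(); defined below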


def ZipDelete(zip_filename, entries):
  """Deletes entries from a ZIP file.

  Since deleting entries from a ZIP file is not supported, it shells out to
  'zip -d'.

  Args:
    zip_filename: The name of the ZIP file.
    entries: The name of the entry, or the list of names to be deleted.

  Raises:
    AssertionError: In case of non-zero return from 'zip'.
  """
  if isinstance(entries, str):
    entries = [entries]
  cmd = ["zip", "-d", zip_filename] + entries
  RunAndCheckOutput(cmd)


def ZipClose(zip_file):
  # http://b/18015246
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  # central directory.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit


class DeviceSpecificParams(object):
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.items():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        logger.info("loaded device-specific extensions from %s", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        logger.info("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default")
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_GetBlockDifferences(self):
    """Called during full OTA installation and verification.
    Implementations should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
    """Called during incremental OTA installation and verification.
    Implementations should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    return self._DoCall("VerifyOTA_Assertions")


class File(object):
  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    with open(diskname, "rb") as f:
      data = f.read()
    return File(name, data)

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    ZipWriteStr(z, self.name, self.data, compress_type=compression)


DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch


def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
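

# Usage sketch (file names and contents are hypothetical, and
# OPTIONS.worker_threads must be set by the caller). Each Difference pairs a
# target file with its source version; results are read back via GetPatch().
def _ExampleComputeDifferences():
  OPTIONS.worker_threads = 4
  tf = File("system/app/Example.apk", b"new contents")
  sf = File("system/app/Example.apk", b"old contents")
  # Force bsdiff here, since this toy payload isn't a real zip for imgdiff.
  diffs = [Difference(tf, sf, diff_program="bsdiff")]
  ComputeDifferences(diffs)
  _, _, patch = diffs[0].GetPatch()   # None if patch generation failed
  return patch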


class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check the partition list in both the
    # source and target builds, because new partitions may be added and
    # existing partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before aborting the update.
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli at quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # reduce the size much further. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = b'\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
2881
2882
2883# Expose these two classes to support vendor-specific scripts
2884DataImage = images.DataImage
2885EmptyImage = images.EmptyImage
2886
2887
2888# map recovery.fstab's fs_types to mount/format "partition types"
2889PARTITION_TYPES = {
2890    "ext4": "EMMC",
2891    "emmc": "EMMC",
2892    "f2fs": "EMMC",
2893    "squashfs": "EMMC"
2894}


def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """Returns the partition type and device for the given mount point.

  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has a slotselect
  option (unless check_no_slot is explicitly set to False).
  """
  fstab = info["fstab"]
  if fstab:
    if check_no_slot:
      assert not fstab[mount_point].slotselect, \
             "Use GetTypeAndDeviceExpr instead"
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    raise KeyError


def GetTypeAndDeviceExpr(mount_point, info):
  """Returns the partition type and an edify expression for the device.

  The returned expression evaluates to the device path at runtime, adding the
  slot suffix for slot-select (A/B) entries.
  """
  fstab = info["fstab"]
  if fstab:
    p = fstab[mount_point]
    device_expr = '"%s"' % p.device
    if p.slotselect:
      device_expr = 'add_slot_suffix(%s)' % device_expr
    return (PARTITION_TYPES[p.fs_type], device_expr)
  else:
    raise KeyError
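
# Illustrative only: for a hypothetical fstab entry mounted at "/system" with
# fs_type "ext4", device "/dev/block/by-name/system" and slotselect set,
# GetTypeAndDeviceExpr("/system", info) would return:
#   ("EMMC", 'add_slot_suffix("/dev/block/by-name/system")')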


def GetEntryForDevice(fstab, device):
  """Returns the first entry in fstab whose device matches the given value."""
  if not fstab:
    return None
  for mount_point in fstab:
    if fstab[mount_point].device == device:
      return fstab[mount_point]
  return None


def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Returns:
    The decoded certificate bytes.
  """
  cert_buffer = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert_buffer.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = base64.b64decode("".join(cert_buffer))
  return cert


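# A minimal usage sketch (illustrative; "platform.x509.pem" is a hypothetical
# input file): recover the DER-encoded bytes from a PEM certificate, matching
# `openssl x509 -in platform.x509.pem -outform DER`.
def _example_parse_certificate(pem_path='platform.x509.pem'):
  with open(pem_path) as f:
    return ParseCertificate(f.read())

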
def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' differs between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the filename given with '-out',
  # openssl 1.0 (both 1.0.1 and 1.0.2) doesn't. So we collect the output from
  # stdout instead.
  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderrdata = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
  return pubkey


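# Illustrative usage (assumes `openssl` is on PATH; the certificate path is
# hypothetical):
#   pubkey = ExtractPublicKey('keys/platform.x509.pem')

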
def ExtractAvbPublicKey(avbtool, key):
  """Extracts the AVB public key from the given public or private key.

  Args:
    avbtool: The AVB tool to use.
    key: The input key file, which should be PEM-encoded public or private key.

  Returns:
    The path to the extracted AVB public key file.
  """
  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
  RunAndCheckOutput(
      [avbtool, 'extract_public_key', '--key', key, '--output', output])
  return output


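# Illustrative usage (hypothetical paths; assumes an `avbtool` binary is
# reachable):
#   avb_pubkey_path = ExtractAvbPublicKey('avbtool', 'keys/testkey_rsa4096.pem')

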
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which
  is identical for the two, so the resulting patch should be efficient. Add it
  to the output zip, along with a shell script that is run from init.rc on
  first boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result (see the sketch
        after this function).
    recovery_img: File object for the recovery image.
    boot_img: File object for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    # In this case, the output sink is rooted at VENDOR.
    recovery_img_path = "etc/recovery.img"
    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
    sh_dir = "bin"
  else:
    # In this case, the output sink is rooted at SYSTEM.
    recovery_img_path = "vendor/etc/recovery.img"
    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
    sh_dir = "vendor/bin"

  if full_recovery_image:
    output_sink(recovery_img_path, recovery_img.data)
  else:
    system_root_image = info_dict.get("system_root_image") == "true"
    path = os.path.join(input_dir, recovery_resource_dat_path)
    # With system-root-image, boot and recovery images will have mismatching
    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use
    # bsdiff to handle such a case.
    if system_root_image:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
                                              check_no_slot=False)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
                                                      check_no_slot=False)
  except KeyError:
    return

  if full_recovery_image:
    # Note that we use /vendor to refer to the recovery resources. This will
    # work for a separate vendor partition mounted at /vendor or a
    # /system/vendor subdirectory on the system partition, for which init will
    # create a symlink from /vendor to /system/vendor.
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
          --flash /vendor/etc/recovery.img \\
          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
          --patch /vendor/recovery-from-boot.p \\
          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
       'recovery_type': recovery_type,
       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
  sh_location = os.path.join(sh_dir, "install-recovery.sh")

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())


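# A minimal sketch of an output_sink callback (illustrative; "out_dir" and
# "_example_output_sink" are hypothetical names, not part of the releasetools
# API): stages each (path, data) pair under a local directory, creating parent
# directories as needed. MakeRecoveryPatch always passes bytes as data.
def _example_output_sink(out_dir):
  def sink(path, data):
    dest = os.path.join(out_dir, path)
    dir_name = os.path.dirname(dest)
    if dir_name and not os.path.isdir(dir_name):
      os.makedirs(dir_name)
    with open(dest, 'wb') as f:
      f.write(data)
  return sink

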
class DynamicPartitionUpdate(object):
  """Describes how a single dynamic partition changes between builds."""

  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)

  @property
  def tgt_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)

  @staticmethod
  def _GetSparseImageSize(img):
    if not img:
      return 0
    return img.blocksize * img.total_blocks


class DynamicGroupUpdate(object):
  """Describes how a dynamic partition group's size limit changes."""

  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist. 0: no size limits.
    self.src_size = src_size
    self.tgt_size = tgt_size


class DynamicPartitionsDifference(object):
  """Computes and writes the update steps for dynamic partitions."""

  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicate BlockDifference objects for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
          "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
          "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()

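  # Illustrative only: a minimal target info_dict fragment that __init__
  # consumes (group and partition names are hypothetical):
  #   super_partition_groups=group_foo
  #   super_group_foo_partition_list=system vendor
  #   super_group_foo_group_size=4294967296
  #   dynamic_partition_list=system vendor
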
  def WriteScript(self, script, output_zip, write_verify_script=False):
    script.Comment('--- Start patching dynamic partitions ---')
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')

  def _Compute(self):
    self._op_list = []

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
          u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
          u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' % (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))

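
# Illustrative only: for a hypothetical update that adds partition "product"
# to group "group_foo" and grows "system" from 1 GiB to 2 GiB, _Compute()
# would emit a dynamic_partitions_op_list roughly like:
#   # Add partition product to group group_foo
#   add product group_foo
#   # Grow partition system from 1073741824 to 2147483648
#   resize system 2147483648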