# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import datetime
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
from hashlib import sha1, sha256

import images
import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff

logger = logging.getLogger(__name__)


class Options(object):

  def __init__(self):
    # Set up the search path, in order to find framework/ and lib64/. At the
    # time this constructor runs, the user-supplied search path (`--path`)
    # isn't available yet. So the value set here is the default, which may be
    # overridden by a command-line flag later.
    exec_path = os.path.realpath(sys.argv[0])
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # The logger hasn't been initialized yet at this point. Use print to
      # output warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(exec_path))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # Use the boot_signer built by otatools by default.
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.aftl_tool_path = None
    self.aftl_server = None
    self.aftl_key_path = None
    self.aftl_manufacturer_key_path = None
    self.aftl_signer_helper = None
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None
    self.host_tools = {}


OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here,
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks needs to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery',
                  'system', 'system_ext', 'vendor', 'vendor_boot',
                  'vendor_dlkm', 'odm_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# Partitions that should have their care_map added to META/care_map.pb.
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
]

# Partitions with a build.prop file.
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot']

# See sysprop.mk. If the file is moved, add new search paths here; don't
# remove existing ones.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']


class ErrorCode(object):
  """Defines the error codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (e.g. low battery, package verification failure).
  Detailed codes are in 'bootable/recovery/error_code.h'."""

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass


def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'INFO',
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

    # In verbose mode, lower the default handler's threshold from WARNING to
    # INFO so that informational messages are shown.
    if OPTIONS.verbose:
      config = copy.deepcopy(config)
      config['handlers']['default']['level'] = 'INFO'

    if OPTIONS.logfile:
      config = copy.deepcopy(config)
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)
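
# The default config above can be replaced wholesale by pointing the
# LOGGING_CONFIG environment variable at a JSON dictConfig file, e.g.
# (illustrative path and invocation):
#   LOGGING_CONFIG=/tmp/logging.json ./ota_from_target_files ...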


def SetHostToolLocation(tool_name, location):
  OPTIONS.host_tools[tool_name] = location


def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find
  Returns:
    Path to the tool if found in the host_tools map, or under the same
    directory this binary is located at. If not found, tool_name itself is
    returned.
  """
  if tool_name in OPTIONS.host_tools:
    return OPTIONS.host_tools[tool_name]

  my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  tool_path = os.path.join(my_dir, tool_name)
  if os.path.exists(tool_path):
    return tool_path

  return tool_name
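
# Illustrative lookup order for FindHostToolPath (the registered location
# below is hypothetical):
#   SetHostToolLocation('mkbootimg', '/path/to/mkbootimg')
#   FindHostToolPath('mkbootimg')     # -> '/path/to/mkbootimg'
#   FindHostToolPath('no_such_tool')  # -> 'no_such_tool' (falls through)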


def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  if 'universal_newlines' not in kwargs:
    kwargs['universal_newlines'] = True

  if args:
    # Make a copy of args in case client relies on the content of args later.
    args = args[:]
    args[0] = FindHostToolPath(args[0])

  # Fall back to the global verbosity if unspecified; don't log anything if
  # the caller explicitly disabled it.
  if verbose is None:
    verbose = OPTIONS.verbose
  if verbose:
    logger.info("  Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)


def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  if output is None:
    output = ""
  # Fall back to the global verbosity if unspecified; don't log anything if
  # the caller explicitly disabled it.
  if verbose is None:
    verbose = OPTIONS.verbose
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output
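
# Illustrative usage (the command is a stand-in): returns the combined
# stdout/stderr as a string, or raises ExternalError on a non-zero exit code.
#   greeting = RunAndCheckOutput(['echo', 'hello'])  # -> 'hello\n'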


def RoundUpTo4K(value):
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)
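
# Illustrative values, following from the arithmetic above:
#   RoundUpTo4K(0)    == 0
#   RoundUpTo4K(1)    == 4096
#   RoundUpTo4K(4096) == 4096
#   RoundUpTo4K(4097) == 8192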


def CloseInheritedPipes():
  """GNU make on macOS leaks file descriptors (pipes). Close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        # 0x1000 is the S_IFIFO bit of st_mode, i.e. the fd refers to a pipe.
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  the commonly used properties such as fingerprint.

  There are two types of info dicts: a) a build-time info dict, which is
  generated at build time (i.e. included in a target_files zip); b) an OEM info
  dict that is specified at package generation time (via the command line
  argument '--oem_settings'). If a build doesn't use OEM-specific properties
  (i.e. there is no "oem_fingerprint_properties" in the build-time info dict),
  all queries are answered from the build-time info dict alone. Otherwise, when
  using OEM-specific properties, some of the values are calculated from the two
  info dicts.

  Users can query properties as with a dict (e.g. info['fstab']), or query
  build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts; None
        if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The length of the vbmeta digest to append to the fingerprint.
  _VBMETA_DIGEST_SIZE_USED = 8

  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class, while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None, so that BuildInfo is usable in
    # sign_target_files_apks as well.
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    return self._is_ab

  @property
  def device(self):
    return self._device

  @property
  def fingerprint(self):
    return self._fingerprint

  @property
  def is_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true"
    return vabc_enabled

  @property
  def vendor_suppressed_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the requested build property for the provided partition."""

    # The boot image uses ro.[product.]bootimage instead of boot.
    prop_partition = "bootimage" if partition == "boot" else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the requested build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the requested ro.product.* build property."""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid.
    if any(x not in default_source_order for x in source_order):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: the float() conversion of android_version may carry rounding
      # error. We are checking for "9" or less, and "< 10" is well outside any
      # possible floating-point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # init code switches to version_release_or_codename (see b/158483506).
    # After API finalization, release_or_codename will be the same as release.
    # This is a best effort to support pre-S dev-stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of the vbmeta digest to the existing build id.
    # The logic needs to match the one in init, so that OTA updates are
    # delivered correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using the legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("vbmeta digest isn't provided when using the legacy"
                          " build id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))
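
  # Illustrative shape of the fingerprint assembled above (all values made
  # up): 'brand/name/device:12/BUILD.ID/123456:user/release-keys'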

  def WriteMountOemScript(self, script):
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)


def ReadFromInputFile(input_file, fn):
  """Reads the contents of fn from the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn).decode()
  else:
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path) as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Re-raise any other I/O error instead of silently returning None.
      raise


def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from the input zipfile or directory into a
  file."""
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as f:
      f.write(input_file.read(fn))
    return tmp_file
  else:
    path = os.path.join(input_file, *fn.split("/"))
    if not os.path.exists(path):
      raise KeyError(fn)
    return path


class RamdiskFormat(object):
  LZ4 = 1
  GZ = 2


def _GetRamdiskFormat(info_dict):
  if info_dict.get('lz4_ramdisks') == 'true':
    ramdisk_format = RamdiskFormat.LZ4
  else:
    ramdisk_format = RamdiskFormat.GZ
  return ramdisk_format


def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads the `META/misc_info.txt` file in the target_files input, performs
  validation checks, and returns the parsed key/value pairs for the given
  build. It's usually called early when working on input target_files files,
  e.g. when generating OTAs, or signing builds. Note that the function may be
  called against an old target_files file (i.e. from past dessert releases), so
  the property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. It works fine with the build system. However,
  they are no longer available when (re)generating images from a target_files
  zip. When `repacking` is True, redirect these properties to the actual files
  in the unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        the target_files file. When doing repacking, `input_file` must be a
        dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir", and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in ["system", "vendor", "system_ext", "product", "odm",
                      "vendor_dlkm", "odm_dlkm"]:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = _GetRamdiskFormat(d)

  # Try to load the build props for all partitions with a build.prop file,
  # including system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  d["build.prop"] = d["system.build.prop"]

  # Set up the salt (based on fingerprint) that will be used when adding AVB
  # hash / hashtree footers.
  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d, use_legacy_id=True)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      fingerprint = build_info.GetPartitionFingerprint(partition)
      if fingerprint:
        d["avb_{}_salt".format(partition)] = sha256(
            fingerprint.encode()).hexdigest()

    # Set the vbmeta digest if it exists.
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d


def LoadListFromFile(file_path):
  with open(file_path) as f:
    return f.read().splitlines()


def LoadDictionaryFromFile(file_path):
  lines = LoadListFromFile(file_path)
  return LoadDictionaryFromLines(lines)


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d
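
# Illustrative input/output for LoadDictionaryFromLines:
#   LoadDictionaryFromLines(['# comment', 'a=1', 'b=2=3'])
#   # -> {'a': '1', 'b': '2=3'} (the value keeps everything after the
#   #    first '=')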


class PartitionBuildProps(object):
  """The class holds the build prop of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop, and calculates alternative values of some specific build
  properties at runtime.

  Attributes:
    input_file: a zipped target-file or an unzipped target-file directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: A dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
    ramdisk_format: If name is "boot", the format of the ramdisk inside the
        boot image; otherwise, its value is ignored. Defaults to lz4; if set
        to gzip, minigzip is used to decompress.
  """

  def __init__(self, input_file, name, placeholder_values=None):
    self.input_file = input_file
    self.partition = name
    self.props_allow_override = [props.format(name) for props in [
        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
    self.build_props = {}
    self.prop_overrides = set()
    self.placeholder_values = {}
    if placeholder_values:
      self.placeholder_values = copy.deepcopy(placeholder_values)

  @staticmethod
  def FromDictionary(name, build_props):
    """Constructs an instance from a build prop dictionary."""

    props = PartitionBuildProps("unknown", name)
    props.build_props = build_props.copy()
    return props

  @staticmethod
  def FromInputFile(input_file, name, placeholder_values=None,
                    ramdisk_format=RamdiskFormat.LZ4):
    """Loads the build.prop file and builds the attributes."""

    if name == "boot":
      data = PartitionBuildProps._ReadBootPropFile(
          input_file, ramdisk_format=ramdisk_format)
    else:
      data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

    props = PartitionBuildProps(input_file, name, placeholder_values)
    props._LoadBuildProp(data)
    return props

  @staticmethod
  def _ReadBootPropFile(input_file, ramdisk_format):
    """Reads build.prop for the boot image from input_file.

    Returns an empty string if not found.
    """
    try:
      boot_img = ExtractFromInputFile(input_file, 'IMAGES/boot.img')
    except KeyError:
      logger.warning('Failed to read IMAGES/boot.img')
      return ''
    prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
    if prop_file is None:
      return ''
    with open(prop_file, "r") as f:
      return f.read()

  @staticmethod
  def _ReadPartitionPropFile(input_file, name):
    """Reads build.prop for the given partition name from input_file.

    Returns an empty string if not found.
    """
    data = ''
    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
                      '{}/build.prop'.format(name.upper())]:
      try:
        data = ReadFromInputFile(input_file, prop_file)
        break
      except KeyError:
        logger.warning('Failed to read %s', prop_file)
    return data

  @staticmethod
  def FromBuildPropFile(name, build_prop_file):
    """Constructs an instance from a build prop file."""

    props = PartitionBuildProps("unknown", name)
    with open(build_prop_file) as f:
      props._LoadBuildProp(f.read())
    return props

  def _LoadBuildProp(self, data):
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after being overridden by an '
                           'import statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      raise ValueError('Unrecognized import path {}'.format(line))

    # We only recognize a subset of the import statements that the init
    # process supports, and we may loosen the restriction based on how the
    # dynamic fingerprint is used in practice. The placeholder format should
    # be ${placeholder}, and its value should be provided by the caller
    # through placeholder_values.
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved placeholder in import path %s', import_path)
      return {}

    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}
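
  # Illustrative import statement handled above, for partition "odm" (the
  # property in the placeholder is a stand-in); the placeholder is resolved
  # via placeholder_values, then the path is remapped to the unzipped layout:
  #   import /odm/etc/build_${ro.boot.product.hardware.sku}.prop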

  def GetProp(self, prop):
    return self.build_props.get(prop)


def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context,
                 slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    slotselect = False
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      elif i == "slotselect":
        slotselect = True
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length, context=context,
                               slotselect=slotselect)

  # / is used for the system mount point when the root directory is included
  # in system. Other areas assume system is always at "/system" so point
  # /system at /.
  if system_root_image:
    assert '/system' not in d and '/' in d
    d["/system"] = d["/"]
  return d


def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
  """Finds the path to recovery fstab and loads its contents."""
  # recovery fstab is only meaningful when installing an update via recovery
  # (i.e. non-A/B OTA). Skip loading fstab if the device uses A/B OTA.
  if info_dict.get('ab_update') == 'true' and \
     info_dict.get("allow_non_ab") != "true":
    return None

  # We changed the recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab
  # to ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
  # cases, since it may load the info_dict from an old build (e.g. when
  # generating incremental OTAs from that build).
  system_root_image = info_dict.get('system_root_image') == 'true'
  if info_dict.get('no_recovery') != 'true':
    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
      if not os.path.exists(path):
        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path,
        system_root_image)

  if info_dict.get('recovery_as_boot') == 'true':
    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
      if not os.path.exists(path):
        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path,
        system_root_image)

  return None


def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)


def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from the
      partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.
  """

  def uniq_concat(a, b):
    combined = set(a.split(" "))
    combined.update(set(b.split(" ")))
    combined = [item.strip() for item in combined if item.strip()]
    return " ".join(sorted(combined))
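
  # Illustrative behavior of uniq_concat:
  #   uniq_concat('a b', 'b c')  # -> 'a b c'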

  if (framework_dict.get("use_dynamic_partitions") !=
          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

  merged_dict = {"use_dynamic_partitions": "true"}

  merged_dict["dynamic_partition_list"] = uniq_concat(
      framework_dict.get("dynamic_partition_list", ""),
      vendor_dict.get("dynamic_partition_list", ""))

  # Super block devices are defined by the vendor dict.
  if "super_block_devices" in vendor_dict:
    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
    for block_device in merged_dict["super_block_devices"].split(" "):
      key = "super_%s_device_size" % block_device
      if key not in vendor_dict:
        raise ValueError("Vendor dict does not contain required key %s." % key)
      merged_dict[key] = vendor_dict[key]

  # Partition groups and group sizes are defined by the vendor dict because
  # these values may vary for each board that uses a shared system image.
  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
  for partition_group in merged_dict["super_partition_groups"].split(" "):
    # Set the partition group's size using the value from the vendor dict.
    key = "super_%s_group_size" % partition_group
    if key not in vendor_dict:
      raise ValueError("Vendor dict does not contain required key %s." % key)
    merged_dict[key] = vendor_dict[key]

    # Set the partition group's partition list using a concatenation of the
    # framework and vendor partition lists.
    key = "super_%s_partition_list" % partition_group
    merged_dict[key] = uniq_concat(
        framework_dict.get(key, ""), vendor_dict.get(key, ""))

  # Various other flags should be copied from the vendor dict, if defined.
  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
              "super_metadata_device", "super_partition_error_limit",
              "super_partition_size"):
    if key in vendor_dict.keys():
      merged_dict[key] = vendor_dict[key]

  return merged_dict


def PartitionMapFromTargetFiles(target_files_dir):
  """Builds a map from partition -> path within an extracted target files
  directory."""
  # Keep possible_subdirs in sync with build/make/core/board_config.mk.
  possible_subdirs = {
      "system": ["SYSTEM"],
      "vendor": ["VENDOR", "SYSTEM/vendor"],
      "product": ["PRODUCT", "SYSTEM/product"],
      "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
      "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
      "vendor_dlkm": [
          "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
  }
  partition_map = {}
  for partition, subdirs in possible_subdirs.items():
    for subdir in subdirs:
      if os.path.exists(os.path.join(target_files_dir, subdir)):
        partition_map[partition] = subdir
        break
  return partition_map


def SharedUidPartitionViolations(uid_dict, partition_groups):
  """Checks for APK sharedUserIds that cross partition group boundaries.

  This uses a single or merged build's shareduid_violation_modules.json
  output file, as generated by find_shareduid_violation.py or
  core/tasks/find-shareduid-violation.mk.

  An error is defined as a sharedUserId that is found in a set of partitions
  that span more than one partition group.

  Args:
    uid_dict: A dictionary created by using the standard json module to read a
      complete shareduid_violation_modules.json file.
    partition_groups: A list of groups, where each group is a list of
      partitions.

  Returns:
    A list of error messages.
  """
  errors = []
  for uid, partitions in uid_dict.items():
    found_in_groups = [
        group for group in partition_groups
        if set(partitions.keys()) & set(group)
    ]
    if len(found_in_groups) > 1:
      errors.append(
          "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
          % (uid, ",".join(sorted(partitions.keys()))))
  return errors


def RunHostInitVerifier(product_out, partition_map):
  """Runs host_init_verifier on the init rc files within partitions.

  host_init_verifier searches the etc/init path within each partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within product_out.
  """
  allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
  cmd = ["host_init_verifier"]
  for partition, path in partition_map.items():
    if partition not in allowed_partitions:
      raise ExternalError("Unable to call host_init_verifier for partition %s" %
                          partition)
    cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)])
    # Add --property-contexts if the file exists on the partition.
    property_contexts = "%s_property_contexts" % (
        "plat" if partition == "system" else partition)
    property_contexts_path = os.path.join(product_out, path, "etc", "selinux",
                                          property_contexts)
    if os.path.exists(property_contexts_path):
      cmd.append("--property-contexts=%s" % property_contexts_path)
    # Add the passwd file if the file exists on the partition.
    passwd_path = os.path.join(product_out, path, "etc", "passwd")
    if os.path.exists(passwd_path):
      cmd.extend(["-p", passwd_path])
  return RunAndCheckOutput(cmd)


def AppendAVBSigningArgs(cmd, partition):
  """Appends signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
  if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
    new_key_path = os.path.join(OPTIONS.search_path, key_path)
    if os.path.exists(new_key_path):
      key_path = new_key_path
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd.extend(["--key", key_path, "--algorithm", algorithm])
  avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd.extend(["--salt", avb_salt])


def GetAvbPartitionArg(partition, image, info_dict=None):
  """Returns the VBMeta arguments for a partition.

  It sets up the VBMeta argument by including the partition descriptor from the
  given 'image', or by configuring the partition as a chained partition.

  Args:
    partition: The name of the partition (e.g. "system").
    image: The path to the partition image.
    info_dict: A dict returned by common.LoadInfoDict(). Will use
        OPTIONS.info_dict if None has been given.

  Returns:
    A list of VBMeta arguments.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Check if a chained partition is used.
  key_path = info_dict.get("avb_" + partition + "_key_path")
  if not key_path:
    return ["--include_descriptors_from_image", image]

  # For a non-A/B device, we don't chain /recovery nor include its descriptor
  # into vbmeta.img. The recovery image will be configured on an independent
  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
  # See details at
  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
  if info_dict.get("ab_update") != "true" and partition == "recovery":
    return []

  # Otherwise chain the partition into vbmeta.
  chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
  return ["--chain_partition", chained_partition_arg]

def GetAvbChainedPartitionArg(partition, info_dict, key=None):
  """Constructs and returns the arg to build or verify a chained partition.

  Args:
    partition: The partition name.
    info_dict: The info dict to look up the key info and rollback index
        location.
    key: The key to be used for building or verifying the partition. Defaults
        to the key listed in info_dict.

  Returns:
    A string of the form "partition:rollback_index_location:key" that can be
    used to build or verify a vbmeta image.
  """
  if key is None:
    key = info_dict["avb_" + partition + "_key_path"]
  if key and not os.path.exists(key) and OPTIONS.search_path:
    new_key_path = os.path.join(OPTIONS.search_path, key)
    if os.path.exists(new_key_path):
      key = new_key_path
  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
  rollback_index_location = info_dict[
      "avb_" + partition + "_rollback_index_location"]
  return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
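
# Illustrative return value of GetAvbChainedPartitionArg (the rollback index
# location and extracted pubkey path are made up):
#   'vbmeta_system:1:/tmp/extracted_pubkey.bin'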


def ConstructAftlMakeImageCommands(output_image):
  """Constructs the command to append the AFTL inclusion proof to vbmeta."""

  # Ensure the other AFTL parameters are set as well.
  assert OPTIONS.aftl_tool_path is not None, 'No aftl tool provided.'
  assert OPTIONS.aftl_key_path is not None, 'No AFTL key provided.'
  assert OPTIONS.aftl_manufacturer_key_path is not None, \
      'No AFTL manufacturer key provided.'

  vbmeta_image = MakeTempFile()
  os.rename(output_image, vbmeta_image)
  build_info = BuildInfo(OPTIONS.info_dict, use_legacy_id=True)
  version_incremental = build_info.GetBuildProp("ro.build.version.incremental")
  aftltool = OPTIONS.aftl_tool_path
  server_argument_list = [OPTIONS.aftl_server, OPTIONS.aftl_key_path]
  aftl_cmd = [aftltool, "make_icp_from_vbmeta",
              "--vbmeta_image_path", vbmeta_image,
              "--output", output_image,
              "--version_incremental", version_incremental,
              "--transparency_log_servers", ','.join(server_argument_list),
              "--manufacturer_key", OPTIONS.aftl_manufacturer_key_path,
              "--algorithm", "SHA256_RSA4096",
              "--padding", "4096"]
  if OPTIONS.aftl_signer_helper:
    aftl_cmd.extend(shlex.split(OPTIONS.aftl_signer_helper))
  return aftl_cmd


def AddAftlInclusionProof(output_image):
  """Appends the AFTL inclusion proof to the vbmeta image."""

  aftl_cmd = ConstructAftlMakeImageCommands(output_image)
  RunAndCheckOutput(aftl_cmd)

  verify_cmd = ['aftltool', 'verify_image_icp', '--vbmeta_image_path',
                output_image, '--transparency_log_pub_keys',
                OPTIONS.aftl_key_path]
  RunAndCheckOutput(verify_cmd)

1422
1423def AppendGkiSigningArgs(cmd):
1424  """Append GKI signing arguments for mkbootimg."""
1425  # e.g., --gki_signing_key path/to/signing_key
1426  #       --gki_signing_algorithm SHA256_RSA4096"
1427
1428  key_path = OPTIONS.info_dict.get("gki_signing_key_path")
1429  # It's fine that a non-GKI boot.img has no gki_signing_key_path.
1430  if not key_path:
1431    return
1432
1433  if not os.path.exists(key_path) and OPTIONS.search_path:
1434    new_key_path = os.path.join(OPTIONS.search_path, key_path)
1435    if os.path.exists(new_key_path):
1436      key_path = new_key_path
1437
1438  # Checks key_path exists, before appending --gki_signing_* args.
1439  if not os.path.exists(key_path):
1440    raise ExternalError(
1441        'gki_signing_key_path: "{}" not found'.format(key_path))
1442
1443  algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
1444  if key_path and algorithm:
1445    cmd.extend(["--gki_signing_key", key_path,
1446                "--gki_signing_algorithm", algorithm])
1447
1448    signature_args = OPTIONS.info_dict.get("gki_signing_signature_args")
1449    if signature_args:
1450      cmd.extend(["--gki_signing_signature_args", signature_args])
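

# Editor's sketch of the effect of AppendGkiSigningArgs. Assumes
# OPTIONS.info_dict has been loaded and carries hypothetical values for
# gki_signing_key_path and gki_signing_algorithm (e.g. "SHA256_RSA4096").
def _ExampleGkiSigning():
  cmd = ["mkbootimg", "--kernel", "kernel"]  # hypothetical base command
  AppendGkiSigningArgs(cmd)
  # cmd now ends with:
  #   ["--gki_signing_key", <resolved key path>,
  #    "--gki_signing_algorithm", "SHA256_RSA4096", ...]
  return cmd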
1451
1452
1453def BuildVBMeta(image_path, partitions, name, needed_partitions):
1454  """Creates a VBMeta image.
1455
1456  It generates the requested VBMeta image. The requested image could be the
1457  top-level VBMeta image or a chained one, as determined by the name.
1458
1459  Args:
1460    image_path: The output path for the new VBMeta image.
1461    partitions: A dict that's keyed by partition names with image paths as
1462        values. Only valid partition names are accepted: those listed in
1463        common.AVB_PARTITIONS, plus the custom partitions listed in
1464        OPTIONS.info_dict.get("avb_custom_images_partition_list").
1465    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
1466    needed_partitions: Partitions whose descriptors should be included into the
1467        generated VBMeta image.
1468
1469  Raises:
1470    AssertionError: On invalid input args.
1471  """
1472  avbtool = OPTIONS.info_dict["avb_avbtool"]
1473  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
1474  AppendAVBSigningArgs(cmd, name)
1475
1476  custom_partitions = OPTIONS.info_dict.get(
1477      "avb_custom_images_partition_list", "").strip().split()
1478
1479  for partition, path in partitions.items():
1480    if partition not in needed_partitions:
1481      continue
1482    assert (partition in AVB_PARTITIONS or
1483            partition in AVB_VBMETA_PARTITIONS or
1484            partition in custom_partitions), \
1485        'Unknown partition: {}'.format(partition)
1486    assert os.path.exists(path), \
1487        'Failed to find {} for {}'.format(path, partition)
1488    cmd.extend(GetAvbPartitionArg(partition, path))
1489
1490  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
1491  if args and args.strip():
1492    split_args = shlex.split(args)
1493    for index, arg in enumerate(split_args[:-1]):
1494      # Check that the image file exists. Some images might be defined
1495      # as a path relative to source tree, which may not be available at the
1496      # same location when running this script (we have the input target_files
1497      # zip only). For such cases, we additionally scan other locations (e.g.
1498      # IMAGES/, RADIO/, etc) before bailing out.
1499      if arg == '--include_descriptors_from_image':
1500        chained_image = split_args[index + 1]
1501        if os.path.exists(chained_image):
1502          continue
1503        found = False
1504        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
1505          alt_path = os.path.join(
1506              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
1507          if os.path.exists(alt_path):
1508            split_args[index + 1] = alt_path
1509            found = True
1510            break
1511        assert found, 'Failed to find {}'.format(chained_image)
1512    cmd.extend(split_args)
1513
1514  RunAndCheckOutput(cmd)
1515
1516  # Generate the AFTL inclusion proof.
1517  if OPTIONS.aftl_server is not None:
1518    AddAftlInclusionProof(image_path)
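

# A minimal usage sketch (editor's illustration): building the top-level
# vbmeta image from already-built partition images. Assumes OPTIONS.info_dict
# is loaded; the tmpdir layout mirrors an unpacked target_files.
def _ExampleBuildVbmeta(tmpdir):
  partitions = {
      "system": os.path.join(tmpdir, "IMAGES", "system.img"),
      "vendor": os.path.join(tmpdir, "IMAGES", "vendor.img"),
  }
  BuildVBMeta(os.path.join(tmpdir, "IMAGES", "vbmeta.img"),
              partitions, "vbmeta", ["system", "vendor"])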
1519
1520
1521def _MakeRamdisk(sourcedir, fs_config_file=None,
1522                 ramdisk_format=RamdiskFormat.GZ):
1523  ramdisk_img = tempfile.NamedTemporaryFile()
1524
1525  if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
1526    cmd = ["mkbootfs", "-f", fs_config_file,
1527           os.path.join(sourcedir, "RAMDISK")]
1528  else:
1529    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
1530  p1 = Run(cmd, stdout=subprocess.PIPE)
1531  if ramdisk_format == RamdiskFormat.LZ4:
1532    p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
1533             stdout=ramdisk_img.file.fileno())
1534  elif ramdisk_format == RamdiskFormat.GZ:
1535    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
1536  else:
1537    raise ValueError("Unsupported ramdisk format: only lz4 and gz are supported.")
1538
1539  p2.wait()
1540  p1.wait()
1541  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
1542  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)
1543
1544  return ramdisk_img
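

# The pipeline above is equivalent to the shell command
#   mkbootfs [-f <fs_config>] <sourcedir>/RAMDISK | lz4 -l -12 --favor-decSpeed
# (or piped through minigzip for the GZ format). An editor's usage sketch,
# assuming an unpacked dir with a RAMDISK/ subdir; out_path is hypothetical.
def _ExampleMakeRamdisk(unpack_dir, out_path):
  ramdisk = _MakeRamdisk(unpack_dir, ramdisk_format=RamdiskFormat.LZ4)
  with open(out_path, "wb") as f:
    ramdisk.seek(0)
    shutil.copyfileobj(ramdisk, f)
  ramdisk.close()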
1545
1546
1547def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
1548                        has_ramdisk=False, two_step_image=False):
1549  """Build a bootable image from the specified sourcedir.
1550
1551  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
1552  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
1553  we are building a two-step special image (i.e. building a recovery image to
1554  be loaded into /boot in two-step OTAs).
1555
1556  Return the image data, or None if sourcedir does not appear to contain files
1557  for building the requested image.
1558  """
1559
1560  if info_dict is None:
1561    info_dict = OPTIONS.info_dict
1562
1563  # "boot" or "recovery", without extension.
1564  partition_name = os.path.basename(sourcedir).lower()
1565
1566  kernel = None
1567  if partition_name == "recovery":
1568    if info_dict.get("exclude_kernel_from_recovery_image") == "true":
1569      logger.info("Excluded kernel binary from recovery image.")
1570    else:
1571      kernel = "kernel"
1572  else:
1573    kernel = image_name.replace("boot", "kernel")
1574    kernel = kernel.replace(".img", "")
1575  if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
1576    return None
1577
1578  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
1579    return None
1580
1581  img = tempfile.NamedTemporaryFile()
1582
1583  if has_ramdisk:
1584    ramdisk_format = _GetRamdiskFormat(info_dict)
1585    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file,
1586                               ramdisk_format=ramdisk_format)
1587
1588  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1589  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1590
1591  cmd = [mkbootimg]
1592  if kernel:
1593    cmd += ["--kernel", os.path.join(sourcedir, kernel)]
1594
1595  fn = os.path.join(sourcedir, "second")
1596  if os.access(fn, os.F_OK):
1597    cmd.append("--second")
1598    cmd.append(fn)
1599
1600  fn = os.path.join(sourcedir, "dtb")
1601  if os.access(fn, os.F_OK):
1602    cmd.append("--dtb")
1603    cmd.append(fn)
1604
1605  fn = os.path.join(sourcedir, "cmdline")
1606  if os.access(fn, os.F_OK):
1607    cmd.append("--cmdline")
1608    cmd.append(open(fn).read().rstrip("\n"))
1609
1610  fn = os.path.join(sourcedir, "base")
1611  if os.access(fn, os.F_OK):
1612    cmd.append("--base")
1613    cmd.append(open(fn).read().rstrip("\n"))
1614
1615  fn = os.path.join(sourcedir, "pagesize")
1616  if os.access(fn, os.F_OK):
1617    cmd.append("--pagesize")
1618    cmd.append(open(fn).read().rstrip("\n"))
1619
1620  if partition_name == "recovery":
1621    args = info_dict.get("recovery_mkbootimg_args")
1622    if not args:
1623      # Fall back to "mkbootimg_args" for recovery image
1624      # in case "recovery_mkbootimg_args" is not set.
1625      args = info_dict.get("mkbootimg_args")
1626  else:
1627    args = info_dict.get("mkbootimg_args")
1628  if args and args.strip():
1629    cmd.extend(shlex.split(args))
1630
1631  args = info_dict.get("mkbootimg_version_args")
1632  if args and args.strip():
1633    cmd.extend(shlex.split(args))
1634
1635  if has_ramdisk:
1636    cmd.extend(["--ramdisk", ramdisk_img.name])
1637
1638  AppendGkiSigningArgs(cmd)
1639
1640  img_unsigned = None
1641  if info_dict.get("vboot"):
1642    img_unsigned = tempfile.NamedTemporaryFile()
1643    cmd.extend(["--output", img_unsigned.name])
1644  else:
1645    cmd.extend(["--output", img.name])
1646
1647  if partition_name == "recovery":
1648    if info_dict.get("include_recovery_dtbo") == "true":
1649      fn = os.path.join(sourcedir, "recovery_dtbo")
1650      cmd.extend(["--recovery_dtbo", fn])
1651    if info_dict.get("include_recovery_acpio") == "true":
1652      fn = os.path.join(sourcedir, "recovery_acpio")
1653      cmd.extend(["--recovery_acpio", fn])
1654
1655  RunAndCheckOutput(cmd)
1656
1657  if (info_dict.get("boot_signer") == "true" and
1658          info_dict.get("verity_key")):
1659    # Hard-code the path as "/boot" for two-step special recovery image (which
1660    # will be loaded into /boot during the two-step OTA).
1661    if two_step_image:
1662      path = "/boot"
1663    else:
1664      path = "/" + partition_name
1665    cmd = [OPTIONS.boot_signer_path]
1666    cmd.extend(OPTIONS.boot_signer_args)
1667    cmd.extend([path, img.name,
1668                info_dict["verity_key"] + ".pk8",
1669                info_dict["verity_key"] + ".x509.pem", img.name])
1670    RunAndCheckOutput(cmd)
1671
1672  # Sign the image if vboot is non-empty.
1673  elif info_dict.get("vboot"):
1674    path = "/" + partition_name
1675    img_keyblock = tempfile.NamedTemporaryFile()
1676    # We have switched from the prebuilt futility binary to using the tool
1677    # (futility-host) built from the source. Override the setting in the old
1678    # TF.zip.
1679    futility = info_dict["futility"]
1680    if futility.startswith("prebuilts/"):
1681      futility = "futility-host"
1682    cmd = [info_dict["vboot_signer_cmd"], futility,
1683           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
1684           info_dict["vboot_key"] + ".vbprivk",
1685           info_dict["vboot_subkey"] + ".vbprivk",
1686           img_keyblock.name,
1687           img.name]
1688    RunAndCheckOutput(cmd)
1689
1690    # Clean up the temp files.
1691    img_unsigned.close()
1692    img_keyblock.close()
1693
1694  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1695  if info_dict.get("avb_enable") == "true":
1696    avbtool = info_dict["avb_avbtool"]
1697    if partition_name == "recovery":
1698      part_size = info_dict["recovery_size"]
1699    else:
1700      part_size = info_dict[image_name.replace(".img", "_size")]
1701    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1702           "--partition_size", str(part_size), "--partition_name",
1703           partition_name]
1704    AppendAVBSigningArgs(cmd, partition_name)
1705    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1706    if args and args.strip():
1707      cmd.extend(shlex.split(args))
1708    RunAndCheckOutput(cmd)
1709
1710  img.seek(0, os.SEEK_SET)
1711  data = img.read()
1712
1713  if has_ramdisk:
1714    ramdisk_img.close()
1715  img.close()
1716
1717  return data
1718
1719
1720def _SignBootableImage(image_path, prebuilt_name, partition_name,
1721                       info_dict=None):
1722  """Performs AVB signing for a prebuilt boot.img.
1723
1724  Args:
1725    image_path: The full path of the image, e.g., /path/to/boot.img.
1726    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
1727        boot-5.10.img, recovery.img.
1728    partition_name: The partition name, e.g., 'boot' or 'recovery'.
1729    info_dict: The information dict read from misc_info.txt.
1730  """
1731  if info_dict is None:
1732    info_dict = OPTIONS.info_dict
1733
1734  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1735  if info_dict.get("avb_enable") == "true":
1736    avbtool = info_dict["avb_avbtool"]
1737    if partition_name == "recovery":
1738      part_size = info_dict["recovery_size"]
1739    else:
1740      part_size = info_dict[prebuilt_name.replace(".img", "_size")]
1741
1742    cmd = [avbtool, "add_hash_footer", "--image", image_path,
1743           "--partition_size", str(part_size), "--partition_name",
1744           partition_name]
1745    AppendAVBSigningArgs(cmd, partition_name)
1746    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1747    if args and args.strip():
1748      cmd.extend(shlex.split(args))
1749    RunAndCheckOutput(cmd)
1750
1751
1752def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
1753                     info_dict=None, two_step_image=False):
1754  """Return a File object with the desired bootable image.
1755
1756  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
1757  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1758  the source files in 'unpack_dir'/'tree_subdir'."""
1759
1760  if info_dict is None:
1761    info_dict = OPTIONS.info_dict
1762
1763  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
1764  if os.path.exists(prebuilt_path):
1765    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
1766    return File.FromLocalFile(name, prebuilt_path)
1767
1768  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1769  if os.path.exists(prebuilt_path):
1770    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1771    return File.FromLocalFile(name, prebuilt_path)
1772
1773  prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
1774  if os.path.exists(prebuilt_path):
1775    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
1776    signed_img = MakeTempFile()
1777    shutil.copy(prebuilt_path, signed_img)
1778    partition_name = tree_subdir.lower()
1779    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
1780    return File.FromLocalFile(name, signed_img)
1781
1782  logger.info("building image from target_files %s...", tree_subdir)
1783
1784  # With system_root_image == "true", we don't pack a ramdisk into the boot
1785  # image, unless "recovery_as_boot" is specified, in which case we carry the
1786  # ramdisk for recovery.
1787  has_ramdisk = (info_dict.get("system_root_image") != "true" or
1788                 prebuilt_name != "boot.img" or
1789                 info_dict.get("recovery_as_boot") == "true")
1790
1791  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
1792  data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
1793                             os.path.join(unpack_dir, fs_config),
1794                             info_dict, has_ramdisk, two_step_image)
1795  if data:
1796    return File(name, data)
1797  return None
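

# Editor's usage sketch: fetching (or building) boot.img from an unpacked
# target_files directory. Assumes OPTIONS.info_dict is loaded; unpack_dir
# would typically come from UnzipTemp() and out_dir is hypothetical.
def _ExampleGetBootImage(unpack_dir, out_dir):
  boot_img = GetBootableImage("boot.img", "boot.img", unpack_dir, "BOOT")
  if boot_img:
    boot_img.WriteToDir(out_dir)  # writes <out_dir>/boot.img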
1798
1799
1800def _BuildVendorBootImage(sourcedir, info_dict=None):
1801  """Build a vendor boot image from the specified sourcedir.
1802
1803  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
1804  turn them into a vendor boot image.
1805
1806  Return the image data, or None if sourcedir does not appear to contain files
1807  for building the requested image.
1808  """
1809
1810  if info_dict is None:
1811    info_dict = OPTIONS.info_dict
1812
1813  img = tempfile.NamedTemporaryFile()
1814
1815  ramdisk_format = _GetRamdiskFormat(info_dict)
1816  ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)
1817
1818  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1819  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1820
1821  cmd = [mkbootimg]
1822
1823  fn = os.path.join(sourcedir, "dtb")
1824  if os.access(fn, os.F_OK):
1825    cmd.append("--dtb")
1826    cmd.append(fn)
1827
1828  fn = os.path.join(sourcedir, "vendor_cmdline")
1829  if os.access(fn, os.F_OK):
1830    cmd.append("--vendor_cmdline")
1831    cmd.append(open(fn).read().rstrip("\n"))
1832
1833  fn = os.path.join(sourcedir, "base")
1834  if os.access(fn, os.F_OK):
1835    cmd.append("--base")
1836    cmd.append(open(fn).read().rstrip("\n"))
1837
1838  fn = os.path.join(sourcedir, "pagesize")
1839  if os.access(fn, os.F_OK):
1840    cmd.append("--pagesize")
1841    cmd.append(open(fn).read().rstrip("\n"))
1842
1843  args = info_dict.get("mkbootimg_args")
1844  if args and args.strip():
1845    cmd.extend(shlex.split(args))
1846
1847  args = info_dict.get("mkbootimg_version_args")
1848  if args and args.strip():
1849    cmd.extend(shlex.split(args))
1850
1851  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
1852  cmd.extend(["--vendor_boot", img.name])
1853
1854  fn = os.path.join(sourcedir, "vendor_bootconfig")
1855  if os.access(fn, os.F_OK):
1856    cmd.append("--vendor_bootconfig")
1857    cmd.append(fn)
1858
1859  ramdisk_fragment_imgs = []
1860  fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
1861  if os.access(fn, os.F_OK):
1862    ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
1863    for ramdisk_fragment in ramdisk_fragments:
1864      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
1865                        ramdisk_fragment, "mkbootimg_args")
1866      cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
1867      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
1868                        ramdisk_fragment, "prebuilt_ramdisk")
1869      # Use prebuilt image if found, else create ramdisk from supplied files.
1870      if os.access(fn, os.F_OK):
1871        ramdisk_fragment_pathname = fn
1872      else:
1873        ramdisk_fragment_root = os.path.join(
1874            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
1875        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
1876                                            ramdisk_format=ramdisk_format)
1877        ramdisk_fragment_imgs.append(ramdisk_fragment_img)
1878        ramdisk_fragment_pathname = ramdisk_fragment_img.name
1879      cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
1880
1881  RunAndCheckOutput(cmd)
1882
1883  # AVB: if enabled, calculate and add hash.
1884  if info_dict.get("avb_enable") == "true":
1885    avbtool = info_dict["avb_avbtool"]
1886    part_size = info_dict["vendor_boot_size"]
1887    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1888           "--partition_size", str(part_size), "--partition_name", "vendor_boot"]
1889    AppendAVBSigningArgs(cmd, "vendor_boot")
1890    args = info_dict.get("avb_vendor_boot_add_hash_footer_args")
1891    if args and args.strip():
1892      cmd.extend(shlex.split(args))
1893    RunAndCheckOutput(cmd)
1894
1895  img.seek(0, os.SEEK_SET)
1896  data = img.read()
1897
1898  for f in ramdisk_fragment_imgs:
1899    f.close()
1900  ramdisk_img.close()
1901  img.close()
1902
1903  return data
1904
1905
1906def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
1907                       info_dict=None):
1908  """Return a File object with the desired vendor boot image.
1909
1910  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1911  the source files in 'unpack_dir'/'tree_subdir'."""
1912
1913  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1914  if os.path.exists(prebuilt_path):
1915    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1916    return File.FromLocalFile(name, prebuilt_path)
1917
1918  logger.info("building image from target_files %s...", tree_subdir)
1919
1920  if info_dict is None:
1921    info_dict = OPTIONS.info_dict
1922
1923  data = _BuildVendorBootImage(
1924      os.path.join(unpack_dir, tree_subdir), info_dict)
1925  if data:
1926    return File(name, data)
1927  return None
1928
1929
1930def Gunzip(in_filename, out_filename):
1931  """Gunzips the given gzip compressed file to a given output file."""
1932  with gzip.open(in_filename, "rb") as in_file, \
1933          open(out_filename, "wb") as out_file:
1934    shutil.copyfileobj(in_file, out_file)
1935
1936
1937def UnzipToDir(filename, dirname, patterns=None):
1938  """Unzips the archive to the given directory.
1939
1940  Args:
1941    filename: The name of the zip file to unzip.
1942    dirname: Where the unzipped files will land.
1943    patterns: Files to unzip from the archive. If omitted, will unzip the entire
1944        archive. Non-matching patterns will be filtered out. If there's no match
1945        after the filtering, no file will be unzipped.
1946  """
1947  cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
1948  if patterns is not None:
1949    # Filter out non-matching patterns. unzip will complain otherwise.
1950    with zipfile.ZipFile(filename, allowZip64=True) as input_zip:
1951      names = input_zip.namelist()
1952    filtered = [
1953        pattern for pattern in patterns if fnmatch.filter(names, pattern)]
1954
1955    # There aren't any matching files. Don't unzip anything.
1956    if not filtered:
1957      return
1958    cmd.extend(filtered)
1959
1960  RunAndCheckOutput(cmd)
1961
1962
1963def UnzipTemp(filename, pattern=None):
1964  """Unzips the given archive into a temporary directory and returns the name.
1965
1966  Args:
1967    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
1968    a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
1969
1970    pattern: Files to unzip from the archive. If omitted, will unzip the entire
1971    archive.
1972
1973  Returns:
1974    The name of the temporary directory.
1975  """
1976
1977  tmp = MakeTempDir(prefix="targetfiles-")
1978  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
1979  if m:
1980    UnzipToDir(m.group(1), tmp, pattern)
1981    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
1982    filename = m.group(1)
1983  else:
1984    UnzipToDir(filename, tmp, pattern)
1985
1986  return tmp
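

# Editor's usage sketch for the "foo.zip+bar.zip" form accepted above; the
# file names are hypothetical. Entries matching the patterns are extracted
# from each archive; the second zip lands under BOOTABLE_IMAGES/.
def _ExampleUnzipTemp():
  tmp = UnzipTemp("target_files.zip+bootable_images.zip",
                  ["META/*", "IMAGES/*"])
  return os.path.join(tmp, "BOOTABLE_IMAGES")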
1987
1988
1989def GetUserImage(which, tmpdir, input_zip,
1990                 info_dict=None,
1991                 allow_shared_blocks=None,
1992                 hashtree_info_generator=None,
1993                 reset_file_map=False):
1994  """Returns an Image object suitable for passing to BlockImageDiff.
1995
1996  This function loads the specified image from the given path. If the specified
1997  image is sparse, it also performs additional processing for OTA purposes. For
1998  example, it always adds block 0 to the clobbered blocks list. It also detects
1999  files that cannot be reconstructed from the block list, for which we should
2000  avoid applying imgdiff.
2001
2002  Args:
2003    which: The partition name.
2004    tmpdir: The directory that contains the prebuilt image and block map file.
2005    input_zip: The target-files ZIP archive.
2006    info_dict: The dict to be looked up for relevant info.
2007    allow_shared_blocks: If image is sparse, whether having shared blocks is
2008        allowed. If None, it is looked up from info_dict.
2009    hashtree_info_generator: If present and image is sparse, generates the
2010        hashtree_info for this sparse image.
2011    reset_file_map: If true and image is sparse, reset file map before returning
2012        the image.
2013  Returns:
2014    An Image object. If it is a sparse image and reset_file_map is False, the
2015    image will have file_map info loaded.
2016  """
2017  if info_dict is None:
2018    info_dict = LoadInfoDict(input_zip)
2019
2020  is_sparse = info_dict.get("extfs_sparse_flag")
2021
2022  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
2023  # shared blocks (i.e. some blocks will show up in multiple files' block
2024  # list). We can only allocate such shared blocks to the first "owner", and
2025  # disable imgdiff for all later occurrences.
2026  if allow_shared_blocks is None:
2027    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
2028
2029  if is_sparse:
2030    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
2031                         hashtree_info_generator)
2032    if reset_file_map:
2033      img.ResetFileMap()
2034    return img
2035  return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
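

# Editor's usage sketch: loading the system image from a target_files zip for
# block-based diffing. The target_files path is hypothetical.
def _ExampleGetUserImage(target_files):
  tmpdir = UnzipTemp(target_files, ["IMAGES/*", "META/*"])
  with zipfile.ZipFile(target_files, allowZip64=True) as input_zip:
    return GetUserImage("system", tmpdir, input_zip)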
2036
2037
2038def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
2039  """Returns a Image object suitable for passing to BlockImageDiff.
2040
2041  This function loads the specified non-sparse image from the given path.
2042
2043  Args:
2044    which: The partition name.
2045    tmpdir: The directory that contains the prebuilt image and block map file.
2046  Returns:
2047    An Image object.
2048  """
2049  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2050  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2051
2052  # The image and map files must have been created prior to calling
2053  # ota_from_target_files.py (since LMP).
2054  assert os.path.exists(path) and os.path.exists(mappath)
2055
2056  return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
2057
2058
2059def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
2060                   hashtree_info_generator=None):
2061  """Returns a SparseImage object suitable for passing to BlockImageDiff.
2062
2063  This function loads the specified sparse image from the given path, and
2064  performs additional processing for OTA purposes. For example, it always adds
2065  block 0 to the clobbered blocks list. It also detects files that cannot be
2066  reconstructed from the block list, for which we should avoid applying imgdiff.
2067
2068  Args:
2069    which: The partition name, e.g. "system", "vendor".
2070    tmpdir: The directory that contains the prebuilt image and block map file.
2071    input_zip: The target-files ZIP archive.
2072    allow_shared_blocks: Whether having shared blocks is allowed.
2073    hashtree_info_generator: If present, generates the hashtree_info for this
2074        sparse image.
2075  Returns:
2076    A SparseImage object, with file_map info loaded.
2077  """
2078  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2079  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2080
2081  # The image and map files must have been created prior to calling
2082  # ota_from_target_files.py (since LMP).
2083  assert os.path.exists(path) and os.path.exists(mappath)
2084
2085  # In ext4 filesystems, block 0 might be changed even when mounted R/O. We add
2086  # it to clobbered_blocks so that it will be written to the target
2087  # unconditionally. Note that it is still part of the care_map. (Bug: 20939131)
2088  clobbered_blocks = "0"
2089
2090  image = sparse_img.SparseImage(
2091      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
2092      hashtree_info_generator=hashtree_info_generator)
2093
2094  # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
2095  # if they contain all zeros. We can't reconstruct such a file from its block
2096  # list. Tag such entries accordingly. (Bug: 65213616)
2097  for entry in image.file_map:
2098    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
2099    if not entry.startswith('/'):
2100      continue
2101
2102    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
2103    # filename listed in system.map may contain an additional leading slash
2104    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
2105    # results.
2106    # And handle another special case, where files not under /system
2107    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
2108    arcname = entry.lstrip('/')
2109    if which == 'system' and not arcname.startswith('system'):
2110      arcname = 'ROOT/' + arcname
2111    else:
2112      arcname = arcname.replace(which, which.upper(), 1)
2113
2114    assert arcname in input_zip.namelist(), \
2115        "Failed to find the ZIP entry for {}".format(entry)
2116
2117    info = input_zip.getinfo(arcname)
2118    ranges = image.file_map[entry]
2119
2120    # If a RangeSet has been tagged as using shared blocks while loading the
2121    # image, check the original block list to determine its completeness. Note
2122    # that the 'incomplete' flag would be tagged to the original RangeSet only.
2123    if ranges.extra.get('uses_shared_blocks'):
2124      ranges = ranges.extra['uses_shared_blocks']
2125
2126    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
2127      ranges.extra['incomplete'] = True
2128
2129  return image
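

# Worked example of the incompleteness check above (editor's illustration): a
# 10,000-byte file rounds up to ceil(10000 / 4096) = 3 blocks (12,288 bytes).
# If its (shared-blocks-adjusted) RangeSet covers only 2 blocks (8,192 bytes),
# then 12288 > 8192 and the entry is tagged 'incomplete', so imgdiff is
# avoided for it.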
2130
2131
2132def GetKeyPasswords(keylist):
2133  """Given a list of keys, prompt the user to enter passwords for
2134  those that require them.  Return a {key: password} dict; the password
2135  will be None if the key has no password."""
2136
2137  no_passwords = []
2138  need_passwords = []
2139  key_passwords = {}
2140  devnull = open("/dev/null", "w+b")
2141  for k in sorted(keylist):
2142    # We don't need a password for things that aren't really keys.
2143    if k in SPECIAL_CERT_STRINGS:
2144      no_passwords.append(k)
2145      continue
2146
2147    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2148             "-inform", "DER", "-nocrypt"],
2149            stdin=devnull.fileno(),
2150            stdout=devnull.fileno(),
2151            stderr=subprocess.STDOUT)
2152    p.communicate()
2153    if p.returncode == 0:
2154      # Definitely an unencrypted key.
2155      no_passwords.append(k)
2156    else:
2157      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2158               "-inform", "DER", "-passin", "pass:"],
2159              stdin=devnull.fileno(),
2160              stdout=devnull.fileno(),
2161              stderr=subprocess.PIPE)
2162      _, stderr = p.communicate()
2163      if p.returncode == 0:
2164        # Encrypted key with empty string as password.
2165        key_passwords[k] = ''
2166      elif stderr.startswith('Error decrypting key'):
2167        # Definitely encrypted key.
2168        # It would have said "Error reading key" if it didn't parse correctly.
2169        need_passwords.append(k)
2170      else:
2171        # Potentially, a type of key that openssl doesn't understand.
2172        # We'll let the routines in signapk.jar handle it.
2173        no_passwords.append(k)
2174  devnull.close()
2175
2176  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
2177  key_passwords.update(dict.fromkeys(no_passwords))
2178  return key_passwords
2179
2180
2181def GetMinSdkVersion(apk_name):
2182  """Gets the minSdkVersion declared in the APK.
2183
2184  It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
2185  This can be either a decimal number (API Level) or a codename.
2186
2187  Args:
2188    apk_name: The APK filename.
2189
2190  Returns:
2191    The parsed SDK version string.
2192
2193  Raises:
2194    ExternalError: On failing to obtain the min SDK version.
2195  """
2196  proc = Run(
2197      ["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE,
2198      stderr=subprocess.PIPE)
2199  stdoutdata, stderrdata = proc.communicate()
2200  if proc.returncode != 0:
2201    raise ExternalError(
2202        "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
2203            proc.returncode, stdoutdata, stderrdata))
2204
2205  for line in stdoutdata.split("\n"):
2206    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
2207    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
2208    if m:
2209      return m.group(1)
2210  raise ExternalError("No minSdkVersion returned by aapt2")
2211
2212
2213def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
2214  """Returns the minSdkVersion declared in the APK as a number (API Level).
2215
2216  If minSdkVersion is set to a codename, it is translated to a number using the
2217  provided map.
2218
2219  Args:
2220    apk_name: The APK filename.
2221
2222  Returns:
2223    The parsed SDK version number.
2224
2225  Raises:
2226    ExternalError: On failing to get the min SDK version number.
2227  """
2228  version = GetMinSdkVersion(apk_name)
2229  try:
2230    return int(version)
2231  except ValueError:
2232    # Not a decimal number. Codename?
2233    if version in codename_to_api_level_map:
2234      return codename_to_api_level_map[version]
2235    raise ExternalError(
2236        "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
2237            version, codename_to_api_level_map))
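

# Editor's usage sketch: the map translates a development codename to its API
# level when minSdkVersion isn't numeric; the mappings here are illustrative.
def _ExampleMinSdk(apk_path):
  return GetMinSdkVersionInt(apk_path, {"S": 31, "Tiramisu": 33})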
2238
2239
2240def SignFile(input_name, output_name, key, password, min_api_level=None,
2241             codename_to_api_level_map=None, whole_file=False,
2242             extra_signapk_args=None):
2243  """Sign the input_name zip/jar/apk, producing output_name.  Use the
2244  given key and password (the latter may be None if the key does not
2245  have a password).
2246
2247  If whole_file is true, use the "-w" option to SignApk to embed a
2248  signature that covers the whole file in the archive comment of the
2249  zip file.
2250
2251  min_api_level is the API Level (int) of the oldest platform this file may end
2252  up on. If not specified for an APK, the API Level is obtained by interpreting
2253  the minSdkVersion attribute of the APK's AndroidManifest.xml.
2254
2255  codename_to_api_level_map is needed to translate the codename which may be
2256  encountered as the APK's minSdkVersion.
2257
2258  Caller may optionally specify extra args to be passed to SignApk, which
2259  defaults to OPTIONS.extra_signapk_args if omitted.
2260  """
2261  if codename_to_api_level_map is None:
2262    codename_to_api_level_map = {}
2263  if extra_signapk_args is None:
2264    extra_signapk_args = OPTIONS.extra_signapk_args
2265
2266  java_library_path = os.path.join(
2267      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
2268
2269  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
2270         ["-Djava.library.path=" + java_library_path,
2271          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
2272         extra_signapk_args)
2273  if whole_file:
2274    cmd.append("-w")
2275
2276  min_sdk_version = min_api_level
2277  if min_sdk_version is None:
2278    if not whole_file:
2279      min_sdk_version = GetMinSdkVersionInt(
2280          input_name, codename_to_api_level_map)
2281  if min_sdk_version is not None:
2282    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
2283
2284  cmd.extend([key + OPTIONS.public_key_suffix,
2285              key + OPTIONS.private_key_suffix,
2286              input_name, output_name])
2287
2288  proc = Run(cmd, stdin=subprocess.PIPE)
2289  if password is not None:
2290    password += "\n"
2291  stdoutdata, _ = proc.communicate(password)
2292  if proc.returncode != 0:
2293    raise ExternalError(
2294        "Failed to run signapk.jar: return code {}:\n{}".format(
2295            proc.returncode, stdoutdata))
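

# Editor's usage sketch: re-signing an APK with a local key pair. The key
# basename "certs/platform" is hypothetical; SignFile appends the configured
# public/private key suffixes itself. Password is None for an unencrypted key.
def _ExampleSignApk(in_apk, out_apk):
  SignFile(in_apk, out_apk, "certs/platform", None)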
2296
2297
2298def CheckSize(data, target, info_dict):
2299  """Checks the data string passed against the max size limit.
2300
2301  For non-AVB images, raise an exception if the data is too big. Print a warning
2302  if the data is nearing the maximum size.
2303
2304  For AVB images, the actual image size should be identical to the limit.
2305
2306  Args:
2307    data: A string that contains all the data for the partition.
2308    target: The partition name. The ".img" suffix is optional.
2309    info_dict: The dict to be looked up for relevant info.
2310  """
2311  if target.endswith(".img"):
2312    target = target[:-4]
2313  mount_point = "/" + target
2314
2315  fs_type = None
2316  limit = None
2317  if info_dict["fstab"]:
2318    if mount_point == "/userdata":
2319      mount_point = "/data"
2320    p = info_dict["fstab"][mount_point]
2321    fs_type = p.fs_type
2322    device = p.device
2323    if "/" in device:
2324      device = device[device.rfind("/")+1:]
2325    limit = info_dict.get(device + "_size")
2326  if not fs_type or not limit:
2327    return
2328
2329  size = len(data)
2330  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
2331  # path.
2332  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
2333    if size != limit:
2334      raise ExternalError(
2335          "Mismatching image size for %s: expected %d actual %d" % (
2336              target, limit, size))
2337  else:
2338    pct = float(size) * 100.0 / limit
2339    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
2340    if pct >= 99.0:
2341      raise ExternalError(msg)
2342
2343    if pct >= 95.0:
2344      logger.warning("\n  WARNING: %s\n", msg)
2345    else:
2346      logger.info("  %s", msg)
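

# Worked example for the non-AVB branch above (editor's illustration): with
# limit = 100 MiB (104,857,600 bytes), an image of 99,614,720 bytes sits at
# exactly 95.00% and logs a WARNING, while 103,809,024 bytes (99.00%) raises
# ExternalError.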
2347
2348
2349def ReadApkCerts(tf_zip):
2350  """Parses the APK certs info from a given target-files zip.
2351
2352  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
2353  tuple with the following elements: (1) a dictionary that maps packages to
2354  certs (based on the "certificate" and "private_key" attributes in the file);
2355  (2) a string representing the extension of compressed APKs in the target files
2356  (e.g. ".gz", ".bro").
2357
2358  Args:
2359    tf_zip: The input target_files ZipFile (already open).
2360
2361  Returns:
2362    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
2363        the extension string of compressed APKs (e.g. ".gz"), or None if there's
2364        no compressed APKs.
2365  """
2366  certmap = {}
2367  compressed_extension = None
2368
2369  # META/apkcerts.txt contains the info for _all_ the packages known at build
2370  # time. Filter out the ones that are not installed.
2371  installed_files = set()
2372  for name in tf_zip.namelist():
2373    basename = os.path.basename(name)
2374    if basename:
2375      installed_files.add(basename)
2376
2377  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
2378    line = line.strip()
2379    if not line:
2380      continue
2381    m = re.match(
2382        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
2383        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
2384        r'(\s+partition="(?P<PARTITION>.*?)")?$',
2385        line)
2386    if not m:
2387      continue
2388
2389    matches = m.groupdict()
2390    cert = matches["CERT"]
2391    privkey = matches["PRIVKEY"]
2392    name = matches["NAME"]
2393    this_compressed_extension = matches["COMPRESSED"]
2394
2395    public_key_suffix_len = len(OPTIONS.public_key_suffix)
2396    private_key_suffix_len = len(OPTIONS.private_key_suffix)
2397    if cert in SPECIAL_CERT_STRINGS and not privkey:
2398      certmap[name] = cert
2399    elif (cert.endswith(OPTIONS.public_key_suffix) and
2400          privkey.endswith(OPTIONS.private_key_suffix) and
2401          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
2402      certmap[name] = cert[:-public_key_suffix_len]
2403    else:
2404      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
2405
2406    if not this_compressed_extension:
2407      continue
2408
2409    # Only count the installed files.
2410    filename = name + '.' + this_compressed_extension
2411    if filename not in installed_files:
2412      continue
2413
2414    # Make sure that all the values in the compression map have the same
2415    # extension. We don't support multiple compression methods in the same
2416    # system image.
2417    if compressed_extension:
2418      if this_compressed_extension != compressed_extension:
2419        raise ValueError(
2420            "Multiple compressed extensions: {} vs {}".format(
2421                compressed_extension, this_compressed_extension))
2422    else:
2423      compressed_extension = this_compressed_extension
2424
2425  return (certmap,
2426          ("." + compressed_extension) if compressed_extension else None)
2427
2428
2429COMMON_DOCSTRING = """
2430Global options
2431
2432  -p  (--path) <dir>
2433      Prepend <dir>/bin to the list of places to search for binaries run by this
2434      script, and expect to find jars in <dir>/framework.
2435
2436  -s  (--device_specific) <file>
2437      Path to the Python module containing device-specific releasetools code.
2438
2439  -x  (--extra) <key=value>
2440      Add a key/value pair to the 'extras' dict, which device-specific extension
2441      code may look at.
2442
2443  -v  (--verbose)
2444      Show command lines being executed.
2445
2446  -h  (--help)
2447      Display this usage message and exit.
2448
2449  --logfile <file>
2450      Put verbose logs to the specified file (regardless of the --verbose option).
2451"""
2452
2453
2454def Usage(docstring):
2455  print(docstring.rstrip("\n"))
2456  print(COMMON_DOCSTRING)
2457
2458
2459def ParseOptions(argv,
2460                 docstring,
2461                 extra_opts="", extra_long_opts=(),
2462                 extra_option_handler=None):
2463  """Parse the options in argv and return any arguments that aren't
2464  flags.  docstring is the calling module's docstring, to be displayed
2465  for errors and -h.  extra_opts and extra_long_opts are for flags
2466  defined by the caller, which are processed by passing them to
2467  extra_option_handler."""
2468
2469  try:
2470    opts, args = getopt.getopt(
2471        argv, "hvp:s:x:" + extra_opts,
2472        ["help", "verbose", "path=", "signapk_path=",
2473         "signapk_shared_library_path=", "extra_signapk_args=",
2474         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
2475         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
2476         "verity_signer_path=", "verity_signer_args=", "device_specific=",
2477         "extra=", "logfile=", "aftl_tool_path=", "aftl_server=",
2478         "aftl_key_path=", "aftl_manufacturer_key_path=",
2479         "aftl_signer_helper="] + list(extra_long_opts))
2480  except getopt.GetoptError as err:
2481    Usage(docstring)
2482    print("**", str(err), "**")
2483    sys.exit(2)
2484
2485  for o, a in opts:
2486    if o in ("-h", "--help"):
2487      Usage(docstring)
2488      sys.exit()
2489    elif o in ("-v", "--verbose"):
2490      OPTIONS.verbose = True
2491    elif o in ("-p", "--path"):
2492      OPTIONS.search_path = a
2493    elif o in ("--signapk_path",):
2494      OPTIONS.signapk_path = a
2495    elif o in ("--signapk_shared_library_path",):
2496      OPTIONS.signapk_shared_library_path = a
2497    elif o in ("--extra_signapk_args",):
2498      OPTIONS.extra_signapk_args = shlex.split(a)
2499    elif o in ("--java_path",):
2500      OPTIONS.java_path = a
2501    elif o in ("--java_args",):
2502      OPTIONS.java_args = shlex.split(a)
2503    elif o in ("--android_jar_path",):
2504      OPTIONS.android_jar_path = a
2505    elif o in ("--public_key_suffix",):
2506      OPTIONS.public_key_suffix = a
2507    elif o in ("--private_key_suffix",):
2508      OPTIONS.private_key_suffix = a
2509    elif o in ("--boot_signer_path",):
2510      OPTIONS.boot_signer_path = a
2511    elif o in ("--boot_signer_args",):
2512      OPTIONS.boot_signer_args = shlex.split(a)
2513    elif o in ("--verity_signer_path",):
2514      OPTIONS.verity_signer_path = a
2515    elif o in ("--verity_signer_args",):
2516      OPTIONS.verity_signer_args = shlex.split(a)
2517    elif o in ("--aftl_tool_path",):
2518      OPTIONS.aftl_tool_path = a
2519    elif o in ("--aftl_server",):
2520      OPTIONS.aftl_server = a
2521    elif o in ("--aftl_key_path",):
2522      OPTIONS.aftl_key_path = a
2523    elif o in ("--aftl_manufacturer_key_path",):
2524      OPTIONS.aftl_manufacturer_key_path = a
2525    elif o in ("--aftl_signer_helper",):
2526      OPTIONS.aftl_signer_helper = a
2527    elif o in ("-s", "--device_specific"):
2528      OPTIONS.device_specific = a
2529    elif o in ("-x", "--extra"):
2530      key, value = a.split("=", 1)
2531      OPTIONS.extras[key] = value
2532    elif o in ("--logfile",):
2533      OPTIONS.logfile = a
2534    else:
2535      if extra_option_handler is None or not extra_option_handler(o, a):
2536        assert False, "unknown option \"%s\"" % (o,)
2537
2538  if OPTIONS.search_path:
2539    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
2540                          os.pathsep + os.environ["PATH"])
2541
2542  return args
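

# Editor's sketch: a caller layering one extra flag on top of the common
# options. The --board flag and usage string are hypothetical.
def _ExampleParseOptions(argv):
  def handler(o, a):
    if o == "--board":
      OPTIONS.extras["board"] = a
      return True
    return False
  return ParseOptions(argv, "usage: example [--board <name>]",
                      extra_long_opts=["board="],
                      extra_option_handler=handler)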
2543
2544
2545def MakeTempFile(prefix='tmp', suffix=''):
2546  """Make a temp file and add it to the list of things to be deleted
2547  when Cleanup() is called.  Return the filename."""
2548  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
2549  os.close(fd)
2550  OPTIONS.tempfiles.append(fn)
2551  return fn
2552
2553
2554def MakeTempDir(prefix='tmp', suffix=''):
2555  """Makes a temporary dir that will be cleaned up with a call to Cleanup().
2556
2557  Returns:
2558    The absolute pathname of the new directory.
2559  """
2560  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
2561  OPTIONS.tempfiles.append(dir_name)
2562  return dir_name
2563
2564
2565def Cleanup():
2566  for i in OPTIONS.tempfiles:
2567    if os.path.isdir(i):
2568      shutil.rmtree(i, ignore_errors=True)
2569    else:
2570      os.remove(i)
2571  del OPTIONS.tempfiles[:]
2572
2573
2574class PasswordManager(object):
2575  def __init__(self):
2576    self.editor = os.getenv("EDITOR")
2577    self.pwfile = os.getenv("ANDROID_PW_FILE")
2578
2579  def GetPasswords(self, items):
2580    """Get passwords corresponding to each string in 'items',
2581    returning a dict.  (The dict may have keys in addition to the
2582    values in 'items'.)
2583
2584    Uses the passwords in $ANDROID_PW_FILE if available, letting the
2585    user edit that file to add more needed passwords.  If no editor is
2586    available, or $ANDROID_PW_FILE isn't defined, prompts the user
2587    interactively in the ordinary way.
2588    """
2589
2590    current = self.ReadFile()
2591
2592    first = True
2593    while True:
2594      missing = []
2595      for i in items:
2596        if i not in current or not current[i]:
2597          missing.append(i)
2598      # Are all the passwords already in the file?
2599      if not missing:
2600        return current
2601
2602      for i in missing:
2603        current[i] = ""
2604
2605      if not first:
2606        print("key file %s still missing some passwords." % (self.pwfile,))
2607        if sys.version_info[0] >= 3:
2608          raw_input = input  # pylint: disable=redefined-builtin
2609        answer = raw_input("try to edit again? [y]> ").strip()
2610        if answer and answer[0] not in 'yY':
2611          raise RuntimeError("key passwords unavailable")
2612      first = False
2613
2614      current = self.UpdateAndReadFile(current)
2615
2616  def PromptResult(self, current):  # pylint: disable=no-self-use
2617    """Prompt the user to enter a value (password) for each key in
2618    'current' whose value is false.  Returns a new dict with all the
2619    values.
2620    """
2621    result = {}
2622    for k, v in sorted(current.items()):
2623      if v:
2624        result[k] = v
2625      else:
2626        while True:
2627          result[k] = getpass.getpass(
2628              "Enter password for %s key> " % k).strip()
2629          if result[k]:
2630            break
2631    return result
2632
2633  def UpdateAndReadFile(self, current):
2634    if not self.editor or not self.pwfile:
2635      return self.PromptResult(current)
2636
2637    f = open(self.pwfile, "w")
2638    os.chmod(self.pwfile, 0o600)
2639    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
2640    f.write("# (Additional spaces are harmless.)\n\n")
2641
2642    first_line = None
2643    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
2644    for i, (_, k, v) in enumerate(sorted_list):
2645      f.write("[[[  %s  ]]] %s\n" % (v, k))
2646      if not v and first_line is None:
2647        # position cursor on first line with no password.
2648        first_line = i + 4
2649    f.close()
2650
2651    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
2652
2653    return self.ReadFile()
2654
2655  def ReadFile(self):
2656    result = {}
2657    if self.pwfile is None:
2658      return result
2659    try:
2660      f = open(self.pwfile, "r")
2661      for line in f:
2662        line = line.strip()
2663        if not line or line[0] == '#':
2664          continue
2665        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
2666        if not m:
2667          logger.warning("Failed to parse password file: %s", line)
2668        else:
2669          result[m.group(2)] = m.group(1)
2670      f.close()
2671    except IOError as e:
2672      if e.errno != errno.ENOENT:
2673        logger.exception("Error reading password file:")
2674    return result
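

# An illustrative $ANDROID_PW_FILE entry in the format ReadFile() parses
# (the key name and password are hypothetical):
#   [[[  hunter2  ]]] certs/releasekey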
2675
2676
2677def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
2678             compress_type=None):
2679
2680  # http://b/18015246
2681  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
2682  # for files larger than 2GiB. We can work around this by adjusting their
2683  # limit. Note that `zipfile.writestr()` will not work for strings larger than
2684  # 2GiB. The Python interpreter sometimes rejects strings that large (though
2685  # it isn't clear to me exactly what circumstances cause this).
2686  # `zipfile.write()` must be used directly to work around this.
2687  #
2688  # This mess can be avoided if we port to python3.
2689  saved_zip64_limit = zipfile.ZIP64_LIMIT
2690  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2691
2692  if compress_type is None:
2693    compress_type = zip_file.compression
2694  if arcname is None:
2695    arcname = filename
2696
2697  saved_stat = os.stat(filename)
2698
2699  try:
2700    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
2701    # file to be zipped and reset it when we're done.
2702    os.chmod(filename, perms)
2703
2704    # Use a fixed timestamp so the output is repeatable.
2705    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
2706    # intentional. zip stores datetimes in local time without a time zone
2707    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
2708    # in the zip archive.
2709    local_epoch = datetime.datetime.fromtimestamp(0)
2710    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
2711    os.utime(filename, (timestamp, timestamp))
2712
2713    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
2714  finally:
2715    os.chmod(filename, saved_stat.st_mode)
2716    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
2717    zipfile.ZIP64_LIMIT = saved_zip64_limit
2718
2719
2720def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
2721                compress_type=None):
2722  """Wrap zipfile.writestr() function to work around the zip64 limit.
2723
2724  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
2725  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
2726  when calling crc32(bytes).
2727
2728  But it still works fine to write a shorter string into a large zip file.
2729  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
2730  when we know the string won't be too long.
2731  """
2732
2733  saved_zip64_limit = zipfile.ZIP64_LIMIT
2734  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2735
2736  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
2737    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
2738    zinfo.compress_type = zip_file.compression
2739    if perms is None:
2740      perms = 0o100644
2741  else:
2742    zinfo = zinfo_or_arcname
2743    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
2744    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for
2745    # such a case (since
2746    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
2747    # which seems to make more sense. Otherwise the entry will have 0o000 as the
2748    # permission bits. We follow the logic in Python 3 to get consistent
2749    # behavior between using the two versions.
2750    if not zinfo.external_attr:
2751      zinfo.external_attr = 0o600 << 16
2752
2753  # If compress_type is given, it overrides the value in zinfo.
2754  if compress_type is not None:
2755    zinfo.compress_type = compress_type
2756
2757  # If perms is given, it takes priority.
2758  if perms is not None:
2759    # If perms doesn't set the file type, mark it as a regular file.
2760    if perms & 0o770000 == 0:
2761      perms |= 0o100000
2762    zinfo.external_attr = perms << 16
2763
2764  # Use a fixed timestamp so the output is repeatable.
2765  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
2766
2767  zip_file.writestr(zinfo, data)
2768  zipfile.ZIP64_LIMIT = saved_zip64_limit
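

# Editor's usage sketch: writing an executable entry. perms=0o755 gets the
# regular-file type bit OR'ed in (0o100755) and lands in external_attr, so
# the entry unzips with the expected mode. Name and content are hypothetical.
def _ExampleZipWriteStr(zip_file):
  ZipWriteStr(zip_file, "bin/hook.sh", "#!/system/bin/sh\n", perms=0o755)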
2769
2770
2771def ZipDelete(zip_filename, entries):
2772  """Deletes entries from a ZIP file.
2773
2774  Since deleting entries from a ZIP file is not supported, it shells out to
2775  'zip -d'.
2776
2777  Args:
2778    zip_filename: The name of the ZIP file.
2779    entries: The name of the entry, or the list of names to be deleted.
2780
2781  Raises:
2782    AssertionError: In case of non-zero return from 'zip'.
2783  """
2784  if isinstance(entries, str):
2785    entries = [entries]
2786  cmd = ["zip", "-d", zip_filename] + entries
2787  RunAndCheckOutput(cmd)
2788
2789
2790def ZipClose(zip_file):
2791  # http://b/18015246
2792  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
2793  # central directory.
2794  saved_zip64_limit = zipfile.ZIP64_LIMIT
2795  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2796
2797  zip_file.close()
2798
2799  zipfile.ZIP64_LIMIT = saved_zip64_limit
2800
2801
2802class DeviceSpecificParams(object):
2803  module = None
2804
2805  def __init__(self, **kwargs):
2806    """Keyword arguments to the constructor become attributes of this
2807    object, which is passed to all functions in the device-specific
2808    module."""
2809    for k, v in kwargs.items():
2810      setattr(self, k, v)
2811    self.extras = OPTIONS.extras
2812
2813    if self.module is None:
2814      path = OPTIONS.device_specific
2815      if not path:
2816        return
2817      try:
2818        if os.path.isdir(path):
2819          info = imp.find_module("releasetools", [path])
2820        else:
2821          d, f = os.path.split(path)
2822          b, x = os.path.splitext(f)
2823          if x == ".py":
2824            f = b
2825          info = imp.find_module(f, [d])
2826        logger.info("loaded device-specific extensions from %s", path)
2827        self.module = imp.load_module("device_specific", *info)
2828      except ImportError:
2829        logger.info("unable to load device-specific module; assuming none")
2830
2831  def _DoCall(self, function_name, *args, **kwargs):
2832    """Call the named function in the device-specific module, passing
2833    the given args and kwargs.  The first argument to the call will be
2834    the DeviceSpecific object itself.  If there is no module, or the
2835    module does not define the function, return the value of the
2836    'default' kwarg (which itself defaults to None)."""
2837    if self.module is None or not hasattr(self.module, function_name):
2838      return kwargs.get("default")
2839    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
2840
2841  def FullOTA_Assertions(self):
2842    """Called after emitting the block of assertions at the top of a
2843    full OTA package.  Implementations can add whatever additional
2844    assertions they like."""
2845    return self._DoCall("FullOTA_Assertions")
2846
2847  def FullOTA_InstallBegin(self):
2848    """Called at the start of full OTA installation."""
2849    return self._DoCall("FullOTA_InstallBegin")
2850
2851  def FullOTA_GetBlockDifferences(self):
2852    """Called during full OTA installation and verification.
2853    Implementations should return a list of BlockDifference objects describing
2854    the update on each additional partition.
2855    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
2887    """Called during incremental OTA installation and verification.
2888    Implementation should return a list of BlockDifference objects describing
2889    the update on each additional partitions.
2890    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    return self._DoCall("VerifyOTA_Assertions")

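# For reference, a device-specific module is itself plain Python. A minimal
# sketch of what one might look like (illustrative only; the hook names must
# match the methods above, and every hook receives the DeviceSpecificParams
# object as its first argument):
#
#   # device/<vendor>/<board>/releasetools.py
#   def FullOTA_InstallEnd(info):
#     # `info` carries the keyword args passed to DeviceSpecificParams(),
#     # e.g. info.input_zip and info.script in the OTA generation flow.
#     info.script.Print("Performing device-specific install steps...")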

class File(object):
  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    ZipWriteStr(z, self.name, self.data, compress_type=compression)

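# Illustrative sketch (not called from this module): typical File usage when
# staging a payload into an output zip. The paths are hypothetical, and
# `output_zip` is assumed to be a zipfile.ZipFile open for writing.
def _ExampleAddFileToZip(output_zip):
  f = File.FromLocalFile("firmware.bin", "/tmp/firmware.bin")
  logger.info("%s: %d bytes, sha1 %s", f.name, f.size, f.sha1)
  f.AddToZip(output_zip, compression=zipfile.ZIP_DEFLATED)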

DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch

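# Illustrative sketch (hypothetical caller): computing a single patch between
# a target and a source File and checking the result.
def _ExampleSinglePatch(target_file, source_file):
  d = Difference(target_file, source_file)
  d.ComputePatch()
  tf, sf, patch = d.GetPatch()
  if patch is None:
    logger.error("patch generation failed for %s", tf.name)
  else:
    logger.info("patch for %s is %d bytes (source %s)",
                tf.name, len(patch), sf.name)
  return patch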

def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()

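# Sketch of how ComputeDifferences() is typically driven (names illustrative):
# pair up matching target/source files, batch them, then read the patches.
#
#   diffs = [Difference(tf, sf) for tf, sf in matched_file_pairs]
#   ComputeDifferences(diffs)   # requires OPTIONS.worker_threads to be set
#   patches = [d.GetPatch() for d in diffs]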

class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # reduce the size much further. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      logger.info("Compressing %s.new.dat with brotli", self.partition)
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Returns the hash value for the given number of all-zero blocks."""
    # sha1() requires bytes input; a plain str would raise TypeError on
    # Python 3.
    zero_block = b'\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

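# Illustrative sketch (hypothetical flow): emitting an incremental block-based
# update for one partition into an edify script. `tgt_img` and `src_img` are
# assumed to be sparse_img.SparseImage objects, `script` an edify generator,
# and OPTIONS.info_dict must already be loaded.
def _ExampleEmitBlockUpdate(script, output_zip, tgt_img, src_img):
  diff = BlockDifference("system", tgt_img, src_img, check_first_block=True)
  diff.WriteVerifyScript(script, touched_blocks_only=True)
  diff.WriteScript(script, output_zip, progress=0.8, write_verify_script=True)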

# Expose these two classes to support vendor-specific scripts
DataImage = images.DataImage
EmptyImage = images.EmptyImage


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """
  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has the slotselect
  option (unless check_no_slot is explicitly set to False).
  """
  fstab = info["fstab"]
  if fstab:
    if check_no_slot:
      assert not fstab[mount_point].slotselect, \
          "Use GetTypeAndDeviceExpr instead"
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  raise KeyError


def GetTypeAndDeviceExpr(mount_point, info):
  """
  Returns the partition type (as mapped by PARTITION_TYPES) for the partition,
  and an edify expression that evaluates to the device path at runtime.
  """
  fstab = info["fstab"]
  if fstab:
    p = fstab[mount_point]
    device_expr = '"%s"' % fstab[mount_point].device
    if p.slotselect:
      device_expr = 'add_slot_suffix(%s)' % device_expr
    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
  raise KeyError

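# For example (hypothetical fstab), a slot-aware /system entry would yield:
#
#   GetTypeAndDeviceExpr("/system", info)
#   # -> ("EMMC", 'add_slot_suffix("/dev/block/by-name/system")')
#
# while a non-slotted entry returns the quoted device path directly.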

def GetEntryForDevice(fstab, device):
  """
  Returns:
    The first entry in fstab whose device is the given value.
  """
  if not fstab:
    return None
  for mount_point in fstab:
    if fstab[mount_point].device == device:
      return fstab[mount_point]
  return None


def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Returns:
    The decoded certificate bytes.
  """
  cert_buffer = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert_buffer.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = base64.b64decode("".join(cert_buffer))
  return cert

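# Illustrative sketch: converting a PEM certificate on disk to DER bytes and
# fingerprinting it, equivalent to
# `openssl x509 -in cert.pem -outform DER | sha256sum`. The path is
# hypothetical.
def _ExampleCertFingerprint():
  with open("cert.pem") as f:
    der = ParseCertificate(f.read())
  return sha256(der).hexdigest()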

def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior of '-out' differs between openssl 1.1 and openssl 1.0: while
  # openssl 1.1 writes the key to the filename given after '-out', openssl 1.0
  # (both 1.0.1 and 1.0.2) doesn't. So we collect the output from stdout
  # instead.
  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderrdata = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
  return pubkey


def ExtractAvbPublicKey(avbtool, key):
  """Extracts the AVB public key from the given public or private key.

  Args:
    avbtool: The AVB tool to use.
    key: The input key file, which should be PEM-encoded public or private key.

  Returns:
    The path to the extracted AVB public key file.
  """
  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
  RunAndCheckOutput(
      [avbtool, 'extract_public_key', "--key", key, "--output", output])
  return output

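# Typical usage sketch (paths illustrative): both helpers shell out to the
# respective tools, so they only need the key material on disk.
#
#   pem_pubkey = ExtractPublicKey("releasekey.x509.pem")
#   avb_pubkey_path = ExtractAvbPublicKey("avbtool", "testkey_rsa4096.pem")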

def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which is
  identical for the two, so the resulting patch should be efficient. Add it to
  the output zip, along with a shell script that is run from init.rc on first
  boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File object for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    # In this case, the output sink is rooted at VENDOR
    recovery_img_path = "etc/recovery.img"
    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
    sh_dir = "bin"
  else:
    # In this case the output sink is rooted at SYSTEM
    recovery_img_path = "vendor/etc/recovery.img"
    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
    sh_dir = "vendor/bin"

  if full_recovery_image:
    output_sink(recovery_img_path, recovery_img.data)

  else:
    system_root_image = info_dict.get("system_root_image") == "true"
    path = os.path.join(input_dir, recovery_resource_dat_path)
    # With system-root-image, boot and recovery images will have mismatching
    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
    # to handle such a case.
    if system_root_image:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
                                              check_no_slot=False)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
                                                      check_no_slot=False)
  except KeyError:
    return

  if full_recovery_image:

    # Note that we use /vendor to refer to the recovery resources. This will
    # work for a separate vendor partition mounted at /vendor or a
    # /system/vendor subdirectory on the system partition, for which init will
    # create a symlink from /vendor to /system/vendor.

    sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
          --flash /vendor/etc/recovery.img \\
          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
          --patch /vendor/recovery-from-boot.p \\
          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
       'recovery_type': recovery_type,
       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
  sh_location = os.path.join(sh_dir, "install-recovery.sh")

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())

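# Sketch of an output_sink callback rooted at a staging directory (the
# directory name is illustrative):
#
#   def output_sink(fn, data):
#     path = os.path.join(staging_dir, fn)
#     os.makedirs(os.path.dirname(path), exist_ok=True)
#     with open(path, "wb") as f:
#       f.write(data)
#
#   MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)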

class DynamicPartitionUpdate(object):
  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)

  @property
  def tgt_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)

  @staticmethod
  def _GetSparseImageSize(img):
    if not img:
      return 0
    return img.blocksize * img.total_blocks


class DynamicGroupUpdate(object):
  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist. 0: no size limits.
    self.src_size = src_size
    self.tgt_size = tgt_size


class DynamicPartitionsDifference(object):
  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()

  def WriteScript(self, script, output_zip, write_verify_script=False):
    script.Comment('--- Start patching dynamic partitions ---')
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')

  def _Compute(self):
    self._op_list = list()

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))

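# For illustration, the generated dynamic_partitions_op_list for a
# hypothetical update that grows `system` within group `group_basic` might
# read (all names and sizes are made up):
#
#   # Grow group group_basic from 4294967296 to 8589934592
#   resize_group group_basic 8589934592
#   # Grow partition system from 3221225472 to 4294967296
#   resize system 4294967296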

def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
  """
  Gets build.prop from the ramdisk within the boot image.

  Args:
    boot_img: The boot image file. The ramdisk must be compressed in lz4 or
        gzip (minigzip) format.

  Returns:
    The path to an extracted file that stores the properties in the boot
    image, or None on failure.
  """
  tmp_dir = MakeTempDir('boot_', suffix='.img')
  try:
    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
                      boot_img, '--out', tmp_dir])
    ramdisk = os.path.join(tmp_dir, 'ramdisk')
    if not os.path.isfile(ramdisk):
      logger.warning(
          'Unable to get boot image build props: no ramdisk in boot')
      return None
    uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
    if ramdisk_format == RamdiskFormat.LZ4:
      RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
    elif ramdisk_format == RamdiskFormat.GZ:
      with open(ramdisk, 'rb') as input_stream:
        with open(uncompressed_ramdisk, 'wb') as output_stream:
          p2 = Run(['minigzip', '-d'], stdin=input_stream.fileno(),
                   stdout=output_stream.fileno())
          p2.wait()
    else:
      logger.error('Only lz4 and minigzip ramdisk formats are supported.')
      return None

    abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
    extracted_ramdisk = MakeTempDir('extracted_ramdisk')
    # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
    # the host environment.
    RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
                      cwd=extracted_ramdisk)

    for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
      prop_file = os.path.join(extracted_ramdisk, search_path)
      if os.path.isfile(prop_file):
        return prop_file
      logger.warning(
          'Unable to get boot image build props: no %s in ramdisk', search_path)

    return None

  except ExternalError as e:
    logger.warning('Unable to get boot image build props: %s', e)
    return None


def GetBootImageTimestamp(boot_img):
  """
  Gets the timestamp from the ramdisk within the boot image.

  Args:
    boot_img: The boot image file. The ramdisk must be compressed in lz4
        format.

  Returns:
    An integer timestamp of the boot image, or None if the file has an
    unknown format. Raises an exception if an unexpected error occurs.
  """
  prop_file = GetBootImageBuildProp(boot_img)
  if not prop_file:
    return None

  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
  if props is None:
    return None

  try:
    timestamp = props.GetProp('ro.bootimage.build.date.utc')
    if timestamp:
      return int(timestamp)
    logger.warning(
        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
    return None

  except ExternalError as e:
    logger.warning('Unable to get boot image timestamp: %s', e)
    return None

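# Illustrative usage (hypothetical path): compare boot image timestamps when
# deciding whether a downgrade check is needed.
#
#   ts = GetBootImageTimestamp("boot.img")
#   if ts is not None:
#     logger.info("boot image built at %s",
#                 datetime.datetime.utcfromtimestamp(ts))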

def GetCareMap(which, imgname):
  """Returns the care_map string for the given partition.

  Args:
    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
    imgname: The filename of the image.

  Returns:
    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
    RangeSet; or None.
  """
  assert which in PARTITIONS_WITH_CARE_MAP

  # which + "_image_size" gives the size of the actual filesystem image, which
  # is all that needs to be verified. The additional blocks in the image file
  # contain verity metadata; reading them would trigger invalid reads.
  image_size = OPTIONS.info_dict.get(which + "_image_size")
  if not image_size:
    return None

  image_blocks = int(image_size) // 4096 - 1
  assert image_blocks > 0, "blocks for {} must be positive".format(which)

  # For sparse images, we will only check the blocks that are listed in the care
  # map, i.e. the ones with meaningful data.
  if "extfs_sparse_flag" in OPTIONS.info_dict:
    simg = sparse_img.SparseImage(imgname)
    care_map_ranges = simg.care_map.intersect(
        rangelib.RangeSet("0-{}".format(image_blocks)))

  # Otherwise for non-sparse images, we read all the blocks in the filesystem
  # image.
  else:
    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))

  return [which, care_map_ranges.to_string_raw()]

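# Worked example (hypothetical values): with OPTIONS.info_dict containing
# system_image_size = 409600 (i.e. 100 blocks of 4096 bytes) and a non-sparse
# image, image_blocks is 409600 // 4096 - 1 = 99, so the care map covers
# blocks 0 through 99 and GetCareMap("system", path) returns
# ["system", "2,0,100"] -- the raw RangeSet string for the half-open block
# range [0, 100).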

def AddCareMapForAbOta(output_file, ab_partitions, image_paths):
  """Generates and adds care_map.pb for each A/B partition that has a care_map.

  Args:
    output_file: The output zip file (which needs to be already open), or the
        file path to write care_map.pb to.
    ab_partitions: The list of A/B partitions.
    image_paths: A map from the partition name to the image path.
  """
  if not output_file:
    raise ExternalError('Expected output_file for AddCareMapForAbOta')

  care_map_list = []
  for partition in ab_partitions:
    partition = partition.strip()
    if partition not in PARTITIONS_WITH_CARE_MAP:
      continue

    verity_block_device = "{}_verity_block_device".format(partition)
    avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
    if (verity_block_device in OPTIONS.info_dict or
            OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
      if partition not in image_paths:
        logger.warning('Potential partition with care_map missing from images: %s',
                       partition)
        continue
      image_path = image_paths[partition]
      if not os.path.exists(image_path):
        raise ExternalError('Expected image at path {}'.format(image_path))

      care_map = GetCareMap(partition, image_path)
      if not care_map:
        continue
      care_map_list += care_map

      # adds fingerprint field to the care_map
      # TODO(xunchang) revisit the fingerprint calculation for care_map.
      partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
      prop_name_list = ["ro.{}.build.fingerprint".format(partition),
                        "ro.{}.build.thumbprint".format(partition)]

      present_props = [x for x in prop_name_list if
                       partition_props and partition_props.GetProp(x)]
      if not present_props:
        logger.warning(
            "fingerprint is not present for partition %s", partition)
        property_id, fingerprint = "unknown", "unknown"
      else:
        property_id = present_props[0]
        fingerprint = partition_props.GetProp(property_id)
      care_map_list += [property_id, fingerprint]

  if not care_map_list:
    return

  # Converts the list into proto buf message by calling care_map_generator; and
  # writes the result to a temp file.
  temp_care_map_text = MakeTempFile(prefix="caremap_text-",
                                    suffix=".txt")
  with open(temp_care_map_text, 'w') as text_file:
    text_file.write('\n'.join(care_map_list))

  temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
  care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
  RunAndCheckOutput(care_map_gen_cmd)

  if not isinstance(output_file, zipfile.ZipFile):
    shutil.copy(temp_care_map, output_file)
    return
  # output_file is a zip file
  care_map_path = "META/care_map.pb"
  if care_map_path in output_file.namelist():
    # Copy the temp file into the OPTIONS.input_tmp dir and update the
    # replace_updated_files_list used by add_img_to_target_files
    if not OPTIONS.replace_updated_files_list:
      OPTIONS.replace_updated_files_list = []
    shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
    OPTIONS.replace_updated_files_list.append(care_map_path)
  else:
    ZipWrite(output_file, temp_care_map, arcname=care_map_path)

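# Illustrative call (names hypothetical): writing care_map.pb straight into an
# open target-files zip for the A/B partitions that carry verity metadata.
#
#   AddCareMapForAbOta(output_zip, ["system", "vendor"],
#                      {"system": system_img_path, "vendor": vendor_img_path})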