# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import datetime
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
from hashlib import sha1, sha256

import images
import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff

logger = logging.getLogger(__name__)

class Options(object):

  def __init__(self):
    # Set up the search path, in order to find framework/ and lib64/. When this
    # function runs, the user-supplied search path (`--path`) is not yet
    # available, so the value set here is the default, which may be overridden
    # by a command-line flag later.
    exec_path = os.path.realpath(sys.argv[0])
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # The logger hasn't been initialized yet at this point, so use print to
      # output warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(exec_path))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    if not os.path.exists(os.path.join(self.search_path, self.signapk_path)):
      if "ANDROID_HOST_OUT" in os.environ:
        self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None
    self.host_tools = {}


OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here,
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks needs to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery',
                  'system', 'system_ext', 'vendor', 'vendor_boot', 'vendor_kernel_boot',
                  'vendor_dlkm', 'odm_dlkm', 'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# Partitions that should have their care_map added to META/care_map.pb.
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file.
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If the file is moved, add new search paths here; don't
# remove existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']


class ErrorCode(object):
  """Defines error codes for failures during update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (e.g. low battery, package verification failure).
  Detailed codes are in 'bootable/recovery/error_code.h'.
  """

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass


def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'INFO',
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

    # Increase the logging level for verbose mode.
    if OPTIONS.verbose:
      config = copy.deepcopy(config)
      config['handlers']['default']['level'] = 'INFO'

    if OPTIONS.logfile:
      config = copy.deepcopy(config)
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)


def SetHostToolLocation(tool_name, location):
  OPTIONS.host_tools[tool_name] = location


def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find.
  Returns:
    Path to the tool if it is found in the host_tools map or in the same
    directory as this binary; otherwise, tool_name itself.
  """
  if tool_name in OPTIONS.host_tools:
    return OPTIONS.host_tools[tool_name]

  my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  tool_path = os.path.join(my_dir, tool_name)
  if os.path.exists(tool_path):
    return tool_path

  return tool_name
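
# Illustrative sketch (not part of the original module): how the host-tool
# lookup is meant to be used. The '/path/to/my_lpmake' location below is
# hypothetical.
def _example_find_host_tool():
  # Registered locations take precedence over the script's own directory.
  SetHostToolLocation('lpmake', '/path/to/my_lpmake')
  return FindHostToolPath('lpmake')  # -> '/path/to/my_lpmake'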


def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env
        and stdin. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively, unless the caller specifies either of
        them. universal_newlines will default to True, as most of the callers
        in releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  if 'universal_newlines' not in kwargs:
    kwargs['universal_newlines'] = True

  if args:
    # Make a copy of args in case the caller relies on its content later.
    args = args[:]
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log anything if the caller explicitly turned verbose off.
  if verbose:
    logger.info("  Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)


def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env
        and stdin. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively, unless the caller specifies either of
        them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  if output is None:
    output = ""
  # Don't log anything if the caller explicitly turned verbose off.
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output
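
# Illustrative sketch (not part of the original module): running a host
# command and capturing its combined stdout/stderr as a string. Assumes a
# POSIX `echo` on PATH; a non-zero exit would raise ExternalError instead.
def _example_run_and_check_output():
  output = RunAndCheckOutput(['echo', 'hello'], verbose=False)
  return output  # -> 'hello\n'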


def RoundUpTo4K(value):
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)
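
# Worked example (illustrative, not in the original module): rounding up to
# the next 4096-byte boundary. 4097 + 4095 = 8192 and 8192 - (8192 % 4096)
# = 8192; exact multiples are unchanged.
def _example_round_up_to_4k():
  assert RoundUpTo4K(4097) == 8192
  assert RoundUpTo4K(8192) == 8192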


def CloseInheritedPipes():
  """Gmake on macOS leaks file descriptors (pipes). Close those fds before
  doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  commonly used properties such as fingerprint.

  There are two types of info dicts: a) the build-time info dict, which is
  generated at build time (i.e. included in a target_files zip); b) the OEM
  info dict that is specified at package generation time (via the command line
  argument '--oem_settings'). If a build doesn't use OEM-specific properties
  (i.e. it doesn't have "oem_fingerprint_properties" in the build-time info
  dict), all queries are answered based on the build-time info dict only.
  Otherwise, when using OEM-specific properties, some of them are calculated
  from the two info dicts.

  Users can query properties as they would a dict (e.g. info['fstab']), or
  query build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts; None
        if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The length of vbmeta digest to append to the fingerprint.
  _VBMETA_DIGEST_SIZE_USED = 8

  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Args:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None, so that BuildInfo is usable in
    # sign_target_files_apks.
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but it does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    return self._is_ab

  @property
  def device(self):
    return self._device

  @property
  def fingerprint(self):
    return self._fingerprint

  @property
  def is_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true"
    return vabc_enabled

  @property
  def is_vabc_xor(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled

  @property
  def vendor_suppressed_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the requested build property for the provided partition."""

    # The boot and init_boot images use ro.[product.]bootimage instead of the
    # partition name, since their build props come from the generic ramdisk.
    prop_partition = ("bootimage" if partition in ("boot", "init_boot")
                      else partition)

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the requested build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the requested ro.product.* build property."""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid.
    if any(x not in default_source_order for x in source_order):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding error.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # init code switches to version_release_or_codename (see b/158483506). After
    # API finalization, release_or_codename will be the same as release. This
    # is the best effort to support pre-S dev stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of the vbmeta digest to the existing build id. The
    # logic needs to match the one in init, so that OTA updates can be
    # delivered correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("Vbmeta digest isn't provided when using legacy build"
                          " id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))
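
  # Illustrative note (not part of the original module): the computed
  # fingerprint follows the CDD format
  #   brand/name/device:version/build-id/incremental:type/tags
  # e.g. 'Android/aosp_arm64/generic_arm64:12/SQ1D/123456:userdebug/test-keys'
  # (a made-up value, shown only to visualize the field layout).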

  def WriteMountOemScript(self, script):
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)


def ReadFromInputFile(input_file, fn):
  """Reads the contents of fn from the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn).decode()
  else:
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path) as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Re-raise other I/O errors instead of silently returning None.
      raise


def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from the input zipfile or directory into a file."""
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as f:
      f.write(input_file.read(fn))
    return tmp_file
  else:
    file = os.path.join(input_file, *fn.split("/"))
    if not os.path.exists(file):
      raise KeyError(fn)
    return file


class RamdiskFormat(object):
  LZ4 = 1
  GZ = 2


def _GetRamdiskFormat(info_dict):
  if info_dict.get('lz4_ramdisks') == 'true':
    ramdisk_format = RamdiskFormat.LZ4
  else:
    ramdisk_format = RamdiskFormat.GZ
  return ramdisk_format


def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads the `META/misc_info.txt` file in the target_files input, does
  validation checks and returns the parsed key/value pairs for the given build.
  It's usually called early when working on input target_files files, e.g. when
  generating OTAs, or signing builds. Note that the function may be called
  against an old target_files file (i.e. from past dessert releases). So the
  property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. That works fine with the build system, but the
  files are no longer available when (re)generating images from a target_files
  zip. When `repacking` is True, redirect these properties to the actual files
  in the unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        the target_files file. When doing repacking, `input_file` must be a dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir", and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in ["system", "vendor", "system_ext", "product", "odm",
                      "vendor_dlkm", "odm_dlkm", "system_dlkm"]:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = _GetRamdiskFormat(d)

  # Try to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  d["build.prop"] = d["system.build.prop"]

  # Set up the salt (based on fingerprint) that will be used when adding AVB
  # hash / hashtree footers.
  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d, use_legacy_id=True)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      fingerprint = build_info.GetPartitionFingerprint(partition)
      if fingerprint:
        d["avb_{}_salt".format(partition)] = sha256(
            fingerprint.encode()).hexdigest()

    # Set the vbmeta digest if it exists.
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d


def LoadListFromFile(file_path):
  with open(file_path) as f:
    return f.read().splitlines()


def LoadDictionaryFromFile(file_path):
  lines = LoadListFromFile(file_path)
  return LoadDictionaryFromLines(lines)


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d
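
# Illustrative sketch (not part of the original module): misc_info.txt-style
# lines parse into a plain str->str dict; comments and blank lines are skipped.
def _example_load_dictionary_from_lines():
  lines = ['# a comment', '', 'recovery_api_version=3', 'fstab_version=2']
  return LoadDictionaryFromLines(lines)
  # -> {'recovery_api_version': '3', 'fstab_version': '2'}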


class PartitionBuildProps(object):
  """Holds the build props of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop, and calculates alternative values of some specific build
  properties during runtime.

  Attributes:
    input_file: a zipped target-files archive or an unzipped target-files
        directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: a dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
    ramdisk_format: if name is "boot", the format of the ramdisk inside the
        boot image; otherwise, its value is ignored. lz4 is used to decompress
        by default; if its value is gzip, minigzip is used.
  """

  def __init__(self, input_file, name, placeholder_values=None):
    self.input_file = input_file
    self.partition = name
    self.props_allow_override = [props.format(name) for props in [
        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
    self.build_props = {}
    self.prop_overrides = set()
    self.placeholder_values = {}
    if placeholder_values:
      self.placeholder_values = copy.deepcopy(placeholder_values)

  @staticmethod
  def FromDictionary(name, build_props):
    """Constructs an instance from a build prop dictionary."""

    props = PartitionBuildProps("unknown", name)
    props.build_props = build_props.copy()
    return props

  @staticmethod
  def FromInputFile(input_file, name, placeholder_values=None,
                    ramdisk_format=RamdiskFormat.LZ4):
    """Loads the build.prop file and builds the attributes."""

    if name in ("boot", "init_boot"):
      data = PartitionBuildProps._ReadBootPropFile(
          input_file, name, ramdisk_format=ramdisk_format)
    else:
      data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

    props = PartitionBuildProps(input_file, name, placeholder_values)
    props._LoadBuildProp(data)
    return props

  @staticmethod
  def _ReadBootPropFile(input_file, partition_name, ramdisk_format):
    """
    Reads build.prop for the boot image from input_file.
    Returns an empty string if not found.
    """
    image_path = 'IMAGES/' + partition_name + '.img'
    try:
      boot_img = ExtractFromInputFile(input_file, image_path)
    except KeyError:
      logger.warning('Failed to read %s', image_path)
      return ''
    prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
    if prop_file is None:
      return ''
    with open(prop_file, "r") as f:
      return f.read()

  @staticmethod
  def _ReadPartitionPropFile(input_file, name):
    """
    Reads build.prop for name from input_file.
    Returns an empty string if not found.
    """
    data = ''
    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
                      '{}/build.prop'.format(name.upper())]:
      try:
        data = ReadFromInputFile(input_file, prop_file)
        break
      except KeyError:
        logger.warning('Failed to read %s', prop_file)
    if data == '':
      logger.warning("Failed to read build.prop for partition %s", name)
    return data

  @staticmethod
  def FromBuildPropFile(name, build_prop_file):
    """Constructs an instance from a build prop file."""

    props = PartitionBuildProps("unknown", name)
    with open(build_prop_file) as f:
      props._LoadBuildProp(f.read())
    return props
  def _LoadBuildProp(self, data):
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after overridden by import '
                           'statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      logger.warning('Unrecognized import path {}'.format(line))
      return {}

    # We only recognize a subset of the import statements that the init process
    # supports, and we can loosen the restriction based on how the dynamic
    # fingerprint is used in practice. The placeholder format should be
    # ${placeholder}, and its value should be provided by the caller through
    # placeholder_values.
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved placeholder in import path %s', import_path)
      return {}

    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}
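
  # Illustrative note (not part of the original module): given a build.prop
  # line such as
  #   import /odm/etc/build_${ro.boot.product.device_name}.prop
  # and placeholder_values {'ro.boot.product.device_name': 'device1'}, the
  # placeholder resolves to /odm/etc/build_device1.prop, which is then read
  # as ODM/etc/build_device1.prop from the target_files input.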

  def GetProp(self, prop):
    return self.build_props.get(prop)


def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context,
                 slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    slotselect = False
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      elif i == "slotselect":
        slotselect = True
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length, context=context,
                               slotselect=slotselect)

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert '/system' not in d and '/' in d
    d["/system"] = d["/"]
  return d
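
# Illustrative note (not part of the original module): a recovery.fstab v2
# line such as
#   /dev/block/by-name/system /system ext4 ro,context=u:object_r:system_file:s0 length=-2048,slotselect
# parses into a Partition with mount_point='/system', fs_type='ext4',
# device='/dev/block/by-name/system', length=-2048,
# context='context=u:object_r:system_file:s0' and slotselect=True.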


def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
  """Finds the path to the recovery fstab and loads its contents."""
  # recovery fstab is only meaningful when installing an update via recovery
  # (i.e. non-A/B OTA). Skip loading fstab if the device uses A/B OTA.
  if info_dict.get('ab_update') == 'true' and \
     info_dict.get("allow_non_ab") != "true":
    return None

  # We changed the recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab
  # to ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
  # cases, since it may load the info_dict from an old build (e.g. when
  # generating incremental OTAs from that build).
  system_root_image = info_dict.get('system_root_image') == 'true'
  if info_dict.get('no_recovery') != 'true':
    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
      if not os.path.exists(path):
        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path,
        system_root_image)

  if info_dict.get('recovery_as_boot') == 'true':
    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
      if not os.path.exists(path):
        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path,
        system_root_image)

  return None


def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)


def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from the
      partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.
  """

  def uniq_concat(a, b):
    combined = set(a.split(" "))
    combined.update(set(b.split(" ")))
    combined = [item.strip() for item in combined if item.strip()]
    return " ".join(sorted(combined))

  if (framework_dict.get("use_dynamic_partitions") !=
          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

  merged_dict = {"use_dynamic_partitions": "true"}
  # For key-value pairs that are the same, copy to the merged dict.
  for key in vendor_dict.keys():
    if key in framework_dict and framework_dict[key] == vendor_dict[key]:
      merged_dict[key] = vendor_dict[key]

  merged_dict["dynamic_partition_list"] = uniq_concat(
      framework_dict.get("dynamic_partition_list", ""),
      vendor_dict.get("dynamic_partition_list", ""))

  # Super block devices are defined by the vendor dict.
  if "super_block_devices" in vendor_dict:
    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
    for block_device in merged_dict["super_block_devices"].split(" "):
      key = "super_%s_device_size" % block_device
      if key not in vendor_dict:
        raise ValueError("Vendor dict does not contain required key %s." % key)
      merged_dict[key] = vendor_dict[key]

  # Partition groups and group sizes are defined by the vendor dict because
  # these values may vary for each board that uses a shared system image.
  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
  for partition_group in merged_dict["super_partition_groups"].split(" "):
    # Set the partition group's size using the value from the vendor dict.
    key = "super_%s_group_size" % partition_group
    if key not in vendor_dict:
      raise ValueError("Vendor dict does not contain required key %s." % key)
    merged_dict[key] = vendor_dict[key]

    # Set the partition group's partition list using a concatenation of the
    # framework and vendor partition lists.
    key = "super_%s_partition_list" % partition_group
    merged_dict[key] = uniq_concat(
        framework_dict.get(key, ""), vendor_dict.get(key, ""))

  # Various other flags should be copied from the vendor dict, if defined.
  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
              "super_metadata_device", "super_partition_error_limit",
              "super_partition_size"):
    if key in vendor_dict.keys():
      merged_dict[key] = vendor_dict[key]

  return merged_dict
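
# Illustrative sketch (not part of the original module): merging minimal
# framework/vendor dicts. All values below are made up for the example.
def _example_merge_dynamic_partition_info():
  framework = {
      'use_dynamic_partitions': 'true',
      'dynamic_partition_list': 'system product',
      'super_group_partition_list': 'system product',
  }
  vendor = {
      'use_dynamic_partitions': 'true',
      'dynamic_partition_list': 'vendor',
      'super_partition_groups': 'group',
      'super_group_group_size': '4294967296',
      'super_group_partition_list': 'vendor',
  }
  merged = MergeDynamicPartitionInfoDicts(framework, vendor)
  # merged['dynamic_partition_list'] == 'product system vendor' (deduped and
  # sorted by uniq_concat); likewise for merged['super_group_partition_list'].
  return merged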


def PartitionMapFromTargetFiles(target_files_dir):
  """Builds a map from partition -> path within an extracted target files directory."""
  # Keep possible_subdirs in sync with build/make/core/board_config.mk.
  possible_subdirs = {
      "system": ["SYSTEM"],
      "vendor": ["VENDOR", "SYSTEM/vendor"],
      "product": ["PRODUCT", "SYSTEM/product"],
      "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
      "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
      "vendor_dlkm": [
          "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
      "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"],
  }
  partition_map = {}
  for partition, subdirs in possible_subdirs.items():
    for subdir in subdirs:
      if os.path.exists(os.path.join(target_files_dir, subdir)):
        partition_map[partition] = subdir
        break
  return partition_map


def SharedUidPartitionViolations(uid_dict, partition_groups):
  """Checks for APK sharedUserIds that cross partition group boundaries.

  This uses a single or merged build's shareduid_violation_modules.json
  output file, as generated by find_shareduid_violation.py or
  core/tasks/find-shareduid-violation.mk.

  An error is defined as a sharedUserId that is found in a set of partitions
  that span more than one partition group.

  Args:
    uid_dict: A dictionary created by using the standard json module to read a
      complete shareduid_violation_modules.json file.
    partition_groups: A list of groups, where each group is a list of
      partitions.

  Returns:
    A list of error messages.
  """
  errors = []
  for uid, partitions in uid_dict.items():
    found_in_groups = [
        group for group in partition_groups
        if set(partitions.keys()) & set(group)
    ]
    if len(found_in_groups) > 1:
      errors.append(
          "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
          % (uid, ",".join(sorted(partitions.keys()))))
  return errors
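
# Illustrative sketch (not part of the original module): a sharedUserId that
# appears in both the system group and the vendor group produces one error.
def _example_shared_uid_violations():
  uid_dict = {'android.uid.example': {'system': ['AppA'], 'vendor': ['AppB']}}
  groups = [['system', 'system_ext', 'product'], ['vendor', 'odm']]
  return SharedUidPartitionViolations(uid_dict, groups)  # one error message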


def RunHostInitVerifier(product_out, partition_map):
  """Runs host_init_verifier on the init rc files within partitions.

  host_init_verifier searches the etc/init path within each partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within product_out.
  """
  allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
  cmd = ["host_init_verifier"]
  for partition, path in partition_map.items():
    if partition not in allowed_partitions:
      raise ExternalError("Unable to call host_init_verifier for partition %s" %
                          partition)
    cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)])
    # Add --property-contexts if the file exists on the partition.
    property_contexts = "%s_property_contexts" % (
        "plat" if partition == "system" else partition)
    property_contexts_path = os.path.join(product_out, path, "etc", "selinux",
                                          property_contexts)
    if os.path.exists(property_contexts_path):
      cmd.append("--property-contexts=%s" % property_contexts_path)
    # Add the passwd file if the file exists on the partition.
    passwd_path = os.path.join(product_out, path, "etc", "passwd")
    if os.path.exists(passwd_path):
      cmd.extend(["-p", passwd_path])
  return RunAndCheckOutput(cmd)


def AppendAVBSigningArgs(cmd, partition):
  """Append signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
  if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
    new_key_path = os.path.join(OPTIONS.search_path, key_path)
    if os.path.exists(new_key_path):
      key_path = new_key_path
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd.extend(["--key", key_path, "--algorithm", algorithm])
  avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd.extend(["--salt", avb_salt])


def GetAvbPartitionArg(partition, image, info_dict=None):
  """Returns the VBMeta arguments for partition.

  It sets up the VBMeta argument by including the partition descriptor from the
  given 'image', or by configuring the partition as a chained partition.

  Args:
    partition: The name of the partition (e.g. "system").
    image: The path to the partition image.
    info_dict: A dict returned by common.LoadInfoDict(). Will use
        OPTIONS.info_dict if None has been given.

  Returns:
    A list of VBMeta arguments.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Check if chain partition is used.
  key_path = info_dict.get("avb_" + partition + "_key_path")
  if not key_path:
    return ["--include_descriptors_from_image", image]

  # For a non-A/B device, we don't chain /recovery nor include its descriptor
  # into vbmeta.img. The recovery image will be configured on an independent
  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
  # See details at
  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
  if info_dict.get("ab_update") != "true" and partition == "recovery":
    return []

  # Otherwise chain the partition into vbmeta.
  chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
  return ["--chain_partition", chained_partition_arg]


def GetAvbChainedPartitionArg(partition, info_dict, key=None):
  """Constructs and returns the arg to build or verify a chained partition.

  Args:
    partition: The partition name.
    info_dict: The info dict to look up the key info and rollback index
        location.
    key: The key to be used for building or verifying the partition. Defaults to
        the key listed in info_dict.

  Returns:
    A string of the form "partition:rollback_index_location:key" that can be
    used to build or verify a vbmeta image.
  """
  if key is None:
    key = info_dict["avb_" + partition + "_key_path"]
  if key and not os.path.exists(key) and OPTIONS.search_path:
    new_key_path = os.path.join(OPTIONS.search_path, key)
    if os.path.exists(new_key_path):
      key = new_key_path
  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
  rollback_index_location = info_dict[
      "avb_" + partition + "_rollback_index_location"]
  return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
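
# Illustrative note (not part of the original module): for a chained
# vbmeta_system partition with rollback index location 1 and an extracted
# public key at /tmp/system_pubkey.bin (a hypothetical path), the returned
# arg would look like
#   'vbmeta_system:1:/tmp/system_pubkey.bin'
# which is then passed to avbtool via --chain_partition.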
1401
1402
1403def _HasGkiCertificationArgs():
1404  return ("gki_signing_key_path" in OPTIONS.info_dict and
1405          "gki_signing_algorithm" in OPTIONS.info_dict)
1406
1407
1408def _GenerateGkiCertificate(image, image_name):
1409  key_path = OPTIONS.info_dict.get("gki_signing_key_path")
1410  algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
1411
1412  if not os.path.exists(key_path) and OPTIONS.search_path:
1413    new_key_path = os.path.join(OPTIONS.search_path, key_path)
1414    if os.path.exists(new_key_path):
1415      key_path = new_key_path
1416
1417  # Checks key_path exists, before processing --gki_signing_* args.
1418  if not os.path.exists(key_path):
1419    raise ExternalError(
1420        'gki_signing_key_path: "{}" not found'.format(key_path))
1421
1422  output_certificate = tempfile.NamedTemporaryFile()
1423  cmd = [
1424      "generate_gki_certificate",
1425      "--name", image_name,
1426      "--algorithm", algorithm,
1427      "--key", key_path,
1428      "--output", output_certificate.name,
1429      image,
1430  ]
1431
1432  signature_args = OPTIONS.info_dict.get("gki_signing_signature_args", "")
1433  signature_args = signature_args.strip()
1434  if signature_args:
1435    cmd.extend(["--additional_avb_args", signature_args])
1436
1437  args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "")
1438  args = args.strip()
1439  if args:
1440    cmd.extend(["--additional_avb_args", args])
1441
1442  RunAndCheckOutput(cmd)
1443
1444  output_certificate.seek(0, os.SEEK_SET)  # seek(offset, whence): rewind.
1445  data = output_certificate.read()
1446  output_certificate.close()
1447  return data
1448
1449
1450def BuildVBMeta(image_path, partitions, name, needed_partitions):
1451  """Creates a VBMeta image.
1452
1453  It generates the requested VBMeta image. The requested image can be either a
1454  top-level or a chained VBMeta image, as determined by the name.
1455
1456  Args:
1457    image_path: The output path for the new VBMeta image.
1458    partitions: A dict that's keyed by partition names with image paths as
1459        values. Only valid partition names are accepted: partitions listed
1460        in common.AVB_PARTITIONS and custom partitions listed in
1461        OPTIONS.info_dict.get("avb_custom_images_partition_list").
1462    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
1463    needed_partitions: Partitions whose descriptors should be included into the
1464        generated VBMeta image.
1465
1466  Raises:
1467    AssertionError: On invalid input args.
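
  Example (a minimal sketch; the image paths are hypothetical):
      BuildVBMeta("/tmp/vbmeta.img",
                  {"boot": "/tmp/boot.img", "system": "/tmp/system.img"},
                  "vbmeta", ["boot", "system"])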
1468  """
1469  avbtool = OPTIONS.info_dict["avb_avbtool"]
1470  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
1471  AppendAVBSigningArgs(cmd, name)
1472
1473  custom_partitions = OPTIONS.info_dict.get(
1474      "avb_custom_images_partition_list", "").strip().split()
1475
1476  for partition, path in partitions.items():
1477    if partition not in needed_partitions:
1478      continue
1479    assert (partition in AVB_PARTITIONS or
1480            partition in AVB_VBMETA_PARTITIONS or
1481            partition in custom_partitions), \
1482        'Unknown partition: {}'.format(partition)
1483    assert os.path.exists(path), \
1484        'Failed to find {} for {}'.format(path, partition)
1485    cmd.extend(GetAvbPartitionArg(partition, path))
1486
1487  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
1488  if args and args.strip():
1489    split_args = shlex.split(args)
1490    for index, arg in enumerate(split_args[:-1]):
1491      # Check that the image file exists. Some images might be defined
1492      # as a path relative to the source tree, which may not be available at the
1493      # same location when running this script (we have the input target_files
1494      # zip only). For such cases, we additionally scan other locations (e.g.
1495      # IMAGES/, RADIO/, etc) before bailing out.
1496      if arg == '--include_descriptors_from_image':
1497        chained_image = split_args[index + 1]
1498        if os.path.exists(chained_image):
1499          continue
1500        found = False
1501        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
1502          alt_path = os.path.join(
1503              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
1504          if os.path.exists(alt_path):
1505            split_args[index + 1] = alt_path
1506            found = True
1507            break
1508        assert found, 'Failed to find {}'.format(chained_image)
1509    cmd.extend(split_args)
1510
1511  RunAndCheckOutput(cmd)
1512
1513
1514def _MakeRamdisk(sourcedir, fs_config_file=None,
1515                 ramdisk_format=RamdiskFormat.GZ):
1516  ramdisk_img = tempfile.NamedTemporaryFile()
1517
1518  if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
1519    cmd = ["mkbootfs", "-f", fs_config_file,
1520           os.path.join(sourcedir, "RAMDISK")]
1521  else:
1522    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
1523  p1 = Run(cmd, stdout=subprocess.PIPE)
1524  if ramdisk_format == RamdiskFormat.LZ4:
1525    p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
1526             stdout=ramdisk_img.file.fileno())
1527  elif ramdisk_format == RamdiskFormat.GZ:
1528    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
1529  else:
1530    raise ValueError("Only lz4 and minigzip ramdisk formats are supported.")
1531
1532  p2.wait()
1533  p1.wait()
1534  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
1535  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)
1536
1537  return ramdisk_img
1538
1539
1540def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
1541                        has_ramdisk=False, two_step_image=False):
1542  """Build a bootable image from the specified sourcedir.
1543
1544  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
1545  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
1546  we are building a two-step special image (i.e. building a recovery image to
1547  be loaded into /boot in two-step OTAs).
1548
1549  Return the image data, or None if sourcedir does not appear to contain files
1550  for building the requested image.
1551  """
1552
1553  if info_dict is None:
1554    info_dict = OPTIONS.info_dict
1555
1556  # "boot" or "recovery", without extension.
1557  partition_name = os.path.basename(sourcedir).lower()
1558
1559  kernel = None
1560  if partition_name == "recovery":
1561    if info_dict.get("exclude_kernel_from_recovery_image") == "true":
1562      logger.info("Excluded kernel binary from recovery image.")
1563    else:
1564      kernel = "kernel"
1565  elif partition_name == "init_boot":
1566    pass
1567  else:
1568    kernel = image_name.replace("boot", "kernel")
1569    kernel = kernel.replace(".img", "")
1570  if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
1571    return None
1572
1573  kernel_path = os.path.join(sourcedir, kernel) if kernel else None
1574
1575  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
1576    return None
1577
1578  img = tempfile.NamedTemporaryFile()
1579
1580  if has_ramdisk:
1581    ramdisk_format = _GetRamdiskFormat(info_dict)
1582    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file,
1583                               ramdisk_format=ramdisk_format)
1584
1585  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1586  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1587
1588  cmd = [mkbootimg]
1589  if kernel_path is not None:
1590    cmd.extend(["--kernel", kernel_path])
1591
1592  fn = os.path.join(sourcedir, "second")
1593  if os.access(fn, os.F_OK):
1594    cmd.append("--second")
1595    cmd.append(fn)
1596
1597  fn = os.path.join(sourcedir, "dtb")
1598  if os.access(fn, os.F_OK):
1599    cmd.append("--dtb")
1600    cmd.append(fn)
1601
1602  fn = os.path.join(sourcedir, "cmdline")
1603  if os.access(fn, os.F_OK):
1604    cmd.append("--cmdline")
1605    cmd.append(open(fn).read().rstrip("\n"))
1606
1607  fn = os.path.join(sourcedir, "base")
1608  if os.access(fn, os.F_OK):
1609    cmd.append("--base")
1610    cmd.append(open(fn).read().rstrip("\n"))
1611
1612  fn = os.path.join(sourcedir, "pagesize")
1613  if os.access(fn, os.F_OK):
1614    cmd.append("--pagesize")
1615    cmd.append(open(fn).read().rstrip("\n"))
1616
1617  if partition_name == "recovery":
1618    args = info_dict.get("recovery_mkbootimg_args")
1619    if not args:
1620      # Fall back to "mkbootimg_args" for recovery image
1621      # in case "recovery_mkbootimg_args" is not set.
1622      args = info_dict.get("mkbootimg_args")
1623  elif partition_name == "init_boot":
1624    args = info_dict.get("mkbootimg_init_args")
1625  else:
1626    args = info_dict.get("mkbootimg_args")
1627  if args and args.strip():
1628    cmd.extend(shlex.split(args))
1629
1630  args = info_dict.get("mkbootimg_version_args")
1631  if args and args.strip():
1632    cmd.extend(shlex.split(args))
1633
1634  if has_ramdisk:
1635    cmd.extend(["--ramdisk", ramdisk_img.name])
1636
1637  img_unsigned = None
1638  if info_dict.get("vboot"):
1639    img_unsigned = tempfile.NamedTemporaryFile()
1640    cmd.extend(["--output", img_unsigned.name])
1641  else:
1642    cmd.extend(["--output", img.name])
1643
1644  if partition_name == "recovery":
1645    if info_dict.get("include_recovery_dtbo") == "true":
1646      fn = os.path.join(sourcedir, "recovery_dtbo")
1647      cmd.extend(["--recovery_dtbo", fn])
1648    if info_dict.get("include_recovery_acpio") == "true":
1649      fn = os.path.join(sourcedir, "recovery_acpio")
1650      cmd.extend(["--recovery_acpio", fn])
1651
1652  RunAndCheckOutput(cmd)
1653
1654  if _HasGkiCertificationArgs():
1655    if not os.path.exists(img.name):
1656      raise ValueError("Cannot find GKI boot.img")
1657    if kernel_path is None or not os.path.exists(kernel_path):
1658      raise ValueError("Cannot find GKI kernel.img")
1659
1660    # Certify GKI images.
1661    boot_signature_bytes = b''
1662    boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot")
1663    boot_signature_bytes += _GenerateGkiCertificate(
1664        kernel_path, "generic_kernel")
1665
1666    BOOT_SIGNATURE_SIZE = 16 * 1024
1667    if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
1668      raise ValueError(
1669          f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}")
1670    boot_signature_bytes += (
1671        b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
1672    assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE
1673
1674    with open(img.name, 'ab') as f:
1675      f.write(boot_signature_bytes)
1676
1677  if (info_dict.get("boot_signer") == "true" and
1678          info_dict.get("verity_key")):
1679    # Hard-code the path as "/boot" for two-step special recovery image (which
1680    # will be loaded into /boot during the two-step OTA).
1681    if two_step_image:
1682      path = "/boot"
1683    else:
1684      path = "/" + partition_name
1685    cmd = [OPTIONS.boot_signer_path]
1686    cmd.extend(OPTIONS.boot_signer_args)
1687    cmd.extend([path, img.name,
1688                info_dict["verity_key"] + ".pk8",
1689                info_dict["verity_key"] + ".x509.pem", img.name])
1690    RunAndCheckOutput(cmd)
1691
1692  # Sign the image if vboot is non-empty.
1693  elif info_dict.get("vboot"):
1694    path = "/" + partition_name
1695    img_keyblock = tempfile.NamedTemporaryFile()
1696    # We have switched from the prebuilt futility binary to using the tool
1697    # (futility-host) built from the source. Override the setting in the old
1698    # TF.zip.
1699    futility = info_dict["futility"]
1700    if futility.startswith("prebuilts/"):
1701      futility = "futility-host"
1702    cmd = [info_dict["vboot_signer_cmd"], futility,
1703           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
1704           info_dict["vboot_key"] + ".vbprivk",
1705           info_dict["vboot_subkey"] + ".vbprivk",
1706           img_keyblock.name,
1707           img.name]
1708    RunAndCheckOutput(cmd)
1709
1710    # Clean up the temp files.
1711    img_unsigned.close()
1712    img_keyblock.close()
1713
1714  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1715  if info_dict.get("avb_enable") == "true":
1716    avbtool = info_dict["avb_avbtool"]
1717    if partition_name == "recovery":
1718      part_size = info_dict["recovery_size"]
1719    else:
1720      part_size = info_dict[image_name.replace(".img", "_size")]
1721    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1722           "--partition_size", str(part_size), "--partition_name",
1723           partition_name]
1724    AppendAVBSigningArgs(cmd, partition_name)
1725    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1726    if args and args.strip():
1727      cmd.extend(shlex.split(args))
1728    RunAndCheckOutput(cmd)
1729
1730  img.seek(0, os.SEEK_SET)  # seek(offset, whence): rewind.
1731  data = img.read()
1732
1733  if has_ramdisk:
1734    ramdisk_img.close()
1735  img.close()
1736
1737  return data
1738
1739
1740def _SignBootableImage(image_path, prebuilt_name, partition_name,
1741                       info_dict=None):
1742  """Performs AVB signing for a prebuilt boot.img.
1743
1744  Args:
1745    image_path: The full path of the image, e.g., /path/to/boot.img.
1746    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
1747        boot-5.10.img, recovery.img or init_boot.img.
1748    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
1749    info_dict: The information dict read from misc_info.txt.
1750  """
1751  if info_dict is None:
1752    info_dict = OPTIONS.info_dict
1753
1754  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1755  if info_dict.get("avb_enable") == "true":
1756    avbtool = info_dict["avb_avbtool"]
1757    if partition_name == "recovery":
1758      part_size = info_dict["recovery_size"]
1759    else:
1760      part_size = info_dict[prebuilt_name.replace(".img", "_size")]
1761
1762    cmd = [avbtool, "add_hash_footer", "--image", image_path,
1763           "--partition_size", str(part_size), "--partition_name",
1764           partition_name]
1765    AppendAVBSigningArgs(cmd, partition_name)
1766    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1767    if args and args.strip():
1768      cmd.extend(shlex.split(args))
1769    RunAndCheckOutput(cmd)
1770
1771
1772def HasRamdisk(partition_name, info_dict=None):
1773  """Returns true/false to see if a bootable image should have a ramdisk.
1774
1775  Args:
1776    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
1777    info_dict: The information dict read from misc_info.txt.
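
  Example (illustrative):
      HasRamdisk("init_boot", info_dict)  # True; init_boot.img has a ramdisk.
      HasRamdisk("boot", info_dict)       # False when the ramdisk has moved to
                                          # init_boot.img or system.img.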
1778  """
1779  if info_dict is None:
1780    info_dict = OPTIONS.info_dict
1781
1782  if partition_name != "boot":
1783    return True  # init_boot.img or recovery.img has a ramdisk.
1784
1785  if info_dict.get("recovery_as_boot") == "true":
1786    return True  # the recovery-as-boot boot.img has a RECOVERY ramdisk.
1787
1788  if info_dict.get("gki_boot_image_without_ramdisk") == "true":
1789    return False  # A GKI boot.img has no ramdisk since Android 13.
1790
1791  if info_dict.get("system_root_image") == "true":
1792    # The ramdisk content is merged into the system.img, so there is NO
1793    # ramdisk in the boot.img or boot-<kernel version>.img.
1794    return False
1795
1796  if info_dict.get("init_boot") == "true":
1797    # The ramdisk is moved to the init_boot.img, so there is NO
1798    # ramdisk in the boot.img or boot-<kernel version>.img.
1799    return False
1800
1801  return True
1802
1803
1804def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
1805                     info_dict=None, two_step_image=False):
1806  """Return a File object with the desired bootable image.
1807
1808  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
1809  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1810  the source files in 'unpack_dir'/'tree_subdir'."""
1811
1812  if info_dict is None:
1813    info_dict = OPTIONS.info_dict
1814
1815  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
1816  if os.path.exists(prebuilt_path):
1817    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
1818    return File.FromLocalFile(name, prebuilt_path)
1819
1820  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1821  if os.path.exists(prebuilt_path):
1822    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1823    return File.FromLocalFile(name, prebuilt_path)
1824
1825  partition_name = tree_subdir.lower()
1826  prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
1827  if os.path.exists(prebuilt_path):
1828    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
1829    signed_img = MakeTempFile()
1830    shutil.copy(prebuilt_path, signed_img)
1831    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
1832    return File.FromLocalFile(name, signed_img)
1833
1834  logger.info("building image from target_files %s...", tree_subdir)
1835
1836  has_ramdisk = HasRamdisk(partition_name, info_dict)
1837
1838  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
1839  data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
1840                             os.path.join(unpack_dir, fs_config),
1841                             info_dict, has_ramdisk, two_step_image)
1842  if data:
1843    return File(name, data)
1844  return None
1845
1846
1847def _BuildVendorBootImage(sourcedir, partition_name, info_dict=None):
1848  """Build a vendor boot image from the specified sourcedir.
1849
1850  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
1851  turn them into a vendor boot image.
1852
1853  Return the image data, or None if sourcedir does not appear to contain files
1854  for building the requested image.
1855  """
1856
1857  if info_dict is None:
1858    info_dict = OPTIONS.info_dict
1859
1860  img = tempfile.NamedTemporaryFile()
1861
1862  ramdisk_format = _GetRamdiskFormat(info_dict)
1863  ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)
1864
1865  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1866  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1867
1868  cmd = [mkbootimg]
1869
1870  fn = os.path.join(sourcedir, "dtb")
1871  if os.access(fn, os.F_OK):
1872    has_vendor_kernel_boot = (info_dict.get("vendor_kernel_boot", "").lower() == "true")
1873
1874    # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot.
1875    # Otherwise pack dtb into vendor_boot.
1876    if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot":
1877      cmd.append("--dtb")
1878      cmd.append(fn)
1879
1880  fn = os.path.join(sourcedir, "vendor_cmdline")
1881  if os.access(fn, os.F_OK):
1882    cmd.append("--vendor_cmdline")
1883    cmd.append(open(fn).read().rstrip("\n"))
1884
1885  fn = os.path.join(sourcedir, "base")
1886  if os.access(fn, os.F_OK):
1887    cmd.append("--base")
1888    cmd.append(open(fn).read().rstrip("\n"))
1889
1890  fn = os.path.join(sourcedir, "pagesize")
1891  if os.access(fn, os.F_OK):
1892    cmd.append("--pagesize")
1893    cmd.append(open(fn).read().rstrip("\n"))
1894
1895  args = info_dict.get("mkbootimg_args")
1896  if args and args.strip():
1897    cmd.extend(shlex.split(args))
1898
1899  args = info_dict.get("mkbootimg_version_args")
1900  if args and args.strip():
1901    cmd.extend(shlex.split(args))
1902
1903  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
1904  cmd.extend(["--vendor_boot", img.name])
1905
1906  fn = os.path.join(sourcedir, "vendor_bootconfig")
1907  if os.access(fn, os.F_OK):
1908    cmd.append("--vendor_bootconfig")
1909    cmd.append(fn)
1910
1911  ramdisk_fragment_imgs = []
1912  fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
1913  if os.access(fn, os.F_OK):
1914    ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
1915    for ramdisk_fragment in ramdisk_fragments:
1916      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
1917                        ramdisk_fragment, "mkbootimg_args")
1918      cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
1919      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
1920                        ramdisk_fragment, "prebuilt_ramdisk")
1921      # Use prebuilt image if found, else create ramdisk from supplied files.
1922      if os.access(fn, os.F_OK):
1923        ramdisk_fragment_pathname = fn
1924      else:
1925        ramdisk_fragment_root = os.path.join(
1926            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
1927        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
1928                                            ramdisk_format=ramdisk_format)
1929        ramdisk_fragment_imgs.append(ramdisk_fragment_img)
1930        ramdisk_fragment_pathname = ramdisk_fragment_img.name
1931      cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
1932
1933  RunAndCheckOutput(cmd)
1934
1935  # AVB: if enabled, calculate and add hash.
1936  if info_dict.get("avb_enable") == "true":
1937    avbtool = info_dict["avb_avbtool"]
1938    part_size = info_dict[f'{partition_name}_size']
1939    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1940           "--partition_size", str(part_size), "--partition_name", partition_name]
1941    AppendAVBSigningArgs(cmd, partition_name)
1942    args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args')
1943    if args and args.strip():
1944      cmd.extend(shlex.split(args))
1945    RunAndCheckOutput(cmd)
1946
1947  img.seek(0, os.SEEK_SET)  # seek(offset, whence): rewind.
1948  data = img.read()
1949
1950  for f in ramdisk_fragment_imgs:
1951    f.close()
1952  ramdisk_img.close()
1953  img.close()
1954
1955  return data
1956
1957
1958def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
1959                       info_dict=None):
1960  """Return a File object with the desired vendor boot image.
1961
1962  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1963  the source files in 'unpack_dir'/'tree_subdir'."""
1964
1965  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1966  if os.path.exists(prebuilt_path):
1967    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1968    return File.FromLocalFile(name, prebuilt_path)
1969
1970  logger.info("building image from target_files %s...", tree_subdir)
1971
1972  if info_dict is None:
1973    info_dict = OPTIONS.info_dict
1974
1975  data = _BuildVendorBootImage(
1976      os.path.join(unpack_dir, tree_subdir), "vendor_boot", info_dict)
1977  if data:
1978    return File(name, data)
1979  return None
1980
1981
1982def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
1983                             info_dict=None):
1984  """Return a File object with the desired vendor kernel boot image.
1985
1986  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1987  the source files in 'unpack_dir'/'tree_subdir'."""
1988
1989  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1990  if os.path.exists(prebuilt_path):
1991    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1992    return File.FromLocalFile(name, prebuilt_path)
1993
1994  logger.info("building image from target_files %s...", tree_subdir)
1995
1996  if info_dict is None:
1997    info_dict = OPTIONS.info_dict
1998
1999  data = _BuildVendorBootImage(
2000      os.path.join(unpack_dir, tree_subdir), "vendor_kernel_boot", info_dict)
2001  if data:
2002    return File(name, data)
2003  return None
2004
2005
2006def Gunzip(in_filename, out_filename):
2007  """Gunzips the given gzip compressed file to a given output file."""
2008  with gzip.open(in_filename, "rb") as in_file, \
2009          open(out_filename, "wb") as out_file:
2010    shutil.copyfileobj(in_file, out_file)
2011
2012
2013def UnzipToDir(filename, dirname, patterns=None):
2014  """Unzips the archive to the given directory.
2015
2016  Args:
2017    filename: The name of the zip file to unzip.
2018    dirname: Where the unzipped files will land.
2019    patterns: Files to unzip from the archive. If omitted, unzips the entire
2020        archive. Non-matching patterns will be filtered out. If there's no
2021        match after the filtering, no file will be unzipped.
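
  Example (illustrative):
      UnzipToDir("target_files.zip", "/tmp/tf", patterns=["IMAGES/*", "META/*"])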
2022  """
2023  cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
2024  if patterns is not None:
2025    # Filter out non-matching patterns. unzip will complain otherwise.
2026    with zipfile.ZipFile(filename, allowZip64=True) as input_zip:
2027      names = input_zip.namelist()
2028    filtered = [
2029        pattern for pattern in patterns if fnmatch.filter(names, pattern)]
2030
2031    # There are no matching files. Don't unzip anything.
2032    if not filtered:
2033      return
2034    cmd.extend(filtered)
2035
2036  RunAndCheckOutput(cmd)
2037
2038
2039def UnzipTemp(filename, patterns=None):
2040  """Unzips the given archive into a temporary directory and returns the name.
2041
2042  Args:
2043    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
2044        a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
2045
2046    patterns: Files to unzip from the archive. If omitted, unzips the entire
2047        archive.
2048
2049  Returns:
2050    The name of the temporary directory.
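
  Example (illustrative):
      tmp = UnzipTemp("target_files.zip", ["IMAGES/*", "META/*"])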
2051  """
2052
2053  tmp = MakeTempDir(prefix="targetfiles-")
2054  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
2055  if m:
2056    UnzipToDir(m.group(1), tmp, patterns)
2057    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns)
2058    filename = m.group(1)
2059  else:
2060    UnzipToDir(filename, tmp, patterns)
2061
2062  return tmp
2063
2064
2065def GetUserImage(which, tmpdir, input_zip,
2066                 info_dict=None,
2067                 allow_shared_blocks=None,
2068                 hashtree_info_generator=None,
2069                 reset_file_map=False):
2070  """Returns an Image object suitable for passing to BlockImageDiff.
2071
2072  This function loads the specified image from the given path. If the specified
2073  image is sparse, it also performs additional processing for OTA purposes. For
2074  example, it always adds block 0 to the clobbered blocks list. It also detects
2075  files that cannot be reconstructed from the block list, for which we should
2076  avoid applying imgdiff.
2077
2078  Args:
2079    which: The partition name.
2080    tmpdir: The directory that contains the prebuilt image and block map file.
2081    input_zip: The target-files ZIP archive.
2082    info_dict: The dict to be looked up for relevant info.
2083    allow_shared_blocks: If image is sparse, whether having shared blocks is
2084        allowed. If None, it is looked up from info_dict.
2085    hashtree_info_generator: If present and image is sparse, generates the
2086        hashtree_info for this sparse image.
2087    reset_file_map: If true and image is sparse, reset file map before returning
2088        the image.
2089  Returns:
2090    An Image object. If it is a sparse image and reset_file_map is False, the
2091    image will have file_map info loaded.
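
  Example (a minimal sketch; the archive name is hypothetical):
      tmpdir = UnzipTemp("target_files.zip", ["IMAGES/*", "META/*"])
      with zipfile.ZipFile("target_files.zip", "r", allowZip64=True) as input_zip:
        img = GetUserImage("system", tmpdir, input_zip)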
2092  """
2093  if info_dict is None:
2094    info_dict = LoadInfoDict(input_zip)
2095
2096  is_sparse = info_dict.get("extfs_sparse_flag")
2097  if info_dict.get(which + "_disable_sparse"):
2098    is_sparse = False
2099
2100  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
2101  # shared blocks (i.e. some blocks will show up in multiple files' block
2102  # list). We can only allocate such shared blocks to the first "owner", and
2103  # disable imgdiff for all later occurrences.
2104  if allow_shared_blocks is None:
2105    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
2106
2107  if is_sparse:
2108    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
2109                         hashtree_info_generator)
2110    if reset_file_map:
2111      img.ResetFileMap()
2112    return img
2113  return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
2114
2115
2116def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
2117  """Returns a Image object suitable for passing to BlockImageDiff.
2118
2119  This function loads the specified non-sparse image from the given path.
2120
2121  Args:
2122    which: The partition name.
2123    tmpdir: The directory that contains the prebuilt image and block map file.
2124  Returns:
2125    An Image object.
2126  """
2127  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2128  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2129
2130  # The image and map files must have been created prior to calling
2131  # ota_from_target_files.py (since LMP).
2132  assert os.path.exists(path) and os.path.exists(mappath)
2133
2134  return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
2135
2136
2137def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
2138                   hashtree_info_generator=None):
2139  """Returns a SparseImage object suitable for passing to BlockImageDiff.
2140
2141  This function loads the specified sparse image from the given path, and
2142  performs additional processing for OTA purposes. For example, it always adds
2143  block 0 to the clobbered blocks list. It also detects files that cannot be
2144  reconstructed from the block list, for which we should avoid applying imgdiff.
2145
2146  Args:
2147    which: The partition name, e.g. "system", "vendor".
2148    tmpdir: The directory that contains the prebuilt image and block map file.
2149    input_zip: The target-files ZIP archive.
2150    allow_shared_blocks: Whether having shared blocks is allowed.
2151    hashtree_info_generator: If present, generates the hashtree_info for this
2152        sparse image.
2153  Returns:
2154    A SparseImage object, with file_map info loaded.
2155  """
2156  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2157  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2158
2159  # The image and map files must have been created prior to calling
2160  # ota_from_target_files.py (since LMP).
2161  assert os.path.exists(path) and os.path.exists(mappath)
2162
2163  # In ext4 filesystems, block 0 might be changed even when mounted R/O. We add
2164  # it to clobbered_blocks so that it will be written to the target
2165  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
2166  clobbered_blocks = "0"
2167
2168  image = sparse_img.SparseImage(
2169      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
2170      hashtree_info_generator=hashtree_info_generator)
2171
2172  # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
2173  # if they contain all zeros. We can't reconstruct such a file from its block
2174  # list. Tag such entries accordingly. (Bug: 65213616)
2175  for entry in image.file_map:
2176    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
2177    if not entry.startswith('/'):
2178      continue
2179
2180    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
2181    # filename listed in system.map may contain an additional leading slash
2182    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
2183    # results.
2184    # And handle another special case, where files not under /system
2185    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
2186    arcname = entry.lstrip('/')
2187    if which == 'system' and not arcname.startswith('system'):
2188      arcname = 'ROOT/' + arcname
2189    else:
2190      arcname = arcname.replace(which, which.upper(), 1)
2191
2192    assert arcname in input_zip.namelist(), \
2193        "Failed to find the ZIP entry for {}".format(entry)
2194
2195    info = input_zip.getinfo(arcname)
2196    ranges = image.file_map[entry]
2197
2198    # If a RangeSet has been tagged as using shared blocks while loading the
2199    # image, check the original block list to determine its completeness. Note
2200    # that the 'incomplete' flag would be tagged to the original RangeSet only.
2201    if ranges.extra.get('uses_shared_blocks'):
2202      ranges = ranges.extra['uses_shared_blocks']
2203
2204    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
2205      ranges.extra['incomplete'] = True
2206
2207  return image
2208
2209
2210def GetKeyPasswords(keylist):
2211  """Given a list of keys, prompt the user to enter passwords for
2212  those which require them.  Return a {key: password} dict.  password
2213  will be None if the key has no password."""
2214
2215  no_passwords = []
2216  need_passwords = []
2217  key_passwords = {}
2218  devnull = open("/dev/null", "w+b")
2219
2220  # sorted() can't compare strings to None, so convert Nones to strings
2221  for k in sorted(keylist, key=lambda x: x if x is not None else ""):
2222    # We don't need a password for things that aren't really keys.
2223    if k in SPECIAL_CERT_STRINGS or k is None:
2224      no_passwords.append(k)
2225      continue
2226
2227    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2228             "-inform", "DER", "-nocrypt"],
2229            stdin=devnull.fileno(),
2230            stdout=devnull.fileno(),
2231            stderr=subprocess.STDOUT)
2232    p.communicate()
2233    if p.returncode == 0:
2234      # Definitely an unencrypted key.
2235      no_passwords.append(k)
2236    else:
2237      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2238               "-inform", "DER", "-passin", "pass:"],
2239              stdin=devnull.fileno(),
2240              stdout=devnull.fileno(),
2241              stderr=subprocess.PIPE)
2242      _, stderr = p.communicate()
2243      if p.returncode == 0:
2244        # Encrypted key with empty string as password.
2245        key_passwords[k] = ''
2246      elif stderr.startswith('Error decrypting key'):
2247        # Definitely encrypted key.
2248        # It would have said "Error reading key" if it didn't parse correctly.
2249        need_passwords.append(k)
2250      else:
2251        # Potentially, a type of key that openssl doesn't understand.
2252        # We'll let the routines in signapk.jar handle it.
2253        no_passwords.append(k)
2254  devnull.close()
2255
2256  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
2257  key_passwords.update(dict.fromkeys(no_passwords))
2258  return key_passwords
2259
2260
2261def GetMinSdkVersion(apk_name):
2262  """Gets the minSdkVersion declared in the APK.
2263
2264  It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the given
2265  APK file. This can be either a decimal number (API Level) or a codename.
2266
2267  Args:
2268    apk_name: The APK filename.
2269
2270  Returns:
2271    The parsed SDK version string.
2272
2273  Raises:
2274    ExternalError: On failing to obtain the min SDK version.
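
  Example (illustrative):
      GetMinSdkVersion("App.apk")  # -> "23", or a codename such as "UpsideDownCake"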
2275  """
2276  proc = Run(
2277      [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE,
2278      stderr=subprocess.PIPE)
2279  stdoutdata, stderrdata = proc.communicate()
2280  if proc.returncode != 0:
2281    raise ExternalError(
2282        "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format(
2283            apk_name, proc.returncode, stdoutdata, stderrdata))
2284
2285  for line in stdoutdata.split("\n"):
2286    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
2287    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
2288    if m:
2289      return m.group(1)
2290  raise ExternalError("No minSdkVersion returned by aapt2")
2291
2292
2293def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
2294  """Returns the minSdkVersion declared in the APK as a number (API Level).
2295
2296  If minSdkVersion is set to a codename, it is translated to a number using the
2297  provided map.
2298
2299  Args:
2300    apk_name: The APK filename.
2301
2302  Returns:
2303    The parsed SDK version number.
2304
2305  Raises:
2306    ExternalError: On failing to get the min SDK version number.
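
  Example (illustrative):
      GetMinSdkVersionInt("App.apk", {"UpsideDownCake": 34})  # -> e.g. 23 or 34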
2307  """
2308  version = GetMinSdkVersion(apk_name)
2309  try:
2310    return int(version)
2311  except ValueError:
2312    # Not a decimal number.
2313    #
2314    # It could be either a straight codename, e.g.
2315    #     UpsideDownCake
2316    #
2317    # Or a codename with API fingerprint SHA, e.g.
2318    #     UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e
2319    #
2320    # Extract the codename and try to map it to a version number.
2321    split = version.split(".")
2322    codename = split[0]
2323    if codename in codename_to_api_level_map:
2324      return codename_to_api_level_map[codename]
2325    raise ExternalError(
2326        "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format(
2327            codename, version, codename_to_api_level_map))
2328
2329
2330def SignFile(input_name, output_name, key, password, min_api_level=None,
2331             codename_to_api_level_map=None, whole_file=False,
2332             extra_signapk_args=None):
2333  """Sign the input_name zip/jar/apk, producing output_name.  Use the
2334  given key and password (the latter may be None if the key does not
2335  have a password).
2336
2337  If whole_file is true, use the "-w" option to SignApk to embed a
2338  signature that covers the whole file in the archive comment of the
2339  zip file.
2340
2341  min_api_level is the API Level (int) of the oldest platform this file may end
2342  up on. If not specified for an APK, the API Level is obtained by interpreting
2343  the minSdkVersion attribute of the APK's AndroidManifest.xml.
2344
2345  codename_to_api_level_map is needed to translate the codename which may be
2346  encountered as the APK's minSdkVersion.
2347
2348  Caller may optionally specify extra args to be passed to SignApk, which
2349  defaults to OPTIONS.extra_signapk_args if omitted.
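
  Example (illustrative; the key basename is hypothetical, and the public/private
  key suffixes from OPTIONS are appended to it):
      SignFile("unsigned.apk", "signed.apk",
               "build/make/target/product/security/testkey", password=None)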
2350  """
2351  if codename_to_api_level_map is None:
2352    codename_to_api_level_map = {}
2353  if extra_signapk_args is None:
2354    extra_signapk_args = OPTIONS.extra_signapk_args
2355
2356  java_library_path = os.path.join(
2357      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
2358
2359  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
2360         ["-Djava.library.path=" + java_library_path,
2361          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
2362         extra_signapk_args)
2363  if whole_file:
2364    cmd.append("-w")
2365
2366  min_sdk_version = min_api_level
2367  if min_sdk_version is None:
2368    if not whole_file:
2369      min_sdk_version = GetMinSdkVersionInt(
2370          input_name, codename_to_api_level_map)
2371  if min_sdk_version is not None:
2372    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
2373
2374  cmd.extend([key + OPTIONS.public_key_suffix,
2375              key + OPTIONS.private_key_suffix,
2376              input_name, output_name])
2377
2378  proc = Run(cmd, stdin=subprocess.PIPE)
2379  if password is not None:
2380    password += "\n"
2381  stdoutdata, _ = proc.communicate(password)
2382  if proc.returncode != 0:
2383    raise ExternalError(
2384        "Failed to run signapk.jar: return code {}:\n{}".format(
2385            proc.returncode, stdoutdata))
2386
2387
2388def CheckSize(data, target, info_dict):
2389  """Checks the data string passed against the max size limit.
2390
2391  For non-AVB images, raise an exception if the data is too big. Print a warning
2392  if the data is nearing the maximum size.
2393
2394  For AVB images, the actual image size should be identical to the limit.
2395
2396  Args:
2397    data: A string that contains all the data for the partition.
2398    target: The partition name. The ".img" suffix is optional.
2399    info_dict: The dict to be looked up for relevant info.
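
  Example (illustrative):
      CheckSize(system_img_data, "system.img", OPTIONS.info_dict)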
2400  """
2401  if target.endswith(".img"):
2402    target = target[:-4]
2403  mount_point = "/" + target
2404
2405  fs_type = None
2406  limit = None
2407  if info_dict["fstab"]:
2408    if mount_point == "/userdata":
2409      mount_point = "/data"
2410    p = info_dict["fstab"][mount_point]
2411    fs_type = p.fs_type
2412    device = p.device
2413    if "/" in device:
2414      device = device[device.rfind("/")+1:]
2415    limit = info_dict.get(device + "_size")
2416  if not fs_type or not limit:
2417    return
2418
2419  size = len(data)
2420  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
2421  # path.
2422  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
2423    if size != limit:
2424      raise ExternalError(
2425          "Mismatching image size for %s: expected %d actual %d" % (
2426              target, limit, size))
2427  else:
2428    pct = float(size) * 100.0 / limit
2429    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
2430    if pct >= 99.0:
2431      raise ExternalError(msg)
2432
2433    if pct >= 95.0:
2434      logger.warning("\n  WARNING: %s\n", msg)
2435    else:
2436      logger.info("  %s", msg)
2437
2438
2439def ReadApkCerts(tf_zip):
2440  """Parses the APK certs info from a given target-files zip.
2441
2442  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
2443  tuple with the following elements: (1) a dictionary that maps packages to
2444  certs (based on the "certificate" and "private_key" attributes in the file);
2445  (2) a string representing the extension of compressed APKs in the target files
2446  (e.g. ".gz", ".bro").
2447
2448  Args:
2449    tf_zip: The input target_files ZipFile (already open).
2450
2451  Returns:
2452    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
2453        the extension string of compressed APKs (e.g. ".gz"), or None if there's
2454        no compressed APKs.
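
  Example line from META/apkcerts.txt (illustrative; names and paths are
  hypothetical):
      name="App.apk" certificate="certs/app.x509.pem" private_key="certs/app.pk8" compressed="gz" partition="system"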
2455  """
2456  certmap = {}
2457  compressed_extension = None
2458
2459  # META/apkcerts.txt contains the info for _all_ the packages known at build
2460  # time. Filter out the ones that are not installed.
2461  installed_files = set()
2462  for name in tf_zip.namelist():
2463    basename = os.path.basename(name)
2464    if basename:
2465      installed_files.add(basename)
2466
2467  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
2468    line = line.strip()
2469    if not line:
2470      continue
2471    m = re.match(
2472        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
2473        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
2474        r'(\s+partition="(?P<PARTITION>.*?)")?$',
2475        line)
2476    if not m:
2477      continue
2478
2479    matches = m.groupdict()
2480    cert = matches["CERT"]
2481    privkey = matches["PRIVKEY"]
2482    name = matches["NAME"]
2483    this_compressed_extension = matches["COMPRESSED"]
2484
2485    public_key_suffix_len = len(OPTIONS.public_key_suffix)
2486    private_key_suffix_len = len(OPTIONS.private_key_suffix)
2487    if cert in SPECIAL_CERT_STRINGS and not privkey:
2488      certmap[name] = cert
2489    elif (cert.endswith(OPTIONS.public_key_suffix) and
2490          privkey.endswith(OPTIONS.private_key_suffix) and
2491          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
2492      certmap[name] = cert[:-public_key_suffix_len]
2493    else:
2494      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
2495
2496    if not this_compressed_extension:
2497      continue
2498
2499    # Only count the installed files.
2500    filename = name + '.' + this_compressed_extension
2501    if filename not in installed_files:
2502      continue
2503
2504    # Make sure that all the values in the compression map have the same
2505    # extension. We don't support multiple compression methods in the same
2506    # system image.
2507    if compressed_extension:
2508      if this_compressed_extension != compressed_extension:
2509        raise ValueError(
2510            "Multiple compressed extensions: {} vs {}".format(
2511                compressed_extension, this_compressed_extension))
2512    else:
2513      compressed_extension = this_compressed_extension
2514
2515  return (certmap,
2516          ("." + compressed_extension) if compressed_extension else None)
2517
2518
2519COMMON_DOCSTRING = """
2520Global options
2521
2522  -p  (--path) <dir>
2523      Prepend <dir>/bin to the list of places to search for binaries run by this
2524      script, and expect to find jars in <dir>/framework.
2525
2526  -s  (--device_specific) <file>
2527      Path to the Python module containing device-specific releasetools code.
2528
2529  -x  (--extra) <key=value>
2530      Add a key/value pair to the 'extras' dict, which device-specific extension
2531      code may look at.
2532
2533  -v  (--verbose)
2534      Show command lines being executed.
2535
2536  -h  (--help)
2537      Display this usage message and exit.
2538
2539  --logfile <file>
2540      Put verbose logs into the specified file (regardless of the --verbose option).
2541"""
2542
2543
2544def Usage(docstring):
2545  print(docstring.rstrip("\n"))
2546  print(COMMON_DOCSTRING)
2547
2548
2549def ParseOptions(argv,
2550                 docstring,
2551                 extra_opts="", extra_long_opts=(),
2552                 extra_option_handler=None):
2553  """Parse the options in argv and return any arguments that aren't
2554  flags.  docstring is the calling module's docstring, to be displayed
2555  for errors and -h.  extra_opts and extra_long_opts are for flags
2556  defined by the caller, which are processed by passing them to
2557  extra_option_handler."""
2558
2559  try:
2560    opts, args = getopt.getopt(
2561        argv, "hvp:s:x:" + extra_opts,
2562        ["help", "verbose", "path=", "signapk_path=",
2563         "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
2564         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
2565         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
2566         "verity_signer_path=", "verity_signer_args=", "device_specific=",
2567         "extra=", "logfile="] + list(extra_long_opts))
2568  except getopt.GetoptError as err:
2569    Usage(docstring)
2570    print("**", str(err), "**")
2571    sys.exit(2)
2572
2573  for o, a in opts:
2574    if o in ("-h", "--help"):
2575      Usage(docstring)
2576      sys.exit()
2577    elif o in ("-v", "--verbose"):
2578      OPTIONS.verbose = True
2579    elif o in ("-p", "--path"):
2580      OPTIONS.search_path = a
2581    elif o in ("--signapk_path",):
2582      OPTIONS.signapk_path = a
2583    elif o in ("--signapk_shared_library_path",):
2584      OPTIONS.signapk_shared_library_path = a
2585    elif o in ("--extra_signapk_args",):
2586      OPTIONS.extra_signapk_args = shlex.split(a)
2587    elif o in ("--aapt2_path",):
2588      OPTIONS.aapt2_path = a
2589    elif o in ("--java_path",):
2590      OPTIONS.java_path = a
2591    elif o in ("--java_args",):
2592      OPTIONS.java_args = shlex.split(a)
2593    elif o in ("--android_jar_path",):
2594      OPTIONS.android_jar_path = a
2595    elif o in ("--public_key_suffix",):
2596      OPTIONS.public_key_suffix = a
2597    elif o in ("--private_key_suffix",):
2598      OPTIONS.private_key_suffix = a
2599    elif o in ("--boot_signer_path",):
2600      OPTIONS.boot_signer_path = a
2601    elif o in ("--boot_signer_args",):
2602      OPTIONS.boot_signer_args = shlex.split(a)
2603    elif o in ("--verity_signer_path",):
2604      OPTIONS.verity_signer_path = a
2605    elif o in ("--verity_signer_args",):
2606      OPTIONS.verity_signer_args = shlex.split(a)
2607    elif o in ("-s", "--device_specific"):
2608      OPTIONS.device_specific = a
2609    elif o in ("-x", "--extra"):
2610      key, value = a.split("=", 1)
2611      OPTIONS.extras[key] = value
2612    elif o in ("--logfile",):
2613      OPTIONS.logfile = a
2614    else:
2615      if extra_option_handler is None or not extra_option_handler(o, a):
2616        assert False, "unknown option \"%s\"" % (o,)
2617
2618  if OPTIONS.search_path:
2619    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
2620                          os.pathsep + os.environ["PATH"])
2621
2622  return args
2623
2624
2625def MakeTempFile(prefix='tmp', suffix=''):
2626  """Make a temp file and add it to the list of things to be deleted
2627  when Cleanup() is called.  Return the filename."""
2628  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
2629  os.close(fd)
2630  OPTIONS.tempfiles.append(fn)
2631  return fn
2632
2633
2634def MakeTempDir(prefix='tmp', suffix=''):
2635  """Makes a temporary dir that will be cleaned up with a call to Cleanup().
2636
2637  Returns:
2638    The absolute pathname of the new directory.
2639  """
2640  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
2641  OPTIONS.tempfiles.append(dir_name)
2642  return dir_name
2643
2644
2645def Cleanup():
2646  for i in OPTIONS.tempfiles:
2647    if os.path.isdir(i):
2648      shutil.rmtree(i, ignore_errors=True)
2649    else:
2650      os.remove(i)
2651  del OPTIONS.tempfiles[:]
2652
2653
2654class PasswordManager(object):
2655  def __init__(self):
2656    self.editor = os.getenv("EDITOR")
2657    self.pwfile = os.getenv("ANDROID_PW_FILE")
2658
2659  def GetPasswords(self, items):
2660    """Get passwords corresponding to each string in 'items',
2661    returning a dict.  (The dict may have keys in addition to the
2662    values in 'items'.)
2663
2664    Uses the passwords in $ANDROID_PW_FILE if available, letting the
2665    user edit that file to add more needed passwords.  If no editor is
2666    available, or $ANDROID_PW_FILE isn't defined, prompts the user
2667    interactively in the ordinary way.
2668    """
2669
2670    current = self.ReadFile()
2671
2672    first = True
2673    while True:
2674      missing = []
2675      for i in items:
2676        if i not in current or not current[i]:
2677          missing.append(i)
2678      # Are all the passwords already in the file?
2679      if not missing:
2680        return current
2681
2682      for i in missing:
2683        current[i] = ""
2684
2685      if not first:
2686        print("key file %s still missing some passwords." % (self.pwfile,))
2687        if sys.version_info[0] >= 3:
2688          raw_input = input  # pylint: disable=redefined-builtin
2689        answer = raw_input("try to edit again? [y]> ").strip()
2690        if answer and answer[0] not in 'yY':
2691          raise RuntimeError("key passwords unavailable")
2692      first = False
2693
2694      current = self.UpdateAndReadFile(current)
2695
2696  def PromptResult(self, current):  # pylint: disable=no-self-use
2697    """Prompt the user to enter a value (password) for each key in
2698    'current' whose value is false.  Returns a new dict with all the
2699    values.
2700    """
2701    result = {}
2702    for k, v in sorted(current.items()):
2703      if v:
2704        result[k] = v
2705      else:
2706        while True:
2707          result[k] = getpass.getpass(
2708              "Enter password for %s key> " % k).strip()
2709          if result[k]:
2710            break
2711    return result
2712
2713  def UpdateAndReadFile(self, current):
2714    if not self.editor or not self.pwfile:
2715      return self.PromptResult(current)
2716
2717    f = open(self.pwfile, "w")
2718    os.chmod(self.pwfile, 0o600)
2719    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
2720    f.write("# (Additional spaces are harmless.)\n\n")
2721
2722    first_line = None
2723    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
2724    for i, (_, k, v) in enumerate(sorted_list):
2725      f.write("[[[  %s  ]]] %s\n" % (v, k))
2726      if not v and first_line is None:
2727        # position cursor on first line with no password.
2728        first_line = i + 4
2729    f.close()
2730
2731    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
2732
2733    return self.ReadFile()
2734
2735  def ReadFile(self):
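    # The password file consists of lines of the form (illustrative):
    #   [[[  secret-password  ]]] path/to/signing/key
    # as written out by UpdateAndReadFile() above.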
2736    result = {}
2737    if self.pwfile is None:
2738      return result
2739    try:
2740      f = open(self.pwfile, "r")
2741      for line in f:
2742        line = line.strip()
2743        if not line or line[0] == '#':
2744          continue
2745        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
2746        if not m:
2747          logger.warning("Failed to parse password file: %s", line)
2748        else:
2749          result[m.group(2)] = m.group(1)
2750      f.close()
2751    except IOError as e:
2752      if e.errno != errno.ENOENT:
2753        logger.exception("Error reading password file:")
2754    return result
2755
2756
2757def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
2758             compress_type=None):
2759
2760  # http://b/18015246
2761  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
2762  # for files larger than 2GiB. We can work around this by adjusting their
2763  # limit. Note that `zipfile.writestr()` will not work for strings larger than
2764  # 2GiB. The Python interpreter sometimes rejects strings that large (though
2765  # it isn't clear to me exactly what circumstances cause this).
2766  # `zipfile.write()` must be used directly to work around this.
2767  #
2768  # This mess can be avoided if we port to python3.
2769  saved_zip64_limit = zipfile.ZIP64_LIMIT
2770  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2771
2772  if compress_type is None:
2773    compress_type = zip_file.compression
2774  if arcname is None:
2775    arcname = filename
2776
2777  saved_stat = os.stat(filename)
2778
2779  try:
2780    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
2781    # file to be zipped and reset it when we're done.
2782    os.chmod(filename, perms)
2783
2784    # Use a fixed timestamp so the output is repeatable.
2785    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
2786    # intentional. zip stores datetimes in local time without a time zone
2787    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
2788    # in the zip archive.
2789    local_epoch = datetime.datetime.fromtimestamp(0)
2790    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
2791    os.utime(filename, (timestamp, timestamp))
2792
2793    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
2794  finally:
2795    os.chmod(filename, saved_stat.st_mode)
2796    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
2797    zipfile.ZIP64_LIMIT = saved_zip64_limit
2798
2799
2800def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
2801                compress_type=None):
2802  """Wrap zipfile.writestr() function to work around the zip64 limit.
2803
2804  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
2805  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
2806  when calling crc32(bytes).
2807
2808  But it still works fine to write a shorter string into a large zip file.
2809  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
2810  when we know the string won't be too long.
2811  """
2812
2813  saved_zip64_limit = zipfile.ZIP64_LIMIT
2814  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2815
2816  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
2817    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
2818    zinfo.compress_type = zip_file.compression
2819    if perms is None:
2820      perms = 0o100644
2821  else:
2822    zinfo = zinfo_or_arcname
2823    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
2824    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for
2825    # such a case (since
2826    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
2827    # which seems to make more sense. Otherwise the entry will have 0o000 as the
2828    # permission bits. We follow the logic in Python 3 to get consistent
2829    # behavior between using the two versions.
2830    if not zinfo.external_attr:
2831      zinfo.external_attr = 0o600 << 16
2832
2833  # If compress_type is given, it overrides the value in zinfo.
2834  if compress_type is not None:
2835    zinfo.compress_type = compress_type
2836
2837  # If perms is given, it takes priority.
2838  if perms is not None:
2839    # If perms doesn't set the file type, mark it as a regular file.
2840    if perms & 0o770000 == 0:
2841      perms |= 0o100000
2842    zinfo.external_attr = perms << 16
2843
2844  # Use a fixed timestamp so the output is repeatable.
2845  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
2846
2847  zip_file.writestr(zinfo, data)
2848  zipfile.ZIP64_LIMIT = saved_zip64_limit
2849
2850
2851def ZipDelete(zip_filename, entries):
2852  """Deletes entries from a ZIP file.
2853
2854  Since deleting entries from a ZIP file is not supported, it shells out to
2855  'zip -d'.
2856
2857  Args:
2858    zip_filename: The name of the ZIP file.
2859    entries: The name of the entry, or the list of names to be deleted.
2860
2861  Raises:
2862    AssertionError: In case of non-zero return from 'zip'.
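
  Example (illustrative):
      ZipDelete("target_files.zip", ["IMAGES/system.map", "META/care_map.pb"])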
2863  """
2864  if isinstance(entries, str):
2865    entries = [entries]
2866  # If the list is empty, there is nothing to do.
2867  if not entries:
2868    return
2869  cmd = ["zip", "-d", zip_filename] + entries
2870  RunAndCheckOutput(cmd)
2871
2872
2873def ZipClose(zip_file):
2874  # http://b/18015246
2875  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
2876  # central directory.
2877  saved_zip64_limit = zipfile.ZIP64_LIMIT
2878  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2879
2880  zip_file.close()
2881
2882  zipfile.ZIP64_LIMIT = saved_zip64_limit
2883
2884
2885class DeviceSpecificParams(object):
2886  module = None
2887
2888  def __init__(self, **kwargs):
2889    """Keyword arguments to the constructor become attributes of this
2890    object, which is passed to all functions in the device-specific
2891    module."""
2892    for k, v in kwargs.items():
2893      setattr(self, k, v)
2894    self.extras = OPTIONS.extras
2895
2896    if self.module is None:
2897      path = OPTIONS.device_specific
2898      if not path:
2899        return
2900      try:
2901        if os.path.isdir(path):
2902          info = imp.find_module("releasetools", [path])
2903        else:
2904          d, f = os.path.split(path)
2905          b, x = os.path.splitext(f)
2906          if x == ".py":
2907            f = b
2908          info = imp.find_module(f, [d])
2909        logger.info("loaded device-specific extensions from %s", path)
2910        self.module = imp.load_module("device_specific", *info)
2911      except ImportError:
2912        logger.info("unable to load device-specific module; assuming none")
2913
2914  def _DoCall(self, function_name, *args, **kwargs):
2915    """Call the named function in the device-specific module, passing
2916    the given args and kwargs.  The first argument to the call will be
2917    the DeviceSpecific object itself.  If there is no module, or the
2918    module does not define the function, return the value of the
2919    'default' kwarg (which itself defaults to None)."""
2920    if self.module is None or not hasattr(self.module, function_name):
2921      return kwargs.get("default")
2922    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
2923
2924  def FullOTA_Assertions(self):
2925    """Called after emitting the block of assertions at the top of a
2926    full OTA package.  Implementations can add whatever additional
2927    assertions they like."""
2928    return self._DoCall("FullOTA_Assertions")
2929
2930  def FullOTA_InstallBegin(self):
2931    """Called at the start of full OTA installation."""
2932    return self._DoCall("FullOTA_InstallBegin")
2933
2934  def FullOTA_GetBlockDifferences(self):
2935    """Called during full OTA installation and verification.
2936    Implementation should return a list of BlockDifference objects describing
2937    the update on each additional partitions.
2938    """
2939    return self._DoCall("FullOTA_GetBlockDifferences")
2940
2941  def FullOTA_InstallEnd(self):
2942    """Called at the end of full OTA installation; typically this is
2943    used to install the image for the device's baseband processor."""
2944    return self._DoCall("FullOTA_InstallEnd")
2945
2946  def IncrementalOTA_Assertions(self):
2947    """Called after emitting the block of assertions at the top of an
2948    incremental OTA package.  Implementations can add whatever
2949    additional assertions they like."""
2950    return self._DoCall("IncrementalOTA_Assertions")
2951
2952  def IncrementalOTA_VerifyBegin(self):
2953    """Called at the start of the verification phase of incremental
2954    OTA installation; additional checks can be placed here to abort
2955    the script before any changes are made."""
2956    return self._DoCall("IncrementalOTA_VerifyBegin")
2957
2958  def IncrementalOTA_VerifyEnd(self):
2959    """Called at the end of the verification phase of incremental OTA
2960    installation; additional checks can be placed here to abort the
2961    script before any changes are made."""
2962    return self._DoCall("IncrementalOTA_VerifyEnd")
2963
2964  def IncrementalOTA_InstallBegin(self):
2965    """Called at the start of incremental OTA installation (after
2966    verification is complete)."""
2967    return self._DoCall("IncrementalOTA_InstallBegin")
2968
2969  def IncrementalOTA_GetBlockDifferences(self):
2970    """Called during incremental OTA installation and verification.
2971    Implementation should return a list of BlockDifference objects describing
2972    the update on each additional partitions.
2973    """
2974    return self._DoCall("IncrementalOTA_GetBlockDifferences")
2975
2976  def IncrementalOTA_InstallEnd(self):
2977    """Called at the end of incremental OTA installation; typically
2978    this is used to install the image for the device's baseband
2979    processor."""
2980    return self._DoCall("IncrementalOTA_InstallEnd")
2981
2982  def VerifyOTA_Assertions(self):
2983    return self._DoCall("VerifyOTA_Assertions")
2984
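
# Sketch (editor's addition) of a device-specific module that these hooks
# dispatch to; everything below is illustrative, including the path.
#
#   # device/<vendor>/<name>/releasetools.py
#   def FullOTA_InstallEnd(info):
#     # 'info' is the DeviceSpecificParams instance; attributes such as
#     # info.script and info.input_zip come from the constructor kwargs.
#     info.script.Print("Installing vendor firmware...")
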
2985
2986class File(object):
2987  def __init__(self, name, data, compress_size=None):
2988    self.name = name
2989    self.data = data
2990    self.size = len(data)
2991    self.compress_size = compress_size or self.size
2992    self.sha1 = sha1(data).hexdigest()
2993
2994  @classmethod
2995  def FromLocalFile(cls, name, diskname):
2996    # Use a context manager so the handle is closed even if read() raises.
2997    with open(diskname, "rb") as f:
2998      data = f.read()
2999    return File(name, data)
3000
3001  def WriteToTemp(self):
3002    t = tempfile.NamedTemporaryFile()
3003    t.write(self.data)
3004    t.flush()
3005    return t
3006
3007  def WriteToDir(self, d):
3008    with open(os.path.join(d, self.name), "wb") as fp:
3009      fp.write(self.data)
3010
3011  def AddToZip(self, z, compression=None):
3012    ZipWriteStr(z, self.name, self.data, compress_type=compression)
3013
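
# Usage sketch (editor's addition): reading a file from disk and adding it
# to an already-open output zip; the names and paths are hypothetical.
def _example_add_file(output_zip):
  f = File.FromLocalFile("firmware/modem.img", "/tmp/modem.img")
  logger.info("adding %s (%d bytes, sha1 %s)", f.name, f.size, f.sha1)
  f.AddToZip(output_zip, compression=zipfile.ZIP_DEFLATED)
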
3014
3015DIFF_PROGRAM_BY_EXT = {
3016    ".gz": "imgdiff",
3017    ".zip": ["imgdiff", "-z"],
3018    ".jar": ["imgdiff", "-z"],
3019    ".apk": ["imgdiff", "-z"],
3020    ".img": "imgdiff",
3021}
3022
3023
3024class Difference(object):
3025  def __init__(self, tf, sf, diff_program=None):
3026    self.tf = tf
3027    self.sf = sf
3028    self.patch = None
3029    self.diff_program = diff_program
3030
3031  def ComputePatch(self):
3032    """Compute the patch (as a string of data) needed to turn sf into
3033    tf.  Returns the same tuple as GetPatch()."""
3034
3035    tf = self.tf
3036    sf = self.sf
3037
3038    if self.diff_program:
3039      diff_program = self.diff_program
3040    else:
3041      ext = os.path.splitext(tf.name)[1]
3042      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
3043
3044    ttemp = tf.WriteToTemp()
3045    stemp = sf.WriteToTemp()
3046
3049    try:
3050      ptemp = tempfile.NamedTemporaryFile()
3051      if isinstance(diff_program, list):
3052        cmd = copy.copy(diff_program)
3053      else:
3054        cmd = [diff_program]
3055      cmd.append(stemp.name)
3056      cmd.append(ttemp.name)
3057      cmd.append(ptemp.name)
3058      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3059      err = []
3060
3061      def run():
3062        _, e = p.communicate()
3063        if e:
3064          err.append(e)
3065      th = threading.Thread(target=run)
3066      th.start()
3067      th.join(timeout=300)   # 5 mins
3068      if th.is_alive():
3069        logger.warning("diff command timed out")
3070        p.terminate()
3071        th.join(5)
3072        if th.is_alive():
3073          p.kill()
3074          th.join()
3075
3076      if p.returncode != 0:
3077        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
3078        self.patch = None
3079        return None, None, None
3080      diff = ptemp.read()
3081    finally:
3082      ptemp.close()
3083      stemp.close()
3084      ttemp.close()
3085
3086    self.patch = diff
3087    return self.tf, self.sf, self.patch
3088
3089  def GetPatch(self):
3090    """Returns a tuple of (target_file, source_file, patch_data).
3091
3092    patch_data may be None if ComputePatch hasn't been called, or if
3093    computing the patch failed.
3094    """
3095    return self.tf, self.sf, self.patch
3096
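
# Sketch (editor's addition): computing one patch between two in-memory
# File objects, where src_data and tgt_data are bytes. DIFF_PROGRAM_BY_EXT
# selects imgdiff for image-like extensions and bsdiff otherwise; the file
# name used here is hypothetical.
def _example_compute_patch(src_data, tgt_data):
  d = Difference(File("boot.img", tgt_data), File("boot.img", src_data))
  tf, sf, patch = d.ComputePatch()
  if patch is None:
    logger.warning("diff failed for %s", d.tf.name)
  return patch
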
3097
3098def ComputeDifferences(diffs):
3099  """Call ComputePatch on all the Difference objects in 'diffs'."""
3100  logger.info("%d diffs to compute", len(diffs))
3101
3102  # Do the largest files first, to try to reduce the long-pole effect.
3103  by_size = [(i.tf.size, i) for i in diffs]
3104  by_size.sort(reverse=True)
3105  by_size = [i[1] for i in by_size]
3106
3107  lock = threading.Lock()
3108  diff_iter = iter(by_size)   # accessed under lock
3109
3110  def worker():
3111    try:
3112      lock.acquire()
3113      for d in diff_iter:
3114        lock.release()
3115        start = time.time()
3116        d.ComputePatch()
3117        dur = time.time() - start
3118        lock.acquire()
3119
3120        tf, sf, patch = d.GetPatch()
3121        if sf.name == tf.name:
3122          name = tf.name
3123        else:
3124          name = "%s (%s)" % (tf.name, sf.name)
3125        if patch is None:
3126          logger.error("patching failed! %40s", name)
3127        else:
3128          logger.info(
3129              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
3130              tf.size, 100.0 * len(patch) / tf.size, name)
3131      lock.release()
3132    except Exception:
3133      logger.exception("Failed to compute diff from worker")
3134      raise
3135
3136  # start worker threads; wait for them all to finish.
3137  threads = [threading.Thread(target=worker)
3138             for i in range(OPTIONS.worker_threads)]
3139  for th in threads:
3140    th.start()
3141  while threads:
3142    threads.pop().join()
3143
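
# Usage sketch (editor's addition): fanning several Difference objects out
# across OPTIONS.worker_threads, which must be set by the caller; the
# 'file_pairs' iterable of (target, source) File objects is hypothetical.
def _example_compute_all(file_pairs):
  diffs = [Difference(tf, sf) for tf, sf in file_pairs]
  ComputeDifferences(diffs)
  return [d.GetPatch() for d in diffs]
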
3144
3145class BlockDifference(object):
3146  def __init__(self, partition, tgt, src=None, check_first_block=False,
3147               version=None, disable_imgdiff=False):
3148    self.tgt = tgt
3149    self.src = src
3150    self.partition = partition
3151    self.check_first_block = check_first_block
3152    self.disable_imgdiff = disable_imgdiff
3153
3154    if version is None:
3155      version = max(
3156          int(i) for i in
3157          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
3158    assert version >= 3
3159    self.version = version
3160
3161    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
3162                       version=self.version,
3163                       disable_imgdiff=self.disable_imgdiff)
3164    self.path = os.path.join(MakeTempDir(), partition)
3165    b.Compute(self.path)
3166    self._required_cache = b.max_stashed_size
3167    self.touched_src_ranges = b.touched_src_ranges
3168    self.touched_src_sha1 = b.touched_src_sha1
3169
3170    # On devices with dynamic partitions, for new partitions,
3171    # src is None but OPTIONS.source_info_dict is not.
3172    if OPTIONS.source_info_dict is None:
3173      is_dynamic_build = OPTIONS.info_dict.get(
3174          "use_dynamic_partitions") == "true"
3175      is_dynamic_source = False
3176    else:
3177      is_dynamic_build = OPTIONS.source_info_dict.get(
3178          "use_dynamic_partitions") == "true"
3179      is_dynamic_source = partition in shlex.split(
3180          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
3181
3182    is_dynamic_target = partition in shlex.split(
3183        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
3184
3185    # For dynamic partitions builds, check partition list in both source
3186    # and target build because new partitions may be added, and existing
3187    # partitions may be removed.
3188    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
3189
3190    if is_dynamic:
3191      self.device = 'map_partition("%s")' % partition
3192    else:
3193      if OPTIONS.source_info_dict is None:
3194        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
3195                                              OPTIONS.info_dict)
3196      else:
3197        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
3198                                              OPTIONS.source_info_dict)
3199      self.device = device_expr
3200
3201  @property
3202  def required_cache(self):
3203    return self._required_cache
3204
3205  def WriteScript(self, script, output_zip, progress=None,
3206                  write_verify_script=False):
3207    if not self.src:
3208      # write the output unconditionally
3209      script.Print("Patching %s image unconditionally..." % (self.partition,))
3210    else:
3211      script.Print("Patching %s image after verification." % (self.partition,))
3212
3213    if progress:
3214      script.ShowProgress(progress, 0)
3215    self._WriteUpdate(script, output_zip)
3216
3217    if write_verify_script:
3218      self.WritePostInstallVerifyScript(script)
3219
3220  def WriteStrictVerifyScript(self, script):
3221    """Verify all the blocks in the care_map, including clobbered blocks.
3222
3223    This differs from WriteVerifyScript() in that a) it prints different
3224    error messages, and b) it doesn't allow halfway-updated images to pass
3225    the verification."""
3226
3227    partition = self.partition
3228    script.Print("Verifying %s..." % (partition,))
3229    ranges = self.tgt.care_map
3230    ranges_str = ranges.to_string_raw()
3231    script.AppendExtra(
3232        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
3233        'ui_print("%s has unexpected contents.");' % (
3234            self.device, ranges_str,
3235            self.tgt.TotalSha1(include_clobbered_blocks=True),
3236            self.partition))
3237    script.AppendExtra("")
3238
3239  def WriteVerifyScript(self, script, touched_blocks_only=False):
3240    partition = self.partition
3241
3242    # full OTA
3243    if not self.src:
3244      script.Print("Image %s will be patched unconditionally." % (partition,))
3245
3246    # incremental OTA
3247    else:
3248      if touched_blocks_only:
3249        ranges = self.touched_src_ranges
3250        expected_sha1 = self.touched_src_sha1
3251      else:
3252        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
3253        expected_sha1 = self.src.TotalSha1()
3254
3255      # No blocks to be checked, skipping.
3256      if not ranges:
3257        return
3258
3259      ranges_str = ranges.to_string_raw()
3260      script.AppendExtra(
3261          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
3262          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
3263          '"%s.patch.dat")) then' % (
3264              self.device, ranges_str, expected_sha1,
3265              self.device, partition, partition, partition))
3266      script.Print('Verified %s image...' % (partition,))
3267      script.AppendExtra('else')
3268
3269      if self.version >= 4:
3270
3271        # Bug: 21124327
3272        # When generating incrementals for the system and vendor partitions in
3273        # version 4 or newer, explicitly check the first block (which contains
3274        # the superblock) of the partition to see if it's what we expect. If
3275        # this check fails, give an explicit log message about the partition
3276        # having been remounted R/W (the most likely explanation).
3277        if self.check_first_block:
3278          script.AppendExtra('check_first_block(%s);' % (self.device,))
3279
3280        # If version >= 4, try block recovery before abort update
3281        if partition == "system":
3282          code = ErrorCode.SYSTEM_RECOVER_FAILURE
3283        else:
3284          code = ErrorCode.VENDOR_RECOVER_FAILURE
3285        script.AppendExtra((
3286            'ifelse (block_image_recover({device}, "{ranges}") && '
3287            'block_image_verify({device}, '
3288            'package_extract_file("{partition}.transfer.list"), '
3289            '"{partition}.new.dat", "{partition}.patch.dat"), '
3290            'ui_print("{partition} recovered successfully."), '
3291            'abort("E{code}: {partition} partition fails to recover"));\n'
3292            'endif;').format(device=self.device, ranges=ranges_str,
3293                             partition=partition, code=code))
3294
3295      # Abort the OTA update. Note that the incremental OTA cannot be applied
3296      # even if it may match the checksum of the target partition.
3297      # a) If version < 3, operations like move and erase will make changes
3298      #    unconditionally and damage the partition.
3299      # b) If version >= 3, it won't even reach here.
3300      else:
3301        if partition == "system":
3302          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
3303        else:
3304          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
3305        script.AppendExtra((
3306            'abort("E%d: %s partition has unexpected contents");\n'
3307            'endif;') % (code, partition))
3308
3309  def WritePostInstallVerifyScript(self, script):
3310    partition = self.partition
3311    script.Print('Verifying the updated %s image...' % (partition,))
3312    # Unlike pre-install verification, clobbered_blocks should not be ignored.
3313    ranges = self.tgt.care_map
3314    ranges_str = ranges.to_string_raw()
3315    script.AppendExtra(
3316        'if range_sha1(%s, "%s") == "%s" then' % (
3317            self.device, ranges_str,
3318            self.tgt.TotalSha1(include_clobbered_blocks=True)))
3319
3320    # Bug: 20881595
3321    # Verify that extended blocks are really zeroed out.
3322    if self.tgt.extended:
3323      ranges_str = self.tgt.extended.to_string_raw()
3324      script.AppendExtra(
3325          'if range_sha1(%s, "%s") == "%s" then' % (
3326              self.device, ranges_str,
3327              self._HashZeroBlocks(self.tgt.extended.size())))
3328      script.Print('Verified the updated %s image.' % (partition,))
3329      if partition == "system":
3330        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
3331      else:
3332        code = ErrorCode.VENDOR_NONZERO_CONTENTS
3333      script.AppendExtra(
3334          'else\n'
3335          '  abort("E%d: %s partition has unexpected non-zero contents after '
3336          'OTA update");\n'
3337          'endif;' % (code, partition))
3338    else:
3339      script.Print('Verified the updated %s image.' % (partition,))
3340
3341    if partition == "system":
3342      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
3343    else:
3344      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
3345
3346    script.AppendExtra(
3347        'else\n'
3348        '  abort("E%d: %s partition has unexpected contents after OTA '
3349        'update");\n'
3350        'endif;' % (code, partition))
3351
3352  def _WriteUpdate(self, script, output_zip):
3353    ZipWrite(output_zip,
3354             '{}.transfer.list'.format(self.path),
3355             '{}.transfer.list'.format(self.partition))
3356
3357    # For a full OTA, compress the new.dat with brotli at quality 6 to reduce
3358    # its size. Quality 9 almost triples the compression time but doesn't
3359    # reduce the size much further. For a typical 1.8G system.new.dat:
3360    #                       zip  | brotli(quality 6)  | brotli(quality 9)
3361    #   compressed_size:    942M | 869M (~8% reduced) | 854M
3362    #   compression_time:   75s  | 265s               | 719s
3363    #   decompression_time: 15s  | 25s                | 25s
3364
3365    if not self.src:
3366      brotli_cmd = ['brotli', '--quality=6',
3367                    '--output={}.new.dat.br'.format(self.path),
3368                    '{}.new.dat'.format(self.path)]
3369      logger.info("Compressing %s.new.dat with brotli", self.partition)
3370      RunAndCheckOutput(brotli_cmd)
3371
3372      new_data_name = '{}.new.dat.br'.format(self.partition)
3373      ZipWrite(output_zip,
3374               '{}.new.dat.br'.format(self.path),
3375               new_data_name,
3376               compress_type=zipfile.ZIP_STORED)
3377    else:
3378      new_data_name = '{}.new.dat'.format(self.partition)
3379      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
3380
3381    ZipWrite(output_zip,
3382             '{}.patch.dat'.format(self.path),
3383             '{}.patch.dat'.format(self.partition),
3384             compress_type=zipfile.ZIP_STORED)
3385
3386    if self.partition == "system":
3387      code = ErrorCode.SYSTEM_UPDATE_FAILURE
3388    else:
3389      code = ErrorCode.VENDOR_UPDATE_FAILURE
3390
3391    call = ('block_image_update({device}, '
3392            'package_extract_file("{partition}.transfer.list"), '
3393            '"{new_data_name}", "{partition}.patch.dat") ||\n'
3394            '  abort("E{code}: Failed to update {partition} image.");'.format(
3395                device=self.device, partition=self.partition,
3396                new_data_name=new_data_name, code=code))
3397    script.AppendExtra(script.WordWrap(call))
3398
3399  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
3400    data = source.ReadRangeSet(ranges)
3401    ctx = sha1()
3402
3403    for p in data:
3404      ctx.update(p)
3405
3406    return ctx.hexdigest()
3407
3408  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
3409    """Return the hash value for all zero blocks."""
3410    zero_block = b'\x00' * 4096  # sha1() requires bytes, not str.
3411    ctx = sha1()
3412    for _ in range(num_blocks):
3413      ctx.update(zero_block)
3414
3415    return ctx.hexdigest()
3416
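
# Sketch (editor's addition): how an OTA generator might drive
# BlockDifference for a full OTA of the system partition. It requires
# OPTIONS.info_dict to be loaded; 'script' is an edify script writer and
# 'system_tgt' a sparse image object, both hypothetical here.
def _example_write_full_update(script, output_zip, system_tgt):
  block_diff = BlockDifference("system", system_tgt, src=None)
  block_diff.WriteScript(script, output_zip, progress=0.9,
                         write_verify_script=True)
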
3417
3418# Expose these two classes to support vendor-specific scripts
3419DataImage = images.DataImage
3420EmptyImage = images.EmptyImage
3421
3422
3423# map recovery.fstab's fs_types to mount/format "partition types"
3424PARTITION_TYPES = {
3425    "ext4": "EMMC",
3426    "emmc": "EMMC",
3427    "f2fs": "EMMC",
3428    "squashfs": "EMMC"
3429}
3430
3431
3432def GetTypeAndDevice(mount_point, info, check_no_slot=True):
3433  """
3434  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
3435  backwards compatibility. It aborts if the fstab entry has slotselect option
3436  (unless check_no_slot is explicitly set to False).
3437  """
3438  fstab = info["fstab"]
3439  if fstab:
3440    if check_no_slot:
3441      assert not fstab[mount_point].slotselect, \
3442          "Use GetTypeAndDeviceExpr instead"
3443    return (PARTITION_TYPES[fstab[mount_point].fs_type],
3444            fstab[mount_point].device)
3445  raise KeyError
3446
3447
3448def GetTypeAndDeviceExpr(mount_point, info):
3449  """
3450  Returns the partition type (see PARTITION_TYPES), and an edify expression
3451  that evaluates to the device path at runtime.
3452  """
3453  fstab = info["fstab"]
3454  if fstab:
3455    p = fstab[mount_point]
3456    device_expr = '"%s"' % p.device
3457    if p.slotselect:
3458      device_expr = 'add_slot_suffix(%s)' % device_expr
3459    return (PARTITION_TYPES[p.fs_type], device_expr)
3460  raise KeyError
3461
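
# Usage sketch (editor's addition): for an fstab entry with 'slotselect',
# the returned edify expression wraps the device in add_slot_suffix() so the
# active slot is resolved at runtime on the device. 'info_dict' must carry a
# parsed fstab with a "/system" entry.
def _example_device_expr(info_dict):
  _, device_expr = GetTypeAndDeviceExpr("/system", info_dict)
  return device_expr  # e.g. 'add_slot_suffix("/dev/block/.../system")'
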
3462
3463def GetEntryForDevice(fstab, device):
3464  """
3465  Returns:
3466    The first entry in fstab whose device is the given value.
3467  """
3468  if not fstab:
3469    return None
3470  for mount_point in fstab:
3471    if fstab[mount_point].device == device:
3472      return fstab[mount_point]
3473  return None
3474
3475
3476def ParseCertificate(data):
3477  """Parses and converts a PEM-encoded certificate into DER-encoded.
3478
3479  This gives the same result as `openssl x509 -in <filename> -outform DER`.
3480
3481  Returns:
3482    The decoded certificate bytes.
3483  """
3484  cert_buffer = []
3485  save = False
3486  for line in data.split("\n"):
3487    if "--END CERTIFICATE--" in line:
3488      break
3489    if save:
3490      cert_buffer.append(line)
3491    if "--BEGIN CERTIFICATE--" in line:
3492      save = True
3493  cert = base64.b64decode("".join(cert_buffer))
3494  return cert
3495
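
# Usage sketch (editor's addition): converting an on-disk PEM certificate to
# DER bytes and fingerprinting it; the path is hypothetical.
def _example_cert_fingerprint(pem_path):
  with open(pem_path) as f:
    der = ParseCertificate(f.read())
  return sha256(der).hexdigest()
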
3496
3497def ExtractPublicKey(cert):
3498  """Extracts the public key (PEM-encoded) from the given certificate file.
3499
3500  Args:
3501    cert: The certificate filename.
3502
3503  Returns:
3504    The public key string.
3505
3506  Raises:
3507    AssertionError: On non-zero return from 'openssl'.
3508  """
3509  # The behavior of '-out' differs between openssl 1.1 and openssl 1.0.
3510  # While openssl 1.1 writes the key to the file named by '-out', openssl 1.0
3511  # (both 1.0.1 and 1.0.2) doesn't. So we collect the output from stdout
3512  # instead.
3513  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
3514  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3515  pubkey, stderrdata = proc.communicate()
3516  assert proc.returncode == 0, \
3517      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
3518  return pubkey
3519
3520
3521def ExtractAvbPublicKey(avbtool, key):
3522  """Extracts the AVB public key from the given public or private key.
3523
3524  Args:
3525    avbtool: The AVB tool to use.
3526    key: The input key file, which should be PEM-encoded public or private key.
3527
3528  Returns:
3529    The path to the extracted AVB public key file.
3530  """
3531  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
3532  RunAndCheckOutput(
3533      [avbtool, 'extract_public_key', "--key", key, "--output", output])
3534  return output
3535
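
# Usage sketch (editor's addition): dumping the AVB public key for a signing
# key; 'avbtool' must be on PATH and the key path is hypothetical.
def _example_avb_public_key(key_path):
  pubkey_path = ExtractAvbPublicKey("avbtool", key_path)
  logger.info("AVB public key extracted to %s", pubkey_path)
  return pubkey_path
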
3536
3537def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
3538                      info_dict=None):
3539  """Generates the recovery-from-boot patch and writes the script to output.
3540
3541  Most of the space in the boot and recovery images is taken up by the kernel,
3542  which is identical for the two, so the resulting patch should be small. The
3543  patch goes to the output sink, along with a shell script that init.rc runs on
3544  first boot to actually do the patching and install the new recovery image.
3545
3546  Args:
3547    input_dir: The top-level input directory of the target-files.zip.
3548    output_sink: The callback function that writes the result.
3549    recovery_img: File object for the recovery image.
3550    boot_img: File object for the boot image.
3551    info_dict: A dict returned by common.LoadInfoDict() on the input
3552        target_files. Will use OPTIONS.info_dict if None has been given.
3553  """
3554  if info_dict is None:
3555    info_dict = OPTIONS.info_dict
3556
3557  full_recovery_image = info_dict.get("full_recovery_image") == "true"
3558  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
3559
3560  if board_uses_vendorimage:
3561    # In this case, the output sink is rooted at VENDOR
3562    recovery_img_path = "etc/recovery.img"
3563    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
3564    sh_dir = "bin"
3565  else:
3566    # In this case the output sink is rooted at SYSTEM
3567    recovery_img_path = "vendor/etc/recovery.img"
3568    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
3569    sh_dir = "vendor/bin"
3570
3571  if full_recovery_image:
3572    output_sink(recovery_img_path, recovery_img.data)
3573
3574  else:
3575    system_root_image = info_dict.get("system_root_image") == "true"
3576    path = os.path.join(input_dir, recovery_resource_dat_path)
3577    # With system-root-image, boot and recovery images will have mismatching
3578    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
3579    # to handle such a case.
3580    if system_root_image:
3581      diff_program = ["bsdiff"]
3582      bonus_args = ""
3583      assert not os.path.exists(path)
3584    else:
3585      diff_program = ["imgdiff"]
3586      if os.path.exists(path):
3587        diff_program.append("-b")
3588        diff_program.append(path)
3589        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
3590      else:
3591        bonus_args = ""
3592
3593    d = Difference(recovery_img, boot_img, diff_program=diff_program)
3594    _, _, patch = d.ComputePatch()
3595    output_sink("recovery-from-boot.p", patch)
3596
3597  try:
3598    # The following GetTypeAndDevice()s need to use the path in the target
3599    # info_dict instead of source_info_dict.
3600    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
3601                                              check_no_slot=False)
3602    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
3603                                                      check_no_slot=False)
3604  except KeyError:
3605    return
3606
3607  if full_recovery_image:
3608
3609    # Note that we use /vendor to refer to the recovery resources. This will
3610    # work for a separate vendor partition mounted at /vendor or a
3611    # /system/vendor subdirectory on the system partition, for which init will
3612    # create a symlink from /vendor to /system/vendor.
3613
3614    sh = """#!/vendor/bin/sh
3615if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
3616  applypatch \\
3617          --flash /vendor/etc/recovery.img \\
3618          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
3619      log -t recovery "Installing new recovery image: succeeded" || \\
3620      log -t recovery "Installing new recovery image: failed"
3621else
3622  log -t recovery "Recovery image already installed"
3623fi
3624""" % {'type': recovery_type,
3625       'device': recovery_device,
3626       'sha1': recovery_img.sha1,
3627       'size': recovery_img.size}
3628  else:
3629    sh = """#!/vendor/bin/sh
3630if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
3631  applypatch %(bonus_args)s \\
3632          --patch /vendor/recovery-from-boot.p \\
3633          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
3634          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
3635      log -t recovery "Installing new recovery image: succeeded" || \\
3636      log -t recovery "Installing new recovery image: failed"
3637else
3638  log -t recovery "Recovery image already installed"
3639fi
3640""" % {'boot_size': boot_img.size,
3641       'boot_sha1': boot_img.sha1,
3642       'recovery_size': recovery_img.size,
3643       'recovery_sha1': recovery_img.sha1,
3644       'boot_type': boot_type,
3645       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
3646       'recovery_type': recovery_type,
3647       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
3648       'bonus_args': bonus_args}
3649
3650  # The install script location moved from /system/etc to /system/bin in the L
3651  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
3652  sh_location = os.path.join(sh_dir, "install-recovery.sh")
3653
3654  logger.info("putting script in %s", sh_location)
3655
3656  output_sink(sh_location, sh.encode())
3657
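
# Usage sketch (editor's addition): collecting MakeRecoveryPatch() outputs
# in a dict instead of writing them into a zip; all inputs (the target-files
# directory and the two File objects) are hypothetical.
def _example_make_recovery_patch(input_dir, recovery_img, boot_img):
  outputs = {}
  def output_sink(path, data):
    outputs[path] = data
  MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
  return outputs
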
3658
3659class DynamicPartitionUpdate(object):
3660  def __init__(self, src_group=None, tgt_group=None, progress=None,
3661               block_difference=None):
3662    self.src_group = src_group
3663    self.tgt_group = tgt_group
3664    self.progress = progress
3665    self.block_difference = block_difference
3666
3667  @property
3668  def src_size(self):
3669    if not self.block_difference:
3670      return 0
3671    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
3672
3673  @property
3674  def tgt_size(self):
3675    if not self.block_difference:
3676      return 0
3677    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
3678
3679  @staticmethod
3680  def _GetSparseImageSize(img):
3681    if not img:
3682      return 0
3683    return img.blocksize * img.total_blocks
3684
3685
3686class DynamicGroupUpdate(object):
3687  def __init__(self, src_size=None, tgt_size=None):
3688    # None: group does not exist. 0: no size limits.
3689    self.src_size = src_size
3690    self.tgt_size = tgt_size
3691
3692
3693class DynamicPartitionsDifference(object):
3694  def __init__(self, info_dict, block_diffs, progress_dict=None,
3695               source_info_dict=None):
3696    if progress_dict is None:
3697      progress_dict = {}
3698
3699    self._remove_all_before_apply = False
3700    if source_info_dict is None:
3701      self._remove_all_before_apply = True
3702      source_info_dict = {}
3703
3704    block_diff_dict = collections.OrderedDict(
3705        [(e.partition, e) for e in block_diffs])
3706
3707    assert len(block_diff_dict) == len(block_diffs), \
3708        "Duplicated BlockDifference object for {}".format(
3709            [partition for partition, count in
3710             collections.Counter(e.partition for e in block_diffs).items()
3711             if count > 1])
3712
3713    self._partition_updates = collections.OrderedDict()
3714
3715    for p, block_diff in block_diff_dict.items():
3716      self._partition_updates[p] = DynamicPartitionUpdate()
3717      self._partition_updates[p].block_difference = block_diff
3718
3719    for p, progress in progress_dict.items():
3720      if p in self._partition_updates:
3721        self._partition_updates[p].progress = progress
3722
3723    tgt_groups = shlex.split(info_dict.get(
3724        "super_partition_groups", "").strip())
3725    src_groups = shlex.split(source_info_dict.get(
3726        "super_partition_groups", "").strip())
3727
3728    for g in tgt_groups:
3729      for p in shlex.split(info_dict.get(
3730              "super_%s_partition_list" % g, "").strip()):
3731        assert p in self._partition_updates, \
3732            "{} is in target super_{}_partition_list but no BlockDifference " \
3733            "object is provided.".format(p, g)
3734        self._partition_updates[p].tgt_group = g
3735
3736    for g in src_groups:
3737      for p in shlex.split(source_info_dict.get(
3738              "super_%s_partition_list" % g, "").strip()):
3739        assert p in self._partition_updates, \
3740            "{} is in source super_{}_partition_list but no BlockDifference " \
3741            "object is provided.".format(p, g)
3742        self._partition_updates[p].src_group = g
3743
3744    target_dynamic_partitions = set(shlex.split(info_dict.get(
3745        "dynamic_partition_list", "").strip()))
3746    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
3747                                  if u.tgt_size)
3748    assert block_diffs_with_target == target_dynamic_partitions, \
3749        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
3750            list(target_dynamic_partitions), list(block_diffs_with_target))
3751
3752    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
3753        "dynamic_partition_list", "").strip()))
3754    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
3755                                  if u.src_size)
3756    assert block_diffs_with_source == source_dynamic_partitions, \
3757        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
3758            list(source_dynamic_partitions), list(block_diffs_with_source))
3759
3760    if self._partition_updates:
3761      logger.info("Updating dynamic partitions %s",
3762                  list(self._partition_updates.keys()))
3763
3764    self._group_updates = collections.OrderedDict()
3765
3766    for g in tgt_groups:
3767      self._group_updates[g] = DynamicGroupUpdate()
3768      self._group_updates[g].tgt_size = int(info_dict.get(
3769          "super_%s_group_size" % g, "0").strip())
3770
3771    for g in src_groups:
3772      if g not in self._group_updates:
3773        self._group_updates[g] = DynamicGroupUpdate()
3774      self._group_updates[g].src_size = int(source_info_dict.get(
3775          "super_%s_group_size" % g, "0").strip())
3776
3777    self._Compute()
3778
3779  def WriteScript(self, script, output_zip, write_verify_script=False):
3780    script.Comment('--- Start patching dynamic partitions ---')
3781    for p, u in self._partition_updates.items():
3782      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
3783        script.Comment('Patch partition %s' % p)
3784        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
3785                                       write_verify_script=False)
3786
3787    op_list_path = MakeTempFile()
3788    with open(op_list_path, 'w') as f:
3789      for line in self._op_list:
3790        f.write('{}\n'.format(line))
3791
3792    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
3793
3794    script.Comment('Update dynamic partition metadata')
3795    script.AppendExtra('assert(update_dynamic_partitions('
3796                       'package_extract_file("dynamic_partitions_op_list")));')
3797
3798    if write_verify_script:
3799      for p, u in self._partition_updates.items():
3800        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
3801          u.block_difference.WritePostInstallVerifyScript(script)
3802          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
3803
3804    for p, u in self._partition_updates.items():
3805      if u.tgt_size and u.src_size <= u.tgt_size:
3806        script.Comment('Patch partition %s' % p)
3807        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
3808                                       write_verify_script=write_verify_script)
3809        if write_verify_script:
3810          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
3811
3812    script.Comment('--- End patching dynamic partitions ---')
3813
3814  def _Compute(self):
3815    self._op_list = list()
3816
3817    def append(line):
3818      self._op_list.append(line)
3819
3820    def comment(line):
3821      self._op_list.append("# %s" % line)
3822
3823    if self._remove_all_before_apply:
3824      comment('Remove all existing dynamic partitions and groups before '
3825              'applying full OTA')
3826      append('remove_all_groups')
3827
3828    for p, u in self._partition_updates.items():
3829      if u.src_group and not u.tgt_group:
3830        append('remove %s' % p)
3831
3832    for p, u in self._partition_updates.items():
3833      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
3834        comment('Move partition %s from %s to default' % (p, u.src_group))
3835        append('move %s default' % p)
3836
3837    for p, u in self._partition_updates.items():
3838      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
3839        comment('Shrink partition %s from %d to %d' %
3840                (p, u.src_size, u.tgt_size))
3841        append('resize %s %s' % (p, u.tgt_size))
3842
3843    for g, u in self._group_updates.items():
3844      if u.src_size is not None and u.tgt_size is None:
3845        append('remove_group %s' % g)
3846      if (u.src_size is not None and u.tgt_size is not None and
3847              u.src_size > u.tgt_size):
3848        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
3849        append('resize_group %s %d' % (g, u.tgt_size))
3850
3851    for g, u in self._group_updates.items():
3852      if u.src_size is None and u.tgt_size is not None:
3853        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
3854        append('add_group %s %d' % (g, u.tgt_size))
3855      if (u.src_size is not None and u.tgt_size is not None and
3856              u.src_size < u.tgt_size):
3857        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
3858        append('resize_group %s %d' % (g, u.tgt_size))
3859
3860    for p, u in self._partition_updates.items():
3861      if u.tgt_group and not u.src_group:
3862        comment('Add partition %s to group %s' % (p, u.tgt_group))
3863        append('add %s %s' % (p, u.tgt_group))
3864
3865    for p, u in self._partition_updates.items():
3866      if u.tgt_size and u.src_size < u.tgt_size:
3867        comment('Grow partition %s from %d to %d' %
3868                (p, u.src_size, u.tgt_size))
3869        append('resize %s %d' % (p, u.tgt_size))
3870
3871    for p, u in self._partition_updates.items():
3872      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
3873        comment('Move partition %s from default to %s' %
3874                (p, u.tgt_group))
3875        append('move %s %s' % (p, u.tgt_group))
3876
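
# Illustration (editor's addition): the dynamic_partitions_op_list emitted
# by _Compute() is a plain-text script interpreted on the device by
# update_dynamic_partitions(). A full OTA might produce something like
# (all names and sizes hypothetical):
#
#   remove_all_groups
#   add_group qti_dynamic_partitions 4294967296
#   add system qti_dynamic_partitions
#   resize system 3221225472
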
3877
3878def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
3879  """
3880  Gets build.prop from the ramdisk within the boot image.
3881
3882  Args:
3883    boot_img: The boot image file. The ramdisk must be compressed in the
3884        lz4 or gzip (minigzip) format.
3885  Returns:
3886    The path to an extracted file that stores the boot image properties.
3887  """
3888  tmp_dir = MakeTempDir('boot_', suffix='.img')
3889  try:
3890    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
3891                      boot_img, '--out', tmp_dir])
3892    ramdisk = os.path.join(tmp_dir, 'ramdisk')
3893    if not os.path.isfile(ramdisk):
3894      logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
3895      return None
3896    uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
3897    if ramdisk_format == RamdiskFormat.LZ4:
3898      RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
3899    elif ramdisk_format == RamdiskFormat.GZ:
3900      with open(ramdisk, 'rb') as input_stream:
3901        with open(uncompressed_ramdisk, 'wb') as output_stream:
3902          p2 = Run(['minigzip', '-d'], stdin=input_stream.fileno(),
3903                   stdout=output_stream.fileno())
3904          p2.wait()
3905    else:
3906      logger.error('Only lz4 and minigzip ramdisk formats are supported.')
3907      return None
3908
3909    abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
3910    extracted_ramdisk = MakeTempDir('extracted_ramdisk')
3911    # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
3912    # the host environment.
3913    RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
3914                      cwd=extracted_ramdisk)
3915
3916    for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
3917      prop_file = os.path.join(extracted_ramdisk, search_path)
3918      if os.path.isfile(prop_file):
3919        return prop_file
3920      logger.warning(
3921          'Unable to get boot image timestamp: no %s in ramdisk', search_path)
3922
3923    return None
3924
3925  except ExternalError as e:
3926    logger.warning('Unable to get boot image build props: %s', e)
3927    return None
3928
3929
3930def GetBootImageTimestamp(boot_img):
3931  """
3932  Gets the timestamp from the ramdisk within the boot image.
3933
3934  Args:
3935    boot_img: The boot image file. The ramdisk must be compressed in the
3936        lz4 format.
3937  Returns:
3938    An integer that corresponds to the timestamp of the boot image, or None
3939    if the file has an unknown format. Raises an exception if an unexpected
3940    error occurs.
3941  """
3942  prop_file = GetBootImageBuildProp(boot_img)
3943  if not prop_file:
3944    return None
3945
3946  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
3947  if props is None:
3948    return None
3949
3950  try:
3951    timestamp = props.GetProp('ro.bootimage.build.date.utc')
3952    if timestamp:
3953      return int(timestamp)
3954    logger.warning(
3955        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
3956    return None
3957
3958  except ExternalError as e:
3959    logger.warning('Unable to get boot image timestamp: %s', e)
3960    return None
3961
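
# Usage sketch (editor's addition): a downgrade check based on boot image
# timestamps. The image paths are hypothetical, and the helper tools
# (unpack_bootimg, lz4, toybox) must be available on PATH.
def _example_is_boot_downgrade(source_boot, target_boot):
  src_ts = GetBootImageTimestamp(source_boot)
  tgt_ts = GetBootImageTimestamp(target_boot)
  return src_ts is not None and tgt_ts is not None and tgt_ts < src_ts
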
3962
3963def GetCareMap(which, imgname):
3964  """Returns the care_map string for the given partition.
3965
3966  Args:
3967    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
3968    imgname: The filename of the image.
3969
3970  Returns:
3971    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
3972    RangeSet; or None.
3973  """
3974  assert which in PARTITIONS_WITH_CARE_MAP
3975
3976  # which + "_image_size" is the size of the region that the actual filesystem
3977  # image resides in, which is all that needs to be verified. The additional
3978  # blocks in the image file contain verity metadata; reading them would
3979  # trigger invalid reads.
3980  image_size = OPTIONS.info_dict.get(which + "_image_size")
3981  if not image_size:
3982    return None
3983
3984  disable_sparse = OPTIONS.info_dict.get(which + "_disable_sparse")
3985
3986  image_blocks = int(image_size) // 4096 - 1
3987  # It's OK for image_blocks to be 0, because care map ranges are inclusive.
3988  # So 0-0 means "just block 0", which is valid.
3989  assert image_blocks >= 0, "blocks for {} must be non-negative, image size: {}".format(
3990      which, image_size)
3991
3992  # For sparse images, we will only check the blocks that are listed in the care
3993  # map, i.e. the ones with meaningful data.
3994  if "extfs_sparse_flag" in OPTIONS.info_dict and not disable_sparse:
3995    simg = sparse_img.SparseImage(imgname)
3996    care_map_ranges = simg.care_map.intersect(
3997        rangelib.RangeSet("0-{}".format(image_blocks)))
3998
3999  # Otherwise for non-sparse images, we read all the blocks in the filesystem
4000  # image.
4001  else:
4002    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
4003
4004  return [which, care_map_ranges.to_string_raw()]
4005
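
# Worked example (editor's addition): for a hypothetical 100 MiB system
# image, system_image_size = 104857600, so image_blocks =
# 104857600 // 4096 - 1 = 25599 and the care map covers the inclusive
# RangeSet "0-25599".
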
4006
4007def AddCareMapForAbOta(output_file, ab_partitions, image_paths):
4008  """Generates and adds care_map.pb for a/b partition that has care_map.
4009
4010  Args:
4011    output_file: The output zip file (needs to be already open),
4012        or file path to write care_map.pb.
4013    ab_partitions: The list of A/B partitions.
4014    image_paths: A map from the partition name to the image path.
4015  """
4016  if not output_file:
4017    raise ExternalError('Expected output_file for AddCareMapForAbOta')
4018
4019  care_map_list = []
4020  for partition in ab_partitions:
4021    partition = partition.strip()
4022    if partition not in PARTITIONS_WITH_CARE_MAP:
4023      continue
4024
4025    verity_block_device = "{}_verity_block_device".format(partition)
4026    avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
4027    if (verity_block_device in OPTIONS.info_dict or
4028            OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
4029      if partition not in image_paths:
4030        logger.warning('Potential partition with care_map missing from images: %s',
4031                       partition)
4032        continue
4033      image_path = image_paths[partition]
4034      if not os.path.exists(image_path):
4035        raise ExternalError('Expected image at path {}'.format(image_path))
4036
4037      care_map = GetCareMap(partition, image_path)
4038      if not care_map:
4039        continue
4040      care_map_list += care_map
4041
4042      # Adds the fingerprint field to the care_map.
4043      # TODO(xunchang) revisit the fingerprint calculation for care_map.
4044      partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
4045      prop_name_list = ["ro.{}.build.fingerprint".format(partition),
4046                        "ro.{}.build.thumbprint".format(partition)]
4047
4048      present_props = [x for x in prop_name_list if
4049                       partition_props and partition_props.GetProp(x)]
4050      if not present_props:
4051        logger.warning(
4052            "fingerprint is not present for partition %s", partition)
4053        property_id, fingerprint = "unknown", "unknown"
4054      else:
4055        property_id = present_props[0]
4056        fingerprint = partition_props.GetProp(property_id)
4057      care_map_list += [property_id, fingerprint]
4058
4059  if not care_map_list:
4060    return
4061
4062  # Converts the list into proto buf message by calling care_map_generator; and
4063  # writes the result to a temp file.
4064  temp_care_map_text = MakeTempFile(prefix="caremap_text-",
4065                                    suffix=".txt")
4066  with open(temp_care_map_text, 'w') as text_file:
4067    text_file.write('\n'.join(care_map_list))
4068
4069  temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
4070  care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
4071  RunAndCheckOutput(care_map_gen_cmd)
4072
4073  if not isinstance(output_file, zipfile.ZipFile):
4074    shutil.copy(temp_care_map, output_file)
4075    return
4076  # output_file is a zip file
4077  care_map_path = "META/care_map.pb"
4078  if care_map_path in output_file.namelist():
4079    # Copy the temp file into the OPTIONS.input_tmp dir and update the
4080    # replace_updated_files_list used by add_img_to_target_files
4081    if not OPTIONS.replace_updated_files_list:
4082      OPTIONS.replace_updated_files_list = []
4083    shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
4084    OPTIONS.replace_updated_files_list.append(care_map_path)
4085  else:
4086    ZipWrite(output_file, temp_care_map, arcname=care_map_path)
4087
4088
4089def IsSparseImage(filepath):
4090  with open(filepath, 'rb') as fp:
4091    # Magic number for the Android sparse image format.
4092    # https://source.android.com/devices/bootloader/images
4093    return fp.read(4) == b'\x3A\xFF\x26\xED'
4094
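
# Usage sketch (editor's addition): IsSparseImage() only reads the 4-byte
# magic, so it is cheap to call before deciding whether an image needs to be
# unsparsed first; the path is hypothetical.
def _example_log_if_sparse(image_path):
  if IsSparseImage(image_path):
    logger.info("%s is an Android sparse image", image_path)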