# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import datetime
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import stat
import sys
import tempfile
import threading
import time
import zipfile

from typing import Iterable, Callable
from dataclasses import dataclass
from hashlib import sha1, sha256

import images
import sparse_img
from blockimgdiff import BlockImageDiff

logger = logging.getLogger(__name__)


@dataclass
class OptionHandler:
  extra_long_opts: Iterable[str]
  handler: Callable

class Options(object):

  def __init__(self):
    # Set up the search path, in order to find framework/ and lib64/. At the
    # time this function runs, the user-supplied search path (`--path`) isn't
    # available yet. So the value set here is the default, which might be
    # overridden by a command-line flag later.
    exec_path = os.path.realpath(sys.argv[0])
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # The logger hasn't been initialized yet at this point. Use print to
      # output warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(exec_path))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    if not os.path.exists(os.path.join(self.search_path, self.signapk_path)):
      if "ANDROID_HOST_OUT" in os.environ:
        self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx4096m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None


OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here,
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks needs to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw',
                  'recovery', 'system', 'system_ext', 'vendor', 'vendor_boot',
                  'vendor_kernel_boot', 'vendor_dlkm', 'odm_dlkm',
                  'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# avbtool argument names.
AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG = '--include_descriptors_from_image'
AVB_ARG_NAME_CHAIN_PARTITION = '--chain_partition'

# Partitions that should have their care_map added to META/care_map.pb.
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file.
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If the file is moved, add new search paths here; don't
# remove existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']


@dataclass
class AvbChainedPartitionArg:
  """The required arguments for avbtool --chain_partition."""
  partition: str
  rollback_index_location: int
  pubkey_path: str

  def to_string(self):
    """Convert to string command arguments."""
    return '{}:{}:{}'.format(
        self.partition, self.rollback_index_location, self.pubkey_path)

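# Example (illustrative, hypothetical key path): to_string() produces the
# PARTITION:ROLLBACK_INDEX_LOCATION:KEY_PATH form that avbtool's
# --chain_partition flag expects.
#
#   AvbChainedPartitionArg('vbmeta_system', 1, 'keys/system.avbpubkey').to_string()
#   # -> 'vbmeta_system:1:keys/system.avbpubkey'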

class ErrorCode(object):
  """Defines error codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (e.g. low battery, package verification failure).
  Detailed codes are in 'bootable/recovery/error_code.h'."""

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass


def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'NOTSET',
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

    # Increase the logging level for verbose mode.
    if OPTIONS.verbose:
      config = copy.deepcopy(config)
      config['handlers']['default']['level'] = 'INFO'

    if OPTIONS.logfile:
      config = copy.deepcopy(config)
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)

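# Example (illustrative, hypothetical file): LOGGING_CONFIG may point at a
# JSON file in logging.config.dictConfig format, which then replaces the
# defaults entirely (the verbose/logfile tweaks above apply only to the
# built-in config):
#
#   LOGGING_CONFIG=/tmp/logging.json ota_from_target_files ...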

def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find.
  Returns:
    Path to the tool if it is found in the same directory as this binary;
    otherwise tool_name is returned unchanged.
  """
  my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  tool_path = os.path.join(my_dir, tool_name)
  if os.path.exists(tool_path):
    return tool_path

  return tool_name

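# Example (illustrative): when this script ships inside an otatools bundle
# next to the prebuilt host tools, the sibling binary wins; otherwise the
# bare name is returned for a normal $PATH lookup.
#
#   FindHostToolPath('avbtool')  # -> '<dir of this script>/avbtool' or 'avbtool'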

def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as
        env, stdin, etc. stdout and stderr will default to subprocess.PIPE
        and subprocess.STDOUT respectively unless the caller specifies any
        of them. universal_newlines will default to True, as most of the
        users in releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  if 'universal_newlines' not in kwargs:
    kwargs['universal_newlines'] = True

  if args:
    # Make a copy of args in case the caller relies on its content later.
    args = args[:]
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log anything if the caller explicitly disabled verbosity.
  if verbose:
    logger.info("  Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)

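# Example (illustrative, hypothetical command): by default stdout and stderr
# are merged into a single text stream on proc.stdout.
#
#   proc = Run(['lpmake', '--help'])
#   combined_output, _ = proc.communicate()  # second element is None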

def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as
        env, stdin, etc. stdout and stderr will default to subprocess.PIPE
        and subprocess.STDOUT respectively unless the caller specifies any
        of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  if verbose is None:
    verbose = OPTIONS.verbose
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  if output is None:
    output = ""
  # Don't log anything if the caller explicitly disabled verbosity.
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output

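# Example (illustrative, hypothetical invocation): a non-zero exit status
# raises ExternalError, with the command's merged output in the message.
#
#   try:
#     version = RunAndCheckOutput(['avbtool', 'version'])
#   except ExternalError as e:
#     logger.error("avbtool failed or is missing: %s", e)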

def RoundUpTo4K(value):
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)

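# Examples (illustrative): sizes are rounded up to the 4096-byte BLOCK_SIZE
# used across releasetools.
#
#   RoundUpTo4K(1)     # -> 4096
#   RoundUpTo4K(4096)  # -> 4096
#   RoundUpTo4K(4097)  # -> 8192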

def CloseInheritedPipes():
  """GNU make on macOS leaks file descriptors (pipes). Close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      fd_stat = os.fstat(d)
      if fd_stat is not None:
        # 0x1000 is S_IFIFO, the FIFO (pipe) bit in st_mode.
        pipebit = fd_stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target
  build. It abstracts away the logic of handling OEM-specific properties, and
  caches commonly used properties such as the fingerprint.

  There are two types of info dicts: a) the build-time info dict, which is
  generated at build time (i.e. included in a target_files zip); b) the OEM
  info dict, which is specified at package generation time (via the command
  line argument '--oem_settings'). If a build doesn't use OEM-specific
  properties (i.e. it doesn't have "oem_fingerprint_properties" in the
  build-time info dict), all queries are answered based on the build-time
  info dict only. Otherwise, if using OEM-specific properties, some of them
  will be calculated from the two info dicts.

  Users can query properties much like using a dict() (e.g. info['fstab']),
  or query build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts;
        None if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated
        based on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The number of characters of the vbmeta digest to append to the
  # fingerprint.
  _VBMETA_DIGEST_SIZE_USED = 8

  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings).
          Note that it always uses the first dict to calculate the
          fingerprint or the device name. The rest are used for asserting
          OEM properties only (e.g. one package can be installed on one of
          these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint.
          This is used when we need a BuildInfo class while the vbmeta
          digest is unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None, so that BuildInfo can be used in
    # sign_target_files_apks.
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android '
            'CDD 3.2.2. Build Parameters.'.format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    return self._is_ab

  @property
  def device(self):
    return self._device

  @property
  def fingerprint(self):
    return self._fingerprint

  @property
  def is_vabc(self):
    return self.info_dict.get("virtual_ab_compression") == "true"

  @property
  def is_android_r(self):
    system_prop = self.info_dict.get("system.build.prop")
    return system_prop and system_prop.GetProp("ro.build.version.release") == "11"

  @property
  def is_release_key(self):
    system_prop = self.info_dict.get("build.prop")
    return system_prop and system_prop.GetProp("ro.build.tags") == "release-key"

  @property
  def vabc_compression_param(self):
    return self.get("virtual_ab_compression_method", "")

  @property
  def vabc_cow_version(self):
    return self.get("virtual_ab_cow_version", "")

  @property
  def vendor_api_level(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    if not vendor_prop:
      return -1

    props = [
        "ro.board.first_api_level",
        "ro.product.first_api_level",
    ]
    for prop in props:
      value = vendor_prop.GetProp(prop)
      try:
        return int(value)
      except (TypeError, ValueError):
        # The property is missing or not an integer; try the next one.
        pass
    return -1

  @property
  def is_vabc_xor(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled

  @property
  def vendor_suppressed_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition."""

    # The boot and init_boot images use ro.[product.]bootimage instead of
    # boot. This comes from the generic ramdisk.
    prop_partition = "bootimage" if partition in (
        "boot", "init_boot") else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the inquired build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the inquired ro.product.* build property."""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid.
    if any(x not in default_source_order for x in source_order):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding errors.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except (TypeError, ValueError):
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # The init code switches to version_release_or_codename (see b/158483506).
    # After API finalization, release_or_codename will be the same as release.
    # This is a best effort to support pre-S dev-stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of the vbmeta digest to the existing build id.
    # The logic needs to match the one in init, so that OTA packages can be
    # delivered correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("Vbmeta digest isn't provided when using legacy "
                          "build id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))

  def WriteMountOemScript(self, script):
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)


def DoesInputFileContain(input_file, fn):
  """Checks whether the input target_files.zip contains an entry `fn`."""
  if isinstance(input_file, zipfile.ZipFile):
    return fn in input_file.namelist()
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return fn in zfp.namelist()
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to "
          ".zip file on disk, or path to extracted directory. Actual: " +
          input_file)
    path = os.path.join(input_file, *fn.split("/"))
    return os.path.exists(path)


def ReadBytesFromInputFile(input_file, fn):
  """Reads the bytes of fn from the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return zfp.read(fn)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to "
          ".zip file on disk, or path to extracted directory. Actual: " +
          input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "rb") as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Don't silently swallow other I/O errors.
      raise


def ReadFromInputFile(input_file, fn):
  """Reads the str contents of fn from the input zipfile or directory."""
  return ReadBytesFromInputFile(input_file, fn).decode()


def WriteBytesToInputFile(input_file, fn, data):
  """Writes the bytes |data| to fn of the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    with input_file.open(fn, "w") as entry_fp:
      return entry_fp.write(data)
  elif zipfile.is_zipfile(input_file):
    # Open in append mode; writing an entry requires a writable archive.
    with zipfile.ZipFile(input_file, "a", allowZip64=True) as zfp:
      with zfp.open(fn, "w") as entry_fp:
        return entry_fp.write(data)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to "
          ".zip file on disk, or path to extracted directory. Actual: " +
          input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "wb") as f:
        return f.write(data)
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Don't silently swallow other I/O errors.
      raise


def WriteToInputFile(input_file, fn, data: str):
  """Writes str content to fn of the input file or directory."""
  return WriteBytesToInputFile(input_file, fn, data.encode())


def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from the input zipfile or directory into a
  file."""
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as f:
      f.write(input_file.read(fn))
    return tmp_file
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      tmp_file = MakeTempFile(os.path.basename(fn))
      with open(tmp_file, "wb") as fp:
        fp.write(zfp.read(fn))
      return tmp_file
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to "
          ".zip file on disk, or path to extracted directory. Actual: " +
          input_file)
    file = os.path.join(input_file, *fn.split("/"))
    if not os.path.exists(file):
      raise KeyError(fn)
    return file

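# Example (illustrative, hypothetical path): every *InputFile helper above
# accepts an open zipfile.ZipFile, a path to a .zip on disk, or a path to an
# extracted target_files directory, interchangeably.
#
#   with zipfile.ZipFile('target_files.zip') as zfp:
#     if DoesInputFileContain(zfp, 'META/misc_info.txt'):
#       misc_info = ReadFromInputFile(zfp, 'META/misc_info.txt')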

class RamdiskFormat(object):
  LZ4 = 1
  GZ = 2


def GetRamdiskFormat(info_dict):
  if info_dict.get('lz4_ramdisks') == 'true':
    ramdisk_format = RamdiskFormat.LZ4
  else:
    ramdisk_format = RamdiskFormat.GZ
  return ramdisk_format

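# Examples (illustrative):
#
#   GetRamdiskFormat({'lz4_ramdisks': 'true'})   # -> RamdiskFormat.LZ4
#   GetRamdiskFormat({})                         # -> RamdiskFormat.GZ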

def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads the `META/misc_info.txt` file in the target_files input, does
  validation checks and returns the parsed key/value pairs for the given
  build. It's usually called early when working on input target_files files,
  e.g. when generating OTAs, or signing builds. Note that the function may be
  called against an old target_files file (i.e. from past dessert releases),
  so the property parsing needs to be backward compatible.

  In `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. That works fine with the build system, but
  they are no longer available when (re)generating images from a target_files
  zip. When `repacking` is True, redirect these properties to the actual
  files in the unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the
        files unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after
        loading the info dict (default: False). If so, it will rewrite a few
        loaded properties (e.g. selinux_fc, root_dir) to point to the actual
        files in the target_files file. When doing repacking, `input_file`
        must be a dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir" and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in ["system", "vendor", "system_ext", "product", "odm",
                      "vendor_dlkm", "odm_dlkm", "system_dlkm"]:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load the recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = GetRamdiskFormat(d)

  # Try to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  d["build.prop"] = d["system.build.prop"]

  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d, use_legacy_id=True)
    # Set up the salt for partitions without a build.prop.
    if build_info.fingerprint:
      if "fingerprint" not in d:
        d["fingerprint"] = build_info.fingerprint
      if "avb_salt" not in d:
        d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest()
    # Set the vbmeta digest if it exists.
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d


def LoadListFromFile(file_path):
  with open(file_path) as f:
    return f.read().splitlines()


def LoadDictionaryFromFile(file_path):
  lines = LoadListFromFile(file_path)
  return LoadDictionaryFromLines(lines)


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d

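# Examples (illustrative): comments and blank lines are skipped; values are
# split on the first '=' only.
#
#   LoadDictionaryFromLines(['# comment', '', 'ro.build.id=ABC', 'a=b=c'])
#   # -> {'ro.build.id': 'ABC', 'a': 'b=c'}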

class PartitionBuildProps(object):
  """The class holds the build props of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop; and calculates alternative values of some specific build
  properties during runtime.

  Attributes:
    input_file: a zipped target-files archive or an unzipped target-files
        directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: A dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
    ramdisk_format: If name is "boot", the format of the ramdisk inside the
        boot image. Otherwise, its value is ignored.
        Use lz4 to decompress by default. If its value is gzip, use gzip.
  """

  def __init__(self, input_file, name, placeholder_values=None):
    self.input_file = input_file
    self.partition = name
    self.props_allow_override = [props.format(name) for props in [
        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
    self.build_props = {}
    self.prop_overrides = set()
    self.placeholder_values = {}
    if placeholder_values:
      self.placeholder_values = copy.deepcopy(placeholder_values)

  @staticmethod
  def FromDictionary(name, build_props):
    """Constructs an instance from a build prop dictionary."""

    props = PartitionBuildProps("unknown", name)
    props.build_props = build_props.copy()
    return props

  @staticmethod
  def FromInputFile(input_file, name, placeholder_values=None,
                    ramdisk_format=RamdiskFormat.LZ4):
    """Loads the build.prop file and builds the attributes."""

    if name in ("boot", "init_boot"):
      data = PartitionBuildProps._ReadBootPropFile(
          input_file, name, ramdisk_format=ramdisk_format)
    else:
      data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

    props = PartitionBuildProps(input_file, name, placeholder_values)
    props._LoadBuildProp(data)
    return props

  @staticmethod
  def _ReadBootPropFile(input_file, partition_name, ramdisk_format):
    """
    Reads build.prop for the boot image from input_file.
    Returns an empty string if not found.
    """
    image_path = 'IMAGES/' + partition_name + '.img'
    try:
      boot_img = ExtractFromInputFile(input_file, image_path)
    except KeyError:
      logger.warning('Failed to read %s', image_path)
      return ''
    prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
    if prop_file is None:
      return ''
    with open(prop_file, "r") as f:
      return f.read()

  @staticmethod
  def _ReadPartitionPropFile(input_file, name):
    """
    Reads build.prop for the partition `name` from input_file.
    Returns an empty string if not found.
    """
    data = ''
    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
                      '{}/build.prop'.format(name.upper())]:
      try:
        data = ReadFromInputFile(input_file, prop_file)
        break
      except KeyError:
        logger.warning('Failed to read %s', prop_file)
    if data == '':
      logger.warning("Failed to read build.prop for partition %s", name)
    return data

  @staticmethod
  def FromBuildPropFile(name, build_prop_file):
    """Constructs an instance from a build prop file."""

    props = PartitionBuildProps("unknown", name)
    with open(build_prop_file) as f:
      props._LoadBuildProp(f.read())
    return props

  def _LoadBuildProp(self, data):
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after overridden by import '
                           'statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      logger.warning('Unrecognized import path {}'.format(line))
      return {}

    # We only recognize a subset of the import statements that the init
    # process supports, and we can loosen the restriction based on how the
    # dynamic fingerprint is used in practice. The placeholder format should
    # be ${placeholder}, and its value should be provided by the caller
    # through placeholder_values.
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved placeholder in import path %s', import_path)
      return {}

    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}

  def __getstate__(self):
    state = self.__dict__.copy()
    # ZipFile objects aren't picklable; replace the handle with its filename.
    if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile):
      state["input_file"] = state["input_file"].filename
    return state

  def GetProp(self, prop):
    return self.build_props.get(prop)

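# Example (illustrative):
#
#   props = PartitionBuildProps.FromDictionary(
#       'system', {'ro.product.system.device': 'generic'})
#   props.GetProp('ro.product.system.device')   # -> 'generic'
#   props.GetProp('ro.missing.prop')            # -> None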

def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context,
                 slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    slotselect = False
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      elif i == "slotselect":
        slotselect = True
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length,
                               context=context, slotselect=slotselect)

  return d

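# Example (illustrative, hypothetical fstab entry): a v2 line such as
#
#   /dev/block/by-name/misc  /misc  emmc  defaults  defaults
#
# is parsed into d['/misc'] with fs_type='emmc', length=0, context=None and
# slotselect=False; voldmanaged entries are skipped entirely.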

def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
  """Finds the path to the recovery fstab and loads its contents."""
  # The recovery fstab is only meaningful when installing an update via
  # recovery (i.e. non-A/B OTA). Skip loading the fstab if the device uses
  # A/B OTA.
  if info_dict.get('ab_update') == 'true' and \
     info_dict.get("allow_non_ab") != "true":
    return None

  # We changed the recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab
  # to ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
  # cases, since it may load the info_dict from an old build (e.g. when
  # generating incremental OTAs from that build).
  if info_dict.get('no_recovery') != 'true':
    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
    if not DoesInputFileContain(input_file, recovery_fstab_path):
      recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path)

  if info_dict.get('recovery_as_boot') == 'true':
    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
    if not DoesInputFileContain(input_file, recovery_fstab_path):
      recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path)

  return None


def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)

def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from
      the partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.
  """

  def uniq_concat(a, b):
    combined = set(a.split())
    combined.update(set(b.split()))
    combined = [item.strip() for item in combined if item.strip()]
    return " ".join(sorted(combined))

  if (framework_dict.get("use_dynamic_partitions") !=
          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

  merged_dict = {"use_dynamic_partitions": "true"}
  # For key-value pairs that are the same, copy them to the merged dict.
  for key in vendor_dict.keys():
    if key in framework_dict and framework_dict[key] == vendor_dict[key]:
      merged_dict[key] = vendor_dict[key]

  merged_dict["dynamic_partition_list"] = uniq_concat(
      framework_dict.get("dynamic_partition_list", ""),
      vendor_dict.get("dynamic_partition_list", ""))

  # Super block devices are defined by the vendor dict.
  if "super_block_devices" in vendor_dict:
    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
    for block_device in merged_dict["super_block_devices"].split():
      key = "super_%s_device_size" % block_device
      if key not in vendor_dict:
        raise ValueError("Vendor dict does not contain required key %s." % key)
      merged_dict[key] = vendor_dict[key]

  # Partition groups and group sizes are defined by the vendor dict because
  # these values may vary for each board that uses a shared system image.
  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
  for partition_group in merged_dict["super_partition_groups"].split():
    # Set the partition group's size using the value from the vendor dict.
    key = "super_%s_group_size" % partition_group
    if key not in vendor_dict:
      raise ValueError("Vendor dict does not contain required key %s." % key)
    merged_dict[key] = vendor_dict[key]

    # Set the partition group's partition list using a concatenation of the
    # framework and vendor partition lists.
    key = "super_%s_partition_list" % partition_group
    merged_dict[key] = uniq_concat(
        framework_dict.get(key, ""), vendor_dict.get(key, ""))

  # If vendor is on an S build but is taking a v3 -> v3 VABC OTA, we want to
  # fall back to v2.
  if ("vabc_cow_version" not in vendor_dict or
          "vabc_cow_version" not in framework_dict):
    merged_dict["vabc_cow_version"] = '2'
  else:
    # Note: the versions are strings; single-digit versions compare correctly
    # with min().
    merged_dict["vabc_cow_version"] = min(vendor_dict["vabc_cow_version"],
                                          framework_dict["vabc_cow_version"])
  # Various other flags should be copied from the vendor dict, if defined.
  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
              "super_metadata_device", "super_partition_error_limit",
              "super_partition_size"):
    if key in vendor_dict.keys():
      merged_dict[key] = vendor_dict[key]

  return merged_dict

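# Example (illustrative): dynamic_partition_list values are merged as a
# sorted, de-duplicated concatenation.
#
#   framework: dynamic_partition_list = 'system product'
#   vendor:    dynamic_partition_list = 'vendor product'
#   merged:    dynamic_partition_list = 'product system vendor'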

def PartitionMapFromTargetFiles(target_files_dir):
  """Builds a map from partition -> path within an extracted target files
  directory."""
  # Keep possible_subdirs in sync with build/make/core/board_config.mk.
  possible_subdirs = {
      "system": ["SYSTEM"],
      "vendor": ["VENDOR", "SYSTEM/vendor"],
      "product": ["PRODUCT", "SYSTEM/product"],
      "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
      "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
      "vendor_dlkm": [
          "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
      "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"],
  }
  partition_map = {}
  for partition, subdirs in possible_subdirs.items():
    for subdir in subdirs:
      if os.path.exists(os.path.join(target_files_dir, subdir)):
        partition_map[partition] = subdir
        break
  return partition_map

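# Example (illustrative, hypothetical layout): for an extracted target_files
# directory with top-level SYSTEM/ and VENDOR/ subdirectories,
#
#   PartitionMapFromTargetFiles('/tmp/extracted_tf')
#   # -> {'system': 'SYSTEM', 'vendor': 'VENDOR'}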

def SharedUidPartitionViolations(uid_dict, partition_groups):
  """Checks for APK sharedUserIds that cross partition group boundaries.

  This uses a single or merged build's shareduid_violation_modules.json
  output file, as generated by find_shareduid_violation.py or
  core/tasks/find-shareduid-violation.mk.

  An error is defined as a sharedUserId that is found in a set of partitions
  that span more than one partition group.

  Args:
    uid_dict: A dictionary created by using the standard json module to read
      a complete shareduid_violation_modules.json file.
    partition_groups: A list of groups, where each group is a list of
      partitions.

  Returns:
    A list of error messages.
  """
  errors = []
  for uid, partitions in uid_dict.items():
    found_in_groups = [
        group for group in partition_groups
        if set(partitions.keys()) & set(group)
    ]
    if len(found_in_groups) > 1:
      errors.append(
          "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
          % (uid, ",".join(sorted(partitions.keys()))))
  return errors

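# Example (illustrative, hypothetical data): a sharedUserId that appears in
# partitions from two different groups produces one error.
#
#   uid_dict = {'android.uid.foo': {'system': ['AppA'], 'vendor': ['AppB']}}
#   SharedUidPartitionViolations(uid_dict, [['system'], ['vendor']])
#   # -> ['APK sharedUserId "android.uid.foo" found across partition groups
#   #     in partitions "system,vendor"']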

def RunHostInitVerifier(product_out, partition_map):
  """Runs host_init_verifier on the init rc files within partitions.

  host_init_verifier searches the etc/init path within each partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within
        product_out.
  """
  allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
  cmd = ["host_init_verifier"]
  for partition, path in partition_map.items():
    if partition not in allowed_partitions:
      raise ExternalError("Unable to call host_init_verifier for partition %s"
                          % partition)
    cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)])
    # Add --property-contexts if the file exists on the partition.
    property_contexts = "%s_property_contexts" % (
        "plat" if partition == "system" else partition)
    property_contexts_path = os.path.join(product_out, path, "etc", "selinux",
                                          property_contexts)
    if os.path.exists(property_contexts_path):
      cmd.append("--property-contexts=%s" % property_contexts_path)
    # Add the passwd file if the file exists on the partition.
    passwd_path = os.path.join(product_out, path, "etc", "passwd")
    if os.path.exists(passwd_path):
      cmd.extend(["-p", passwd_path])
  return RunAndCheckOutput(cmd)


def AppendAVBSigningArgs(cmd, partition, avb_salt=None):
  """Appends signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = ResolveAVBSigningPathArgs(
      OPTIONS.info_dict.get("avb_" + partition + "_key_path"))
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd.extend(["--key", key_path, "--algorithm", algorithm])
  if avb_salt is None:
    avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd.extend(["--salt", avb_salt])

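# Example (illustrative, hypothetical info_dict values): with
# avb_boot_key_path and avb_boot_algorithm set in OPTIONS.info_dict,
#
#   cmd = ['avbtool', 'add_hash_footer']
#   AppendAVBSigningArgs(cmd, 'boot')
#
# extends cmd with ['--key', <resolved key path>, '--algorithm',
# 'SHA256_RSA4096', '--salt', <avb_salt>]; vbmeta* partitions skip --salt.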

def ResolveAVBSigningPathArgs(split_args):

  def ResolveBinaryPath(path):
    if os.path.exists(path):
      return path
    if OPTIONS.search_path:
      new_path = os.path.join(OPTIONS.search_path, path)
      if os.path.exists(new_path):
        return new_path
    raise ExternalError(
        "Failed to find {}".format(path))

  if not split_args:
    return split_args

  if isinstance(split_args, list):
    for index, arg in enumerate(split_args[:-1]):
      if arg == '--signing_helper':
        signing_helper_path = split_args[index + 1]
        split_args[index + 1] = ResolveBinaryPath(signing_helper_path)
        break
  elif isinstance(split_args, str):
    split_args = ResolveBinaryPath(split_args)

  return split_args

1464
1465def GetAvbPartitionArg(partition, image, info_dict=None):
1466  """Returns the VBMeta arguments for one partition.
1467
1468  It sets up the VBMeta argument by including the partition descriptor from the
1469  given 'image', or by configuring the partition as a chained partition.
1470
1471  Args:
1472    partition: The name of the partition (e.g. "system").
1473    image: The path to the partition image.
1474    info_dict: A dict returned by common.LoadInfoDict(). Will use
1475        OPTIONS.info_dict if None has been given.
1476
1477  Returns:
1478    A list of VBMeta arguments for one partition.
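
  Example:
    For a partition without a chaining key, this returns (using this module's
    constants):

      [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, "/path/to/system.img"]

    while a chained partition yields [AVB_ARG_NAME_CHAIN_PARTITION, arg],
    where arg comes from GetAvbChainedPartitionArg().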
1479  """
1480  if info_dict is None:
1481    info_dict = OPTIONS.info_dict
1482
1483  # Check if chain partition is used.
1484  key_path = info_dict.get("avb_" + partition + "_key_path")
1485  if not key_path:
1486    return [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]
1487
1488  # For a non-A/B device, we don't chain /recovery nor include its descriptor
1489  # into vbmeta.img. The recovery image will be configured on an independent
1490  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
1491  # See details at
1492  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
1493  if info_dict.get("ab_update") != "true" and partition == "recovery":
1494    return []
1495
1496  # Otherwise chain the partition into vbmeta.
1497  chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
1498  return [AVB_ARG_NAME_CHAIN_PARTITION, chained_partition_arg]
1499
1500
1501def GetAvbPartitionsArg(partitions,
1502                        resolve_rollback_index_location_conflict=False,
1503                        info_dict=None):
1504  """Returns the VBMeta arguments for all AVB partitions.
1505
1506  It sets up the VBMeta argument by calling GetAvbPartitionArg of all
1507  partitions.
1508
1509  Args:
1510    partitions: A dict of all AVB partitions.
1511    resolve_rollback_index_location_conflict: If true, resolve conflicting avb
1512        rollback index locations by assigning the smallest unused value.
1513    info_dict: A dict returned by common.LoadInfoDict().
1514
1515  Returns:
1516    A list of VBMeta arguments for all partitions.
1517  """
1518  # An AVB partition will be linked into a vbmeta partition by either
1519  # AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG or AVB_ARG_NAME_CHAIN_PARTITION, there
1520  # should be no other cases.
1521  valid_args = {
1522      AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: [],
1523      AVB_ARG_NAME_CHAIN_PARTITION: []
1524  }
1525
1526  for partition, path in sorted(partitions.items()):
1527    avb_partition_arg = GetAvbPartitionArg(partition, path, info_dict)
1528    if not avb_partition_arg:
1529      continue
1530    arg_name, arg_value = avb_partition_arg
1531    assert arg_name in valid_args
1532    valid_args[arg_name].append(arg_value)
1533
1534  # Copy the arguments for non-chained AVB partitions directly without
1535  # intervention.
1536  avb_args = []
1537  for image in valid_args[AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG]:
1538    avb_args.extend([AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image])
1539
1540  # Handle chained AVB partitions. The rollback index location might be
1541  # adjusted if two partitions use the same value. This may happen when mixing
1542  # a shared system image with other vendor images.
1543  used_index_loc = set()
1544  for chained_partition_arg in valid_args[AVB_ARG_NAME_CHAIN_PARTITION]:
1545    if resolve_rollback_index_location_conflict:
1546      while chained_partition_arg.rollback_index_location in used_index_loc:
1547        chained_partition_arg.rollback_index_location += 1
1548
1549    used_index_loc.add(chained_partition_arg.rollback_index_location)
1550    avb_args.extend([AVB_ARG_NAME_CHAIN_PARTITION,
1551                     chained_partition_arg.to_string()])
1552
1553  return avb_args
1554
1555
1556def GetAvbChainedPartitionArg(partition, info_dict, key=None):
1557  """Constructs and returns the arg to build or verify a chained partition.
1558
1559  Args:
1560    partition: The partition name.
1561    info_dict: The info dict to look up the key info and rollback index
1562        location.
1563    key: The key to be used for building or verifying the partition. Defaults to
1564        the key listed in info_dict.
1565
1566  Returns:
1567    An AvbChainedPartitionArg object with rollback_index_location and
1568    pubkey_path that can be used to build or verify vbmeta image.
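
  Example:
    GetAvbChainedPartitionArg("system", OPTIONS.info_dict) returns an object
    whose to_string() renders roughly as
    "system:<rollback_index_location>:<pubkey_path>", the format avbtool
    expects for --chain_partition.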
1569  """
1570  if key is None:
1571    key = info_dict["avb_" + partition + "_key_path"]
1572  key = ResolveAVBSigningPathArgs(key)
1573  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
1574  rollback_index_location = info_dict[
1575      "avb_" + partition + "_rollback_index_location"]
1576  return AvbChainedPartitionArg(
1577      partition=partition,
1578      rollback_index_location=int(rollback_index_location),
1579      pubkey_path=pubkey_path)
1580
1581
1582def BuildVBMeta(image_path, partitions, name, needed_partitions,
1583                resolve_rollback_index_location_conflict=False):
1584  """Creates a VBMeta image.
1585
1586  It generates the requested VBMeta image. The requested image could be for
1587  top-level or chained VBMeta image, which is determined based on the name.
1588
1589  Args:
1590    image_path: The output path for the new VBMeta image.
1591    partitions: A dict that's keyed by partition names with image paths as
1592        values. Only valid partition names are accepted, as partitions listed
1593        in common.AVB_PARTITIONS and custom partitions listed in
1594        OPTIONS.info_dict.get("avb_custom_images_partition_list")
1595    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
1596    needed_partitions: Partitions whose descriptors should be included into the
1597        generated VBMeta image.
1598    resolve_rollback_index_location_conflict: If true, resolve conflicting avb
1599        rollback index locations by assigning the smallest unused value.
1600
1601  Raises:
1602    AssertionError: On invalid input args.
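
  Example:
    A hypothetical call building a chained vbmeta_system image:

      BuildVBMeta("/tmp/vbmeta_system.img",
                  {"system": "/tmp/system.img", "product": "/tmp/product.img"},
                  "vbmeta_system", ["system", "product"])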
1603  """
1604  avbtool = OPTIONS.info_dict["avb_avbtool"]
1605  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
1606  AppendAVBSigningArgs(cmd, name)
1607
1608  custom_partitions = OPTIONS.info_dict.get(
1609      "avb_custom_images_partition_list", "").strip().split()
1610  custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get(
1611      "avb_custom_vbmeta_images_partition_list", "").strip().split()]
1612
1613  avb_partitions = {}
1614  for partition, path in sorted(partitions.items()):
1615    if partition not in needed_partitions:
1616      continue
1617    assert (partition in AVB_PARTITIONS or
1618            partition in AVB_VBMETA_PARTITIONS or
1619            partition in custom_avb_partitions or
1620            partition in custom_partitions), \
1621        'Unknown partition: {}'.format(partition)
1622    assert os.path.exists(path), \
1623        'Failed to find {} for {}'.format(path, partition)
1624    avb_partitions[partition] = path
1625  cmd.extend(GetAvbPartitionsArg(avb_partitions,
1626                                 resolve_rollback_index_location_conflict))
1627
1628  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
1629  if args and args.strip():
1630    split_args = shlex.split(args)
1631    for index, arg in enumerate(split_args[:-1]):
1632      # Check that the image file exists. Some images might be defined
1633      # as a path relative to source tree, which may not be available at the
1634      # same location when running this script (we have the input target_files
1635      # zip only). For such cases, we additionally scan other locations (e.g.
1636      # IMAGES/, RADIO/, etc) before bailing out.
1637      if arg == AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG:
1638        chained_image = split_args[index + 1]
1639        if os.path.exists(chained_image):
1640          continue
1641        found = False
1642        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
1643          alt_path = os.path.join(
1644              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
1645          if os.path.exists(alt_path):
1646            split_args[index + 1] = alt_path
1647            found = True
1648            break
1649        assert found, 'Failed to find {}'.format(chained_image)
1650
1651    split_args = ResolveAVBSigningPathArgs(split_args)
1652    cmd.extend(split_args)
1653
1654  RunAndCheckOutput(cmd)
1655
1656
1657def _MakeRamdisk(sourcedir, fs_config_file=None,
1658                 dev_node_file=None,
1659                 ramdisk_format=RamdiskFormat.GZ):
  """Creates a compressed ramdisk image from 'sourcedir'/RAMDISK via mkbootfs.

  Returns a NamedTemporaryFile holding the ramdisk image; the caller is
  responsible for closing it.
  """
  ramdisk_img = tempfile.NamedTemporaryFile()
1661
1662  cmd = ["mkbootfs"]
1663
1664  if fs_config_file and os.access(fs_config_file, os.F_OK):
1665    cmd.extend(["-f", fs_config_file])
1666
1667  if dev_node_file and os.access(dev_node_file, os.F_OK):
1668    cmd.extend(["-n", dev_node_file])
1669
1670  cmd.append(os.path.join(sourcedir, "RAMDISK"))
1671
1672  p1 = Run(cmd, stdout=subprocess.PIPE)
1673  if ramdisk_format == RamdiskFormat.LZ4:
1674    p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
1675             stdout=ramdisk_img.file.fileno())
1676  elif ramdisk_format == RamdiskFormat.GZ:
1677    p2 = Run(["gzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
1678  else:
1679    raise ValueError("Only support lz4 or gzip ramdisk format.")
1680
1681  p2.wait()
1682  p1.wait()
1683  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
1684  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)
1685
1686  return ramdisk_img
1687
1688
1689def _BuildBootableImage(image_name, sourcedir, fs_config_file,
1690                        dev_node_file=None, info_dict=None,
1691                        has_ramdisk=False, two_step_image=False):
1692  """Build a bootable image from the specified sourcedir.
1693
1694  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
1695  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
1696  we are building a two-step special image (i.e. building a recovery image to
1697  be loaded into /boot in two-step OTAs).
1698
  Return the image data, or None if sourcedir does not appear to contain the
  files for building the requested image.
1701  """
1702
1703  if info_dict is None:
1704    info_dict = OPTIONS.info_dict
1705
1706  # "boot" or "recovery", without extension.
1707  partition_name = os.path.basename(sourcedir).lower()
1708
1709  kernel = None
1710  if partition_name == "recovery":
1711    if info_dict.get("exclude_kernel_from_recovery_image") == "true":
1712      logger.info("Excluded kernel binary from recovery image.")
1713    else:
1714      kernel = "kernel"
1715  elif partition_name == "init_boot":
1716    pass
1717  else:
1718    kernel = image_name.replace("boot", "kernel")
1719    kernel = kernel.replace(".img", "")
1720  if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
1721    return None
1722
1723  kernel_path = os.path.join(sourcedir, kernel) if kernel else None
1724
1725  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
1726    return None
1727
1728  img = tempfile.NamedTemporaryFile()
1729
1730  if has_ramdisk:
1731    ramdisk_format = GetRamdiskFormat(info_dict)
1732    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, dev_node_file,
1733                               ramdisk_format=ramdisk_format)
1734
1735  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1736  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1737
1738  cmd = [mkbootimg]
1739  if kernel_path is not None:
1740    cmd.extend(["--kernel", kernel_path])
1741
1742  fn = os.path.join(sourcedir, "second")
1743  if os.access(fn, os.F_OK):
1744    cmd.append("--second")
1745    cmd.append(fn)
1746
1747  fn = os.path.join(sourcedir, "dtb")
1748  if os.access(fn, os.F_OK):
1749    cmd.append("--dtb")
1750    cmd.append(fn)
1751
1752  fn = os.path.join(sourcedir, "cmdline")
1753  if os.access(fn, os.F_OK):
1754    cmd.append("--cmdline")
1755    cmd.append(open(fn).read().rstrip("\n"))
1756
1757  fn = os.path.join(sourcedir, "base")
1758  if os.access(fn, os.F_OK):
1759    cmd.append("--base")
1760    cmd.append(open(fn).read().rstrip("\n"))
1761
1762  fn = os.path.join(sourcedir, "pagesize")
1763  if os.access(fn, os.F_OK):
1764    cmd.append("--pagesize")
1765    cmd.append(open(fn).read().rstrip("\n"))
1766
1767  if partition_name == "recovery":
1768    args = info_dict.get("recovery_mkbootimg_args")
1769    if not args:
1770      # Fall back to "mkbootimg_args" for recovery image
1771      # in case "recovery_mkbootimg_args" is not set.
1772      args = info_dict.get("mkbootimg_args")
1773  elif partition_name == "init_boot":
1774    args = info_dict.get("mkbootimg_init_args")
1775  else:
1776    args = info_dict.get("mkbootimg_args")
1777  if args and args.strip():
1778    cmd.extend(shlex.split(args))
1779
1780  args = info_dict.get("mkbootimg_version_args")
1781  if args and args.strip():
1782    cmd.extend(shlex.split(args))
1783
1784  if has_ramdisk:
1785    cmd.extend(["--ramdisk", ramdisk_img.name])
1786
1787  img_unsigned = None
1788  if info_dict.get("vboot"):
1789    img_unsigned = tempfile.NamedTemporaryFile()
1790    cmd.extend(["--output", img_unsigned.name])
1791  else:
1792    cmd.extend(["--output", img.name])
1793
1794  if partition_name == "recovery":
1795    if info_dict.get("include_recovery_dtbo") == "true":
1796      fn = os.path.join(sourcedir, "recovery_dtbo")
1797      cmd.extend(["--recovery_dtbo", fn])
1798    if info_dict.get("include_recovery_acpio") == "true":
1799      fn = os.path.join(sourcedir, "recovery_acpio")
1800      cmd.extend(["--recovery_acpio", fn])
1801
1802  RunAndCheckOutput(cmd)
1803
1804  # Sign the image if vboot is non-empty.
1805  if info_dict.get("vboot"):
1806    path = "/" + partition_name
1807    img_keyblock = tempfile.NamedTemporaryFile()
1808    # We have switched from the prebuilt futility binary to using the tool
1809    # (futility-host) built from the source. Override the setting in the old
1810    # TF.zip.
1811    futility = info_dict["futility"]
1812    if futility.startswith("prebuilts/"):
1813      futility = "futility-host"
1814    cmd = [info_dict["vboot_signer_cmd"], futility,
1815           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
1816           info_dict["vboot_key"] + ".vbprivk",
1817           info_dict["vboot_subkey"] + ".vbprivk",
1818           img_keyblock.name,
1819           img.name]
1820    RunAndCheckOutput(cmd)
1821
1822    # Clean up the temp files.
1823    img_unsigned.close()
1824    img_keyblock.close()
1825
1826  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1827  if info_dict.get("avb_enable") == "true":
1828    avbtool = info_dict["avb_avbtool"]
1829    if partition_name == "recovery":
1830      part_size = info_dict["recovery_size"]
1831    else:
1832      part_size = info_dict[image_name.replace(".img", "_size")]
1833    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1834           "--partition_size", str(part_size), "--partition_name",
1835           partition_name]
1836    salt = None
1837    if kernel_path is not None:
1838      with open(kernel_path, "rb") as fp:
1839        salt = sha256(fp.read()).hexdigest()
1840    AppendAVBSigningArgs(cmd, partition_name, salt)
1841    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1842    if args and args.strip():
1843      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
1844      cmd.extend(split_args)
1845    RunAndCheckOutput(cmd)
1846
  # Rewind to the beginning of the image before reading it back. Note that
  # seek() takes (offset, whence); the original argument order only worked
  # because os.SEEK_SET happens to be 0.
  img.seek(0, os.SEEK_SET)
1848  data = img.read()
1849
1850  if has_ramdisk:
1851    ramdisk_img.close()
1852  img.close()
1853
1854  return data
1855
1856
1857def _SignBootableImage(image_path, prebuilt_name, partition_name,
1858                       info_dict=None):
1859  """Performs AVB signing for a prebuilt boot.img.
1860
1861  Args:
1862    image_path: The full path of the image, e.g., /path/to/boot.img.
1863    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
1864        boot-5.10.img, recovery.img or init_boot.img.
1865    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
1866    info_dict: The information dict read from misc_info.txt.
1867  """
1868  if info_dict is None:
1869    info_dict = OPTIONS.info_dict
1870
1871  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1872  if info_dict.get("avb_enable") == "true":
1873    avbtool = info_dict["avb_avbtool"]
1874    if partition_name == "recovery":
1875      part_size = info_dict["recovery_size"]
1876    else:
1877      part_size = info_dict[prebuilt_name.replace(".img", "_size")]
1878
1879    cmd = [avbtool, "add_hash_footer", "--image", image_path,
1880           "--partition_size", str(part_size), "--partition_name",
1881           partition_name]
    # Use the sha256 of the kernel (or the first available ramdisk) as the
    # salt, for reproducible builds. Initialize salt to None so that a boot
    # image without any of these files doesn't raise UnboundLocalError below.
    salt = None
    with tempfile.TemporaryDirectory() as tmpdir:
1884      RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir])
1885      for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]:
1886        path = os.path.join(tmpdir, filename)
1887        if os.path.exists(path) and os.path.getsize(path):
1888          print("Using {} as salt for avb footer of {}".format(
1889              filename, partition_name))
1890          with open(path, "rb") as fp:
1891            salt = sha256(fp.read()).hexdigest()
1892            break
1893    AppendAVBSigningArgs(cmd, partition_name, salt)
1894    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1895    if args and args.strip():
1896      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
1897      cmd.extend(split_args)
1898    RunAndCheckOutput(cmd)
1899
1900
1901def HasRamdisk(partition_name, info_dict=None):
1902  """Returns true/false to see if a bootable image should have a ramdisk.
1903
1904  Args:
1905    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
1906    info_dict: The information dict read from misc_info.txt.
1907  """
1908  if info_dict is None:
1909    info_dict = OPTIONS.info_dict
1910
1911  if partition_name != "boot":
1912    return True  # init_boot.img or recovery.img has a ramdisk.
1913
1914  if info_dict.get("recovery_as_boot") == "true":
1915    return True  # the recovery-as-boot boot.img has a RECOVERY ramdisk.
1916
1917  if info_dict.get("gki_boot_image_without_ramdisk") == "true":
1918    return False  # A GKI boot.img has no ramdisk since Android-13.
1919
1920  if info_dict.get("init_boot") == "true":
1921    # The ramdisk is moved to the init_boot.img, so there is NO
1922    # ramdisk in the boot.img or boot-<kernel version>.img.
1923    return False
1924
1925  return True
1926
1927
1928def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
1929                     info_dict=None, two_step_image=False,
1930                     dev_nodes=False):
1931  """Return a File object with the desired bootable image.
1932
1933  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
1934  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1935  the source files in 'unpack_dir'/'tree_subdir'."""
1936
1937  if info_dict is None:
1938    info_dict = OPTIONS.info_dict
1939
1940  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
1941  if os.path.exists(prebuilt_path):
1942    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
1943    return File.FromLocalFile(name, prebuilt_path)
1944
1945  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1946  if os.path.exists(prebuilt_path):
1947    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1948    return File.FromLocalFile(name, prebuilt_path)
1949
1950  partition_name = tree_subdir.lower()
1951  prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
1952  if os.path.exists(prebuilt_path):
1953    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
1954    signed_img = MakeTempFile()
1955    shutil.copy(prebuilt_path, signed_img)
1956    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
1957    return File.FromLocalFile(name, signed_img)
1958
1959  logger.info("building image from target_files %s...", tree_subdir)
1960
1961  has_ramdisk = HasRamdisk(partition_name, info_dict)
1962
1963  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
1964  data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
1965                             os.path.join(unpack_dir, fs_config),
1966                             os.path.join(unpack_dir, 'META/ramdisk_node_list')
1967                             if dev_nodes else None,
1968                             info_dict, has_ramdisk, two_step_image)
1969  if data:
1970    return File(name, data)
1971  return None
1972
1973
1974def _BuildVendorBootImage(sourcedir, fs_config_file, partition_name, info_dict=None):
1975  """Build a vendor boot image from the specified sourcedir.
1976
1977  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
1978  turn them into a vendor boot image.
1979
  Return the image data, or None if sourcedir does not appear to contain the
  files for building the requested image.
1982  """
1983
1984  if info_dict is None:
1985    info_dict = OPTIONS.info_dict
1986
1987  img = tempfile.NamedTemporaryFile()
1988
1989  ramdisk_format = GetRamdiskFormat(info_dict)
1990  ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file=fs_config_file, ramdisk_format=ramdisk_format)
1991
1992  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1993  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1994
1995  cmd = [mkbootimg]
1996
1997  fn = os.path.join(sourcedir, "dtb")
1998  if os.access(fn, os.F_OK):
1999    has_vendor_kernel_boot = (info_dict.get(
2000        "vendor_kernel_boot", "").lower() == "true")
2001
2002    # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot.
2003    # Otherwise pack dtb into vendor_boot.
2004    if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot":
2005      cmd.append("--dtb")
2006      cmd.append(fn)
2007
2008  fn = os.path.join(sourcedir, "vendor_cmdline")
2009  if os.access(fn, os.F_OK):
2010    cmd.append("--vendor_cmdline")
2011    cmd.append(open(fn).read().rstrip("\n"))
2012
2013  fn = os.path.join(sourcedir, "base")
2014  if os.access(fn, os.F_OK):
2015    cmd.append("--base")
2016    cmd.append(open(fn).read().rstrip("\n"))
2017
2018  fn = os.path.join(sourcedir, "pagesize")
2019  if os.access(fn, os.F_OK):
2020    cmd.append("--pagesize")
2021    cmd.append(open(fn).read().rstrip("\n"))
2022
2023  args = info_dict.get("mkbootimg_args")
2024  if args and args.strip():
2025    cmd.extend(shlex.split(args))
2026
2027  args = info_dict.get("mkbootimg_version_args")
2028  if args and args.strip():
2029    cmd.extend(shlex.split(args))
2030
2031  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
2032  cmd.extend(["--vendor_boot", img.name])
2033
2034  fn = os.path.join(sourcedir, "vendor_bootconfig")
2035  if os.access(fn, os.F_OK):
2036    cmd.append("--vendor_bootconfig")
2037    cmd.append(fn)
2038
2039  ramdisk_fragment_imgs = []
2040  fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
2041  if os.access(fn, os.F_OK):
2042    ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
2043    for ramdisk_fragment in ramdisk_fragments:
2044      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
2045                        ramdisk_fragment, "mkbootimg_args")
2046      cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
2047      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
2048                        ramdisk_fragment, "prebuilt_ramdisk")
2049      # Use prebuilt image if found, else create ramdisk from supplied files.
2050      if os.access(fn, os.F_OK):
2051        ramdisk_fragment_pathname = fn
2052      else:
2053        ramdisk_fragment_root = os.path.join(
2054            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
2055        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
2056                                            ramdisk_format=ramdisk_format)
2057        ramdisk_fragment_imgs.append(ramdisk_fragment_img)
2058        ramdisk_fragment_pathname = ramdisk_fragment_img.name
2059      cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
2060
2061  RunAndCheckOutput(cmd)
2062
2063  # AVB: if enabled, calculate and add hash.
2064  if info_dict.get("avb_enable") == "true":
2065    avbtool = info_dict["avb_avbtool"]
2066    part_size = info_dict[f'{partition_name}_size']
2067    cmd = [avbtool, "add_hash_footer", "--image", img.name,
2068           "--partition_size", str(part_size), "--partition_name", partition_name]
2069    AppendAVBSigningArgs(cmd, partition_name)
2070    args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args')
2071    if args and args.strip():
2072      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
2073      cmd.extend(split_args)
2074    RunAndCheckOutput(cmd)
2075
  # Rewind to the beginning of the image before reading it back.
  img.seek(0, os.SEEK_SET)
2077  data = img.read()
2078
2079  for f in ramdisk_fragment_imgs:
2080    f.close()
2081  ramdisk_img.close()
2082  img.close()
2083
2084  return data
2085
2086
2087def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
2088                       info_dict=None):
2089  """Return a File object with the desired vendor boot image.
2090
2091  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
2092  the source files in 'unpack_dir'/'tree_subdir'."""
2093
2094  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
2095  if os.path.exists(prebuilt_path):
2096    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
2097    return File.FromLocalFile(name, prebuilt_path)
2098
2099  logger.info("building image from target_files %s...", tree_subdir)
2100
2101  if info_dict is None:
2102    info_dict = OPTIONS.info_dict
2103
2104  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
2105  data = _BuildVendorBootImage(
2106      os.path.join(unpack_dir, tree_subdir), os.path.join(unpack_dir, fs_config), "vendor_boot", info_dict)
2107  if data:
2108    return File(name, data)
2109  return None
2110
2111
2112def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
2113                             info_dict=None):
2114  """Return a File object with the desired vendor kernel boot image.
2115
2116  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
2117  the source files in 'unpack_dir'/'tree_subdir'."""
2118
2119  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
2120  if os.path.exists(prebuilt_path):
2121    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
2122    return File.FromLocalFile(name, prebuilt_path)
2123
2124  logger.info("building image from target_files %s...", tree_subdir)
2125
2126  if info_dict is None:
2127    info_dict = OPTIONS.info_dict
2128
2129  data = _BuildVendorBootImage(
2130      os.path.join(unpack_dir, tree_subdir), None, "vendor_kernel_boot", info_dict)
2131  if data:
2132    return File(name, data)
2133  return None
2134
2135
2136def Gunzip(in_filename, out_filename):
2137  """Gunzips the given gzip compressed file to a given output file."""
2138  with gzip.open(in_filename, "rb") as in_file, \
2139          open(out_filename, "wb") as out_file:
2140    shutil.copyfileobj(in_file, out_file)
2141
2142
2143def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str):
  # Per https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838,
  # the higher bits of |external_attr| hold the unix file type and permissions.
2146  unix_filetype = info.external_attr >> 16
2147  file_perm = unix_filetype & 0o777
2148
2149  def CheckMask(a, mask):
2150    return (a & mask) == mask
2151
2152  def IsSymlink(a):
2153    return CheckMask(a, stat.S_IFLNK)
2154
2155  def IsDir(a):
2156    return CheckMask(a, stat.S_IFDIR)
  # The Python 3.11 zipfile implementation doesn't handle symlinks correctly.
2158  if not IsSymlink(unix_filetype):
2159    target = input_zip.extract(info, dirname)
2160    # We want to ensure that the file is at least read/writable by owner and readable by all users
2161    if IsDir(unix_filetype):
2162      os.chmod(target, file_perm | 0o755)
2163    else:
2164      os.chmod(target, file_perm | 0o644)
2165    return target
2166  if dirname is None:
2167    dirname = os.getcwd()
2168  target = os.path.join(dirname, info.filename)
2169  os.makedirs(os.path.dirname(target), exist_ok=True)
2170  if os.path.exists(target):
2171    os.unlink(target)
2172  os.symlink(input_zip.read(info).decode(), target)
2173  return target
2174
2175
2176def UnzipToDir(filename, dirname, patterns=None):
2177  """Unzips the archive to the given directory.
2178
2179  Args:
2180    filename: The name of the zip file to unzip.
    dirname: Where the unzipped files will land.
    patterns: Files to unzip from the archive. If omitted, will unzip the entire
        archive. Non-matching patterns will be filtered out. If there's no match
2184        after the filtering, no file will be unzipped.
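
  Example:
    A hypothetical invocation extracting only images and metadata:

      UnzipToDir("target_files.zip", "/tmp/out", ["IMAGES/*", "META/*"])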
2185  """
2186  with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip:
2187    # Filter out non-matching patterns. unzip will complain otherwise.
2188    entries = input_zip.infolist()
2189    # b/283033491
2190    # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header
2191    # In zip64 mode, central directory record's header_offset field might be
2192    # set to 0xFFFFFFFF if header offset is > 2^32. In this case, the extra
2193    # fields will contain an 8 byte little endian integer at offset 20
2194    # to indicate the actual local header offset.
2195    # As of python3.11, python does not handle zip64 central directories
2196    # correctly, so we will manually do the parsing here.
2197
    # The ZIP64 central directory extra field has two required fields: a
    # 2-byte header ID and a 2-byte size field, 4 bytes in total. These are
    # followed by up to three 8-byte fields and then a 4-byte disk number
    # field. The trailing disk number field is not required to be present,
    # but if it is, the total size of the extra field will be divisible by 8
    # (because 2+2+4+8*n is always a multiple of 8). Most fields are
    # optional, but when they appear, they must appear in the order defined
    # by the ZIP64 spec. Since the local file header offset is the
    # second-to-last field in the spec, it occupies either the last 8 bytes
    # of the extra field, or bytes -12 to -4 when the disk number is present.
2208    for entry in entries:
2209      if entry.header_offset == 0xFFFFFFFF:
2210        if len(entry.extra) % 8 == 0:
2211          entry.header_offset = int.from_bytes(entry.extra[-12:-4], "little")
2212        else:
2213          entry.header_offset = int.from_bytes(entry.extra[-8:], "little")
2214    if patterns is not None:
2215      filtered = [info for info in entries if any(
2216          [fnmatch.fnmatch(info.filename, p) for p in patterns])]
2217
2218      # There isn't any matching files. Don't unzip anything.
2219      if not filtered:
2220        return
2221      for info in filtered:
2222        UnzipSingleFile(input_zip, info, dirname)
2223    else:
2224      for info in entries:
2225        UnzipSingleFile(input_zip, info, dirname)
2226
2227
2228def UnzipTemp(filename, patterns=None):
2229  """Unzips the given archive into a temporary directory and returns the name.
2230
2231  Args:
    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
        a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
    patterns: Files to unzip from the archive. If omitted, will unzip the
        entire archive.
2237
2238  Returns:
2239    The name of the temporary directory.
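
  Example:
    tmp = UnzipTemp("target_files.zip", ["META/*"]) extracts just the META/
    entries (filename illustrative); the directory is deleted by a later
    Cleanup() call.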
2240  """
2241
2242  tmp = MakeTempDir(prefix="targetfiles-")
2243  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
2244  if m:
2245    UnzipToDir(m.group(1), tmp, patterns)
2246    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns)
2247    filename = m.group(1)
2248  else:
2249    UnzipToDir(filename, tmp, patterns)
2250
2251  return tmp
2252
2253
2254def GetUserImage(which, tmpdir, input_zip,
2255                 info_dict=None,
2256                 allow_shared_blocks=None,
2257                 reset_file_map=False):
2258  """Returns an Image object suitable for passing to BlockImageDiff.
2259
2260  This function loads the specified image from the given path. If the specified
2261  image is sparse, it also performs additional processing for OTA purpose. For
2262  example, it always adds block 0 to clobbered blocks list. It also detects
  files that cannot be reconstructed from the block list, for which we should
2264  avoid applying imgdiff.
2265
2266  Args:
2267    which: The partition name.
2268    tmpdir: The directory that contains the prebuilt image and block map file.
2269    input_zip: The target-files ZIP archive.
2270    info_dict: The dict to be looked up for relevant info.
2271    allow_shared_blocks: If image is sparse, whether having shared blocks is
2272        allowed. If none, it is looked up from info_dict.
2273    reset_file_map: If true and image is sparse, reset file map before returning
2274        the image.
2275  Returns:
    An Image object. If it is a sparse image and reset_file_map is False, the
2277    image will have file_map info loaded.
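
  Example:
    Typical usage during OTA generation (names illustrative):

      with zipfile.ZipFile(target_files) as input_zip:
        tmpdir = UnzipTemp(target_files, ["IMAGES/*", "META/*"])
        system_img = GetUserImage("system", tmpdir, input_zip)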
2278  """
2279  if info_dict is None:
2280    info_dict = LoadInfoDict(input_zip)
2281
2282  is_sparse = IsSparseImage(os.path.join(tmpdir, "IMAGES", which + ".img"))
2283
2284  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
2285  # shared blocks (i.e. some blocks will show up in multiple files' block
2286  # list). We can only allocate such shared blocks to the first "owner", and
2287  # disable imgdiff for all later occurrences.
2288  if allow_shared_blocks is None:
2289    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
2290
2291  if is_sparse:
2292    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks)
2293    if reset_file_map:
2294      img.ResetFileMap()
2295    return img
2296  return GetNonSparseImage(which, tmpdir)
2297
2298
2299def GetNonSparseImage(which, tmpdir):
2300  """Returns a Image object suitable for passing to BlockImageDiff.
2301
2302  This function loads the specified non-sparse image from the given path.
2303
2304  Args:
2305    which: The partition name.
2306    tmpdir: The directory that contains the prebuilt image and block map file.
2307  Returns:
    An Image object.
2309  """
2310  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2311  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2312
2313  # The image and map files must have been created prior to calling
2314  # ota_from_target_files.py (since LMP).
2315  assert os.path.exists(path) and os.path.exists(mappath)
2316
2317  return images.FileImage(path)
2318
2319
2320def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
2321  """Returns a SparseImage object suitable for passing to BlockImageDiff.
2322
2323  This function loads the specified sparse image from the given path, and
2324  performs additional processing for OTA purpose. For example, it always adds
2325  block 0 to clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for which we should avoid applying imgdiff.
2327
2328  Args:
2329    which: The partition name, e.g. "system", "vendor".
2330    tmpdir: The directory that contains the prebuilt image and block map file.
2331    input_zip: The target-files ZIP archive.
2332    allow_shared_blocks: Whether having shared blocks is allowed.
2333  Returns:
2334    A SparseImage object, with file_map info loaded.
2335  """
2336  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2337  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2338
2339  # The image and map files must have been created prior to calling
2340  # ota_from_target_files.py (since LMP).
2341  assert os.path.exists(path) and os.path.exists(mappath)
2342
2343  # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add
2344  # it to clobbered_blocks so that it will be written to the target
2345  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
2346  clobbered_blocks = "0"
2347
2348  image = sparse_img.SparseImage(
2349      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks)
2350
  # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
2352  # if they contain all zeros. We can't reconstruct such a file from its block
2353  # list. Tag such entries accordingly. (Bug: 65213616)
2354  for entry in image.file_map:
2355    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
2356    if not entry.startswith('/'):
2357      continue
2358
2359    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
2360    # filename listed in system.map may contain an additional leading slash
2361    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
2362    # results.
2363    # And handle another special case, where files not under /system
2364    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
2365    arcname = entry.lstrip('/')
2366    if which == 'system' and not arcname.startswith('system'):
2367      arcname = 'ROOT/' + arcname
2368    else:
2369      arcname = arcname.replace(which, which.upper(), 1)
2370
2371    assert arcname in input_zip.namelist(), \
2372        "Failed to find the ZIP entry for {}".format(entry)
2373
2374    info = input_zip.getinfo(arcname)
2375    ranges = image.file_map[entry]
2376
2377    # If a RangeSet has been tagged as using shared blocks while loading the
2378    # image, check the original block list to determine its completeness. Note
2379    # that the 'incomplete' flag would be tagged to the original RangeSet only.
2380    if ranges.extra.get('uses_shared_blocks'):
2381      ranges = ranges.extra['uses_shared_blocks']
2382
2383    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
2384      ranges.extra['incomplete'] = True
2385
2386  return image
2387
2388
2389def GetKeyPasswords(keylist):
2390  """Given a list of keys, prompt the user to enter passwords for
2391  those which require them.  Return a {key: password} dict.  password
2392  will be None if the key has no password."""
2393
2394  no_passwords = []
2395  need_passwords = []
2396  key_passwords = {}
2397  devnull = open("/dev/null", "w+b")
2398
2399  # sorted() can't compare strings to None, so convert Nones to strings
2400  for k in sorted(keylist, key=lambda x: x if x is not None else ""):
2401    # We don't need a password for things that aren't really keys.
2402    if k in SPECIAL_CERT_STRINGS or k is None:
2403      no_passwords.append(k)
2404      continue
2405
2406    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2407             "-inform", "DER", "-nocrypt"],
2408            stdin=devnull.fileno(),
2409            stdout=devnull.fileno(),
2410            stderr=subprocess.STDOUT)
2411    p.communicate()
2412    if p.returncode == 0:
2413      # Definitely an unencrypted key.
2414      no_passwords.append(k)
2415    else:
2416      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2417               "-inform", "DER", "-passin", "pass:"],
2418              stdin=devnull.fileno(),
2419              stdout=devnull.fileno(),
2420              stderr=subprocess.PIPE)
2421      _, stderr = p.communicate()
2422      if p.returncode == 0:
2423        # Encrypted key with empty string as password.
2424        key_passwords[k] = ''
2425      elif stderr.startswith('Error decrypting key'):
2426        # Definitely encrypted key.
2427        # It would have said "Error reading key" if it didn't parse correctly.
2428        need_passwords.append(k)
2429      else:
2430        # Potentially, a type of key that openssl doesn't understand.
2431        # We'll let the routines in signapk.jar handle it.
2432        no_passwords.append(k)
2433  devnull.close()
2434
2435  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
2436  key_passwords.update(dict.fromkeys(no_passwords))
2437  return key_passwords
2438
2439
2440def GetMinSdkVersion(apk_name):
2441  """Gets the minSdkVersion declared in the APK.
2442
2443  It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the given
  APK file. This can be either a decimal number (API Level) or a codename.
2445
2446  Args:
2447    apk_name: The APK filename.
2448
2449  Returns:
2450    The parsed SDK version string.
2451
2452  Raises:
2453    ExternalError: On failing to obtain the min SDK version.
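
  Example:
    GetMinSdkVersion("Foo.apk") may return "21", or a codename such as
    "UpsideDownCake" (APK name illustrative).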
2454  """
2455  proc = Run(
2456      [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE,
2457      stderr=subprocess.PIPE)
2458  stdoutdata, stderrdata = proc.communicate()
2459  if proc.returncode != 0:
2460    raise ExternalError(
2461        "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format(
2462            apk_name, proc.returncode, stdoutdata, stderrdata))
2463
2464  for line in stdoutdata.split("\n"):
2465    # Due to ag/24161708, looking for lines such as minSdkVersion:'23',minSdkVersion:'M'
2466    # or sdkVersion:'23', sdkVersion:'M'.
2467    m = re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'', line)
2468    if m:
2469      return m.group(1)
2470  raise ExternalError("No minSdkVersion returned by aapt2 for apk: {}".format(apk_name))
2471
2472
2473def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
2474  """Returns the minSdkVersion declared in the APK as a number (API Level).
2475
2476  If minSdkVersion is set to a codename, it is translated to a number using the
2477  provided map.
2478
  Args:
    apk_name: The APK filename.
    codename_to_api_level_map: A dict mapping codenames to API levels.
2481
2482  Returns:
2483    The parsed SDK version number.
2484
2485  Raises:
2486    ExternalError: On failing to get the min SDK version number.
2487  """
2488  version = GetMinSdkVersion(apk_name)
2489  try:
2490    return int(version)
2491  except ValueError:
2492    # Not a decimal number.
2493    #
2494    # It could be either a straight codename, e.g.
2495    #     UpsideDownCake
2496    #
2497    # Or a codename with API fingerprint SHA, e.g.
2498    #     UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e
2499    #
2500    # Extract the codename and try and map it to a version number.
2501    split = version.split(".")
2502    codename = split[0]
2503    if codename in codename_to_api_level_map:
2504      return codename_to_api_level_map[codename]
2505    raise ExternalError(
2506        "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format(
2507            codename, version, codename_to_api_level_map))
2508
2509
2510def SignFile(input_name, output_name, key, password, min_api_level=None,
2511             codename_to_api_level_map=None, whole_file=False,
2512             extra_signapk_args=None):
2513  """Sign the input_name zip/jar/apk, producing output_name.  Use the
2514  given key and password (the latter may be None if the key does not
  have a password).
2516
2517  If whole_file is true, use the "-w" option to SignApk to embed a
2518  signature that covers the whole file in the archive comment of the
2519  zip file.
2520
2521  min_api_level is the API Level (int) of the oldest platform this file may end
2522  up on. If not specified for an APK, the API Level is obtained by interpreting
2523  the minSdkVersion attribute of the APK's AndroidManifest.xml.
2524
2525  codename_to_api_level_map is needed to translate the codename which may be
2526  encountered as the APK's minSdkVersion.
2527
2528  Caller may optionally specify extra args to be passed to SignApk, which
2529  defaults to OPTIONS.extra_signapk_args if omitted.
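
  Example (key path illustrative):
    SignFile("unsigned.apk", "signed.apk",
             "build/make/target/product/security/platform", password=None)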
2530  """
2531  if codename_to_api_level_map is None:
2532    codename_to_api_level_map = {}
2533  if extra_signapk_args is None:
2534    extra_signapk_args = OPTIONS.extra_signapk_args
2535
2536  java_library_path = os.path.join(
2537      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
2538
2539  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
2540         ["-Djava.library.path=" + java_library_path,
2541          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
2542         extra_signapk_args)
2543  if whole_file:
2544    cmd.append("-w")
2545
2546  min_sdk_version = min_api_level
2547  if min_sdk_version is None:
2548    if not whole_file:
2549      min_sdk_version = GetMinSdkVersionInt(
2550          input_name, codename_to_api_level_map)
2551  if min_sdk_version is not None:
2552    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
2553
2554  cmd.extend([key + OPTIONS.public_key_suffix,
2555              key + OPTIONS.private_key_suffix,
2556              input_name, output_name])
2557
2558  proc = Run(cmd, stdin=subprocess.PIPE)
2559  if password is not None:
2560    password += "\n"
2561  stdoutdata, _ = proc.communicate(password)
2562  if proc.returncode != 0:
2563    raise ExternalError(
2564        "Failed to run {}: return code {}:\n{}".format(cmd,
2565                                                       proc.returncode, stdoutdata))
2566
2567
2568def CheckSize(data, target, info_dict):
2569  """Checks the data string passed against the max size limit.
2570
  For non-AVB images, raise an exception if the data is too big. Print a warning
2572  if the data is nearing the maximum size.
2573
2574  For AVB images, the actual image size should be identical to the limit.
2575
2576  Args:
2577    data: A string that contains all the data for the partition.
2578    target: The partition name. The ".img" suffix is optional.
2579    info_dict: The dict to be looked up for relevant info.
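
  Example:
    CheckSize(system_img_data, "system.img", OPTIONS.info_dict), where
    system_img_data holds the raw image contents (name illustrative).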
2580  """
2581  if target.endswith(".img"):
2582    target = target[:-4]
2583  mount_point = "/" + target
2584
2585  fs_type = None
2586  limit = None
2587  if info_dict["fstab"]:
2588    if mount_point == "/userdata":
2589      mount_point = "/data"
2590    p = info_dict["fstab"][mount_point]
2591    fs_type = p.fs_type
2592    device = p.device
2593    if "/" in device:
2594      device = device[device.rfind("/")+1:]
2595    limit = info_dict.get(device + "_size", 0)
2596    if isinstance(limit, str):
2597      limit = int(limit, 0)
2598  if not fs_type or not limit:
2599    return
2600
2601  size = len(data)
2602  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
2603  # path.
2604  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
2605    if size != limit:
2606      raise ExternalError(
2607          "Mismatching image size for %s: expected %d actual %d" % (
2608              target, limit, size))
2609  else:
2610    pct = float(size) * 100.0 / limit
2611    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
2612    if pct >= 99.0:
2613      raise ExternalError(msg)
2614
2615    if pct >= 95.0:
2616      logger.warning("\n  WARNING: %s\n", msg)
2617    else:
2618      logger.info("  %s", msg)
2619
2620
2621def ReadApkCerts(tf_zip):
2622  """Parses the APK certs info from a given target-files zip.
2623
2624  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
2625  tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the file);
  (2) a string representing the extension of compressed APKs in the target files
  (e.g. ".gz", ".bro").
2629
2630  Args:
2631    tf_zip: The input target_files ZipFile (already open).
2632
2633  Returns:
2634    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
2635        the extension string of compressed APKs (e.g. ".gz"), or None if there's
2636        no compressed APKs.
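
  Example:
    With a hypothetical target-files zip:

      with zipfile.ZipFile("target_files.zip") as tf_zip:
        certmap, ext = ReadApkCerts(tf_zip)

    certmap might map "FooApp" to
    "build/make/target/product/security/testkey", with ext being None when no
    APKs are compressed.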
2637  """
2638  certmap = {}
2639  compressed_extension = None
2640
2641  # META/apkcerts.txt contains the info for _all_ the packages known at build
2642  # time. Filter out the ones that are not installed.
2643  installed_files = set()
2644  for name in tf_zip.namelist():
2645    basename = os.path.basename(name)
2646    if basename:
2647      installed_files.add(basename)
2648
2649  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
2650    line = line.strip()
2651    if not line:
2652      continue
2653    m = re.match(
2654        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
2655        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
2656        r'(\s+partition="(?P<PARTITION>.*?)")?$',
2657        line)
2658    if not m:
2659      continue
2660
2661    matches = m.groupdict()
2662    cert = matches["CERT"]
2663    privkey = matches["PRIVKEY"]
2664    name = matches["NAME"]
2665    this_compressed_extension = matches["COMPRESSED"]
2666
2667    public_key_suffix_len = len(OPTIONS.public_key_suffix)
2668    private_key_suffix_len = len(OPTIONS.private_key_suffix)
2669    if cert in SPECIAL_CERT_STRINGS and not privkey:
2670      certmap[name] = cert
2671    elif (cert.endswith(OPTIONS.public_key_suffix) and
2672          privkey.endswith(OPTIONS.private_key_suffix) and
2673          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
2674      certmap[name] = cert[:-public_key_suffix_len]
2675    else:
2676      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
2677
2678    if not this_compressed_extension:
2679      continue
2680
2681    # Only count the installed files.
2682    filename = name + '.' + this_compressed_extension
2683    if filename not in installed_files:
2684      continue
2685
2686    # Make sure that all the values in the compression map have the same
2687    # extension. We don't support multiple compression methods in the same
2688    # system image.
2689    if compressed_extension:
2690      if this_compressed_extension != compressed_extension:
2691        raise ValueError(
2692            "Multiple compressed extensions: {} vs {}".format(
2693                compressed_extension, this_compressed_extension))
2694    else:
2695      compressed_extension = this_compressed_extension
2696
2697  return (certmap,
2698          ("." + compressed_extension) if compressed_extension else None)
2699
2700
2701COMMON_DOCSTRING = """
2702Global options
2703
2704  -p  (--path) <dir>
2705      Prepend <dir>/bin to the list of places to search for binaries run by this
2706      script, and expect to find jars in <dir>/framework.
2707
2708  -s  (--device_specific) <file>
2709      Path to the Python module containing device-specific releasetools code.
2710
2711  -x  (--extra) <key=value>
2712      Add a key/value pair to the 'extras' dict, which device-specific extension
2713      code may look at.
2714
2715  -v  (--verbose)
2716      Show command lines being executed.
2717
2718  -h  (--help)
2719      Display this usage message and exit.
2720
2721  --logfile <file>
      Put verbose logs to the specified file (regardless of the --verbose
      option).
2723"""
2724
2725
2726def Usage(docstring):
2727  print(docstring.rstrip("\n"))
2728  print(COMMON_DOCSTRING)
2729
2730
2731def ParseOptions(argv,
2732                 docstring,
2733                 extra_opts="", extra_long_opts=(),
2734                 extra_option_handler: Iterable[OptionHandler] = None):
2735  """Parse the options in argv and return any arguments that aren't
2736  flags.  docstring is the calling module's docstring, to be displayed
2737  for errors and -h.  extra_opts and extra_long_opts are for flags
2738  defined by the caller, which are processed by passing them to
2739  extra_option_handler."""
2740  extra_long_opts = list(extra_long_opts)
2741  if not isinstance(extra_option_handler, Iterable):
2742    extra_option_handler = [extra_option_handler]
2743
2744  for handler in extra_option_handler:
2745    if isinstance(handler, OptionHandler):
2746      extra_long_opts.extend(handler.extra_long_opts)
2747
2748  try:
2749    opts, args = getopt.getopt(
2750        argv, "hvp:s:x:" + extra_opts,
2751        ["help", "verbose", "path=", "signapk_path=",
2752         "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
2753         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
2754         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
2755         "verity_signer_path=", "verity_signer_args=", "device_specific=",
2756         "extra=", "logfile="] + list(extra_long_opts))
2757  except getopt.GetoptError as err:
2758    Usage(docstring)
2759    print("**", str(err), "**")
2760    sys.exit(2)
2761
2762  for o, a in opts:
2763    if o in ("-h", "--help"):
2764      Usage(docstring)
2765      sys.exit()
2766    elif o in ("-v", "--verbose"):
2767      OPTIONS.verbose = True
2768    elif o in ("-p", "--path"):
2769      OPTIONS.search_path = a
2770    elif o in ("--signapk_path",):
2771      OPTIONS.signapk_path = a
2772    elif o in ("--signapk_shared_library_path",):
2773      OPTIONS.signapk_shared_library_path = a
2774    elif o in ("--extra_signapk_args",):
2775      OPTIONS.extra_signapk_args = shlex.split(a)
2776    elif o in ("--aapt2_path",):
2777      OPTIONS.aapt2_path = a
2778    elif o in ("--java_path",):
2779      OPTIONS.java_path = a
2780    elif o in ("--java_args",):
2781      OPTIONS.java_args = shlex.split(a)
2782    elif o in ("--android_jar_path",):
2783      OPTIONS.android_jar_path = a
2784    elif o in ("--public_key_suffix",):
2785      OPTIONS.public_key_suffix = a
2786    elif o in ("--private_key_suffix",):
2787      OPTIONS.private_key_suffix = a
2788    elif o in ("--boot_signer_path",):
2789      raise ValueError(
2790          "--boot_signer_path is no longer supported, please switch to AVB")
2791    elif o in ("--boot_signer_args",):
2792      raise ValueError(
2793          "--boot_signer_args is no longer supported, please switch to AVB")
2794    elif o in ("--verity_signer_path",):
2795      raise ValueError(
2796          "--verity_signer_path is no longer supported, please switch to AVB")
2797    elif o in ("--verity_signer_args",):
2798      raise ValueError(
2799          "--verity_signer_args is no longer supported, please switch to AVB")
2800    elif o in ("-s", "--device_specific"):
2801      OPTIONS.device_specific = a
2802    elif o in ("-x", "--extra"):
2803      key, value = a.split("=", 1)
2804      OPTIONS.extras[key] = value
2805    elif o in ("--logfile",):
2806      OPTIONS.logfile = a
2807    else:
2808      if extra_option_handler is None:
2809        raise ValueError("unknown option \"%s\"" % (o,))
2810      success = False
2811      for handler in extra_option_handler:
2812        if isinstance(handler, OptionHandler):
2813          if handler.handler(o, a):
2814            success = True
2815            break
2816        elif handler(o, a):
2817          success = True
2818      if not success:
2819        raise ValueError("unknown option \"%s\"" % (o,))
2820
2821
2822  if OPTIONS.search_path:
2823    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
2824                          os.pathsep + os.environ["PATH"])
2825
2826  return args
2827

def MakeTempFile(prefix='tmp', suffix=''):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn


def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir that will be cleaned up with a call to Cleanup().

  Returns:
    The absolute pathname of the new directory.
  """
  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(dir_name)
  return dir_name


def Cleanup():
  for i in OPTIONS.tempfiles:
    if not os.path.exists(i):
      continue
    if os.path.isdir(i):
      shutil.rmtree(i, ignore_errors=True)
    else:
      os.remove(i)
  del OPTIONS.tempfiles[:]

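# A minimal sketch of the temp-file lifecycle these helpers implement: every
# path returned by MakeTempFile()/MakeTempDir() is recorded in
# OPTIONS.tempfiles, and all of them are removed by a single Cleanup() call,
# typically from a finally block in a script's main():
#
#   try:
#     tmp = MakeTempFile(suffix=".img")
#     scratch = MakeTempDir(prefix="scratch-")
#     ...  # work with tmp and scratch
#   finally:
#     Cleanup()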

class PasswordManager(object):
  def __init__(self):
    self.editor = os.getenv("EDITOR")
    self.pwfile = os.getenv("ANDROID_PW_FILE")

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        if sys.version_info[0] >= 3:
          raw_input = input  # pylint: disable=redefined-builtin
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current):  # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])

    return self.ReadFile()

  def ReadFile(self):
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          logger.warning("Failed to parse password file: %s", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        logger.exception("Error reading password file:")
    return result

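# A minimal sketch of how PasswordManager is typically used, and of the
# $ANDROID_PW_FILE format that ReadFile()/UpdateAndReadFile() parse. The key
# names below are hypothetical:
#
#   pw = PasswordManager()
#   passwords = pw.GetPasswords(["releasekey", "platform"])
#
# with $ANDROID_PW_FILE containing lines such as:
#
#   [[[  secret1  ]]] releasekey
#   [[[  ]]] platform
#
# An empty value between the brackets is treated as a missing password and
# triggers another edit/prompt round.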

def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
    # intentional. zip stores datetimes in local time without a time zone
    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
    # in the zip archive.
    local_epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit

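# A minimal usage sketch for ZipWrite (the archive and file names are
# hypothetical). Entries get the given permissions and the fixed 2009/01/01
# timestamp, so rebuilding from the same inputs can yield a byte-identical
# archive:
#
#   with zipfile.ZipFile("target.zip", "w", zipfile.ZIP_DEFLATED) as zf:
#     ZipWrite(zf, "out/boot.img", arcname="IMAGES/boot.img", perms=0o644)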

def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644
  else:
    zinfo = zinfo_or_arcname
    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value
    # for such a case (since
    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
    # which seems to make more sense. Otherwise the entry will have 0o000 as
    # the permission bits. We follow the logic in Python 3 to get consistent
    # behavior between using the two versions.
    if not zinfo.external_attr:
      zinfo.external_attr = 0o600 << 16

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit

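# A minimal usage sketch for ZipWriteStr, both with a plain archive name and
# with an explicit ZipInfo (names are hypothetical):
#
#   with zipfile.ZipFile("target.zip", "w") as zf:
#     ZipWriteStr(zf, "META/misc_info.txt", b"key=value\n")
#     zinfo = zipfile.ZipInfo(filename="META/other.txt")
#     ZipWriteStr(zf, zinfo, b"data", perms=0o644,
#                 compress_type=zipfile.ZIP_STORED)
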
def ZipExclude(input_zip, output_zip, entries, force=False):
  """Deletes entries from a ZIP file, writing the result to a new ZIP file.

  Args:
    input_zip: The name of the input ZIP file.
    output_zip: The name of the output ZIP file, which may be the same as
        input_zip.
    entries: The name of the entry, or the list of names to be deleted.
    force: If set, don't error out when none of the entries match.
  """
  if isinstance(entries, str):
    entries = [entries]
  # If list is empty, nothing to do
  if not entries:
    shutil.copy(input_zip, output_zip)
    return

  with zipfile.ZipFile(input_zip, 'r') as zin:
    if not force and len(set(zin.namelist()).intersection(entries)) == 0:
      raise ExternalError(
          "Failed to delete zip entries, name not matched: %s" % entries)

    fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(input_zip))
    os.close(fd)
    cmd = ["zip2zip", "-i", input_zip, "-o", new_zipfile]
    for entry in entries:
      cmd.append("-x")
      cmd.append(entry)
    RunAndCheckOutput(cmd)
  os.replace(new_zipfile, output_zip)


def ZipDelete(zip_filename, entries, force=False):
  """Deletes entries from a ZIP file, rewriting it in place.

  Args:
    zip_filename: The name of the ZIP file.
    entries: The name of the entry, or the list of names to be deleted.
    force: If set, don't error out when none of the entries match.
  """
  if isinstance(entries, str):
    entries = [entries]
  # If list is empty, nothing to do
  if not entries:
    return

  ZipExclude(zip_filename, zip_filename, entries, force)

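# A minimal usage sketch (hypothetical names). ZipDelete rewrites the archive
# in place; ZipExclude writes the filtered archive somewhere else. Both rely
# on the zip2zip host tool being available on PATH:
#
#   ZipDelete("target.zip", "IMAGES/recovery.img")
#   ZipExclude("target.zip", "filtered.zip", ["META/foo.txt", "META/bar.txt"])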

def ZipClose(zip_file):
  # http://b/18015246
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  # central directory.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit


class DeviceSpecificParams(object):
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.items():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        logger.info("loaded device-specific extensions from %s", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        logger.info("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default")
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_GetBlockDifferences(self):
    """Called during full OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
    """Called during incremental OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    return self._DoCall("VerifyOTA_Assertions")

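# A minimal sketch of a device-specific releasetools module, as loaded above
# from OPTIONS.device_specific. Every hook receives the DeviceSpecificParams
# instance as its first argument; the module contents below are hypothetical
# and assume the caller passed a `script` kwarg, as the OTA scripts do:
#
#   # vendor/<oem>/<device>/releasetools.py
#   def FullOTA_Assertions(info):
#     info.script.AppendExtra('assert(my_device_check());')
#
#   def FullOTA_InstallEnd(info):
#     info.script.Print("Flashing baseband...")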

class File(object):
  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    f = open(diskname, "rb")
    data = f.read()
    f.close()
    return File(name, data)

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    ZipWriteStr(z, self.name, self.data, compress_type=compression)


DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch


def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()

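# A minimal sketch tying File, Difference and ComputeDifferences together
# (paths are hypothetical). OPTIONS.worker_threads must be set before calling
# ComputeDifferences:
#
#   src = File.FromLocalFile("app.apk", "old/app.apk")
#   tgt = File.FromLocalFile("app.apk", "new/app.apk")
#   d = Difference(tgt, src)           # picks "imgdiff -z" for .apk
#   OPTIONS.worker_threads = 4
#   ComputeDifferences([d])
#   _, _, patch_data = d.GetPatch()    # None if patching failed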

class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # reduce the size much further. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # hashlib requires bytes, not str, under Python 3.
    zero_block = b'\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()


# Expose these two classes to support vendor-specific scripts
DataImage = images.DataImage
EmptyImage = images.EmptyImage


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC",
    "erofs": "EMMC"
}


def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """
  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has the slotselect
  option (unless check_no_slot is explicitly set to False).
  """
  fstab = info["fstab"]
  if fstab:
    if check_no_slot:
      assert not fstab[mount_point].slotselect, \
          "Use GetTypeAndDeviceExpr instead"
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  raise KeyError


def GetTypeAndDeviceExpr(mount_point, info):
  """
  Return the filesystem of the partition, and an edify expression that
  evaluates to the device at runtime.
  """
  fstab = info["fstab"]
  if fstab:
    p = fstab[mount_point]
    device_expr = '"%s"' % fstab[mount_point].device
    if p.slotselect:
      device_expr = 'add_slot_suffix(%s)' % device_expr
    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
  raise KeyError

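# A minimal sketch of what GetTypeAndDeviceExpr returns (the device path is
# hypothetical). For a non-slotted fstab entry the expression is just the
# quoted device; with slotselect it is wrapped so recovery appends the active
# slot suffix at runtime:
#
#   GetTypeAndDeviceExpr("/system", info)
#     -> ("EMMC", '"/dev/block/by-name/system"')
#   # and, for a slotselect entry:
#     -> ("EMMC", 'add_slot_suffix("/dev/block/by-name/system")')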

def GetEntryForDevice(fstab, device):
  """
  Returns:
    The first entry in fstab whose device is the given value.
  """
  if not fstab:
    return None
  for mount_point in fstab:
    if fstab[mount_point].device == device:
      return fstab[mount_point]
  return None


def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Returns:
    The decoded certificate bytes.
  """
  cert_buffer = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert_buffer.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = base64.b64decode("".join(cert_buffer))
  return cert

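# A minimal usage sketch: read a PEM certificate and convert it to DER bytes
# (the path is hypothetical):
#
#   with open("testkey.x509.pem") as f:
#     der = ParseCertificate(f.read())
#   assert der[:1] == b"\x30"   # DER certificates start with a SEQUENCE tag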

def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the filename given after '-out',
  # openssl 1.0 (both 1.0.1 and 1.0.2) doesn't. So we collect the output from
  # stdout instead.
  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderrdata = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
  return pubkey


def ExtractAvbPublicKey(avbtool, key):
  """Extracts the AVB public key from the given public or private key.

  Args:
    avbtool: The AVB tool to use.
    key: The input key file, which should be PEM-encoded public or private key.

  Returns:
    The path to the extracted AVB public key file.
  """
  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
  RunAndCheckOutput(
      [avbtool, 'extract_public_key', "--key", key, "--output", output])
  return output


def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which
  is identical for the two, so the resulting patch should be efficient. Add it
  to the output zip, along with a shell script that is run from init.rc on
  first boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File object for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    # In this case, the output sink is rooted at VENDOR
    recovery_img_path = "etc/recovery.img"
    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
    sh_dir = "bin"
  else:
    # In this case the output sink is rooted at SYSTEM
    recovery_img_path = "vendor/etc/recovery.img"
    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
    sh_dir = "vendor/bin"

  if full_recovery_image:
    output_sink(recovery_img_path, recovery_img.data)

  else:
    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
    path = os.path.join(input_dir, recovery_resource_dat_path)
    # Use bsdiff to handle mismatching entries (Bug: 72731506)
    if include_recovery_dtbo or include_recovery_acpio:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
                                              check_no_slot=False)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
                                                      check_no_slot=False)
  except KeyError:
    return

  if full_recovery_image:

    # Note that we use /vendor to refer to the recovery resources. This will
    # work for a separate vendor partition mounted at /vendor or a
    # /system/vendor subdirectory on the system partition, for which init will
    # create a symlink from /vendor to /system/vendor.

    sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
          --flash /vendor/etc/recovery.img \\
          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
          --patch /vendor/recovery-from-boot.p \\
          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
       'recovery_type': recovery_type,
       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
  sh_location = os.path.join(sh_dir, "install-recovery.sh")

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())


class DynamicPartitionUpdate(object):
  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)

  @property
  def tgt_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)

  @staticmethod
  def _GetSparseImageSize(img):
    if not img:
      return 0
    return img.blocksize * img.total_blocks


class DynamicGroupUpdate(object):
  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist. 0: no size limits.
    self.src_size = src_size
    self.tgt_size = tgt_size


class DynamicPartitionsDifference(object):
  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()

  def WriteScript(self, script, output_zip, write_verify_script=False):
    script.Comment('--- Start patching dynamic partitions ---')
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')

  def _Compute(self):
    self._op_list = list()

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %s' % (p, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))

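# A minimal sketch of the dynamic_partitions_op_list that _Compute() emits for
# an incremental update (partition/group names and sizes are hypothetical).
# Removals and shrinks come first so that space is freed in the super
# partition before anything grows; the list is consumed on-device by the
# update_dynamic_partitions() edify call above:
#
#   remove product
#   resize system 2147483648
#   resize_group group_foo 4294967296
#   add odm group_foo
#   resize odm 268435456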

def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
  """
  Get build.prop from the ramdisk within the boot image.

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 or
        gzip format.
    ramdisk_format: the compression format of the ramdisk inside the boot
        image.

  Returns:
    An extracted file that stores properties in the boot image.
  """
  tmp_dir = MakeTempDir('boot_', suffix='.img')
  try:
    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
                      boot_img, '--out', tmp_dir])
    ramdisk = os.path.join(tmp_dir, 'ramdisk')
    if not os.path.isfile(ramdisk):
      logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
      return None
    uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
    if ramdisk_format == RamdiskFormat.LZ4:
      RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
    elif ramdisk_format == RamdiskFormat.GZ:
      with open(ramdisk, 'rb') as input_stream:
        with open(uncompressed_ramdisk, 'wb') as output_stream:
          p2 = Run(['gzip', '-d'], stdin=input_stream.fileno(),
                   stdout=output_stream.fileno())
          p2.wait()
    else:
      logger.error('Only lz4 and gzip ramdisk formats are supported.')
      return None

    abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
    extracted_ramdisk = MakeTempDir('extracted_ramdisk')
    # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
    # the host environment.
    RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
                      cwd=extracted_ramdisk)

    for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
      prop_file = os.path.join(extracted_ramdisk, search_path)
      if os.path.isfile(prop_file):
        return prop_file
      logger.warning(
          'Unable to get boot image timestamp: no %s in ramdisk', search_path)

    return None

  except ExternalError as e:
    logger.warning('Unable to get boot image build props: %s', e)
    return None


def GetBootImageTimestamp(boot_img):
  """
  Get the timestamp from the ramdisk within the boot image.

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 format.

  Returns:
    An integer that corresponds to the timestamp of the boot image, or None
    if the file has an unknown format. Raises an exception if an unexpected
    error has occurred.
  """
  prop_file = GetBootImageBuildProp(boot_img)
  if not prop_file:
    return None

  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
  if props is None:
    return None

  try:
    timestamp = props.GetProp('ro.bootimage.build.date.utc')
    if timestamp:
      return int(timestamp)
    logger.warning(
        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is '
        'undefined')
    return None

  except ExternalError as e:
    logger.warning('Unable to get boot image timestamp: %s', e)
    return None

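# A minimal usage sketch (the image path is hypothetical):
#
#   ts = GetBootImageTimestamp("out/boot.img")
#   if ts is not None:
#     logger.info("boot image built at %s",
#                 datetime.datetime.fromtimestamp(ts))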

def IsSparseImage(filepath):
  if not os.path.exists(filepath):
    return False
  with open(filepath, 'rb') as fp:
    # Magic for android sparse image format
    # https://source.android.com/devices/bootloader/images
    return fp.read(4) == b'\x3A\xFF\x26\xED'


def UnsparseImage(filepath, target_path=None):
  if not IsSparseImage(filepath):
    return
  if target_path is None:
    tmp_img = MakeTempFile(suffix=".img")
    RunAndCheckOutput(["simg2img", filepath, tmp_img])
    os.rename(tmp_img, filepath)
  else:
    RunAndCheckOutput(["simg2img", filepath, target_path])


def ParseUpdateEngineConfig(path: str):
  """Parse the update_engine config stored in file `path`.

  Args:
    path: Path to update_engine_config.txt file in target_files

  Returns:
    A tuple of (major, minor) version numbers, e.g. (2, 8).
  """
  with open(path, "r") as fp:
    # update_engine_config.txt is only supposed to contain two lines,
    # PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION. 1024 should be more than
    # sufficient. If the length is more than that, something is wrong.
    data = fp.read(1024)
    major = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data)
    if not major:
      raise ValueError(
          f"{path} is an invalid update_engine config, missing "
          f"PAYLOAD_MAJOR_VERSION {data}")
    minor = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data)
    if not minor:
      raise ValueError(
          f"{path} is an invalid update_engine config, missing "
          f"PAYLOAD_MINOR_VERSION {data}")
    return (int(major.group(1)), int(minor.group(1)))

4233