• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15from __future__ import print_function
16
17import base64
18import collections
19import copy
20import datetime
21import errno
22import fnmatch
23import getopt
24import getpass
25import gzip
26import importlib.util
27import json
28import logging
29import logging.config
30import os
31import platform
32import re
33import shlex
34import shutil
35import subprocess
36import stat
37import sys
38import tempfile
39import threading
40import time
41import zipfile
42
43from typing import Iterable, Callable
44from dataclasses import dataclass
45from hashlib import sha1, sha256
46
47import images
48import sparse_img
49from blockimgdiff import BlockImageDiff
50
# Module-level logger; its handlers/levels are configured by InitLogging().
logger = logging.getLogger(__name__)
52
53
@dataclass
class OptionHandler:
  """Pairs a getopt option handler with the extra long options it consumes."""
  # Extra getopt-style long option names (e.g. "foo=") handled by `handler`.
  extra_long_opts: Iterable[str]
  # Callable invoked to process the parsed options.
  handler: Callable
58
class Options(object):
  """Global, mutable configuration shared by the releasetools scripts.

  The values set in __init__ are defaults; most of them can be overridden by
  command-line flags parsed later.
  """

  def __init__(self):
    # Set up search path, in order to find framework/ and lib64/. At the time of
    # running this function, user-supplied search path (`--path`) hasn't been
    # available. So the value set here is the default, which might be overridden
    # by commandline flag later.
    executable = os.path.realpath(sys.argv[0])
    if executable.endswith('.py'):
      base_name = os.path.basename(executable)
      # logger hasn't been initialized yet at this point. Use print to output
      # warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              base_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(executable))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    # Fall back to ANDROID_HOST_OUT when signapk.jar isn't found next to us.
    signapk_abs = os.path.join(self.search_path, self.signapk_path)
    if not os.path.exists(signapk_abs) and "ANDROID_HOST_OUT" in os.environ:
      self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx4096m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None
103
104
# The global Options instance shared across the releasetools scripts.
OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here, the
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks need to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw',
                  'recovery', 'system', 'system_ext', 'vendor', 'vendor_boot',
                  'vendor_kernel_boot', 'vendor_dlkm', 'odm_dlkm',
                  'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# avbtool arguments name
AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG = '--include_descriptors_from_image'
AVB_ARG_NAME_CHAIN_PARTITION = '--chain_partition'

# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If file is moved, add new search paths here; don't remove
# existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
148
149
@dataclass
class AvbChainedPartitionArg:
  """The required arguments for avbtool --chain_partition."""
  partition: str
  rollback_index_location: int
  pubkey_path: str

  def to_string(self):
    """Convert to string command arguments."""
    fields = [self.partition, str(self.rollback_index_location),
              self.pubkey_path]
    return ':'.join(fields)
161
162
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: system partition failures.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: vendor partition failures.
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build mismatches and patching failures.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
190
191
class ExternalError(RuntimeError):
  """Raised when an external command fails or a required property is missing."""
  pass
194
195
def InitLogging():
  """Configures the logging module.

  If the LOGGING_CONFIG environment variable points to a JSON config file,
  that config wins outright. Otherwise a default console config is used,
  optionally raised to INFO level (OPTIONS.verbose) and/or mirrored to a
  file (OPTIONS.logfile).
  """
  default_config = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'NOTSET',
          }
      }
  }

  override_path = os.getenv('LOGGING_CONFIG')
  if override_path:
    with open(override_path) as fp:
      config = json.load(fp)
  else:
    config = default_config

    # Increase the logging level for verbose mode.
    if OPTIONS.verbose:
      config = copy.deepcopy(config)
      config['handlers']['default']['level'] = 'INFO'

    if OPTIONS.logfile:
      config = copy.deepcopy(config)
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)
246
247
def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find
  Returns:
    path to the tool if found under the same directory as this binary is located at. If not found,
    tool_name is returned.
  """
  exec_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  candidate = os.path.join(exec_dir, tool_name)
  return candidate if os.path.exists(candidate) else tool_name
263
264
def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Default to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  # Install the PIPE/STDOUT defaults only when the caller specified neither
  # stream; a caller setting either one keeps full control of both.
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  kwargs.setdefault('universal_newlines', True)

  if args:
    # Make a copy of args in case client relies on the content of args later.
    args = args[:]
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log any if caller explicitly says so.
  if verbose:
    logger.info('  Running: "%s"', ' '.join(args))
  return subprocess.Popen(args, **kwargs)
299
300
def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Default to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  if verbose is None:
    verbose = OPTIONS.verbose
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  # communicate() yields None for streams that weren't captured.
  output = "" if output is None else output
  # Don't log any if caller explicitly says so.
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output
332
333
def RoundUpTo4K(value):
  """Returns value rounded up to the nearest multiple of 4096."""
  return (value + 4095) // 4096 * 4096
337
338
def CloseInheritedPipes():
  """Closes inherited file descriptors that are pipes.

  Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work. No-op on any platform other than Darwin.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      # Previously tested `st[0] & 0x1000` by hand; 0x1000 is S_IFIFO, so use
      # stat.S_ISFIFO instead. The local was also named `stat`, shadowing the
      # imported stat module -- renamed to avoid that.
      st = os.fstat(fd)
      if st is not None and stat.S_ISFIFO(st.st_mode):
        os.close(fd)
    except OSError:
      # fd not open (or already closed); skip it.
      pass
353
354
class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  the commonly used properties such as fingerprint.

  There are two types of info dicts: a) build-time info dict, which is generated
  at build time (i.e. included in a target_files zip); b) OEM info dict that is
  specified at package generation time (via command line argument
  '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
  having "oem_fingerprint_properties" in build-time info dict), all the queries
  would be answered based on build-time info dict only. Otherwise if using
  OEM-specific properties, some of them will be calculated from two info dicts.

  Users can query properties similarly as using a dict() (e.g. info['fstab']),
  or to query build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts; None
        if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The length of vbmeta digest to append to the fingerprint
  _VBMETA_DIGEST_SIZE_USED = 8

  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class, while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None to use BuildInfo in
    # sign_target_files_apks
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      # Fingerprints must be ASCII and must not contain spaces.
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    """Whether this build uses A/B OTA."""
    return self._is_ab

  @property
  def device(self):
    """The device name (possibly from the OEM dicts)."""
    return self._device

  @property
  def fingerprint(self):
    """The cached build fingerprint."""
    return self._fingerprint

  @property
  def is_vabc(self):
    """Whether virtual A/B compression is enabled."""
    return self.info_dict.get("virtual_ab_compression") == "true"

  @property
  def is_android_r(self):
    """Whether the system partition was built from Android 11 (R)."""
    system_prop = self.info_dict.get("system.build.prop")
    return system_prop and system_prop.GetProp("ro.build.version.release") == "11"

  @property
  def is_release_key(self):
    """Whether the build is tagged as release-key."""
    system_prop = self.info_dict.get("build.prop")
    return system_prop and system_prop.GetProp("ro.build.tags") == "release-key"

  @property
  def vabc_compression_param(self):
    """The virtual A/B compression method, or '' if unset."""
    return self.get("virtual_ab_compression_method", "")

  @property
  def vabc_cow_version(self):
    """The virtual A/B COW version, or '' if unset."""
    return self.get("virtual_ab_cow_version", "")

  @property
  def vendor_api_level(self):
    """The first API level from vendor build props, or -1 if undeterminable."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    if not vendor_prop:
      return -1

    props = [
        "ro.board.first_api_level",
        "ro.product.first_api_level",
    ]
    for prop in props:
      value = vendor_prop.GetProp(prop)
      try:
        return int(value)
      except (ValueError, TypeError):
        # The prop is unset (None) or non-numeric; try the next candidate.
        # Previously a bare `except:` here also swallowed KeyboardInterrupt
        # and SystemExit.
        pass
    return -1

  @property
  def is_vabc_xor(self):
    """Whether virtual A/B XOR compression is enabled in the vendor props."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled

  @property
  def vendor_suppressed_vabc(self):
    """Whether the vendor explicitly opted out of virtual A/B compression."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    """The OEM property list, or None when not using OEM properties."""
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    # Looks up `prop` in the partition-specific (or default) build.prop object;
    # returns None when the prop file or the prop itself is absent.
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition."""

    # Boot image and init_boot image uses ro.[product.]bootimage instead of boot.
    # This comes from the generic ramdisk
    prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the inquired build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the inquired ro.product.* build property"""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid
    if any(x not in default_source_order for x in source_order):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding error.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    # Returns release_or_codename on SDK >= 30, falling back to release.
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # init code switches to version_release_or_codename (see b/158483506). After
    # API finalization, release_or_codename will be the same as release. This
    # is the best effort to support pre-S dev stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of vbmeta digest to the existing build id. The
    # logic needs to match the one in init, so that OTA can deliver correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("Vbmeta digest isn't provided when using legacy build"
                          " id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    # Prefers release_or_codename, falls back to release for older builds.
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    """Returns `key` from the first OEM dict when applicable, else build.prop."""
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    """Returns the cached fingerprint of `partition`, or None."""
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    """Computes the fingerprint of `partition` from its build props."""
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    """Computes the build fingerprint, honoring OEM properties if present."""
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))

  def WriteMountOemScript(self, script):
    """Emits the edify command to mount /oem."""
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    """Emits device (or OEM property) assertions into the given script."""
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)
740
741
def DoesInputFileContain(input_file, fn):
  """Check whether the input target_files.zip contain an entry `fn`"""
  if isinstance(input_file, zipfile.ZipFile):
    return fn in input_file.namelist()
  if zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return fn in zfp.namelist()
  # Not a zip: must be a path to an extracted target-files directory.
  if not os.path.isdir(input_file):
    raise ValueError(
        "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
  return os.path.exists(os.path.join(input_file, *fn.split("/")))
755
756
def ReadBytesFromInputFile(input_file, fn):
  """Reads the bytes of fn from input zipfile or directory.

  Args:
    input_file: An open zipfile.ZipFile, a path to a .zip file, or a path to
        an extracted target-files directory.
    fn: The entry name (zip-style, '/'-separated).

  Returns:
    The entry contents as bytes.

  Raises:
    KeyError: If fn doesn't exist.
    ValueError: If input_file is neither a zip nor a directory.
  """
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return zfp.read(fn)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "rb") as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Previously any other IOError fell through and the function silently
      # returned None; propagate it instead.
      raise
775
776
def ReadFromInputFile(input_file, fn):
  """Reads the str contents of fn from input zipfile or directory."""
  raw = ReadBytesFromInputFile(input_file, fn)
  return raw.decode()
780
781
def WriteBytesToInputFile(input_file, fn, data):
  """Write bytes |data| contents to fn of input zipfile or directory.

  Args:
    input_file: An open zipfile.ZipFile, a path to a .zip file, or a path to
        an extracted target-files directory.
    fn: The entry name (zip-style, '/'-separated).
    data: The bytes to write.

  Raises:
    KeyError: If fn's parent directory doesn't exist under a directory input.
    ValueError: If input_file is neither a zip nor a directory.
  """
  if isinstance(input_file, zipfile.ZipFile):
    with input_file.open(fn, "w") as entry_fp:
      return entry_fp.write(data)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      with zfp.open(fn, "w") as entry_fp:
        return entry_fp.write(data)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "wb") as f:
        return f.write(data)
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Previously any other IOError fell through and the function silently
      # returned None; propagate it instead.
      raise
802
803
def WriteToInputFile(input_file, fn, str: str):
  """Write str content to fn of input file or directory.

  NOTE(review): the parameter name `str` shadows the builtin; renaming it
  (e.g. to `data`) would be clearer, but would change the keyword-argument
  interface for existing callers.
  """
  return WriteBytesToInputFile(input_file, fn, str.encode())
807
808
def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from input zipfile or directory into a file."""
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as out:
      out.write(input_file.read(fn))
    return tmp_file

  if zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      tmp_file = MakeTempFile(os.path.basename(fn))
      with open(tmp_file, "wb") as out:
        out.write(zfp.read(fn))
      return tmp_file

  # Directory input: no extraction needed, return the file's path directly.
  if not os.path.isdir(input_file):
    raise ValueError(
        "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
  path = os.path.join(input_file, *fn.split("/"))
  if not os.path.exists(path):
    raise KeyError(fn)
  return path
830
831
class RamdiskFormat(object):
  """Enumerates the supported ramdisk compression formats."""
  LZ4 = 1
  GZ = 2
835
836
def GetRamdiskFormat(info_dict):
  """Returns the RamdiskFormat indicated by the given info dict."""
  if info_dict.get('lz4_ramdisks') == 'true':
    return RamdiskFormat.LZ4
  return RamdiskFormat.GZ
843
844
def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads `META/misc_info.txt` file in the target_files input, does validation
  checks and returns the parsed key/value pairs for the given build. It's
  usually called early when working on input target_files files, e.g. when
  generating OTAs, or signing builds. Note that the function may be called
  against an old target_files file (i.e. from past dessert releases). So the
  property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. It works fine with the build system. However,
  they are no longer available when (re)generating images from target_files zip.
  When `repacking` is True, redirect these properties to the actual files in the
  unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        target_files file. When doing repacking, `input_file` must be a dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config), "{} does not exist".format(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir", and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    partitions = ["system", "vendor", "system_ext", "product", "odm",
                  "vendor_dlkm", "odm_dlkm", "system_dlkm"]
    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in partitions:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

    # Redirecting helper for optional properties like erofs_compress_hints
    def redirect_file(prop, filename):
      if prop not in d:
        return
      config_file = os.path.join(input_file, "META/" + filename)
      if os.path.exists(config_file):
        d[prop] = config_file
      else:
        logger.warning(
            "Failed to find %s for %s", filename, prop)
        del d[prop]

    # Redirect erofs_[default_]compress_hints files
    redirect_file("erofs_default_compress_hints",
                  "erofs_default_compress_hints.txt")
    for part in partitions:
      redirect_file(part + "_erofs_compress_hints",
                    part + "_erofs_compress_hints.txt")

  def makeint(key):
    # Base 0 lets int() auto-detect decimal/hex/octal prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = GetRamdiskFormat(d)

  # Tries to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  d["build.prop"] = d["system.build.prop"]

  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d, use_legacy_id=True)
    # Set up the salt for partitions without build.prop
    if build_info.fingerprint:
      if "fingerprint" not in d:
        d["fingerprint"] = build_info.fingerprint
      if "avb_salt" not in d:
        d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest()
    # Set the vbmeta digest if exists
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d
995
996
def LoadListFromFile(file_path):
  """Reads file_path and returns its contents as a list of lines."""
  with open(file_path) as fp:
    content = fp.read()
  return content.splitlines()
1000
1001
def LoadDictionaryFromFile(file_path):
  """Parses the key=value file at file_path into a dict."""
  return LoadDictionaryFromLines(LoadListFromFile(file_path))
1005
1006
def LoadDictionaryFromLines(lines):
  """Parses 'key=value' lines into a dict, ignoring blanks and # comments.

  Only the first '=' separates key from value; later '=' characters stay in
  the value. Lines without '=' are skipped.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
1017
1018
class PartitionBuildProps(object):
  """The class holds the build prop of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop; and calculates alternative values of some specific build
  properties during runtime.

  Attributes:
    input_file: a zipped target-file or an unzipped target-file directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: A dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
    ramdisk_format: If name is "boot", the format of ramdisk inside the
        boot image. Otherwise, its value is ignored.
        Use lz4 to decompress by default. If its value is gzip, use gzip.
  """

  def __init__(self, input_file, name, placeholder_values=None):
    self.input_file = input_file
    self.partition = name
    self.props_allow_override = [props.format(name) for props in [
        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
    self.build_props = {}
    self.prop_overrides = set()
    self.placeholder_values = {}
    if placeholder_values:
      self.placeholder_values = copy.deepcopy(placeholder_values)

  @staticmethod
  def FromDictionary(name, build_props):
    """Constructs an instance from a build prop dictionary."""

    props = PartitionBuildProps("unknown", name)
    props.build_props = build_props.copy()
    return props

  @staticmethod
  def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4):
    """Loads the build.prop file and builds the attributes."""

    if name in ("boot", "init_boot"):
      data = PartitionBuildProps._ReadBootPropFile(
          input_file, name, ramdisk_format=ramdisk_format)
    else:
      data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

    props = PartitionBuildProps(input_file, name, placeholder_values)
    props._LoadBuildProp(data)
    return props

  @staticmethod
  def _ReadBootPropFile(input_file, partition_name, ramdisk_format):
    """
    Read build.prop for boot image from input_file.
    Return empty string if not found.
    """
    image_path = 'IMAGES/' + partition_name + '.img'
    try:
      boot_img = ExtractFromInputFile(input_file, image_path)
    except KeyError:
      logger.warning('Failed to read %s', image_path)
      return ''
    prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
    if prop_file is None:
      return ''
    with open(prop_file, "r") as f:
      return f.read()

  @staticmethod
  def _ReadPartitionPropFile(input_file, name):
    """
    Read build.prop for name from input_file.
    Return empty string if not found.
    """
    data = ''
    # Newer builds put it under etc/; fall back to the partition root.
    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
                      '{}/build.prop'.format(name.upper())]:
      try:
        data = ReadFromInputFile(input_file, prop_file)
        break
      except KeyError:
        logger.warning('Failed to read %s', prop_file)
    if data == '':
      logger.warning("Failed to read build.prop for partition {}".format(name))
    return data

  @staticmethod
  def FromBuildPropFile(name, build_prop_file):
    """Constructs an instance from a build prop file."""

    props = PartitionBuildProps("unknown", name)
    with open(build_prop_file) as f:
      props._LoadBuildProp(f.read())
    return props

  def _LoadBuildProp(self, data):
    """Parses build.prop text, honoring 'import' overrides.

    Raises:
      ValueError: If a prop is overridden by more than one import, or set
          again after being overridden.
    """
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after overridden by import '
                           'statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      logger.warning('Unrecognized import path {}'.format(line))
      return {}

    # We only recognize a subset of import statement that the init process
    # supports, and we can loosen the restriction based on how the dynamic
    # fingerprint is used in practice. The placeholder format should be
    # ${placeholder}, and its value should be provided by the caller through
    # the placeholder_values.
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved place holder in import path %s', import_path)
      return {}

    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}

  def __getstate__(self):
    state = self.__dict__.copy()
    # An open ZipFile handle isn't picklable; substitute its filename so the
    # instance can still be serialized.
    if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile):
      state["input_file"] = state["input_file"].filename
    return state

  def GetProp(self, prop):
    """Returns the value of prop, or None if it isn't set."""
    return self.build_props.get(prop)
1187
1188
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path):
  """Parses the recovery fstab into a dict keyed by mount point.

  Args:
    read_helper: Callable that returns the file contents for a given archive
        path, raising KeyError if absent.
    fstab_version: Must be 2; other versions are unsupported.
    recovery_fstab_path: Archive path of the recovery fstab to parse.

  Returns:
    A dict mapping mount point -> Partition record.

  Raises:
    ValueError: On a malformed fstab line.
  """
  class Partition(object):
    # Simple record describing a single fstab entry.
    def __init__(self, mount_point, fs_type, device, length, context, slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  # Only fstab v2 is supported here.
  assert fstab_version == 2

  result = {}
  for raw_line in data.split("\n"):
    entry = raw_line.strip()
    if not entry or entry.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    fields = entry.split()
    if len(fields) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (entry,))
    device, mount_point, fs_type, mount_flags, fs_mgr_flags = fields

    # Entries managed by vold are not our concern.
    if "voldmanaged=" in fs_mgr_flags:
      continue

    # Pull out the fs_mgr options we care about; everything else is ignored.
    length = 0
    slotselect = False
    for opt in fs_mgr_flags.split(","):
      if opt.startswith("length="):
        length = int(opt[7:])
      elif opt == "slotselect":
        slotselect = True

    # Honor the SELinux context if present (the last one wins).
    context = None
    for flag in mount_flags.split(","):
      if flag.startswith("context="):
        context = flag

    result[mount_point] = Partition(mount_point=mount_point, fs_type=fs_type,
                                    device=device, length=length,
                                    context=context, slotselect=slotselect)

  return result
1249
1250
1251def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
1252  """Finds the path to recovery fstab and loads its contents."""
1253  # recovery fstab is only meaningful when installing an update via recovery
1254  # (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA.
1255  if info_dict.get('ab_update') == 'true' and \
1256     info_dict.get("allow_non_ab") != "true":
1257    return None
1258
1259  # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
1260  # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
1261  # cases, since it may load the info_dict from an old build (e.g. when
1262  # generating incremental OTAs from that build).
1263  if info_dict.get('no_recovery') != 'true':
1264    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
1265    if not DoesInputFileContain(input_file, recovery_fstab_path):
1266      recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
1267    return LoadRecoveryFSTab(
1268        read_helper, info_dict['fstab_version'], recovery_fstab_path)
1269
1270  if info_dict.get('recovery_as_boot') == 'true':
1271    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
1272    if not DoesInputFileContain(input_file, recovery_fstab_path):
1273      recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
1274    return LoadRecoveryFSTab(
1275        read_helper, info_dict['fstab_version'], recovery_fstab_path)
1276
1277  return None
1278
1279
def DumpInfoDict(d):
  """Logs every key/value pair of d, sorted by key, with the value's type."""
  for key in sorted(d):
    value = d[key]
    logger.info("%-25s = (%s) %s", key, type(value).__name__, value)
1283
1284
def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from the
      partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.

  Raises:
    ValueError: If either input lacks use_dynamic_partitions=true, or the
        vendor dict is missing a required size key.
  """

  def uniq_concat(a, b):
    # Space-separated union of the two token lists, sorted for determinism.
    tokens = {t.strip() for t in a.split() + b.split() if t.strip()}
    return " ".join(sorted(tokens))

  if (framework_dict.get("use_dynamic_partitions") != "true"
          or vendor_dict.get("use_dynamic_partitions") != "true"):
    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

  merged_dict = {"use_dynamic_partitions": "true"}
  # Key/value pairs on which both sides agree are carried over verbatim.
  for key, value in vendor_dict.items():
    if key in framework_dict and framework_dict[key] == value:
      merged_dict[key] = value

  merged_dict["dynamic_partition_list"] = uniq_concat(
      framework_dict.get("dynamic_partition_list", ""),
      vendor_dict.get("dynamic_partition_list", ""))

  # Super block devices are defined by the vendor dict.
  if "super_block_devices" in vendor_dict:
    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
    for block_device in merged_dict["super_block_devices"].split():
      size_key = "super_%s_device_size" % block_device
      if size_key not in vendor_dict:
        raise ValueError("Vendor dict does not contain required key %s." %
                         size_key)
      merged_dict[size_key] = vendor_dict[size_key]

  # Partition groups and group sizes are defined by the vendor dict because
  # these values may vary for each board that uses a shared system image.
  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
  for partition_group in merged_dict["super_partition_groups"].split():
    # Set the partition group's size using the value from the vendor dict.
    size_key = "super_%s_group_size" % partition_group
    if size_key not in vendor_dict:
      raise ValueError("Vendor dict does not contain required key %s." %
                       size_key)
    merged_dict[size_key] = vendor_dict[size_key]

    # The group's partition list is the union of the framework and vendor
    # partition lists.
    list_key = "super_%s_partition_list" % partition_group
    merged_dict[list_key] = uniq_concat(
        framework_dict.get(list_key, ""), vendor_dict.get(list_key, ""))

  # in the case that vendor is on s build, but is taking a v3 -> v3 vabc ota,
  # we want to fallback to v2
  if "vabc_cow_version" in vendor_dict and "vabc_cow_version" in framework_dict:
    merged_dict["vabc_cow_version"] = min(vendor_dict["vabc_cow_version"],
                                          framework_dict["vabc_cow_version"])
  else:
    merged_dict["vabc_cow_version"] = '2'

  # Various other flags should be copied from the vendor dict, if defined.
  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
              "super_metadata_device", "super_partition_error_limit",
              "super_partition_size"):
    if key in vendor_dict:
      merged_dict[key] = vendor_dict[key]

  return merged_dict
1355
1356
def PartitionMapFromTargetFiles(target_files_dir):
  """Builds a map from partition -> path within an extracted target files directory."""
  # Keep possible_subdirs in sync with build/make/core/board_config.mk.
  possible_subdirs = {
      "system": ["SYSTEM"],
      "vendor": ["VENDOR", "SYSTEM/vendor"],
      "product": ["PRODUCT", "SYSTEM/product"],
      "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
      "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
      "vendor_dlkm": [
          "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
      "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"],
  }
  partition_map = {}
  for partition, candidates in possible_subdirs.items():
    # First candidate that exists on disk wins; partitions with no match are
    # simply omitted from the map.
    found = next(
        (subdir for subdir in candidates
         if os.path.exists(os.path.join(target_files_dir, subdir))), None)
    if found is not None:
      partition_map[partition] = found
  return partition_map
1379
1380
def SharedUidPartitionViolations(uid_dict, partition_groups):
  """Checks for APK sharedUserIds that cross partition group boundaries.

  This uses a single or merged build's shareduid_violation_modules.json
  output file, as generated by find_shareduid_violation.py or
  core/tasks/find-shareduid-violation.mk.

  An error is defined as a sharedUserId that is found in a set of partitions
  that span more than one partition group.

  Args:
    uid_dict: A dictionary created by using the standard json module to read a
      complete shareduid_violation_modules.json file.
    partition_groups: A list of groups, where each group is a list of
      partitions.

  Returns:
    A list of error messages.
  """
  errors = []
  for uid, partitions in uid_dict.items():
    # Count how many groups this uid's partitions intersect; >1 is a
    # violation.
    group_hits = sum(
        1 for group in partition_groups if set(partitions) & set(group))
    if group_hits > 1:
      errors.append(
          "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
          % (uid, ",".join(sorted(partitions.keys()))))
  return errors
1411
1412
def RunVendoredHostInitVerifier(product_out, partition_map):
  """Runs vendor host_init_verifier on the init rc files within selected partitions.

  host_init_verifier searches the etc/init path within each selected partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within product_out.
  """
  # Use the verifier binary bundled with the vendor otatools package instead
  # of the default one on PATH.
  vendored_tool = os.path.join(
      OPTIONS.vendor_otatools, 'bin', 'host_init_verifier')
  return RunHostInitVerifier(product_out, partition_map, tool=vendored_tool)
1426
1427
def RunHostInitVerifier(product_out, partition_map, tool="host_init_verifier"):
  """Runs host_init_verifier on the init rc files within partitions.

  host_init_verifier searches the etc/init path within each partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within product_out.
    tool: Full path to host_init_verifier or binary name

  Raises:
    ExternalError: If partition_map names a partition the tool can't verify.
  """
  allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
  cmd = [tool]
  for partition, path in partition_map.items():
    if partition not in allowed_partitions:
      raise ExternalError("Unable to call host_init_verifier for partition %s" %
                          partition)
    partition_root = os.path.join(product_out, path)
    cmd += ["--out_%s" % partition, partition_root]
    # Pass --property-contexts when the partition ships one. The system
    # partition's contexts file is named "plat_property_contexts".
    contexts_name = "%s_property_contexts" % (
        "plat" if partition == "system" else partition)
    contexts_path = os.path.join(partition_root, "etc", "selinux",
                                 contexts_name)
    if os.path.exists(contexts_path):
      cmd.append("--property-contexts=%s" % contexts_path)
    # Pass the passwd file if the partition ships one.
    passwd_path = os.path.join(partition_root, "etc", "passwd")
    if os.path.exists(passwd_path):
      cmd += ["-p", passwd_path]
  return RunAndCheckOutput(cmd)
1457
1458
def AppendAVBSigningArgs(cmd, partition, avb_salt=None):
  """Append signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  key_path = ResolveAVBSigningPathArgs(
      OPTIONS.info_dict.get("avb_" + partition + "_key_path"))
  # Only sign when both pieces are configured for this partition.
  if key_path and algorithm:
    cmd += ["--key", key_path, "--algorithm", algorithm]
  if avb_salt is None:
    avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd += ["--salt", avb_salt]
1472
1473
def ResolveAVBSigningPathArgs(split_args):
  """Resolves signing-helper paths against OPTIONS.search_path.

  Accepts either a plain path string, or a list of avbtool args in which a
  '--signing_helper <path>' pair may appear. Falsy input is returned as-is.

  Raises:
    ExternalError: If a referenced path can't be found.
  """

  def resolve(path):
    # Prefer the path as given; otherwise try it relative to the search path.
    if os.path.exists(path):
      return path
    if OPTIONS.search_path:
      candidate = os.path.join(OPTIONS.search_path, path)
      if os.path.exists(candidate):
        return candidate
    raise ExternalError(
        "Failed to find {}".format(path))

  if not split_args:
    return split_args

  if isinstance(split_args, str):
    return resolve(split_args)

  if isinstance(split_args, list):
    # Only the first '--signing_helper <path>' pair gets resolved, in place.
    for index, token in enumerate(split_args[:-1]):
      if token == '--signing_helper':
        split_args[index + 1] = resolve(split_args[index + 1])
        break
  return split_args
1499
1500
def GetAvbPartitionArg(partition, image, info_dict=None):
  """Returns the VBMeta arguments for one partition.

  It sets up the VBMeta argument by including the partition descriptor from the
  given 'image', or by configuring the partition as a chained partition.

  Args:
    partition: The name of the partition (e.g. "system").
    image: The path to the partition image.
    info_dict: A dict returned by common.LoadInfoDict(). Will use
        OPTIONS.info_dict if None has been given.

  Returns:
    A list of VBMeta arguments for one partition.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # No per-partition key configured => not chained; include the descriptor
  # straight from the image.
  if not info_dict.get("avb_" + partition + "_key_path"):
    return [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]

  # For a non-A/B device, we don't chain /recovery nor include its descriptor
  # into vbmeta.img. The recovery image will be configured on an independent
  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
  # See details at
  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
  if partition == "recovery" and info_dict.get("ab_update") != "true":
    return []

  # Otherwise chain the partition into vbmeta.
  return [AVB_ARG_NAME_CHAIN_PARTITION,
          GetAvbChainedPartitionArg(partition, info_dict)]
1535
1536
def GetAvbPartitionsArg(partitions,
                        resolve_rollback_index_location_conflict=False,
                        info_dict=None):
  """Returns the VBMeta arguments for all AVB partitions.

  It sets up the VBMeta argument by calling GetAvbPartitionArg of all
  partitions.

  Args:
    partitions: A dict of all AVB partitions.
    resolve_rollback_index_location_conflict: If true, resolve conflicting avb
        rollback index locations by assigning the smallest unused value.
    info_dict: A dict returned by common.LoadInfoDict().

  Returns:
    A list of VBMeta arguments for all partitions.
  """
  # An AVB partition is linked into a vbmeta partition by either
  # AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG or AVB_ARG_NAME_CHAIN_PARTITION; group
  # the per-partition values under those two arg names.
  grouped = {
      AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: [],
      AVB_ARG_NAME_CHAIN_PARTITION: []
  }

  for name, path in sorted(partitions.items()):
    partition_arg = GetAvbPartitionArg(name, path, info_dict)
    if not partition_arg:
      continue
    arg_name, arg_value = partition_arg
    assert arg_name in grouped
    grouped[arg_name].append(arg_value)

  # Non-chained partitions are passed through without intervention.
  avb_args = []
  for image in grouped[AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG]:
    avb_args += [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]

  # Chained partitions may need their rollback index location adjusted when
  # two of them claim the same value, e.g. when mixing a shared system image
  # with other vendor images.
  taken_locations = set()
  for chained in grouped[AVB_ARG_NAME_CHAIN_PARTITION]:
    if resolve_rollback_index_location_conflict:
      while chained.rollback_index_location in taken_locations:
        chained.rollback_index_location += 1
    taken_locations.add(chained.rollback_index_location)
    avb_args += [AVB_ARG_NAME_CHAIN_PARTITION, chained.to_string()]

  return avb_args
1590
1591
def GetAvbChainedPartitionArg(partition, info_dict, key=None):
  """Constructs and returns the arg to build or verify a chained partition.

  Args:
    partition: The partition name.
    info_dict: The info dict to look up the key info and rollback index
        location.
    key: The key to be used for building or verifying the partition. Defaults to
        the key listed in info_dict.

  Returns:
    An AvbChainedPartitionArg object with rollback_index_location and
    pubkey_path that can be used to build or verify vbmeta image.
  """
  if key is None:
    key = info_dict["avb_" + partition + "_key_path"]
  resolved_key = ResolveAVBSigningPathArgs(key)
  # AVB chains on the public key; extract it from the signing key.
  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], resolved_key)
  rollback_location = int(
      info_dict["avb_" + partition + "_rollback_index_location"])
  return AvbChainedPartitionArg(
      partition=partition,
      rollback_index_location=rollback_location,
      pubkey_path=pubkey_path)
1616
1617
def BuildVBMeta(image_path, partitions, name, needed_partitions,
                resolve_rollback_index_location_conflict=False):
  """Creates a VBMeta image.

  It generates the requested VBMeta image. The requested image could be for
  top-level or chained VBMeta image, which is determined based on the name.

  Args:
    image_path: The output path for the new VBMeta image.
    partitions: A dict that's keyed by partition names with image paths as
        values. Only valid partition names are accepted, as partitions listed
        in common.AVB_PARTITIONS and custom partitions listed in
        OPTIONS.info_dict.get("avb_custom_images_partition_list")
    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
    needed_partitions: Partitions whose descriptors should be included into the
        generated VBMeta image.
    resolve_rollback_index_location_conflict: If true, resolve conflicting avb
        rollback index locations by assigning the smallest unused value.

  Raises:
    AssertionError: On invalid input args.
  """
  avbtool = OPTIONS.info_dict["avb_avbtool"]
  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
  AppendAVBSigningArgs(cmd, name)

  # Custom images may contribute both plain partitions and chained vbmeta
  # partitions; the latter are named with a "vbmeta_" prefix.
  custom_partitions = OPTIONS.info_dict.get(
      "avb_custom_images_partition_list", "").strip().split()
  custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get(
      "avb_custom_vbmeta_images_partition_list", "").strip().split()]

  # Keep only the partitions the caller asked for, and insist that each one
  # is a known AVB partition whose image file actually exists.
  avb_partitions = {}
  for partition, path in sorted(partitions.items()):
    if partition not in needed_partitions:
      continue
    assert (partition in AVB_PARTITIONS or
            partition in AVB_VBMETA_PARTITIONS or
            partition in custom_avb_partitions or
            partition in custom_partitions), \
        'Unknown partition: {}'.format(partition)
    assert os.path.exists(path), \
        'Failed to find {} for {}'.format(path, partition)
    avb_partitions[partition] = path
  cmd.extend(GetAvbPartitionsArg(avb_partitions,
                                 resolve_rollback_index_location_conflict))

  # Additional avbtool args configured per vbmeta partition, e.g.
  # "avb_vbmeta_system_args".
  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
  if args and args.strip():
    split_args = shlex.split(args)
    for index, arg in enumerate(split_args[:-1]):
      # Check that the image file exists. Some images might be defined
      # as a path relative to source tree, which may not be available at the
      # same location when running this script (we have the input target_files
      # zip only). For such cases, we additionally scan other locations (e.g.
      # IMAGES/, RADIO/, etc) before bailing out.
      if arg == AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG:
        chained_image = split_args[index + 1]
        if os.path.exists(chained_image):
          continue
        found = False
        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
          alt_path = os.path.join(
              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
          if os.path.exists(alt_path):
            split_args[index + 1] = alt_path
            found = True
            break
        assert found, 'Failed to find {}'.format(chained_image)

    split_args = ResolveAVBSigningPathArgs(split_args)
    cmd.extend(split_args)

  RunAndCheckOutput(cmd)
1691
1692
def _MakeRamdisk(sourcedir, fs_config_file=None,
                 dev_node_file=None,
                 ramdisk_format=RamdiskFormat.GZ):
  """Packs 'sourcedir'/RAMDISK into a compressed ramdisk image.

  Args:
    sourcedir: Directory containing a RAMDISK/ subdirectory to pack.
    fs_config_file: Optional fs_config file, passed to mkbootfs via -f.
    dev_node_file: Optional device node list, passed to mkbootfs via -n.
    ramdisk_format: One of RamdiskFormat.GZ or RamdiskFormat.LZ4.

  Returns:
    A NamedTemporaryFile holding the compressed ramdisk image.

  Raises:
    ValueError: If ramdisk_format is not a supported format.
  """
  # Pick the compressor command up front. The original code spawned mkbootfs
  # first and only then rejected an unsupported format, which left the
  # mkbootfs child process running (and its pipe open) on the error path.
  if ramdisk_format == RamdiskFormat.LZ4:
    compressor = ["lz4", "-l", "-12", "--favor-decSpeed"]
  elif ramdisk_format == RamdiskFormat.GZ:
    compressor = ["gzip"]
  else:
    raise ValueError("Only support lz4 or gzip ramdisk format.")

  ramdisk_img = tempfile.NamedTemporaryFile()

  cmd = ["mkbootfs"]
  if fs_config_file and os.access(fs_config_file, os.F_OK):
    cmd.extend(["-f", fs_config_file])
  if dev_node_file and os.access(dev_node_file, os.F_OK):
    cmd.extend(["-n", dev_node_file])
  cmd.append(os.path.join(sourcedir, "RAMDISK"))

  # mkbootfs | <compressor> > ramdisk_img
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(compressor, stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)

  return ramdisk_img
1723
1724
def _BuildBootableImage(image_name, sourcedir, fs_config_file,
                        dev_node_file=None, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Args:
    image_name: Output image name, e.g. 'boot.img'; also used to derive the
        kernel file name and the partition size key in info_dict.
    sourcedir: Directory holding the inputs (kernel, RAMDISK/, cmdline, base,
        pagesize, dtb, second, ...).
    fs_config_file: Optional fs_config file forwarded to _MakeRamdisk.
    dev_node_file: Optional device node list forwarded to _MakeRamdisk.
    info_dict: The build info dict; defaults to OPTIONS.info_dict.
    has_ramdisk: Whether to build and embed a ramdisk from sourcedir/RAMDISK.
    two_step_image: Whether this is a two-step special image.
        NOTE(review): this parameter is not referenced anywhere in this
        function body — confirm whether it is still needed.

  Return the image data, or None if sourcedir does not appear to contains files
  for building the requested image.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # "boot" or "recovery", without extension.
  partition_name = os.path.basename(sourcedir).lower()

  # Determine which kernel file (if any) goes into the image. init_boot
  # images never carry a kernel; recovery may exclude it via board config.
  kernel = None
  if partition_name == "recovery":
    if info_dict.get("exclude_kernel_from_recovery_image") == "true":
      logger.info("Excluded kernel binary from recovery image.")
    else:
      kernel = "kernel"
  elif partition_name == "init_boot":
    pass
  else:
    # e.g. 'boot-5.10.img' -> kernel file 'kernel-5.10'.
    kernel = image_name.replace("boot", "kernel")
    kernel = kernel.replace(".img", "")
  if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
    return None

  kernel_path = os.path.join(sourcedir, kernel) if kernel else None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_format = GetRamdiskFormat(info_dict)
    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, dev_node_file,
                               ramdisk_format=ramdisk_format)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg]
  if kernel_path is not None:
    cmd.extend(["--kernel", kernel_path])

  # Each optional input file in sourcedir, when present, maps to one
  # mkbootimg flag.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "dtb")
  if os.access(fn, os.F_OK):
    cmd.append("--dtb")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  # Extra mkbootimg args are partition-specific; recovery falls back to the
  # generic "mkbootimg_args" when its own key is absent.
  if partition_name == "recovery":
    args = info_dict.get("recovery_mkbootimg_args")
    if not args:
      # Fall back to "mkbootimg_args" for recovery image
      # in case "recovery_mkbootimg_args" is not set.
      args = info_dict.get("mkbootimg_args")
  elif partition_name == "init_boot":
    args = info_dict.get("mkbootimg_init_args")
  else:
    args = info_dict.get("mkbootimg_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  cmd.extend(["--output", img.name])

  if partition_name == "recovery":
    if info_dict.get("include_recovery_dtbo") == "true":
      fn = os.path.join(sourcedir, "recovery_dtbo")
      cmd.extend(["--recovery_dtbo", fn])
    if info_dict.get("include_recovery_acpio") == "true":
      fn = os.path.join(sourcedir, "recovery_acpio")
      cmd.extend(["--recovery_acpio", fn])

  RunAndCheckOutput(cmd)

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    if partition_name == "recovery":
      part_size = info_dict["recovery_size"]
    else:
      # e.g. 'boot.img' -> info_dict key 'boot_size'.
      part_size = info_dict[image_name.replace(".img", "_size")]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    # Salt the footer with the sha256 of the kernel, when present, for
    # reproducible builds.
    salt = None
    if kernel_path is not None:
      with open(kernel_path, "rb") as fp:
        salt = sha256(fp.read()).hexdigest()
    AppendAVBSigningArgs(cmd, partition_name, salt)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
      cmd.extend(split_args)
    RunAndCheckOutput(cmd)

  # NOTE(review): arguments look swapped versus file.seek(offset, whence),
  # but this is harmless since os.SEEK_SET == 0, so it rewinds to offset 0
  # either way.
  img.seek(os.SEEK_SET, 0)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
1864
1865
def _SignBootableImage(image_path, prebuilt_name, partition_name,
                       info_dict=None):
  """Performs AVB signing for a prebuilt boot.img.

  Args:
    image_path: The full path of the image, e.g., /path/to/boot.img.
    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
        boot-5.10.img, recovery.img or init_boot.img.
    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
    info_dict: The information dict read from misc_info.txt.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    if partition_name == "recovery":
      part_size = info_dict["recovery_size"]
    else:
      # e.g. 'boot-5.10.img' -> info_dict key 'boot-5.10_size'.
      part_size = info_dict[prebuilt_name.replace(".img", "_size")]

    cmd = [avbtool, "add_hash_footer", "--image", image_path,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    # Use sha256 of the kernel (or the first available ramdisk payload) as
    # salt for reproducible builds. Initialize salt to None so that signing
    # still proceeds without an explicit salt when the unpacked image
    # contains none of the candidate files; previously 'salt' was left
    # unbound in that case and AppendAVBSigningArgs raised a NameError.
    salt = None
    with tempfile.TemporaryDirectory() as tmpdir:
      RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir])
      for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]:
        path = os.path.join(tmpdir, filename)
        if os.path.exists(path) and os.path.getsize(path):
          print("Using {} as salt for avb footer of {}".format(
              filename, partition_name))
          with open(path, "rb") as fp:
            salt = sha256(fp.read()).hexdigest()
          break
    AppendAVBSigningArgs(cmd, partition_name, salt)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
      cmd.extend(split_args)
    RunAndCheckOutput(cmd)
1908
1909
def HasRamdisk(partition_name, info_dict=None):
  """Returns true/false to see if a bootable image should have a ramdisk.

  Args:
    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
    info_dict: The information dict read from misc_info.txt.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Anything other than boot (init_boot.img, recovery.img) always carries a
  # ramdisk.
  if partition_name != "boot":
    return True

  # A recovery-as-boot boot.img embeds the RECOVERY ramdisk.
  if info_dict.get("recovery_as_boot") == "true":
    return True

  # A GKI boot.img ships without a ramdisk since Android-13, and when an
  # init_boot.img exists the ramdisk lives there instead of in boot.img or
  # boot-<kernel version>.img.
  ramdisk_elsewhere = (
      info_dict.get("gki_boot_image_without_ramdisk") == "true" or
      info_dict.get("init_boot") == "true")
  return not ramdisk_elsewhere
1935
1936
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False,
                     dev_nodes=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # A ready-made image under BOOTABLE_IMAGES/ wins outright.
  candidate = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(candidate):
    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, candidate)

  # Next preference: a prebuilt under IMAGES/.
  candidate = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(candidate):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, candidate)

  partition_name = tree_subdir.lower()

  # A prebuilt under PREBUILT_IMAGES/ must be (re-)signed before use.
  candidate = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
  if os.path.exists(candidate):
    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
    signed_img = MakeTempFile()
    shutil.copy(candidate, signed_img)
    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
    return File.FromLocalFile(name, signed_img)

  # No prebuilt found anywhere; build the image from source files.
  logger.info("building image from target_files %s...", tree_subdir)

  fs_config = "META/" + partition_name + "_filesystem_config.txt"
  node_list = (os.path.join(unpack_dir, 'META/ramdisk_node_list')
               if dev_nodes else None)
  data = _BuildBootableImage(prebuilt_name,
                             os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             node_list,
                             info_dict,
                             HasRamdisk(partition_name, info_dict),
                             two_step_image)
  return File(name, data) if data else None
1981
1982
def _BuildVendorBootImage(sourcedir, fs_config_file, partition_name, info_dict=None):
  """Build a vendor boot image from the specified sourcedir.

  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
  turn them into a vendor boot image.

  Args:
    sourcedir: Directory holding the inputs (RAMDISK/, dtb, vendor_cmdline,
        base, pagesize, vendor_bootconfig, RAMDISK_FRAGMENTS/, ...).
    fs_config_file: Optional fs_config file forwarded to _MakeRamdisk.
    partition_name: 'vendor_boot' or 'vendor_kernel_boot'; selects the
        partition size / AVB footer keys in info_dict.
    info_dict: The build info dict; defaults to OPTIONS.info_dict.

  Return the image data, or None if sourcedir does not appear to contains files
  for building the requested image.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  ramdisk_format = GetRamdiskFormat(info_dict)
  ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file=fs_config_file, ramdisk_format=ramdisk_format)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg]

  fn = os.path.join(sourcedir, "dtb")
  if os.access(fn, os.F_OK):
    has_vendor_kernel_boot = (info_dict.get(
        "vendor_kernel_boot", "").lower() == "true")

    # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot.
    # Otherwise pack dtb into vendor_boot.
    if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot":
      cmd.append("--dtb")
      cmd.append(fn)

  # Each optional input file in sourcedir, when present, maps to one
  # mkbootimg flag.
  fn = os.path.join(sourcedir, "vendor_cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--vendor_cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
  cmd.extend(["--vendor_boot", img.name])

  fn = os.path.join(sourcedir, "vendor_bootconfig")
  if os.access(fn, os.F_OK):
    cmd.append("--vendor_bootconfig")
    cmd.append(fn)

  # Additional ramdisk fragments: each named fragment contributes its own
  # mkbootimg args plus either a prebuilt ramdisk or one built on the fly.
  ramdisk_fragment_imgs = []
  fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
  if os.access(fn, os.F_OK):
    ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
    for ramdisk_fragment in ramdisk_fragments:
      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
                        ramdisk_fragment, "mkbootimg_args")
      cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
                        ramdisk_fragment, "prebuilt_ramdisk")
      # Use prebuilt image if found, else create ramdisk from supplied files.
      if os.access(fn, os.F_OK):
        ramdisk_fragment_pathname = fn
      else:
        ramdisk_fragment_root = os.path.join(
            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
                                            ramdisk_format=ramdisk_format)
        # Keep the temp file object alive until the image is assembled.
        ramdisk_fragment_imgs.append(ramdisk_fragment_img)
        ramdisk_fragment_pathname = ramdisk_fragment_img.name
      cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])

  RunAndCheckOutput(cmd)

  # AVB: if enabled, calculate and add hash.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    part_size = info_dict[f'{partition_name}_size']
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name", partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args')
    if args and args.strip():
      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
      cmd.extend(split_args)
    RunAndCheckOutput(cmd)

  # NOTE(review): arguments look swapped versus file.seek(offset, whence),
  # but this is harmless since os.SEEK_SET == 0 — it rewinds to offset 0.
  img.seek(os.SEEK_SET, 0)
  data = img.read()

  for f in ramdisk_fragment_imgs:
    f.close()
  ramdisk_img.close()
  img.close()

  return data
2094
2095
def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                       info_dict=None):
  """Return a File object with the desired vendor boot image.

  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  # Prefer a ready-made image shipped under IMAGES/.
  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  fs_config_path = os.path.join(
      unpack_dir, "META/" + tree_subdir.lower() + "_filesystem_config.txt")
  data = _BuildVendorBootImage(
      os.path.join(unpack_dir, tree_subdir), fs_config_path,
      "vendor_boot", info_dict)
  return File(name, data) if data else None
2119
2120
def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                             info_dict=None):
  """Return a File object with the desired vendor kernel boot image.

  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  # Prefer a ready-made image shipped under IMAGES/.
  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # vendor_kernel_boot carries no fs_config file.
  data = _BuildVendorBootImage(
      os.path.join(unpack_dir, tree_subdir), None, "vendor_kernel_boot",
      info_dict)
  return File(name, data) if data else None
2143
2144
def Gunzip(in_filename, out_filename):
  """Gunzips the given gzip compressed file to a given output file."""
  with gzip.open(in_filename, "rb") as src:
    with open(out_filename, "wb") as dst:
      shutil.copyfileobj(src, dst)
2150
2151
def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str):
  """Extracts one zip entry, restoring unix permissions and symlinks.

  Per https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838
  the high 16 bits of |external_attr| hold the unix file type and permission
  bits. python3.11's zipfile implementation doesn't handle symlinks
  correctly, so symlink entries are recreated by hand here.
  """
  mode = info.external_attr >> 16
  perm_bits = mode & 0o777

  def _has_all_bits(value, mask):
    return (value & mask) == mask

  if not _has_all_bits(mode, stat.S_IFLNK):
    # Regular file or directory: let zipfile extract it, then make sure it
    # is at least read/writable by owner and readable by all users.
    extracted = input_zip.extract(info, dirname)
    baseline = 0o755 if _has_all_bits(mode, stat.S_IFDIR) else 0o644
    os.chmod(extracted, perm_bits | baseline)
    return extracted

  # Symlink: the entry's payload is the link target.
  if dirname is None:
    dirname = os.getcwd()
  link_path = os.path.join(dirname, info.filename)
  os.makedirs(os.path.dirname(link_path), exist_ok=True)
  if os.path.exists(link_path):
    os.unlink(link_path)
  os.symlink(input_zip.read(info).decode(), link_path)
  return link_path
2183
2184
def UnzipToDir(filename, dirname, patterns=None):
  """Unzips the archive to the given directory.

  Args:
    filename: The name of the zip file to unzip.
    dirname: Where the unziped files will land.
    patterns: Files to unzip from the archive. If omitted, will unzip the entire
        archvie. Non-matching patterns will be filtered out. If there's no match
        after the filtering, no file will be unzipped.
  """
  with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip:
    entries = input_zip.infolist()
    # b/283033491
    # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header
    # a zip64 central directory record stores 0xFFFFFFFF in header_offset
    # when the real offset exceeds 2^32; the actual value then lives in the
    # zip64 extra field as an 8-byte little-endian integer. As of python3.11
    # zipfile mis-parses this, so it is fixed up manually here.
    #
    # The zip64 extra field starts with a 2-byte header ID and a 2-byte
    # size, followed by up to three 8-byte fields and an optional trailing
    # 4-byte disk-number field. Fields that appear must follow the order in
    # the zip64 spec, and header offset is the second-to-last one. So when
    # the disk number is present the extra field's total length (2+2+8*n+4)
    # is a multiple of 8 and the offset sits in bytes [-12:-4]; otherwise it
    # occupies the last 8 bytes.
    for entry in entries:
      if entry.header_offset == 0xFFFFFFFF:
        extra = entry.extra
        if len(extra) % 8 == 0:
          entry.header_offset = int.from_bytes(extra[-12:-4], "little")
        else:
          entry.header_offset = int.from_bytes(extra[-8:], "little")

    if patterns is None:
      to_extract = entries
    else:
      # Filter out non-matching patterns. unzip will complain otherwise.
      to_extract = [
          info for info in entries
          if any(fnmatch.fnmatch(info.filename, p) for p in patterns)
      ]
      # There isn't any matching files. Don't unzip anything.
      if not to_extract:
        return

    for info in to_extract:
      UnzipSingleFile(input_zip, info, dirname)
2235
2236
def UnzipTemp(filename, patterns=None):
  """Unzips the given archive into a temporary directory and returns the name.

  Args:
    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
    a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

    patterns: Files to unzip from the archive. If omitted, will unzip the entire
    archvie.

  Returns:
    The name of the temporary directory.
  """
  tmp = MakeTempDir(prefix="targetfiles-")
  pair = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if pair:
    # "foo.zip+bar.zip": the first archive lands in the temp dir, the second
    # under BOOTABLE_IMAGES/.
    UnzipToDir(pair.group(1), tmp, patterns)
    UnzipToDir(pair.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns)
  else:
    UnzipToDir(filename, tmp, patterns)
  return tmp
2261
2262
def GetUserImage(which, tmpdir, input_zip,
                 info_dict=None,
                 allow_shared_blocks=None,
                 reset_file_map=False):
  """Returns an Image object suitable for passing to BlockImageDiff.

  This function loads the specified image from the given path. If the specified
  image is sparse, it also performs additional processing for OTA purpose. For
  example, it always adds block 0 to clobbered blocks list. It also detects
  files that cannot be reconstructed from the block list, for whom we should
  avoid applying imgdiff.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    info_dict: The dict to be looked up for relevant info.
    allow_shared_blocks: If image is sparse, whether having shared blocks is
        allowed. If none, it is looked up from info_dict.
    reset_file_map: If true and image is sparse, reset file map before returning
        the image.
  Returns:
    A Image object. If it is a sparse image and reset_file_map is False, the
    image will have file_map info loaded.
  """
  if info_dict is None:
    info_dict = LoadInfoDict(input_zip)

  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
  # shared blocks (i.e. some blocks will show up in multiple files' block
  # list). We can only allocate such shared blocks to the first "owner", and
  # disable imgdiff for all later occurrences.
  if allow_shared_blocks is None:
    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"

  # Non-sparse images need no extra OTA processing.
  if not IsSparseImage(os.path.join(tmpdir, "IMAGES", which + ".img")):
    return GetNonSparseImage(which, tmpdir)

  img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks)
  if reset_file_map:
    img.ResetFileMap()
  return img
2306
2307
def GetNonSparseImage(which, tmpdir):
  """Returns a Image object suitable for passing to BlockImageDiff.

  This function loads the specified non-sparse image from the given path.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.
  Returns:
    A Image object.
  """
  image_dir = os.path.join(tmpdir, "IMAGES")
  path = os.path.join(image_dir, which + ".img")
  mappath = os.path.join(image_dir, which + ".map")

  # Both the image and its block map must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  return images.FileImage(path)
2327
2328
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
  """Returns a SparseImage object suitable for passing to BlockImageDiff.

  This function loads the specified sparse image from the given path, and
  performs additional processing for OTA purpose. For example, it always adds
  block 0 to clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for whom we should avoid applying imgdiff.

  Args:
    which: The partition name, e.g. "system", "vendor".
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    allow_shared_blocks: Whether having shared blocks is allowed.
  Returns:
    A SparseImage object, with file_map info loaded.
  """
  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add
  # it to clobbered_blocks so that it will be written to the target
  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
  clobbered_blocks = "0"

  image = sparse_img.SparseImage(
      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks)

  # block.map may contain less blocks, because mke2fs may skip allocating blocks
  # if they contain all zeros. We can't reconstruct such a file from its block
  # list. Tag such entries accordingly. (Bug: 65213616)
  for entry in image.file_map:
    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
    if not entry.startswith('/'):
      continue

    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
    # filename listed in system.map may contain an additional leading slash
    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
    # results.
    # And handle another special case, where files not under /system
    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
    arcname = entry.lstrip('/')
    if which == 'system' and not arcname.startswith('system'):
      arcname = 'ROOT/' + arcname
    else:
      # Only the first occurrence of the partition name is uppercased, e.g.
      # "vendor/lib/libfoo.so" => "VENDOR/lib/libfoo.so".
      arcname = arcname.replace(which, which.upper(), 1)

    assert arcname in input_zip.namelist(), \
        "Failed to find the ZIP entry for {}".format(entry)

    info = input_zip.getinfo(arcname)
    ranges = image.file_map[entry]

    # If a RangeSet has been tagged as using shared blocks while loading the
    # image, check the original block list to determine its completeness. Note
    # that the 'incomplete' flag would be tagged to the original RangeSet only.
    if ranges.extra.get('uses_shared_blocks'):
      ranges = ranges.extra['uses_shared_blocks']

    # A file whose 4K-rounded size exceeds its allocated blocks cannot be
    # reconstructed from the block list alone.
    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
      ranges.extra['incomplete'] = True

  return image
2396
2397
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Args:
    keylist: A list of key names; entries may be None or one of
        SPECIAL_CERT_STRINGS, which never need a password.

  Returns:
    A {key: password} dict covering every entry in keylist.
  """
  no_passwords = []
  need_passwords = []
  key_passwords = {}

  # sorted() can't compare strings to None, so convert Nones to strings.
  for k in sorted(keylist, key=lambda x: x if x is not None else ""):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS or k is None:
      no_passwords.append(k)
      continue

    # Probe the key with openssl to classify it. Use subprocess.DEVNULL
    # instead of manually opening /dev/null: portable, and no file handle
    # to close (the original leaked semantics were hidden behind fileno()).
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Retry with an empty password to distinguish "encrypted" from
      # "unparseable".
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=subprocess.DEVNULL,
              stdout=subprocess.DEVNULL,
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords))
  return key_passwords
2447
2448
def GetMinSdkVersion(apk_name):
  """Gets the minSdkVersion declared in the APK.

  Runs OPTIONS.aapt2_path to dump the badging of the given APK and parses the
  embedded minSdkVersion from the output. The value may be a decimal number
  (API Level) or a codename.

  Args:
    apk_name: The APK filename.

  Returns:
    The parsed SDK version string (or 21 for split APKs, which carry no SDK
    version attribute of their own).

  Raises:
    ExternalError: On failing to obtain the min SDK version.
  """
  proc = Run(
      [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  stdoutdata, stderrdata = proc.communicate()
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format(
            apk_name, proc.returncode, stdoutdata, stderrdata))

  is_split_apk = False
  for badging_line in stdoutdata.split("\n"):
    # b/353837347: split APKs do not define an SDK version at all; remember
    # that we saw one so we can fall back to a sensible default below.
    if re.search(r"split=[\"'].*[\"']", badging_line):
      is_split_apk = True
    # ag/24161708: the attribute may surface as minSdkVersion:'23' or as
    # sdkVersion:'23' (and the value may be a codename such as 'M').
    version_match = re.match(
        r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'', badging_line)
    if version_match:
      return version_match.group(1)

  if is_split_apk:
    logger.info("%s is a split APK, it does not have minimum SDK version"
                " defined. Defaulting to 21 because split APK isn't supported"
                " before that.", apk_name)
    return 21
  raise ExternalError("No minSdkVersion returned by aapt2 for apk: {}".format(apk_name))
2491
2492
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Returns the minSdkVersion declared in the APK as a number (API Level).

  When minSdkVersion is a codename, it is translated to a number using the
  provided map.

  Args:
    apk_name: The APK filename.
    codename_to_api_level_map: Maps codenames (e.g. "UpsideDownCake") to
        numeric API levels.

  Returns:
    The parsed SDK version number.

  Raises:
    ExternalError: On failing to get the min SDK version number.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number. The value is either a plain codename
    # ("UpsideDownCake") or a codename plus an API fingerprint SHA
    # ("UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e"); strip any
    # suffix and translate the codename through the map.
    codename = version.split(".")[0]
    if codename in codename_to_api_level_map:
      return codename_to_api_level_map[codename]
    raise ExternalError(
        "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format(
            codename, version, codename_to_api_level_map))
2528
2529
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=None, whole_file=False,
             extra_signapk_args=None):
  """Signs the input_name zip/jar/apk with SignApk, producing output_name.

  Uses the given key and password (the latter may be None if the key has no
  password).

  If whole_file is true, passes "-w" to SignApk to embed a signature that
  covers the whole file in the archive comment of the zip file.

  min_api_level is the API Level (int) of the oldest platform this file may
  end up on. If not specified for an APK, the API Level is obtained by
  interpreting the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map translates a codename that may appear as the
  APK's minSdkVersion.

  Caller may optionally specify extra args to be passed to SignApk, which
  defaults to OPTIONS.extra_signapk_args if omitted.

  Raises:
    ExternalError: If SignApk exits with a non-zero status.
  """
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}
  if extra_signapk_args is None:
    extra_signapk_args = OPTIONS.extra_signapk_args

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  # Assemble the SignApk invocation piece by piece.
  cmd = [OPTIONS.java_path]
  cmd.extend(OPTIONS.java_args)
  cmd.append("-Djava.library.path=" + java_library_path)
  cmd.extend(["-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)])
  cmd.extend(extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Resolve the min SDK version: explicit argument wins; otherwise query the
  # APK itself (not meaningful for whole-file signing).
  min_sdk_version = min_api_level
  if min_sdk_version is None and not whole_file:
    min_sdk_version = GetMinSdkVersionInt(input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The password (if any) is fed to SignApk over stdin, newline-terminated.
  proc = Run(cmd, stdin=subprocess.PIPE)
  stdoutdata, _ = proc.communicate(
      password + "\n" if password is not None else None)
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run {}: return code {}:\n{}".format(cmd,
                                                       proc.returncode, stdoutdata))
2586
2587
def CheckSize(data, target, info_dict):
  """Checks the data string passed against the max size limit.

  For non-AVB images, raises an exception if the data is too big, and logs a
  warning if the data is nearing the maximum size.

  For AVB images, the actual image size must be identical to the limit.

  Args:
    data: A string that contains all the data for the partition.
    target: The partition name. The ".img" suffix is optional.
    info_dict: The dict to be looked up for relevant info.

  Raises:
    ExternalError: If the size constraint for the partition is violated.
  """
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    # The userdata partition is mounted at /data.
    if mount_point == "/userdata":
      mount_point = "/data"
    fstab_entry = info_dict["fstab"][mount_point]
    fs_type = fstab_entry.fs_type
    device = fstab_entry.device
    # Strip any path prefix to get the bare device name.
    if "/" in device:
      device = device[device.rfind("/") + 1:]
    limit = info_dict.get(device + "_size", 0)
    # Size may be recorded as a string (possibly hex/octal); base 0 honors
    # the prefix.
    if isinstance(limit, str):
      limit = int(limit, 0)

  # Without a filesystem type or a size limit there is nothing to check.
  if not fs_type or not limit:
    return

  size = len(data)
  # target could be 'userdata' or 'cache'. They should follow the non-AVB
  # image path.
  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
    if size != limit:
      raise ExternalError(
          "Mismatching image size for %s: expected %d actual %d" % (
              target, limit, size))
    return

  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  if pct >= 95.0:
    logger.warning("\n  WARNING: %s\n", msg)
  else:
    logger.info("  %s", msg)
2639
2640
def ReadApkCerts(tf_zip):
  """Parses the APK certs info from a given target-files zip.

  Reads the META/apkcerts.txt entry of the given target-files ZipFile and
  returns (1) a dictionary mapping packages to certs, based on the
  "certificate" and "private_key" attributes in the file, and (2) the
  extension of compressed APKs in the target files (e.g. ".gz", ".bro").

  Args:
    tf_zip: The input target_files ZipFile (already open).

  Returns:
    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
        the extension string of compressed APKs (e.g. ".gz"), or None if there's
        no compressed APKs.

  Raises:
    ValueError: On a malformed cert/key pair, or on conflicting compressed
        extensions.
  """
  certmap = {}
  compressed_extension = None

  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Only the basenames actually present in the zip count as installed.
  installed_files = {
      os.path.basename(name)
      for name in tf_zip.namelist() if os.path.basename(name)}

  line_pattern = re.compile(
      r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
      r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
      r'(\s+partition="(?P<PARTITION>.*?)")?$')

  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
    line = line.strip()
    if not line:
      continue
    m = line_pattern.match(line)
    if not m:
      continue

    name = m.group("NAME")
    cert = m.group("CERT")
    privkey = m.group("PRIVKEY")
    this_compressed_extension = m.group("COMPRESSED")

    pub_suffix_len = len(OPTIONS.public_key_suffix)
    priv_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_suffix_len] == privkey[:-priv_suffix_len]):
      # Cert and key must share the same stem once their suffixes are removed.
      certmap[name] = cert[:-pub_suffix_len]
    else:
      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)

    if not this_compressed_extension:
      continue

    # Only count the installed files.
    if name + '.' + this_compressed_extension not in installed_files:
      continue

    # All compressed APKs in one system image must share a single extension.
    if compressed_extension is None:
      compressed_extension = this_compressed_extension
    elif this_compressed_extension != compressed_extension:
      raise ValueError(
          "Multiple compressed extensions: {} vs {}".format(
              compressed_extension, this_compressed_extension))

  return (certmap,
          ("." + compressed_extension) if compressed_extension else None)
2719
2720
# Help text for the global options shared by every releasetools script;
# Usage() prints it right after the calling script's own docstring. This is
# runtime output text, so the string content must stay unchanged.
COMMON_DOCSTRING = """
Global options

  -p  (--path) <dir>
      Prepend <dir>/bin to the list of places to search for binaries run by this
      script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the Python module containing device-specific releasetools code.

  -x  (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific extension
      code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.

  --logfile <file>
      Put verbose logs to specified file (regardless of --verbose option.)
"""
2744
2745
def Usage(docstring):
  """Prints the given module docstring followed by the shared global-options
  help text."""
  for text in (docstring.rstrip("\n"), COMMON_DOCSTRING):
    print(text)
2749
2750
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler: Iterable[OptionHandler] = None):
  """Parses the options in argv and returns any arguments that aren't flags.

  docstring is the calling module's docstring, to be displayed for errors
  and -h. extra_opts and extra_long_opts are for flags defined by the caller,
  which are processed by passing them to extra_option_handler.

  Args:
    argv: The command-line arguments to parse (excluding the program name).
    docstring: The calling module's docstring.
    extra_opts: Extra short options accepted by the caller (getopt format).
    extra_long_opts: Extra long options accepted by the caller.
    extra_option_handler: A single callable/OptionHandler, an iterable of
        them, or None. Each handler is offered any option not recognized
        here, and returns a truthy value if it consumed the option.

  Returns:
    The list of non-flag arguments.

  Raises:
    ValueError: If an option is not consumed by any handler.
  """
  extra_long_opts = list(extra_long_opts)

  # Normalize extra_option_handler into a (possibly empty) list. The previous
  # code wrapped None into [None] (None fails the isinstance check below),
  # which made the "unknown option" path call None(o, a) and crash with a
  # TypeError instead of raising the intended ValueError.
  if extra_option_handler is None:
    extra_option_handler = []
  elif not isinstance(extra_option_handler, Iterable):
    extra_option_handler = [extra_option_handler]

  # OptionHandler instances declare which long options they accept.
  for handler in extra_option_handler:
    if isinstance(handler, OptionHandler):
      extra_long_opts.extend(handler.extra_long_opts)

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra=", "logfile="] + list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--aapt2_path",):
      OPTIONS.aapt2_path = a
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--android_jar_path",):
      OPTIONS.android_jar_path = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      raise ValueError(
          "--boot_signer_path is no longer supported, please switch to AVB")
    elif o in ("--boot_signer_args",):
      raise ValueError(
          "--boot_signer_args is no longer supported, please switch to AVB")
    elif o in ("--verity_signer_path",):
      raise ValueError(
          "--verity_signer_path is no longer supported, please switch to AVB")
    elif o in ("--verity_signer_args",):
      raise ValueError(
          "--verity_signer_args is no longer supported, please switch to AVB")
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    elif o in ("--logfile",):
      OPTIONS.logfile = a
    else:
      # Offer the option to each extra handler in turn; with no handlers the
      # loop falls through and we raise ValueError below.
      success = False
      for handler in extra_option_handler:
        if isinstance(handler, OptionHandler):
          if handler.handler(o, a):
            success = True
            break
        elif handler(o, a):
          success = True
          break
      if not success:
        raise ValueError("unknown option \"%s\"" % (o,))

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
2848
2849
def MakeTempFile(prefix='tmp', suffix=''):
  """Makes a temp file, registers it for deletion by Cleanup(), and returns
  its filename."""
  handle, pathname = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp leaves the descriptor open; only the name is needed here.
  os.close(handle)
  OPTIONS.tempfiles.append(pathname)
  return pathname
2857
2858
def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir, registered for deletion by a later Cleanup() call.

  Returns:
    The absolute pathname of the new directory.
  """
  pathname = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(pathname)
  return pathname
2868
2869
def Cleanup():
  """Deletes every path registered via MakeTempFile/MakeTempDir, then clears
  the registry."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path, ignore_errors=True)
    elif os.path.exists(path):
      os.remove(path)
  del OPTIONS.tempfiles[:]
2879
2880
class PasswordManager(object):
  """Collects key passwords from the user.

  If $ANDROID_PW_FILE and $EDITOR are set, passwords are gathered by letting
  the user edit the password file; otherwise each one is prompted for
  interactively via getpass.
  """

  def __init__(self):
    # External editor command used to edit the password file, if any.
    self.editor = os.getenv("EDITOR")
    # Path of the password file; None disables the file-based workflow.
    self.pwfile = os.getenv("ANDROID_PW_FILE")

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so the missing keys show up in the editable file.
      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        if sys.version_info[0] >= 3:
          raw_input = input  # pylint: disable=redefined-builtin
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current):  # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        # Keep prompting until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Writes 'current' to the password file, opens it in $EDITOR, and
    returns the re-parsed contents. Falls back to interactive prompting
    when no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # The file holds secrets; restrict it to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort entries without a password first so the cursor lands on one.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])

    return self.ReadFile()

  def ReadFile(self):
    """Parses the password file into a {key: password} dict; returns an
    empty dict when the file is unset or missing."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like "[[[  password  ]]] keyname".
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          logger.warning("Failed to parse password file: %s", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      # A missing file is normal (no passwords recorded yet); anything else
      # deserves a log entry.
      if e.errno != errno.ENOENT:
        logger.exception("Error reading password file:")
    return result
2982
2983
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Adds a file on disk to zip_file with a fixed timestamp and permissions.

  Args:
    zip_file: An open zipfile.ZipFile to write into.
    filename: The on-disk file to add.
    arcname: The name to use inside the archive; defaults to filename.
    perms: Permission bits to record for the entry (default 0o644).
    compress_type: Compression method; defaults to the archive's setting.
  """
  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB, so raise the threshold and restore it when
  # done. (`zipfile.writestr()` cannot take this path for very large strings,
  # hence the use of `zipfile.write()` here.)
  old_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't accept a ZipInfo, so temporarily rewrite the
    # on-disk mode and mtime, then restore them afterwards.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable. fromtimestamp
    # without a timezone is intentional: zip stores naive local datetimes,
    # and we want 2009/01/01 in local time inside the archive.
    naive_epoch = datetime.datetime.fromtimestamp(0)
    fixed_mtime = (datetime.datetime(2009, 1, 1) - naive_epoch).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = old_limit
3025
3026
def ZipWriteStr(zip_file: zipfile.ZipFile, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: An open zipfile.ZipFile to write into.
    zinfo_or_arcname: A zipfile.ZipInfo, or an archive name string.
    data: The entry contents (str or bytes).
    perms: Permission bits for the entry; defaults to 0o100644 when an
        archive name (not a ZipInfo) is given.
    compress_type: If given, overrides the compression in the ZipInfo.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit even if writestr() raises; the original code skipped
  # the restore on error, leaving the module-global clobbered for later
  # callers (ZipWrite already guards its restore with finally).
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname
      # Python 2 and 3 behave differently when calling ZipFile.writestr() with
      # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value
      # for such a case (since
      # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
      # which seems to make more sense. Otherwise the entry will have 0o000 as
      # the permission bits. We follow the logic in Python 3 to get consistent
      # behavior between using the two versions.
      if not zinfo.external_attr:
        zinfo.external_attr = 0o600 << 16

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
3076
def ZipExclude(input_zip, output_zip, entries, force=False):
  """Copies input_zip to output_zip, excluding the given entries.

  Args:
    input_zip: The name of the input ZIP file.
    output_zip: The name of the output ZIP file; may be the same as input_zip.
    entries: The name of the entry, or the list of names, to be excluded.
    force: If False, raise when none of the entries exist in input_zip.

  Raises:
    ExternalError: If force is False and none of the entries match.
  """
  if isinstance(entries, str):
    entries = [entries]
  # If list is empty, nothing to exclude; just copy the archive over.
  if not entries:
    shutil.copy(input_zip, output_zip)
    return

  with zipfile.ZipFile(input_zip, 'r') as zin:
    if not force and len(set(zin.namelist()).intersection(entries)) == 0:
      raise ExternalError(
          "Failed to delete zip entries, name not matched: %s" % entries)

    # Write to a temp file in the same directory so os.replace() below stays
    # on one filesystem.
    fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(input_zip))
    os.close(fd)
    cmd = ["zip2zip", "-i", input_zip, "-o", new_zipfile]
    for entry in entries:
      cmd.extend(["-x", entry])
    try:
      RunAndCheckOutput(cmd)
    except Exception:
      # Don't leave the temporary output behind if zip2zip fails.
      os.remove(new_zipfile)
      raise
  os.replace(new_zipfile, output_zip)
3104
3105
def ZipDelete(zip_filename, entries, force=False):
  """Deletes entries from a ZIP file in place.

  Args:
    zip_filename: The name of the ZIP file.
    entries: The name of the entry, or the list of names, to be deleted.
    force: Passed through to ZipExclude.
  """
  if isinstance(entries, str):
    entries = [entries]
  # An empty list means there is nothing to delete.
  if not entries:
    return
  # Rewriting the archive onto itself performs the in-place delete.
  ZipExclude(zip_filename, zip_filename, entries, force)
3120
3121
def ZipClose(zip_file):
  """Closes zip_file with the ZIP64 limit workaround applied.

  http://b/18015246: zipfile also consults ZIP64_LIMIT during close() when it
  writes out the central directory, so the same workaround as in ZipWrite()
  is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit even if close() raises; the original code skipped the
  # restore on error, leaving the module-global clobbered for later callers.
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
3132
3133
class DeviceSpecificParams(object):
  """Loads optional device-specific releasetools code and dispatches to it.

  The module is located via OPTIONS.device_specific, which may point at a
  directory (optionally containing a releasetools/ subdir) or a .py file.
  Each hook method below forwards to the same-named function in that module
  when it exists; otherwise the hook is a no-op returning None.
  """
  # The loaded device-specific module, or None if none was loaded.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.items():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        # Resolve the configured path: a directory is narrowed to its
        # releasetools/__init__.py, and a bare module name to "<name>.py".
        if os.path.isdir(path):
          path = os.path.join(path, "releasetools")
          if os.path.isdir(path):
            path = os.path.join(path, "__init__.py")
        if not os.path.exists(path) and os.path.exists(path + ".py"):
          path = path + ".py"
        spec = importlib.util.spec_from_file_location("device_specific", path)
        if not spec:
          raise FileNotFoundError(path)
        logger.info("loaded device-specific extensions from %s", path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        self.module = module
      except (ImportError, FileNotFoundError):
        # Device-specific code is optional; proceed without it.
        logger.info("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default")
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_GetBlockDifferences(self):
    """Called during full OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partitions.
    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
    """Called during incremental OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partitions.
    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    # Forwards to the device-specific VerifyOTA_Assertions hook, if defined.
    return self._DoCall("VerifyOTA_Assertions")
3236
3237
class File(object):
  """An in-memory file destined for a zip archive.

  Attributes:
    name: The archive-relative name of the file.
    data: The raw contents (bytes).
    size: len(data).
    compress_size: The compressed size if supplied, else the plain size.
    sha1: Hex SHA-1 digest of data.
  """

  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Constructs an instance named `name` from the contents of `diskname`."""
    # Context manager guarantees the handle is closed even if read() raises;
    # `cls` (not a hard-coded File) keeps the alternate constructor usable
    # from subclasses.
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Writes data to a NamedTemporaryFile and returns it.

    The caller owns the returned file object and must close it (closing
    also deletes the underlying temp file).
    """
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    """Writes data to d/<self.name>, creating intermediate directories."""
    output_path = os.path.join(d, self.name)
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    """Adds this file to zip archive `z` via ZipWriteStr."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
3267
3268
# Maps a target file's extension to the diff program Difference.ComputePatch
# should invoke. A value may be a plain program name or an argv prefix list
# (extra flags); extensions not listed here fall back to "bsdiff".
DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}
3276
3277
class Difference(object):
  """Computes a binary patch that transforms source file `sf` into `tf`.

  The diff program defaults to the one selected by the target's file
  extension via DIFF_PROGRAM_BY_EXT (falling back to "bsdiff"), and can be
  overridden with the `diff_program` constructor argument.
  """

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None            # filled in by ComputePatch(); None on failure
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # Create the patch output file before entering the try block so the
    # finally clause can never reference an unbound `ptemp`.
    ptemp = tempfile.NamedTemporaryFile()
    try:
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      # communicate() runs on a helper thread so we can bound the diff
      # program's runtime with a timeout and escalate terminate -> kill.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
        self.patch = None
        return None, None, None
      # The diff program wrote to ptemp.name; the still-open handle is
      # positioned at 0, so read() returns the whole patch.
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch
3350
3351
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Spreads the work over OPTIONS.worker_threads threads that pull items off
  a shared iterator, and blocks until every patch has been computed.
  """
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  # Sort on an explicit key: the old (size, diff) tuple sort would compare
  # Difference objects themselves whenever two targets had equal sizes,
  # which raises TypeError on Python 3.
  by_size = sorted(diffs, key=lambda d: d.tf.size, reverse=True)

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock protects diff_iter (and serializes the log output); it is
      # deliberately dropped around the slow ComputePatch call and
      # re-acquired before the next iterator access.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          # NOTE(review): assumes tf.size > 0; a zero-byte target would
          # divide by zero here -- confirm callers never pass one.
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
3397
3398
class BlockDifference(object):
  """Block-based update for a single partition in an edify OTA package.

  Runs BlockImageDiff at construction time to produce the transfer list
  plus new/patch data files, then emits the edify script fragments that
  verify and apply the update on device.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    # Default to the highest transfer-list version both sides support.
    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      # Dynamic partitions are addressed via the updater's map_partition().
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    """Maximum number of stashed bytes needed while applying the update."""
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    """Emits the patching commands (and optional post-install check)."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emits pre-install verification for this partition's source blocks."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      # Pass verification if the source blocks match, or if the partition is
      # already fully updated (block_image_verify succeeds on the target).
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    """Emits a check that the partition matches the target after the update."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Packs the data files into the zip and emits block_image_update()."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quailty 9 almost triples the compression time but doesn't
    # further reduce the size too much. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      RunAndCheckOutput(brotli_cmd)

      # Already brotli-compressed, so store (not deflate) inside the zip.
      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    """Return the SHA-1 of the given ranges read from `source`."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # Must be bytes, not str: sha1().update() rejects str on Python 3.
    zero_block = b'\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
3670
3671
# Re-export these two image classes at module level so vendor-specific
# device scripts can keep referencing them from this module.
DataImage = images.DataImage
EmptyImage = images.EmptyImage
3675
3676
# map recovery.fstab's fs_types to mount/format "partition types";
# every listed filesystem lives on an eMMC-style block device.
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC",
    "erofs": "EMMC"
}
3685
3686
def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """
  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has slotselect option
  (unless check_no_slot is explicitly set to False).

  Args:
    mount_point: Mount point to look up in info["fstab"] (e.g. "/boot").
    info: Info dict that carries the parsed "fstab".
    check_no_slot: Assert that the entry has no slotselect option.

  Returns:
    A (partition_type, device) tuple.

  Raises:
    KeyError: If there is no fstab, or no entry for mount_point.
  """
  fstab = info["fstab"]
  if not fstab:
    # Include the mount point so the failure is diagnosable (the original
    # bare `raise KeyError` carried no context).
    raise KeyError(mount_point)
  entry = fstab[mount_point]
  if check_no_slot:
    assert not entry.slotselect, "Use GetTypeAndDeviceExpr instead"
  return (PARTITION_TYPES[entry.fs_type], entry.device)
3701
3702
def GetTypeAndDeviceExpr(mount_point, info):
  """
  Return the filesystem of the partition, and an edify expression that evaluates
  to the device at runtime.

  Args:
    mount_point: Mount point to look up in info["fstab"] (e.g. "/boot").
    info: Info dict that carries the parsed "fstab".

  Returns:
    A (partition_type, device_expr) tuple; device_expr is wrapped in
    add_slot_suffix() when the entry has the slotselect option.

  Raises:
    KeyError: If there is no fstab, or no entry for mount_point.
  """
  fstab = info["fstab"]
  if not fstab:
    # Include the mount point so the failure is diagnosable (the original
    # bare `raise KeyError` carried no context).
    raise KeyError(mount_point)
  p = fstab[mount_point]
  device_expr = '"%s"' % p.device
  if p.slotselect:
    device_expr = 'add_slot_suffix(%s)' % device_expr
  return (PARTITION_TYPES[p.fs_type], device_expr)
3716
3717
def GetEntryForDevice(fstab, device):
  """
  Returns:
    The first entry in fstab whose device is the given value.
  """
  if not fstab:
    return None
  return next(
      (fstab[mp] for mp in fstab if fstab[mp].device == device), None)
3729
3730
def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Returns:
    The decoded certificate bytes.
  """
  collected = []
  inside_cert = False
  for line in data.split("\n"):
    # Stop at the footer; only base64 lines strictly between the BEGIN and
    # END markers are collected (the markers themselves are excluded).
    if "--END CERTIFICATE--" in line:
      break
    if inside_cert:
      collected.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      inside_cert = True
  return base64.b64decode("".join(collected))
3750
3751
def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the given filename followed by '-out',
  # openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
  # stdout instead.
  proc = Run(['openssl', 'x509', '-pubkey', '-noout', '-in', cert],
             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderrdata = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
  return pubkey
3774
3775
def ExtractAvbPublicKey(avbtool, key):
  """Extracts the AVB public key from the given public or private key.

  Args:
    avbtool: The AVB tool to use.
    key: The input key file, which should be PEM-encoded public or private key.

  Returns:
    The path to the extracted AVB public key file.
  """
  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
  cmd = [avbtool, 'extract_public_key', "--key", key, "--output", output]
  RunAndCheckOutput(cmd)
  return output
3790
3791
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which is
  identical for the two, so the resulting patch should be efficient. Add it to
  the output zip, along with a shell script that is run from init.rc on first
  boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File objects for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  # The output paths depend on which partition the output_sink is rooted at.
  if board_uses_vendorimage:
    # In this case, the output sink is rooted at VENDOR
    recovery_img_path = "etc/recovery.img"
    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
    sh_dir = "bin"
  else:
    # In this case the output sink is rooted at SYSTEM
    recovery_img_path = "vendor/etc/recovery.img"
    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
    sh_dir = "vendor/bin"

  if full_recovery_image:
    # Ship the complete recovery image; no patch is needed.
    output_sink(recovery_img_path, recovery_img.data)

  else:
    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
    path = os.path.join(input_dir, recovery_resource_dat_path)
    # Use bsdiff to handle mismatching entries (Bug: 72731506)
    if include_recovery_dtbo or include_recovery_acpio:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        # Feed imgdiff the on-device resource file as "bonus" data so the
        # patch can reference it instead of carrying those bytes itself.
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
                                              check_no_slot=False)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
                                                      check_no_slot=False)
  except KeyError:
    # No boot/recovery entries in the fstab: nothing to install on-device.
    return

  if full_recovery_image:

    # Note that we use /vendor to refer to the recovery resources. This will
    # work for a separate vendor partition mounted at /vendor or a
    # /system/vendor subdirectory on the system partition, for which init will
    # create a symlink from /vendor to /system/vendor.

    sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
          --flash /vendor/etc/recovery.img \\
          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    # Patch-based install: applypatch rebuilds recovery from boot + patch.
    # The '$(getprop ro.boot.slot_suffix)' suffix resolves the active slot
    # at runtime on A/B-style devices.
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
          --patch /vendor/recovery-from-boot.p \\
          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
       'recovery_type': recovery_type,
       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
  sh_location = os.path.join(sh_dir, "install-recovery.sh")

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())
3911
3912
class DynamicPartitionUpdate(object):
  """Describes how a single dynamic partition changes across an update."""

  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    """Size in bytes of the source image, or 0 when there is none."""
    if self.block_difference:
      return DynamicPartitionUpdate._GetSparseImageSize(
          self.block_difference.src)
    return 0

  @property
  def tgt_size(self):
    """Size in bytes of the target image, or 0 when there is none."""
    if self.block_difference:
      return DynamicPartitionUpdate._GetSparseImageSize(
          self.block_difference.tgt)
    return 0

  @staticmethod
  def _GetSparseImageSize(img):
    # A missing image contributes zero bytes.
    return img.blocksize * img.total_blocks if img else 0
3938
3939
class DynamicGroupUpdate(object):
  """Tracks the size limit of one super-partition group on each side."""

  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist on that side. 0: no size limit.
    self.src_size, self.tgt_size = src_size, tgt_size
3945
3946
3947class DynamicPartitionsDifference(object):
  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    """Builds the per-partition and per-group update plan.

    Args:
      info_dict: Target build info dict (super partition groups/lists).
      block_diffs: List of BlockDifference objects, one per partition.
      progress_dict: Optional map of partition name -> progress fraction.
      source_info_dict: Source build info dict; None means a full OTA,
          in which case all existing dynamic partitions are removed first.
    """
    if progress_dict is None:
      progress_dict = {}

    # Full OTA (no source build): wipe the dynamic partition metadata
    # before applying the update.
    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    # Each partition must appear at most once in block_diffs; report the
    # duplicated names if not.
    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    # Record which group each partition belongs to on the target side ...
    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    # ... and on the source side.
    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    # Cross-check: the set of partitions with a target (resp. source) image
    # must exactly match the build's dynamic_partition_list.
    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    # Collect per-group size limits for both sides (0 when unspecified).
    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()
4032
  def WriteScript(self, script, output_zip, write_verify_script=False):
    """Writes the edify commands for updating dynamic partitions.

    Commands are emitted in a fixed order: partitions whose size shrinks
    (src_size > tgt_size) are patched first, then the dynamic partition
    metadata is updated via the packaged "dynamic_partitions_op_list", and
    only after that are the remaining (grown or same-size) partitions
    patched. Presumably the shrinking partitions must be patched while the
    source layout is still in effect — do not reorder these phases.

    Args:
      script: The edify script generator to append commands to.
      output_zip: The output zip; "dynamic_partitions_op_list" is added to it.
      write_verify_script: Whether to also emit post-install verification
          commands for each patched partition.
    """
    script.Comment('--- Start patching dynamic partitions ---')
    # Phase 1: patch partitions that shrink, before the metadata update.
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        # Verification of these partitions is deferred to the
        # write_verify_script branch below (after the metadata update), so
        # it is unconditionally disabled here.
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    # Phase 2: ship the op list computed by _Compute() inside the package
    # and apply it to the dynamic partition metadata on the device.
    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      # Verify the partitions patched in phase 1, now that the metadata
      # reflects the target layout.
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    # Phase 3: patch partitions that grow or keep their size, after the
    # metadata update has made their target extents available.
    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')
4067
  def _Compute(self):
    """Computes the dynamic_partitions_op_list lines into self._op_list.

    The emission order of the loops below is load-bearing: partition
    removes, moves-to-default, and shrinks come before group shrinks and
    removes, which come before group adds and grows, which come before
    partition adds, grows, and final moves — presumably so that space is
    always freed before it is claimed. Do not reorder the loops.
    """
    self._op_list = list()

    # Append a raw op line / a '#'-prefixed comment line to the op list.
    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    # 1. Remove partitions that do not exist in the target build.
    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    # 2. Park partitions that change group in 'default'; they are moved to
    #    their final group in step 8, after that group has been sized.
    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    # 3. Shrink partitions before their groups are shrunk in step 4.
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        # NOTE(review): '%s' for the size here vs '%d' in step 7 — output is
        # identical for integer sizes, but the formats could be unified.
        append('resize %s %s' % (p, u.tgt_size))

    # 4. Shrink groups whose quota decreases; remove groups that disappear.
    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    # 5. Add new groups and grow existing ones, now that space is freed.
    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    # 6. Add brand-new partitions to their target groups.
    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    # 7. Grow partitions after their groups have been grown in step 5.
    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    # 8. Complete the group moves started in step 2.
    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))
4130
4131
def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
  """
  Get build.prop from ramdisk within the boot image

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 or
        gzip format.
    ramdisk_format: one of RamdiskFormat.LZ4 (default) or RamdiskFormat.GZ,
        selecting how the ramdisk is decompressed.

  Return:
    Path to a build.prop file extracted from the boot image's ramdisk, or
    None if the ramdisk is missing, uses an unsupported compression format,
    fails to decompress, or contains no build.prop at a known location.
  """
  tmp_dir = MakeTempDir('boot_', suffix='.img')
  try:
    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
                      boot_img, '--out', tmp_dir])
    ramdisk = os.path.join(tmp_dir, 'ramdisk')
    if not os.path.isfile(ramdisk):
      logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
      return None
    uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
    if ramdisk_format == RamdiskFormat.LZ4:
      RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
    elif ramdisk_format == RamdiskFormat.GZ:
      with open(ramdisk, 'rb') as input_stream:
        with open(uncompressed_ramdisk, 'wb') as output_stream:
          p2 = Run(['gzip', '-d'], stdin=input_stream.fileno(),
                   stdout=output_stream.fileno())
          p2.wait()
          # Fix: the exit code used to be ignored, so a failed decompression
          # silently produced a truncated ramdisk. Bail out instead.
          if p2.returncode != 0:
            logger.warning(
                'Unable to get boot image build props: gzip -d failed with '
                'exit code %d', p2.returncode)
            return None
    else:
      logger.error('Only support lz4 or gzip ramdisk format.')
      return None

    abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
    extracted_ramdisk = MakeTempDir('extracted_ramdisk')
    # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
    # the host environment.
    RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
                      cwd=extracted_ramdisk)

    # Probe each known location; warn for each miss so a missing prop file is
    # diagnosable from the log.
    for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
      prop_file = os.path.join(extracted_ramdisk, search_path)
      if os.path.isfile(prop_file):
        return prop_file
      logger.warning(
          'Unable to get boot image timestamp: no %s in ramdisk', search_path)

    return None

  except ExternalError as e:
    # Best-effort: callers treat None as "no props available".
    logger.warning('Unable to get boot image build props: %s', e)
    return None
4182
4183
def GetBootImageTimestamp(boot_img):
  """
  Get timestamp from ramdisk within the boot image

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 format.

  Return:
    An integer that corresponds to the timestamp of the boot image, or None
    if file has unknown format. Raise exception if an unexpected error has
    occurred.
  """
  # Bail out early if no build.prop could be extracted from the ramdisk.
  prop_file = GetBootImageBuildProp(boot_img)
  if not prop_file:
    return None

  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
  if props is None:
    return None

  # Only the property lookup is expected to raise ExternalError; treat that
  # as "timestamp unavailable" rather than a hard failure.
  try:
    utc_stamp = props.GetProp('ro.bootimage.build.date.utc')
  except ExternalError as e:
    logger.warning('Unable to get boot image timestamp: %s', e)
    return None

  if not utc_stamp:
    logger.warning(
        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
    return None
  return int(utc_stamp)
4215
4216
def IsSparseImage(filepath):
  """Returns True if filepath exists and starts with the sparse image magic.

  Magic for android sparse image format:
  https://source.android.com/devices/bootloader/images
  """
  sparse_magic = b'\x3A\xFF\x26\xED'
  if not os.path.exists(filepath):
    return False
  with open(filepath, 'rb') as fp:
    header = fp.read(4)
  return header == sparse_magic
4224
4225
def UnsparseImage(filepath, target_path=None):
  """Converts an Android sparse image to a raw image via simg2img.

  No-op when filepath is not a sparse image. If target_path is given, the
  raw image is written there; otherwise filepath is converted in place.
  """
  if not IsSparseImage(filepath):
    return
  if target_path is not None:
    RunAndCheckOutput(["simg2img", filepath, target_path])
    return
  # In-place conversion: unsparse into a temp file, then replace the input.
  tmp_img = MakeTempFile(suffix=".img")
  RunAndCheckOutput(["simg2img", filepath, tmp_img])
  os.rename(tmp_img, filepath)
4235
4236
def ParseUpdateEngineConfig(path: str):
  """Parse the update_engine config stored in file `path`
  Args
    path: Path to update_engine_config.txt file in target_files

  Returns
    A tuple of (major, minor) version number . E.g. (2, 8)
  """
  # update_engine_config.txt is only supposed to contain two lines,
  # PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION. 1024 should be more than
  # sufficient. If the length is more than that, something is wrong.
  with open(path, "r") as fp:
    data = fp.read(1024)
  major_match = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data)
  if major_match is None:
    raise ValueError(
        f"{path} is an invalid update_engine config, missing PAYLOAD_MAJOR_VERSION {data}")
  minor_match = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data)
  if minor_match is None:
    raise ValueError(
        f"{path} is an invalid update_engine config, missing PAYLOAD_MINOR_VERSION {data}")
  return (int(major_match.group(1)), int(minor_match.group(1)))
4259