• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15from __future__ import print_function
16
17import base64
18import collections
19import copy
20import datetime
21import errno
22import fnmatch
23from genericpath import isdir
24import getopt
25import getpass
26import gzip
27import imp
28import json
29import logging
30import logging.config
31import os
32import platform
33import re
34import shlex
35import shutil
36import subprocess
37import sys
38import stat
39import tempfile
40import threading
41import time
42import zipfile
43from hashlib import sha1, sha256
44
45import images
46import rangelib
47import sparse_img
48from blockimgdiff import BlockImageDiff
49
# Module-level logger; handlers/levels are configured later by InitLogging().
logger = logging.getLogger(__name__)
51
52
class Options(object):
  """Global option holder shared by the releasetools scripts.

  A single instance (the module-level OPTIONS) is created at import time and
  mutated by each script's command-line parsing.
  """

  def __init__(self):
    # Set up search path, in order to find framework/ and lib64/. At the time
    # this runs, the user-supplied search path (`--path`) isn't available yet,
    # so the value computed here is only the default and may be overridden by
    # a command-line flag later.
    exec_path = os.path.realpath(sys.argv[0])
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # logger hasn't been initialized yet at this point. Use print to output
      # warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(exec_path))

    # Signing-related tools and arguments. signapk_path is relative to
    # search_path; when signapk.jar isn't found there, fall back to
    # ANDROID_HOST_OUT if the environment provides it.
    self.signapk_path = "framework/signapk.jar"
    if ("ANDROID_HOST_OUT" in os.environ and
        not os.path.exists(os.path.join(self.search_path, self.signapk_path))):
      self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.sign_sepolicy_path = None
    self.extra_signapk_args = []
    self.extra_sign_sepolicy_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx4096m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"

    # General state shared across scripts.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None

    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8

    self.logfile = None
    self.host_tools = {}
    self.sepolicy_name = 'sepolicy.apex'
101
102
# The global Options instance shared by all releasetools scripts; scripts
# mutate its fields while parsing their command-line flags.
OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here, the
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks need to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery',
                  'system', 'system_ext', 'vendor', 'vendor_boot', 'vendor_kernel_boot',
                  'vendor_dlkm', 'odm_dlkm', 'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If file is moved, add new search paths here; don't remove
# existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
141
142
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: failures while updating the system partition.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: failures while updating the vendor partition.
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build mismatches and patching failures.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
170
171
class ExternalError(RuntimeError):
  """Raised when an external command fails or an expected property/entry
  cannot be found (e.g. a subprocess exits non-zero in RunAndCheckOutput)."""
  pass
174
175
def InitLogging():
  """Configures the logging module for the releasetools scripts.

  If the LOGGING_CONFIG environment variable names a JSON config file, that
  config is used as-is. Otherwise a default stderr config applies, adjusted
  by the global OPTIONS: verbose mode lowers the console level to INFO, and
  OPTIONS.logfile adds a file handler.
  """
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'INFO',
          }
      }
  }

  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    # An explicit config file wins outright; OPTIONS-based tweaks don't apply.
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG
    if OPTIONS.verbose or OPTIONS.logfile:
      # Copy before mutating so the default template stays pristine.
      config = copy.deepcopy(config)
    if OPTIONS.verbose:
      # Increase the logging level for verbose mode.
      config['handlers']['default']['level'] = 'INFO'
    if OPTIONS.logfile:
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)
226
227
def SetHostToolLocation(tool_name, location):
  """Registers an explicit path for a host tool.

  The mapping is consulted first by FindHostToolPath().

  Args:
    tool_name: name of the tool.
    location: path to use for the tool.
  """
  OPTIONS.host_tools[tool_name] = location
230
231
def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find
  Returns:
    path to the tool if found under either one of the host_tools map or under
    the same directory as this binary is located at. If not found, tool_name
    is returned.
  """
  # Explicit overrides registered via SetHostToolLocation() take precedence.
  try:
    return OPTIONS.host_tools[tool_name]
  except KeyError:
    pass

  # Next, look for the tool next to this script/binary.
  candidate = os.path.join(
      os.path.dirname(os.path.realpath(sys.argv[0])), tool_name)
  return candidate if os.path.exists(candidate) else tool_name
251
252
def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Default to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  # Default to capturing combined stdout/stderr, but only when the caller
  # hasn't redirected either stream.
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  kwargs.setdefault('universal_newlines', True)

  if args:
    # Copy so the caller's list stays untouched when args[0] is rewritten to
    # the resolved host-tool path.
    args = list(args)
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log any if caller explicitly says so.
  if verbose:
    logger.info("  Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)
287
288
def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Default to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  if verbose is None:
    verbose = OPTIONS.verbose

  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  if output is None:
    # communicate() yields None when stdout wasn't captured.
    output = ""

  # Don't log any if caller explicitly says so.
  if verbose:
    logger.info("%s", output.rstrip())

  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output
320
321
def RoundUpTo4K(value):
  """Rounds value up to the nearest multiple of 4096 (the 4 KiB block size)."""
  remainder = value % 4096
  return value if remainder == 0 else value + 4096 - remainder
325
326
def CloseInheritedPipes():
  """Closes leaked pipe file descriptors on macOS.

  Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work. No-op on other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      # Bug fix: the local used to be named `stat`, shadowing the `stat`
      # module imported at the top of this file. Also use stat.S_ISFIFO
      # instead of the raw 0x1000 (S_IFIFO) bit test, and drop the dead
      # `is not None` check -- os.fstat() raises rather than return None.
      st = os.fstat(fd)
      if stat.S_ISFIFO(st.st_mode):
        os.close(fd)
    except OSError:
      # fd isn't open; skip it.
      pass
341
342
class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  the commonly used properties such as fingerprint.

  There are two types of info dicts: a) build-time info dict, which is generated
  at build time (i.e. included in a target_files zip); b) OEM info dict that is
  specified at package generation time (via command line argument
  '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
  having "oem_fingerprint_properties" in build-time info dict), all the queries
  would be answered based on build-time info dict only. Otherwise if using
  OEM-specific properties, some of them will be calculated from two info dicts.

  Users can query properties similarly as using a dict() (e.g. info['fstab']),
  or to query build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts; None
        if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The length of vbmeta digest to append to the fingerprint
  _VBMETA_DIGEST_SIZE_USED = 8

  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class, while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None to use BuildInfo in
    # sign_target_files_apks
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      # Fingerprints must be ASCII and contain no spaces (CDD 3.2.2).
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    # Cache per-partition fingerprints; partitions whose props are missing
    # (ExternalError) are simply left out of the cache.
    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    """Whether the build uses A/B OTA (info dict's "ab_update" is "true")."""
    return self._is_ab

  @property
  def device(self):
    """The device name computed at construction time."""
    return self._device

  @property
  def fingerprint(self):
    """The build fingerprint computed at construction time."""
    return self._fingerprint

  @property
  def is_vabc(self):
    """Whether vendor props declare ro.virtual_ab.compression.enabled."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true"
    return vabc_enabled

  @property
  def is_android_r(self):
    """Whether the system partition reports release "11" (Android R)."""
    system_prop = self.info_dict.get("system.build.prop")
    return system_prop and system_prop.GetProp("ro.build.version.release") == "11"

  @property
  def vabc_compression_param(self):
    """The "virtual_ab_compression_method" value, or "" if unset."""
    return self.get("virtual_ab_compression_method", "")

  @property
  def vendor_api_level(self):
    """The vendor API level as an int, or -1 if none is declared."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    if not vendor_prop:
      return -1

    # Candidate props, in decreasing order of preference.
    props = [
        "ro.board.api_level",
        "ro.board.first_api_level",
        "ro.product.first_api_level",
    ]
    for prop in props:
      value = vendor_prop.GetProp(prop)
      try:
        return int(value)
      except (TypeError, ValueError):
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. GetProp() may yield None
        # (TypeError) or a non-numeric string (ValueError); either way,
        # fall through to the next candidate property.
        pass
    return -1

  @property
  def is_vabc_xor(self):
    """Whether vendor props declare ro.virtual_ab.compression.xor.enabled."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled

  @property
  def vendor_suppressed_vabc(self):
    """Whether vendor props set ro.vendor.build.dont_use_vabc to "true"."""
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    """OEM fingerprint properties, or None when not using OEM dicts."""
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    """Reads prop from the (possibly partition-specific) build.prop dict.

    Returns None when the prop file or the prop itself is absent.
    """
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition."""

    # Boot image and init_boot image uses ro.[product.]bootimage instead of boot.
    # This comes from the generic ramdisk
    prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the inquired build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the inquired ro.product.* build property"""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid
    if any([x not in default_source_order for x in source_order]):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding error.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    """Returns the platform version (release or codename) string."""
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # init code switches to version_release_or_codename (see b/158483506). After
    # API finalization, release_or_codename will be the same as release. This
    # is the best effort to support pre-S dev stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    """Returns ro.build.id, possibly derived from the legacy id + vbmeta."""
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of vbmeta digest to the existing build id. The
    # logic needs to match the one in init, so that OTA can deliver correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("Vbmeta digest isn't provided when using legacy build"
                          " id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    """Returns the partition's release_or_codename, falling back to release."""
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    """Returns key from the first OEM dict when OEM-managed, else build.prop."""
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    """Returns the cached fingerprint for partition, or None."""
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    """Computes the partition fingerprint, composing it if not declared."""
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    """Computes the build fingerprint, using OEM thumbprint when applicable."""
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))

  def WriteMountOemScript(self, script):
    """Emits the edify command that mounts /oem."""
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    """Emits device assertions, from OEM dicts when OEM-managed."""
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)
723
724
def DoesInputFileContain(input_file, fn):
  """Check whether the input target_files.zip contain an entry `fn`"""
  # Accept an already-open ZipFile, a path to a zip on disk, or a path to an
  # extracted directory -- in that order.
  if isinstance(input_file, zipfile.ZipFile):
    return fn in input_file.namelist()
  if zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return fn in zfp.namelist()
  if not os.path.isdir(input_file):
    raise ValueError(
        "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
  return os.path.exists(os.path.join(input_file, *fn.split("/")))
738
739
def ReadBytesFromInputFile(input_file, fn):
  """Reads the bytes of fn from input zipfile or directory.

  Args:
    input_file: A zipfile.ZipFile instance, a path to a .zip file on disk, or
        a path to a directory of extracted target files.
    fn: The entry name, with components separated by '/'.

  Returns:
    The entry contents as bytes.

  Raises:
    KeyError: If fn doesn't exist in input_file.
    ValueError: If input_file is not one of the accepted kinds.
    IOError: On read failures other than a missing file.
  """
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return zfp.read(fn)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "rb") as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Bug fix: previously any other IOError (e.g. EACCES) was caught here
      # and silently swallowed, making the function fall through and return
      # None. Re-raise so real I/O failures surface.
      raise
758
759
def ReadFromInputFile(input_file, fn):
  """Reads the str contents of fn from input zipfile or directory.

  Thin wrapper around ReadBytesFromInputFile() that decodes the bytes with
  the default codec (UTF-8).

  Raises:
    KeyError: If fn doesn't exist in input_file.
  """
  return ReadBytesFromInputFile(input_file, fn).decode()
763
764
def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from input zipfile or directory into a file."""
  # Zip inputs get copied out into a fresh temp file.
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as f:
      f.write(input_file.read(fn))
    return tmp_file
  if zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      tmp_file = MakeTempFile(os.path.basename(fn))
      with open(tmp_file, "wb") as fp:
        fp.write(zfp.read(fn))
      return tmp_file
  # Directory input: the entry already exists on disk, so return its path.
  if not os.path.isdir(input_file):
    raise ValueError(
        "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
  path = os.path.join(input_file, *fn.split("/"))
  if not os.path.exists(path):
    raise KeyError(fn)
  return path
786
787
class RamdiskFormat(object):
  """Enumeration of the ramdisk compression formats used by releasetools."""
  LZ4 = 1
  GZ = 2


def GetRamdiskFormat(info_dict):
  """Returns the RamdiskFormat selected by `lz4_ramdisks` in info_dict."""
  use_lz4 = info_dict.get('lz4_ramdisks') == 'true'
  return RamdiskFormat.LZ4 if use_lz4 else RamdiskFormat.GZ
799
800
def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads `META/misc_info.txt` file in the target_files input, does validation
  checks and returns the parsed key/value pairs for the given build. It's
  usually called early when working on input target_files files, e.g. when
  generating OTAs, or signing builds. Note that the function may be called
  against an old target_files file (i.e. from past dessert releases). So the
  property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. It works fine with the build system. However,
  they are no longer available when (re)generating images from target_files zip.
  When `repacking` is True, redirect these properties to the actual files in the
  unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        target_files file. When doing repacking, `input_file` must be a dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    # Reads entry `fn` from the input, whether it's a zipfile.ZipFile or an
    # unzipped directory; raises KeyError when the entry doesn't exist.
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  # These two keys are required in every supported target_files, however old.
  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir", and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in ["system", "vendor", "system_ext", "product", "odm",
                      "vendor_dlkm", "odm_dlkm", "system_dlkm"]:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        # A missing base fs file isn't fatal; drop the stale property so
        # downstream code doesn't try to read a dangling path.
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

  def makeint(key):
    # Converts d[key] (if present) to int; base 0 honors 0x/0o/0b prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  # Also convert the size property of each boot image (e.g. "boot_size").
  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = GetRamdiskFormat(d)

  # Tries to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  # Keep "build.prop" as an alias of the system partition's build props.
  d["build.prop"] = d["system.build.prop"]

  # Set up the salt (based on fingerprint) that will be used when adding AVB
  # hash / hashtree footers.
  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d, use_legacy_id=True)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      fingerprint = build_info.GetPartitionFingerprint(partition)
      if fingerprint:
        d["avb_{}_salt".format(partition)] = sha256(
            fingerprint.encode()).hexdigest()

    # Set up the salt for partitions without build.prop
    if build_info.fingerprint:
      d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest()

    # Set the vbmeta digest if exists
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d
937
938
def LoadListFromFile(file_path):
  """Reads a text file and returns its contents as a list of lines."""
  with open(file_path) as fp:
    contents = fp.read()
  return contents.splitlines()
942
943
def LoadDictionaryFromFile(file_path):
  """Reads file_path and parses its "key=value" lines into a dict."""
  return LoadDictionaryFromLines(LoadListFromFile(file_path))
947
948
def LoadDictionaryFromLines(lines):
  """Parses "name=value" lines into a dict.

  Blank lines, comment lines starting with '#', and lines without '=' are
  skipped. Only the first '=' separates name from value.
  """
  result = {}
  for raw in lines:
    stripped = raw.strip()
    if not stripped or stripped.startswith("#") or "=" not in stripped:
      continue
    key, _, value = stripped.partition("=")
    result[key] = value
  return result
959
960
class PartitionBuildProps(object):
  """The class holds the build prop of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop; and calculates alternative values of some specific build
  properties during runtime.

  Attributes:
    input_file: a zipped target-file or an unzipped target-file directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: A dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
    ramdisk_format: If name is "boot", the format of ramdisk inside the
        boot image. Otherwise, its value is ignored.
        Use lz4 to decompress by default. If its value is gzip, use minigzip.
  """

  def __init__(self, input_file, name, placeholder_values=None):
    self.input_file = input_file
    self.partition = name
    # Only these per-partition product props may be overridden at runtime
    # via recognized import statements.
    self.props_allow_override = [props.format(name) for props in [
        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
    self.build_props = {}
    self.prop_overrides = set()
    self.placeholder_values = {}
    if placeholder_values:
      # Deep copy so later mutations by the caller don't leak into us.
      self.placeholder_values = copy.deepcopy(placeholder_values)

  @staticmethod
  def FromDictionary(name, build_props):
    """Constructs an instance from a build prop dictionary."""

    props = PartitionBuildProps("unknown", name)
    props.build_props = build_props.copy()
    return props

  @staticmethod
  def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4):
    """Loads the build.prop file and builds the attributes."""

    # boot/init_boot keep their build.prop inside the image's ramdisk; other
    # partitions have it as a plain entry in the target_files.
    if name in ("boot", "init_boot"):
      data = PartitionBuildProps._ReadBootPropFile(
          input_file, name, ramdisk_format=ramdisk_format)
    else:
      data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

    props = PartitionBuildProps(input_file, name, placeholder_values)
    props._LoadBuildProp(data)
    return props

  @staticmethod
  def _ReadBootPropFile(input_file, partition_name, ramdisk_format):
    """
    Read build.prop for boot image from input_file.
    Return empty string if not found.
    """
    image_path = 'IMAGES/' + partition_name + '.img'
    try:
      boot_img = ExtractFromInputFile(input_file, image_path)
    except KeyError:
      logger.warning('Failed to read %s', image_path)
      return ''
    prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
    if prop_file is None:
      return ''
    with open(prop_file, "r") as f:
      return f.read()

  @staticmethod
  def _ReadPartitionPropFile(input_file, name):
    """
    Read build.prop for name from input_file.
    Return empty string if not found.
    """
    data = ''
    # Try the newer etc/ location first, then the legacy top-level location.
    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
                      '{}/build.prop'.format(name.upper())]:
      try:
        data = ReadFromInputFile(input_file, prop_file)
        break
      except KeyError:
        logger.warning('Failed to read %s', prop_file)
    if data == '':
      logger.warning("Failed to read build.prop for partition {}".format(name))
    return data

  @staticmethod
  def FromBuildPropFile(name, build_prop_file):
    """Constructs an instance from a build prop file."""

    props = PartitionBuildProps("unknown", name)
    with open(build_prop_file) as f:
      props._LoadBuildProp(f.read())
    return props

  def _LoadBuildProp(self, data):
    """Parses build.prop text, applying any recognized import statements.

    Raises:
      ValueError: If a prop is overridden by more than one import, or set
          again after being overridden by an import.
    """
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after overridden by import '
                           'statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    # A three-token form imports a single prop only; we skip those.
    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      # Fixed: logger.warn() is the deprecated alias of logger.warning().
      logger.warning('Unrecognized import path {}'.format(line))
      return {}

    # We only recognize a subset of import statement that the init process
    # supports. And we can loosen the restriction based on how the dynamic
    # fingerprint is used in practice. The placeholder format should be
    # ${placeholder}, and its value should be provided by the caller through
    # the placeholder_values.
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved place holder in import path %s', import_path)
      return {}

    # Map the on-device mount point (e.g. /vendor/...) to the corresponding
    # target_files directory name (e.g. VENDOR/...).
    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}

  def __getstate__(self):
    state = self.__dict__.copy()
    # zipfile.ZipFile handles can't be pickled; substitute the backing
    # filename so instances stay picklable.
    if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile):
      state["input_file"] = state["input_file"].filename
    return state

  def GetProp(self, prop):
    """Returns the value of the given build prop, or None if unset."""
    return self.build_props.get(prop)
1129
1130
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  """Parses a v2 recovery fstab into a dict of mount_point -> Partition."""

  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context, slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  result = {}
  for raw_line in data.split("\n"):
    entry = raw_line.strip()
    if not entry or entry.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    fields = entry.split()
    if len(fields) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (entry,))

    # Entries managed by vold are irrelevant to OTA.
    if "voldmanaged=" in fields[4]:
      continue

    # Pull the length and slotselect flags out of the fs_mgr options; any
    # other option in the unified fstab is ignored.
    length = 0
    slotselect = False
    for flag in fields[4].split(","):
      if flag.startswith("length="):
        length = int(flag[len("length="):])
      elif flag == "slotselect":
        slotselect = True

    # Honor the SELinux context if present (the last one wins).
    context = None
    for flag in fields[3].split(","):
      if flag.startswith("context="):
        context = flag

    result[fields[1]] = Partition(
        mount_point=fields[1], fs_type=fields[2], device=fields[0],
        length=length, context=context, slotselect=slotselect)

  # / is used for the system mount point when the root directory is included
  # in system. Other areas assume system is always at "/system", so alias it.
  if system_root_image:
    assert '/system' not in result and '/' in result
    result["/system"] = result["/"]
  return result
1198
1199
1200def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
1201  """Finds the path to recovery fstab and loads its contents."""
1202  # recovery fstab is only meaningful when installing an update via recovery
1203  # (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA.
1204  if info_dict.get('ab_update') == 'true' and \
1205     info_dict.get("allow_non_ab") != "true":
1206    return None
1207
1208  # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
1209  # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
1210  # cases, since it may load the info_dict from an old build (e.g. when
1211  # generating incremental OTAs from that build).
1212  system_root_image = info_dict.get('system_root_image') == 'true'
1213  if info_dict.get('no_recovery') != 'true':
1214    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
1215    if isinstance(input_file, zipfile.ZipFile):
1216      if recovery_fstab_path not in input_file.namelist():
1217        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
1218    else:
1219      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
1220      if not os.path.exists(path):
1221        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
1222    return LoadRecoveryFSTab(
1223        read_helper, info_dict['fstab_version'], recovery_fstab_path,
1224        system_root_image)
1225
1226  if info_dict.get('recovery_as_boot') == 'true':
1227    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
1228    if isinstance(input_file, zipfile.ZipFile):
1229      if recovery_fstab_path not in input_file.namelist():
1230        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
1231    else:
1232      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
1233      if not os.path.exists(path):
1234        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
1235    return LoadRecoveryFSTab(
1236        read_helper, info_dict['fstab_version'], recovery_fstab_path,
1237        system_root_image)
1238
1239  return None
1240
1241
def DumpInfoDict(d):
  """Logs every key/value pair of d, sorted by key, with the value's type."""
  for key in sorted(d):
    value = d[key]
    logger.info("%-25s = (%s) %s", key, type(value).__name__, value)
1245
1246
def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from the
      partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.

  Raises:
    ValueError: If either input lacks use_dynamic_partitions=true, or the
        vendor dict is missing a required super_* key.
  """

  def uniq_concat(a, b):
    # Union of the whitespace-separated tokens of both strings, sorted.
    tokens = {item.strip() for item in a.split() + b.split() if item.strip()}
    return " ".join(sorted(tokens))

  if (framework_dict.get("use_dynamic_partitions") != "true" or
      vendor_dict.get("use_dynamic_partitions") != "true"):
    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

  merged_dict = {"use_dynamic_partitions": "true"}
  # Copy over every key/value pair on which both sides agree.
  for key, value in vendor_dict.items():
    if key in framework_dict and framework_dict[key] == value:
      merged_dict[key] = value

  merged_dict["dynamic_partition_list"] = uniq_concat(
      framework_dict.get("dynamic_partition_list", ""),
      vendor_dict.get("dynamic_partition_list", ""))

  def require_vendor(key):
    # Board-specific sizes must be supplied by the vendor build.
    if key not in vendor_dict:
      raise ValueError("Vendor dict does not contain required key %s." % key)
    return vendor_dict[key]

  # Super block devices are defined by the vendor dict.
  if "super_block_devices" in vendor_dict:
    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
    for block_device in merged_dict["super_block_devices"].split():
      size_key = "super_%s_device_size" % block_device
      merged_dict[size_key] = require_vendor(size_key)

  # Partition groups and group sizes are defined by the vendor dict because
  # these values may vary for each board that uses a shared system image.
  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
  for group in merged_dict["super_partition_groups"].split():
    size_key = "super_%s_group_size" % group
    merged_dict[size_key] = require_vendor(size_key)

    # Each group's partition list is the union of the framework and vendor
    # partition lists.
    list_key = "super_%s_partition_list" % group
    merged_dict[list_key] = uniq_concat(
        framework_dict.get(list_key, ""), vendor_dict.get(list_key, ""))

  # Various other flags should be copied from the vendor dict, if defined.
  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
              "super_metadata_device", "super_partition_error_limit",
              "super_partition_size"):
    if key in vendor_dict:
      merged_dict[key] = vendor_dict[key]

  return merged_dict
1313
1314
def PartitionMapFromTargetFiles(target_files_dir):
  """Builds a map from partition -> path within an extracted target files directory."""
  # Keep possible_subdirs in sync with build/make/core/board_config.mk.
  possible_subdirs = {
      "system": ["SYSTEM"],
      "vendor": ["VENDOR", "SYSTEM/vendor"],
      "product": ["PRODUCT", "SYSTEM/product"],
      "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
      "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
      "vendor_dlkm": [
          "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
      "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"],
  }
  partition_map = {}
  for partition, subdirs in possible_subdirs.items():
    # The first candidate that actually exists wins.
    found = next(
        (subdir for subdir in subdirs
         if os.path.exists(os.path.join(target_files_dir, subdir))), None)
    if found is not None:
      partition_map[partition] = found
  return partition_map
1337
1338
def SharedUidPartitionViolations(uid_dict, partition_groups):
  """Checks for APK sharedUserIds that cross partition group boundaries.

  This uses a single or merged build's shareduid_violation_modules.json
  output file, as generated by find_shareduid_violation.py or
  core/tasks/find-shareduid-violation.mk.

  An error is defined as a sharedUserId that is found in a set of partitions
  that span more than one partition group.

  Args:
    uid_dict: A dictionary created by using the standard json module to read a
      complete shareduid_violation_modules.json file.
    partition_groups: A list of groups, where each group is a list of
      partitions.

  Returns:
    A list of error messages.
  """
  errors = []
  for uid, partitions in uid_dict.items():
    # Count how many groups share at least one partition with this uid.
    spanned_groups = sum(
        1 for group in partition_groups if set(partitions) & set(group))
    if spanned_groups > 1:
      errors.append(
          "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
          % (uid, ",".join(sorted(partitions.keys()))))
  return errors
1369
1370
def RunHostInitVerifier(product_out, partition_map):
  """Runs host_init_verifier on the init rc files within partitions.

  host_init_verifier searches the etc/init path within each partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within product_out.

  Raises:
    ExternalError: If partition_map names a partition that host_init_verifier
        does not support.
  """
  allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
  cmd = ["host_init_verifier"]
  for partition, rel_path in partition_map.items():
    if partition not in allowed_partitions:
      raise ExternalError("Unable to call host_init_verifier for partition %s" %
                          partition)
    partition_dir = os.path.join(product_out, rel_path)
    cmd.extend(["--out_%s" % partition, partition_dir])
    # Add --property-contexts if the file exists on the partition. The system
    # partition's property contexts use the "plat" prefix.
    prefix = "plat" if partition == "system" else partition
    property_contexts_path = os.path.join(
        partition_dir, "etc", "selinux", "%s_property_contexts" % prefix)
    if os.path.exists(property_contexts_path):
      cmd.append("--property-contexts=%s" % property_contexts_path)
    # Add the passwd file if the file exists on the partition.
    passwd_path = os.path.join(partition_dir, "etc", "passwd")
    if os.path.exists(passwd_path):
      cmd.extend(["-p", passwd_path])
  return RunAndCheckOutput(cmd)
1399
1400
def AppendAVBSigningArgs(cmd, partition):
  """Append signing arguments for avbtool."""
  info = OPTIONS.info_dict
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = ResolveAVBSigningPathArgs(
      info.get("avb_" + partition + "_key_path"))
  algorithm = info.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd += ["--key", key_path, "--algorithm", algorithm]
  avb_salt = info.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd += ["--salt", avb_salt]
1412
1413
def ResolveAVBSigningPathArgs(split_args):
  """Resolves the --signing_helper path (or a bare path) to an existing file.

  A path is tried as given first, then relative to OPTIONS.search_path.
  Accepts None/empty (returned unchanged), a single path string, or a list of
  avbtool args whose '--signing_helper' value gets resolved in place.
  """

  def resolve(path):
    if os.path.exists(path):
      return path
    candidate = os.path.join(OPTIONS.search_path, path)
    if os.path.exists(candidate):
      return candidate
    raise ExternalError(
      "Failed to find {}".format(candidate))

  if not split_args:
    return split_args

  if isinstance(split_args, str):
    return resolve(split_args)

  if isinstance(split_args, list):
    for index, arg in enumerate(split_args[:-1]):
      if arg == '--signing_helper':
        split_args[index + 1] = resolve(split_args[index + 1])
        break

  return split_args
1438
1439
def GetAvbPartitionArg(partition, image, info_dict=None):
  """Returns the VBMeta arguments for partition.

  It sets up the VBMeta argument by including the partition descriptor from the
  given 'image', or by configuring the partition as a chained partition.

  Args:
    partition: The name of the partition (e.g. "system").
    image: The path to the partition image.
    info_dict: A dict returned by common.LoadInfoDict(). Will use
        OPTIONS.info_dict if None has been given.

  Returns:
    A list of VBMeta arguments.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Without a per-partition signing key the partition isn't chained; its
  # descriptor is pulled straight from the image instead.
  if not info_dict.get("avb_" + partition + "_key_path"):
    return ["--include_descriptors_from_image", image]

  # For a non-A/B device, we don't chain /recovery nor include its descriptor
  # into vbmeta.img. The recovery image will be configured on an independent
  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
  # See details at
  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
  if partition == "recovery" and info_dict.get("ab_update") != "true":
    return []

  # Otherwise chain the partition into vbmeta.
  return ["--chain_partition", GetAvbChainedPartitionArg(partition, info_dict)]
1474
1475
def GetAvbChainedPartitionArg(partition, info_dict, key=None):
  """Constructs and returns the arg to build or verify a chained partition.

  Args:
    partition: The partition name.
    info_dict: The info dict to look up the key info and rollback index
        location.
    key: The key to be used for building or verifying the partition. Defaults to
        the key listed in info_dict.

  Returns:
    A string of form "partition:rollback_index_location:key" that can be used to
    build or verify vbmeta image.
  """
  if key is None:
    key = info_dict["avb_" + partition + "_key_path"]
  # The public key is extracted from the (possibly search-path-resolved)
  # signing key; avbtool wants the public half in the chain descriptor.
  pubkey_path = ExtractAvbPublicKey(
      info_dict["avb_avbtool"], ResolveAVBSigningPathArgs(key))
  rollback_index_location = info_dict[
      "avb_" + partition + "_rollback_index_location"]
  return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
1497
1498
def _HasGkiCertificationArgs():
  """Returns True iff the info dict configures GKI certificate signing."""
  required = ("gki_signing_key_path", "gki_signing_algorithm")
  return all(key in OPTIONS.info_dict for key in required)
1502
1503
def _GenerateGkiCertificate(image, image_name):
  """Runs generate_gki_certificate for the given image and returns the cert.

  Args:
    image: Path to the image to certify.
    image_name: The name to embed in the certificate (e.g. "boot").

  Returns:
    The raw bytes of the generated certificate.

  Raises:
    ExternalError: If gki_signing_key_path doesn't resolve to an existing file.
  """
  key_path = OPTIONS.info_dict.get("gki_signing_key_path")
  algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")

  key_path = ResolveAVBSigningPathArgs(key_path)

  # Checks key_path exists, before processing --gki_signing_* args.
  if not os.path.exists(key_path):
    raise ExternalError(
        'gki_signing_key_path: "{}" not found'.format(key_path))

  output_certificate = tempfile.NamedTemporaryFile()
  cmd = [
      "generate_gki_certificate",
      "--name", image_name,
      "--algorithm", algorithm,
      "--key", key_path,
      "--output", output_certificate.name,
      image,
  ]

  signature_args = OPTIONS.info_dict.get("gki_signing_signature_args", "")
  signature_args = signature_args.strip()
  if signature_args:
    cmd.extend(["--additional_avb_args", signature_args])

  args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "")
  args = args.strip()
  if args:
    cmd.extend(["--additional_avb_args", args])

  RunAndCheckOutput(cmd)

  # Rewind before reading what generate_gki_certificate wrote. The previous
  # code called seek(os.SEEK_SET, 0), i.e. with (offset, whence) swapped; it
  # only worked because os.SEEK_SET happens to equal 0.
  output_certificate.seek(0, os.SEEK_SET)
  data = output_certificate.read()
  output_certificate.close()
  return data
1541
1542
def BuildVBMeta(image_path, partitions, name, needed_partitions):
  """Creates a VBMeta image.

  It generates the requested VBMeta image. The requested image could be for
  top-level or chained VBMeta image, which is determined based on the name.

  Args:
    image_path: The output path for the new VBMeta image.
    partitions: A dict that's keyed by partition names with image paths as
        values. Only valid partition names are accepted, as partitions listed
        in common.AVB_PARTITIONS and custom partitions listed in
        OPTIONS.info_dict.get("avb_custom_images_partition_list")
    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
    needed_partitions: Partitions whose descriptors should be included into the
        generated VBMeta image.

  Raises:
    AssertionError: On invalid input args.
  """
  avbtool = OPTIONS.info_dict["avb_avbtool"]
  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
  AppendAVBSigningArgs(cmd, name)

  # Custom partitions (and their vbmeta_* counterparts) declared in the info
  # dict are accepted in addition to the standard AVB partition names.
  custom_partitions = OPTIONS.info_dict.get(
      "avb_custom_images_partition_list", "").strip().split()
  custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get(
      "avb_custom_vbmeta_images_partition_list", "").strip().split()]

  for partition, path in partitions.items():
    if partition not in needed_partitions:
      continue
    assert (partition in AVB_PARTITIONS or
            partition in AVB_VBMETA_PARTITIONS or
            partition in custom_avb_partitions or
            partition in custom_partitions), \
        'Unknown partition: {}'.format(partition)
    assert os.path.exists(path), \
        'Failed to find {} for {}'.format(path, partition)
    cmd.extend(GetAvbPartitionArg(partition, path))

  # Pass through any extra avbtool args configured for this vbmeta image.
  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
  if args and args.strip():
    split_args = shlex.split(args)
    for index, arg in enumerate(split_args[:-1]):
      # Check that the image file exists. Some images might be defined
      # as a path relative to source tree, which may not be available at the
      # same location when running this script (we have the input target_files
      # zip only). For such cases, we additionally scan other locations (e.g.
      # IMAGES/, RADIO/, etc) before bailing out.
      if arg == '--include_descriptors_from_image':
        chained_image = split_args[index + 1]
        if os.path.exists(chained_image):
          continue
        found = False
        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
          alt_path = os.path.join(
              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
          if os.path.exists(alt_path):
            split_args[index + 1] = alt_path
            found = True
            break
        assert found, 'Failed to find {}'.format(chained_image)

    # Resolve a possible --signing_helper path against the search path.
    split_args = ResolveAVBSigningPathArgs(split_args)
    cmd.extend(split_args)

  RunAndCheckOutput(cmd)
1610
1611
def _MakeRamdisk(sourcedir, fs_config_file=None,
                 dev_node_file=None,
                 ramdisk_format=RamdiskFormat.GZ):
  """Packs sourcedir/RAMDISK into a compressed ramdisk image.

  Returns a NamedTemporaryFile holding the compressed cpio archive; the
  caller owns the returned file object.
  """
  ramdisk_img = tempfile.NamedTemporaryFile()

  mkbootfs_cmd = ["mkbootfs"]
  if fs_config_file and os.access(fs_config_file, os.F_OK):
    mkbootfs_cmd += ["-f", fs_config_file]
  if dev_node_file and os.access(dev_node_file, os.F_OK):
    mkbootfs_cmd += ["-n", dev_node_file]
  mkbootfs_cmd.append(os.path.join(sourcedir, "RAMDISK"))

  # mkbootfs emits the cpio archive on stdout; pipe it straight into the
  # chosen compressor, whose stdout is the temp file.
  p1 = Run(mkbootfs_cmd, stdout=subprocess.PIPE)
  if ramdisk_format == RamdiskFormat.LZ4:
    p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
             stdout=ramdisk_img.file.fileno())
  elif ramdisk_format == RamdiskFormat.GZ:
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
  else:
    raise ValueError("Only support lz4 or minigzip ramdisk format.")

  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)

  return ramdisk_img
1642
1643
def _BuildBootableImage(image_name, sourcedir, fs_config_file,
                        dev_node_file=None, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Args:
    image_name: Name of the image being built, e.g. 'boot.img'.
    sourcedir: Directory holding the ingredients (kernel, RAMDISK/, cmdline,
        base, pagesize, second, dtb, ...).
    fs_config_file: Optional fs_config file, forwarded to _MakeRamdisk.
    dev_node_file: Optional device node list, forwarded to _MakeRamdisk.
    info_dict: The build info dict; defaults to OPTIONS.info_dict.
    has_ramdisk: Whether the image should embed a ramdisk.
    two_step_image: Kept for interface compatibility; not used in this body.

  Returns:
    The image data (bytes), or None if sourcedir does not appear to contain
    files for building the requested image.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  def read_arg(path):
    # Read a one-line argument file; the original bare open() calls leaked
    # the file handles until GC.
    with open(path) as f:
      return f.read().rstrip("\n")

  # "boot" or "recovery", without extension.
  partition_name = os.path.basename(sourcedir).lower()

  kernel = None
  if partition_name == "recovery":
    if info_dict.get("exclude_kernel_from_recovery_image") == "true":
      logger.info("Excluded kernel binary from recovery image.")
    else:
      kernel = "kernel"
  elif partition_name == "init_boot":
    # init_boot.img carries only the generic ramdisk; no kernel.
    pass
  else:
    # e.g. "boot.img" -> "kernel"; "boot-5.10.img" -> "kernel-5.10".
    kernel = image_name.replace("boot", "kernel")
    kernel = kernel.replace(".img", "")
  if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
    return None

  kernel_path = os.path.join(sourcedir, kernel) if kernel else None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_format = GetRamdiskFormat(info_dict)
    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, dev_node_file,
                               ramdisk_format=ramdisk_format)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg]
  if kernel_path is not None:
    cmd.extend(["--kernel", kernel_path])

  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "dtb")
  if os.access(fn, os.F_OK):
    cmd.append("--dtb")
    cmd.append(fn)

  # The following single-line files hold argument *values* (not paths).
  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(read_arg(fn))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(read_arg(fn))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(read_arg(fn))

  if partition_name == "recovery":
    args = info_dict.get("recovery_mkbootimg_args")
    if not args:
      # Fall back to "mkbootimg_args" for recovery image
      # in case "recovery_mkbootimg_args" is not set.
      args = info_dict.get("mkbootimg_args")
  elif partition_name == "init_boot":
    args = info_dict.get("mkbootimg_init_args")
  else:
    args = info_dict.get("mkbootimg_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  img_unsigned = None
  if info_dict.get("vboot"):
    # With vboot, mkbootimg writes into a staging file that gets signed into
    # `img` below.
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  if partition_name == "recovery":
    if info_dict.get("include_recovery_dtbo") == "true":
      fn = os.path.join(sourcedir, "recovery_dtbo")
      cmd.extend(["--recovery_dtbo", fn])
    if info_dict.get("include_recovery_acpio") == "true":
      fn = os.path.join(sourcedir, "recovery_acpio")
      cmd.extend(["--recovery_acpio", fn])

  RunAndCheckOutput(cmd)

  if _HasGkiCertificationArgs():
    if not os.path.exists(img.name):
      raise ValueError("Cannot find GKI boot.img")
    if kernel_path is None or not os.path.exists(kernel_path):
      raise ValueError("Cannot find GKI kernel.img")

    # Certify GKI images.
    boot_signature_bytes = b''
    boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot")
    boot_signature_bytes += _GenerateGkiCertificate(
        kernel_path, "generic_kernel")

    # The signature blob is zero-padded to a fixed 16 KiB footer.
    BOOT_SIGNATURE_SIZE = 16 * 1024
    if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
      raise ValueError(
          f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}")
    boot_signature_bytes += (
        b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
    assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE

    with open(img.name, 'ab') as f:
      f.write(boot_signature_bytes)

  # Sign the image if vboot is non-empty.
  if info_dict.get("vboot"):
    img_keyblock = tempfile.NamedTemporaryFile()
    # We have switched from the prebuilt futility binary to using the tool
    # (futility-host) built from the source. Override the setting in the old
    # TF.zip.
    futility = info_dict["futility"]
    if futility.startswith("prebuilts/"):
      futility = "futility-host"
    cmd = [info_dict["vboot_signer_cmd"], futility,
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    RunAndCheckOutput(cmd)

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    if partition_name == "recovery":
      part_size = info_dict["recovery_size"]
    else:
      part_size = info_dict[image_name.replace(".img", "_size")]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
      cmd.extend(split_args)
    RunAndCheckOutput(cmd)

  # Rewind to the start before reading back. (The original called
  # img.seek(os.SEEK_SET, 0) with the (offset, whence) arguments swapped;
  # it only worked because os.SEEK_SET == 0.)
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
1829
1830
def _SignBootableImage(image_path, prebuilt_name, partition_name,
                       info_dict=None):
  """Performs AVB signing for a prebuilt boot.img.

  Args:
    image_path: The full path of the image, e.g., /path/to/boot.img.
    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
        boot-5.10.img, recovery.img or init_boot.img.
    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
    info_dict: The information dict read from misc_info.txt.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Only act when AVB is enabled: add a hash footer in place.
  if info_dict.get("avb_enable") != "true":
    return

  # Recovery's size has a dedicated key; other partitions derive it from the
  # image name (e.g. "boot.img" -> "boot_size").
  if partition_name == "recovery":
    part_size = info_dict["recovery_size"]
  else:
    part_size = info_dict[prebuilt_name.replace(".img", "_size")]

  cmd = [info_dict["avb_avbtool"], "add_hash_footer",
         "--image", image_path,
         "--partition_size", str(part_size),
         "--partition_name", partition_name]
  AppendAVBSigningArgs(cmd, partition_name)
  extra = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
  if extra and extra.strip():
    cmd.extend(ResolveAVBSigningPathArgs(shlex.split(extra)))
  RunAndCheckOutput(cmd)
1862
1863
def HasRamdisk(partition_name, info_dict=None):
  """Returns true/false to see if a bootable image should have a ramdisk.

  Args:
    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
    info_dict: The information dict read from misc_info.txt.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # Anything that isn't boot.img (i.e. init_boot.img or recovery.img)
  # always carries a ramdisk.
  if partition_name != "boot":
    return True

  # A recovery-as-boot boot.img embeds the RECOVERY ramdisk.
  if info_dict.get("recovery_as_boot") == "true":
    return True

  # Remaining cases are a plain boot.img; these configurations ship it
  # without a ramdisk:
  #   gki_boot_image_without_ramdisk: GKI boot.img since Android-13;
  #   system_root_image: the ramdisk content is merged into system.img;
  #   init_boot: the ramdisk moved into init_boot.img.
  return not any(
      info_dict.get(flag) == "true"
      for flag in ("gki_boot_image_without_ramdisk",
                   "system_root_image",
                   "init_boot"))
1894
1895
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False,
                     dev_nodes=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise re-sign a copy
  from 'unpack_dir'/PREBUILT_IMAGES, otherwise construct it from the source
  files in 'unpack_dir'/'tree_subdir'.

  Returns None if the image could not be built from the source files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  # e.g. "boot" or "recovery", matching the target_files subdir name.
  partition_name = tree_subdir.lower()
  prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
    signed_img = MakeTempFile()
    shutil.copy(prebuilt_path, signed_img)
    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
    return File.FromLocalFile(name, signed_img)

  logger.info("building image from target_files %s...", tree_subdir)

  has_ramdisk = HasRamdisk(partition_name, info_dict)

  # Reuse partition_name here; the original recomputed tree_subdir.lower().
  fs_config = "META/" + partition_name + "_filesystem_config.txt"
  data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             os.path.join(unpack_dir, 'META/ramdisk_node_list')
                             if dev_nodes else None,
                             info_dict, has_ramdisk, two_step_image)
  if data:
    return File(name, data)
  return None
1940
1941
def _BuildVendorBootImage(sourcedir, partition_name, info_dict=None):
  """Build a vendor boot image from the specified sourcedir.

  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
  turn them into a vendor boot image.

  Args:
    sourcedir: Directory holding the ingredients (RAMDISK/, dtb,
        vendor_cmdline, base, pagesize, vendor_bootconfig,
        vendor_ramdisk_fragments, ...).
    partition_name: "vendor_boot" or "vendor_kernel_boot".
    info_dict: The build info dict; defaults to OPTIONS.info_dict.

  Returns:
    The image data (bytes).
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  def read_arg(path):
    # Read a one-line argument file; the original bare open() calls leaked
    # the file handles until GC.
    with open(path) as f:
      return f.read().rstrip("\n")

  img = tempfile.NamedTemporaryFile()

  ramdisk_format = GetRamdiskFormat(info_dict)
  ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg]

  fn = os.path.join(sourcedir, "dtb")
  if os.access(fn, os.F_OK):
    has_vendor_kernel_boot = (info_dict.get(
        "vendor_kernel_boot", "").lower() == "true")

    # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot.
    # Otherwise pack dtb into vendor_boot.
    if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot":
      cmd.append("--dtb")
      cmd.append(fn)

  fn = os.path.join(sourcedir, "vendor_cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--vendor_cmdline")
    cmd.append(read_arg(fn))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(read_arg(fn))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(read_arg(fn))

  args = info_dict.get("mkbootimg_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
  cmd.extend(["--vendor_boot", img.name])

  fn = os.path.join(sourcedir, "vendor_bootconfig")
  if os.access(fn, os.F_OK):
    cmd.append("--vendor_bootconfig")
    cmd.append(fn)

  ramdisk_fragment_imgs = []
  fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
  if os.access(fn, os.F_OK):
    ramdisk_fragments = shlex.split(read_arg(fn))
    for ramdisk_fragment in ramdisk_fragments:
      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
                        ramdisk_fragment, "mkbootimg_args")
      cmd.extend(shlex.split(read_arg(fn)))
      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
                        ramdisk_fragment, "prebuilt_ramdisk")
      # Use prebuilt image if found, else create ramdisk from supplied files.
      if os.access(fn, os.F_OK):
        ramdisk_fragment_pathname = fn
      else:
        ramdisk_fragment_root = os.path.join(
            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
                                            ramdisk_format=ramdisk_format)
        # Keep the temp file object alive until the image is assembled.
        ramdisk_fragment_imgs.append(ramdisk_fragment_img)
        ramdisk_fragment_pathname = ramdisk_fragment_img.name
      cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])

  RunAndCheckOutput(cmd)

  # AVB: if enabled, calculate and add hash.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    part_size = info_dict[f'{partition_name}_size']
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name", partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args')
    if args and args.strip():
      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
      cmd.extend(split_args)
    RunAndCheckOutput(cmd)

  # Rewind to the start before reading back. (The original called
  # img.seek(os.SEEK_SET, 0) with the (offset, whence) arguments swapped;
  # it only worked because os.SEEK_SET == 0.)
  img.seek(0, os.SEEK_SET)
  data = img.read()

  for f in ramdisk_fragment_imgs:
    f.close()
  ramdisk_img.close()
  img.close()

  return data
2053
2054
def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                       info_dict=None):
  """Return a File object with the desired vendor boot image.

  A prebuilt under 'unpack_dir'/IMAGES wins; otherwise the image is built
  from the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  data = _BuildVendorBootImage(
      os.path.join(unpack_dir, tree_subdir), "vendor_boot", info_dict)
  return File(name, data) if data else None
2077
2078
def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                             info_dict=None):
  """Return a File object with the desired vendor kernel boot image.

  A prebuilt under 'unpack_dir'/IMAGES wins; otherwise the image is built
  from the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  data = _BuildVendorBootImage(
      os.path.join(unpack_dir, tree_subdir), "vendor_kernel_boot", info_dict)
  return File(name, data) if data else None
2101
2102
def Gunzip(in_filename, out_filename):
  """Decompresses the gzip file at in_filename into out_filename."""
  with gzip.open(in_filename, "rb") as compressed:
    with open(out_filename, "wb") as plain:
      shutil.copyfileobj(compressed, plain)
2108
2109
def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str):
  """Extracts one zip entry into dirname, recreating symlinks correctly.

  Args:
    input_zip: The open ZipFile to extract from.
    info: The ZipInfo of the entry to extract.
    dirname: Destination directory; defaults to the current directory when
        None (matching ZipFile.extract's behavior).

  Returns:
    The path extract() created for regular entries; None for symlinks.
  """
  # According to https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838
  # higher bits of |external_attr| are unix file permission and types
  unix_mode = info.external_attr >> 16

  # python3.11 zipfile implementation doesn't handle symlink correctly, so
  # symlink entries are recreated by hand below. stat.S_ISLNK tests the
  # file-type bits exactly, unlike the original hand-rolled mask check
  # ((mode & S_IFLNK) == S_IFLNK), which relied on no other type sharing
  # S_IFLNK's bit pattern.
  if not stat.S_ISLNK(unix_mode):
    return input_zip.extract(info, dirname)

  if dirname is None:
    dirname = os.getcwd()
  target = os.path.join(dirname, info.filename)
  os.makedirs(os.path.dirname(target), exist_ok=True)
  # For a symlink entry, the zip payload is the link target string.
  os.symlink(input_zip.read(info).decode(), target)
2128
2129
def UnzipToDir(filename, dirname, patterns=None):
  """Unzips the archive to the given directory.

  Args:
    filename: The name of the zip file to unzip.
    dirname: Where the unzipped files will land.
    patterns: Files to unzip from the archive. If omitted, will unzip the
        entire archive. Non-matching patterns will be filtered out. If there's
        no match after the filtering, no file will be unzipped.
  """
  with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip:
    entries = input_zip.infolist()
    # b/283033491
    # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header
    # In zip64 mode, central directory record's header_offset field might be
    # set to 0xFFFFFFFF if header offset is > 2^32. In this case, the extra
    # fields will contain an 8 byte little endian integer at offset 20
    # to indicate the actual local header offset.
    # As of python3.11, python does not handle zip64 central directories
    # correctly, so we will manually do the parsing here.

    # ZIP64 central directory extra field has two required fields:
    # 2 bytes header ID and 2 bytes size field. These two required fields have
    # a total size of 4 bytes. Then it has three other 8 byte fields, followed
    # by a 4 byte disk number field. The last disk number field is not required
    # to be present, but if it is present, the total size of extra field will
    # be divisible by 8 (because 2+2+4+8*n is always going to be multiple of 8).
    # Most extra fields are optional, but when they appear, they must appear
    # in the order defined by the zip64 spec. Since file header offset is the
    # 2nd to last field in the zip64 spec, it will be the last 8 bytes or the
    # 8 bytes before the final 4, depending on whether disk number is present.
    for entry in entries:
      if entry.header_offset == 0xFFFFFFFF:
        if len(entry.extra) % 8 == 0:
          entry.header_offset = int.from_bytes(entry.extra[-12:-4], "little")
        else:
          entry.header_offset = int.from_bytes(entry.extra[-8:], "little")

    if patterns is not None:
      # Filter out non-matching patterns. unzip will complain otherwise.
      # (Generator form: the original wrapped the fnmatch results in a list
      # just to pass them to any(), materializing it per entry for nothing.)
      entries = [info for info in entries
                 if any(fnmatch.fnmatch(info.filename, p) for p in patterns)]
      # There isn't any matching file. Don't unzip anything.
      if not entries:
        return

    for info in entries:
      UnzipSingleFile(input_zip, info, dirname)
2180
2181
def UnzipTemp(filename, patterns=None):
  """Unzips the given archive into a temporary directory and returns the name.

  Args:
    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
        a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
    patterns: Files to unzip from the archive. If omitted, will unzip the
        entire archive.

  Returns:
    The name of the temporary directory.
  """

  tmp = MakeTempDir(prefix="targetfiles-")
  # "foo.zip+bar.zip": the second archive supplies BOOTABLE_IMAGES.
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    UnzipToDir(m.group(1), tmp, patterns)
    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns)
    # (The original also rebound `filename` to m.group(1) here, but the local
    # was never read afterwards; the dead assignment has been dropped.)
  else:
    UnzipToDir(filename, tmp, patterns)

  return tmp
2206
2207
def GetUserImage(which, tmpdir, input_zip,
                 info_dict=None,
                 allow_shared_blocks=None,
                 reset_file_map=False):
  """Returns an Image object suitable for passing to BlockImageDiff.

  Loads the prebuilt image for partition 'which' from tmpdir/IMAGES and
  dispatches on its format. Sparse images get additional OTA-oriented
  processing: block 0 is always added to the clobbered blocks, and files
  that cannot be reconstructed from their block list are tagged so imgdiff
  is not applied to them.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    info_dict: The dict to be looked up for relevant info.
    allow_shared_blocks: If image is sparse, whether having shared blocks is
        allowed. If none, it is looked up from info_dict.
    reset_file_map: If true and image is sparse, reset file map before
        returning the image.

  Returns:
    An Image object. If it is a sparse image and reset_file_map is False, the
    image will have file_map info loaded.
  """
  if info_dict is None:
    info_dict = LoadInfoDict(input_zip)

  # Targets built with 'BOARD_EXT4_SHARE_DUP_BLOCKS := true' may emit images
  # whose block lists share blocks between files; only the first "owner" may
  # keep such blocks, and imgdiff is disabled for later occurrences.
  if allow_shared_blocks is None:
    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"

  if not IsSparseImage(os.path.join(tmpdir, "IMAGES", which + ".img")):
    return GetNonSparseImage(which, tmpdir)

  img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks)
  if reset_file_map:
    img.ResetFileMap()
  return img
2251
2252
def GetNonSparseImage(which, tmpdir):
  """Returns an Image object for a non-sparse prebuilt image.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.

  Returns:
    An images.FileImage object.
  """
  images_dir = os.path.join(tmpdir, "IMAGES")
  path = os.path.join(images_dir, which + ".img")
  mappath = os.path.join(images_dir, which + ".map")

  # Both files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  return images.FileImage(path)
2272
2273
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
  """Returns a SparseImage object suitable for passing to BlockImageDiff.

  This function loads the specified sparse image from the given path, and
  performs additional processing for OTA purpose. For example, it always adds
  block 0 to clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for whom we should avoid applying imgdiff.

  Args:
    which: The partition name, e.g. "system", "vendor".
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    allow_shared_blocks: Whether having shared blocks is allowed.
  Returns:
    A SparseImage object, with file_map info loaded.
  """
  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add
  # it to clobbered_blocks so that it will be written to the target
  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
  clobbered_blocks = "0"

  image = sparse_img.SparseImage(
      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks)

  # Hoisted out of the loop below: namelist() builds a fresh list on every
  # call, so the original per-entry membership test cost O(archive size) per
  # file. A set gives O(1) lookups.
  zip_entries = set(input_zip.namelist())

  # block.map may contain less blocks, because mke2fs may skip allocating blocks
  # if they contain all zeros. We can't reconstruct such a file from its block
  # list. Tag such entries accordingly. (Bug: 65213616)
  for entry in image.file_map:
    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
    if not entry.startswith('/'):
      continue

    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
    # filename listed in system.map may contain an additional leading slash
    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
    # results.
    # And handle another special case, where files not under /system
    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
    arcname = entry.lstrip('/')
    if which == 'system' and not arcname.startswith('system'):
      arcname = 'ROOT/' + arcname
    else:
      arcname = arcname.replace(which, which.upper(), 1)

    assert arcname in zip_entries, \
        "Failed to find the ZIP entry for {}".format(entry)

    info = input_zip.getinfo(arcname)
    ranges = image.file_map[entry]

    # If a RangeSet has been tagged as using shared blocks while loading the
    # image, check the original block list to determine its completeness. Note
    # that the 'incomplete' flag would be tagged to the original RangeSet only.
    if ranges.extra.get('uses_shared_blocks'):
      ranges = ranges.extra['uses_shared_blocks']

    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
      ranges.extra['incomplete'] = True

  return image
2341
2342
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []      # keys needing no password (or not real keys)
  need_passwords = []    # encrypted keys that must be prompted for
  key_passwords = {}     # final {key: password} result
  devnull = open("/dev/null", "w+b")

  # sorted() can't compare strings to None, so convert Nones to strings
  for k in sorted(keylist, key=lambda x: x if x is not None else ""):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS or k is None:
      no_passwords.append(k)
      continue

    # First probe: try parsing the key as unencrypted PKCS#8 DER. Success
    # means no password is needed at all.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: retry with an empty passphrase to tell "encrypted with
      # empty password" apart from "needs a real password".
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        # NOTE(review): comparing stderr against a str assumes Run() opens the
        # pipe in text mode -- confirm; with bytes output this branch would
        # never match.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Prompt (or look up) passwords for the encrypted keys; map every
  # password-less key to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords))
  return key_passwords
2392
2393
def GetMinSdkVersion(apk_name):
  """Gets the minSdkVersion declared in the APK.

  It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the
  given APK file, which can be either a decimal number (API Level) or a
  codename.

  Args:
    apk_name: The APK filename.

  Returns:
    The parsed SDK version string.

  Raises:
    ExternalError: On failing to obtain the min SDK version.
  """
  aapt_proc = Run(
      [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  stdoutdata, stderrdata = aapt_proc.communicate()
  if aapt_proc.returncode != 0:
    raise ExternalError(
        "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format(
            apk_name, aapt_proc.returncode, stdoutdata, stderrdata))

  # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
  for line in stdoutdata.split("\n"):
    matched = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if matched:
      return matched.group(1)
  raise ExternalError("No minSdkVersion returned by aapt2")
2424
2425
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Returns the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using
  the provided map.

  Args:
    apk_name: The APK filename.
    codename_to_api_level_map: A dict mapping codenames to API Levels.

  Returns:
    The parsed SDK version number.

  Raises:
    ExternalError: On failing to get the min SDK version number.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass

  # Not a decimal number. It is either a plain codename, e.g.
  #     UpsideDownCake
  # or a codename with an API fingerprint SHA appended, e.g.
  #     UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e
  # Strip any fingerprint suffix and map the codename to a level.
  codename = version.split(".")[0]
  if codename in codename_to_api_level_map:
    return codename_to_api_level_map[codename]
  raise ExternalError(
      "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format(
          codename, version, codename_to_api_level_map))
2461
2462
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=None, whole_file=False,
             extra_signapk_args=None):
  """Signs the input_name zip/jar/apk, producing output_name.

  Uses the given key and password; the latter may be None if the key has no
  password.

  If whole_file is true, the "-w" option is passed to SignApk to embed a
  signature that covers the whole file in the archive comment of the zip
  file.

  min_api_level is the API Level (int) of the oldest platform this file may
  end up on. If not specified for an APK, the API Level is obtained by
  interpreting the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.

  Caller may optionally specify extra args to be passed to SignApk, which
  defaults to OPTIONS.extra_signapk_args if omitted.
  """
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}
  if extra_signapk_args is None:
    extra_signapk_args = OPTIONS.extra_signapk_args

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  # Assemble the SignApk invocation: JVM flags, the jar, then extra args.
  cmd = [OPTIONS.java_path]
  cmd.extend(OPTIONS.java_args)
  cmd.append("-Djava.library.path=" + java_library_path)
  cmd.extend(["-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)])
  cmd.extend(extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Whole-file signing targets non-APK archives, so the manifest-based lookup
  # is skipped in that case.
  min_sdk_version = min_api_level
  if min_sdk_version is None and not whole_file:
    min_sdk_version = GetMinSdkVersionInt(input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  proc = Run(cmd, stdin=subprocess.PIPE)
  # SignApk reads the key password (if any) from stdin.
  stdoutdata, _ = proc.communicate(
      password + "\n" if password is not None else None)
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run {}: return code {}:\n{}".format(cmd,
                                                       proc.returncode, stdoutdata))
2519
2520
def SignSePolicy(sepolicy, key, password):
  """Signs the sepolicy zip, producing an fsverity .fsv_sig and
  an RSA .sig signature files.

  Returns:
    True if signing ran; False if no sign_sepolicy_path is configured.
  """
  if OPTIONS.sign_sepolicy_path is None:
    logger.info("No sign_sepolicy_path specified, %s was not signed", sepolicy)
    return False

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  # Build the signer invocation: JVM flags, the jar, extra args, then the
  # key pair, the sepolicy zip, and the output directory.
  cmd = [OPTIONS.java_path]
  cmd.extend(OPTIONS.java_args)
  cmd.append("-Djava.library.path=" + java_library_path)
  cmd.extend(
      ["-jar", os.path.join(OPTIONS.search_path, OPTIONS.sign_sepolicy_path)])
  cmd.extend(OPTIONS.extra_sign_sepolicy_args)
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              sepolicy, os.path.dirname(sepolicy)])

  proc = Run(cmd, stdin=subprocess.PIPE)
  # The signer reads the key password (if any) from stdin.
  stdoutdata, _ = proc.communicate(
      password + "\n" if password is not None else None)
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run sign sepolicy: return code {}:\n{}".format(
            proc.returncode, stdoutdata))
  return True
2551
2552
def CheckSize(data, target, info_dict):
  """Checks the data string passed against the max size limit.

  For non-AVB images, raise exception if the data is too big. Print a warning
  if the data is nearing the maximum size.

  For AVB images, the actual image size should be identical to the limit.

  Args:
    data: A string that contains all the data for the partition.
    target: The partition name. The ".img" suffix is optional.
    info_dict: The dict to be looked up for relevant info.
  """
  if target.endswith(".img"):
    target = target[:-4]

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    # The fstab mounts userdata at /data.
    mount_point = "/data" if target == "userdata" else "/" + target
    entry = info_dict["fstab"][mount_point]
    fs_type = entry.fs_type
    # The size key is keyed by the device's basename, e.g. "system_size".
    device = entry.device.split("/")[-1]
    limit = info_dict.get(device + "_size")
  if not fs_type or not limit:
    return

  size = len(data)
  # target could be 'userdata' or 'cache'. They should follow the non-AVB
  # image path.
  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
    if size != limit:
      raise ExternalError(
          "Mismatching image size for %s: expected %d actual %d" % (
              target, limit, size))
    return

  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  if pct >= 95.0:
    logger.warning("\n  WARNING: %s\n", msg)
  else:
    logger.info("  %s", msg)
2602
2603
def ReadApkCerts(tf_zip):
  """Parses the APK certs info from a given target-files zip.

  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns
  a tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the file;
  (2) a string representing the extension of compressed APKs in the target
  files (e.g ".gz", ".bro").

  Args:
    tf_zip: The input target_files ZipFile (already open).

  Returns:
    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
        the extension string of compressed APKs (e.g. ".gz"), or None if there's
        no compressed APKs.
  """
  line_pattern = re.compile(
      r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
      r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
      r'(\s+partition="(?P<PARTITION>.*?)")?$')

  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Only files actually present in the zip count towards the compressed
  # extension detection below.
  installed_files = {
      os.path.basename(name) for name in tf_zip.namelist()
      if os.path.basename(name)}

  certmap = {}
  compressed_extension = None
  pub_suffix_len = len(OPTIONS.public_key_suffix)
  priv_suffix_len = len(OPTIONS.private_key_suffix)

  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
    line = line.strip()
    if not line:
      continue
    m = line_pattern.match(line)
    if not m:
      continue

    fields = m.groupdict()
    name = fields["NAME"]
    cert = fields["CERT"]
    privkey = fields["PRIVKEY"]
    this_compressed_extension = fields["COMPRESSED"]

    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_suffix_len] == privkey[:-priv_suffix_len]):
      certmap[name] = cert[:-pub_suffix_len]
    else:
      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)

    if not this_compressed_extension:
      continue

    # Only count the installed files.
    if name + '.' + this_compressed_extension not in installed_files:
      continue

    # All compressed APKs in one system image must share a single extension;
    # multiple compression methods aren't supported.
    if compressed_extension is None:
      compressed_extension = this_compressed_extension
    elif this_compressed_extension != compressed_extension:
      raise ValueError(
          "Multiple compressed extensions: {} vs {}".format(
              compressed_extension, this_compressed_extension))

  return (certmap,
          ("." + compressed_extension) if compressed_extension else None)
2682
2683
# Help text for the global flags handled by ParseOptions(); printed after the
# calling script's own docstring by Usage().
COMMON_DOCSTRING = """
Global options

  -p  (--path) <dir>
      Prepend <dir>/bin to the list of places to search for binaries run by this
      script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the Python module containing device-specific releasetools code.

  -x  (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific extension
      code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.

  --logfile <file>
      Put verbose logs to specified file (regardless of --verbose option.)
"""
2707
2708
def Usage(docstring):
  """Prints the caller's docstring followed by the common global options."""
  for section in (docstring.rstrip("\n"), COMMON_DOCSTRING):
    print(section)
2712
2713
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.

  Args:
    argv: The argument list, without the program name (e.g. sys.argv[1:]).
    docstring: The calling module's docstring, to be displayed for errors
        and -h.
    extra_opts: Additional short options (getopt syntax) defined by the
        caller.
    extra_long_opts: Additional long options defined by the caller.
    extra_option_handler: Callable invoked as handler(opt, arg) for any
        option not handled here; should return a truthy value when it has
        consumed the option.

  Returns:
    The list of positional (non-flag) arguments.
  """
  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "sign_sepolicy_path=", "extra_sign_sepolicy_args=", "aapt2_path=",
         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra=", "logfile="] + list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--sign_sepolicy_path",):
      OPTIONS.sign_sepolicy_path = a
    elif o in ("--extra_sign_sepolicy_args",):
      OPTIONS.extra_sign_sepolicy_args = shlex.split(a)
    elif o in ("--aapt2_path",):
      OPTIONS.aapt2_path = a
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--android_jar_path",):
      OPTIONS.android_jar_path = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    # The legacy boot/verity signer flags are still recognized by getopt so
    # that callers get a clear "switch to AVB" error rather than an opaque
    # unknown-option failure.
    elif o in ("--boot_signer_path",):
      raise ValueError(
          "--boot_signer_path is no longer supported, please switch to AVB")
    elif o in ("--boot_signer_args",):
      raise ValueError(
          "--boot_signer_args is no longer supported, please switch to AVB")
    elif o in ("--verity_signer_path",):
      raise ValueError(
          "--verity_signer_path is no longer supported, please switch to AVB")
    elif o in ("--verity_signer_args",):
      raise ValueError(
          "--verity_signer_args is no longer supported, please switch to AVB")
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    elif o in ("--logfile",):
      OPTIONS.logfile = a
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  # Make binaries under <search_path>/bin take precedence for subprocesses.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
2797
2798
def MakeTempFile(prefix='tmp', suffix=''):
  """Creates a temp file registered for deletion by Cleanup().

  Returns:
    The filename of the new temp file.
  """
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
2806
2807
def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir that will be cleaned up with a call to Cleanup().

  Returns:
    The absolute pathname of the new directory.
  """
  path = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(path)
  return path
2817
2818
def Cleanup():
  """Deletes everything registered via MakeTempFile()/MakeTempDir()."""
  # Iterate in registration order so files registered before an enclosing
  # directory are removed first.
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path, ignore_errors=True)
    else:
      os.remove(path)
  del OPTIONS.tempfiles[:]
2826
2827
class PasswordManager(object):
  """Collects key passwords, optionally persisted in $ANDROID_PW_FILE."""

  def __init__(self):
    # If either of these is unset, GetPasswords() falls back to interactive
    # prompting via PromptResult().
    self.editor = os.getenv("EDITOR")
    self.pwfile = os.getenv("ANDROID_PW_FILE")

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """
    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        if sys.version_info[0] >= 3:
          raw_input = input  # pylint: disable=redefined-builtin
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current):  # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Writes 'current' to the password file, lets the user edit it, and
    re-reads the result. Falls back to interactive prompting if no editor
    or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    # Use a context manager so the file handle is released even if a write
    # fails; the file must be flushed/closed before the editor is launched.
    with open(self.pwfile, "w") as f:
      os.chmod(self.pwfile, 0o600)
      f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
      f.write("# (Additional spaces are harmless.)\n\n")

      first_line = None
      # Sort so that entries with missing passwords come first.
      sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
      for i, (_, k, v) in enumerate(sorted_list):
        f.write("[[[  %s  ]]] %s\n" % (v, k))
        if not v and first_line is None:
          # position cursor on first line with no password.
          first_line = i + 4

    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])

    return self.ReadFile()

  def ReadFile(self):
    """Parses the password file; returns a dict mapping key name to
    password. Returns an empty dict if no password file is configured or
    the file doesn't exist."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      # Context manager guarantees the handle is closed even if a parse
      # iteration raises.
      with open(self.pwfile, "r") as f:
        for line in f:
          line = line.strip()
          if not line or line[0] == '#':
            continue
          m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
          if not m:
            logger.warning("Failed to parse password file: %s", line)
          else:
            result[m.group(2)] = m.group(1)
    except IOError as e:
      # A missing file simply means no stored passwords yet.
      if e.errno != errno.ENOENT:
        logger.exception("Error reading password file:")
    return result
2929
2930
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Adds an on-disk file to an open ZipFile with fixed metadata.

  Works around http://b/18015246: Python 2.7's zipfile implementation wrongly
  thinks that zip64 is required for files larger than 2GiB, so ZIP64_LIMIT is
  temporarily raised. Note that `zipfile.writestr()` will not work for strings
  larger than 2GiB; `zipfile.write()` must be used directly in that case.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't let us pass a ZipInfo, so temporarily rewrite
    # the permissions and timestamps of the file being zipped, restoring them
    # when done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable. fromtimestamp (not
    # utcfromtimestamp) is intentional: zip stores naive local datetimes, so
    # we need "epoch" in the local time zone to end up with 2009/01/01 in the
    # archive.
    local_epoch = datetime.datetime.fromtimestamp(0)
    fixed_mtime = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
2972
2973
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zinfo_or_arcname
    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value
    # for such a case (since
    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
    # which seems to make more sense. Otherwise the entry will have 0o000 as
    # the permission bits. We follow the logic in Python 3 to get consistent
    # behavior between using the two versions.
    if not zinfo.external_attr:
      zinfo.external_attr = 0o600 << 16
  else:
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644

  # An explicit compress_type overrides whatever zinfo carries.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # An explicit perms (or the default applied above) has priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
3023
3024
def ZipDelete(zip_filename, entries, force=False):
  """Deletes entries from a ZIP file.

  Args:
    zip_filename: The name of the ZIP file.
    entries: The name of the entry, or the list of names to be deleted.
    force: If True, don't fail when none of the names exist in the archive.
  """
  if isinstance(entries, str):
    entries = [entries]
  # An empty list means there is nothing to do.
  if not entries:
    return

  with zipfile.ZipFile(zip_filename, 'r') as zin:
    if not force and len(set(zin.namelist()).intersection(entries)) == 0:
      raise ExternalError(
          "Failed to delete zip entries, name not matched: %s" % entries)

    fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(zip_filename))
    os.close(fd)
    # Rebuild the archive with zip2zip, excluding every requested entry.
    cmd = ["zip2zip", "-i", zip_filename, "-o", new_zipfile]
    for entry in entries:
      cmd.extend(["-x", entry])
    RunAndCheckOutput(cmd)

  os.replace(new_zipfile, zip_filename)
3052
3053
def ZipClose(zip_file):
  """Closes a ZipFile with the zip64 workaround applied.

  http://b/18015246: zipfile also consults ZIP64_LIMIT during close() when
  it writes out the central directory, so the limit is raised for the
  duration of the close and then restored.
  """
  saved_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1
  zip_file.close()
  zipfile.ZIP64_LIMIT = saved_limit
3064
3065
class DeviceSpecificParams(object):
  """Dispatches optional hooks to a device-specific releasetools module."""
  # Loaded device-specific module, or None when no module is available.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for key, value in kwargs.items():
      setattr(self, key, value)
    self.extras = OPTIONS.extras

    if self.module is not None:
      return
    path = OPTIONS.device_specific
    if not path:
      return
    try:
      if os.path.isdir(path):
        # A directory: look for a "releasetools" module inside it.
        info = imp.find_module("releasetools", [path])
      else:
        # A file: strip a ".py" extension and import it from its directory.
        dirname, modfile = os.path.split(path)
        base, ext = os.path.splitext(modfile)
        if ext == ".py":
          modfile = base
        info = imp.find_module(modfile, [dirname])
      logger.info("loaded device-specific extensions from %s", path)
      self.module = imp.load_module("device_specific", *info)
    except ImportError:
      logger.info("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default")
    hook = getattr(self.module, function_name)
    return hook(self, *args, **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_GetBlockDifferences(self):
    """Called during full OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partitions.
    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
    """Called during incremental OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partitions.
    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Called to emit assertions for an OTA verification package."""
    return self._DoCall("VerifyOTA_Assertions")
3165
3166
class File(object):
  """An in-memory file: a name plus its data and derived sizes/digest."""

  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    # Callers may record the compressed size; default to the raw size.
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Reads `diskname` from disk and returns it as a File named `name`."""
    # Use a context manager so the handle is closed even if read() raises,
    # and construct via cls so subclasses get instances of their own type.
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Writes the data to a NamedTemporaryFile and returns the (still open)
    file object; the caller is responsible for closing it."""
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    """Writes the data to <d>/<self.name>."""
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    """Adds this file to ZipFile `z` (fixed timestamp, via ZipWriteStr)."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
3194
3195
# Diff tool invocation chosen by Difference.ComputePatch() based on the
# target file's extension; anything not listed falls back to "bsdiff".
# ("-z" is presumably imgdiff's zip-aware mode — confirm against imgdiff
# documentation.)
DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}
3203
3204
class Difference(object):
  """Computes the binary patch that turns source file sf into target file
  tf."""

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf  # target File object
    self.sf = sf  # source File object
    self.patch = None  # patch data, set by ComputePatch()
    self.diff_program = diff_program  # optional override of the diff tool

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the diff tool by the target's extension; default to bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # Create the patch output file before entering the try block, so the
    # finally clause never references an unbound name if creation fails.
    ptemp = tempfile.NamedTemporaryFile()
    try:
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      # communicate() runs on a worker thread so a hung diff tool can be
      # terminated (then killed) after a timeout instead of blocking forever.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch
3277
3278
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Runs a pool of OPTIONS.worker_threads threads, handing out diffs largest
  target first to reduce the long-pole effect.
  """
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  # Sort on size alone: sorting (size, Difference) tuples would raise
  # TypeError on size ties in Python 3, since Difference objects don't
  # define an ordering.
  by_size = sorted(diffs, key=lambda d: d.tf.size, reverse=True)

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        # Drop the lock while computing the (slow) patch; re-take it for
        # the shared iterator and for logging.
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for _ in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
3324
3325
class BlockDifference(object):
  """Block-based diff of one partition, plus edify-script emission.

  Runs BlockImageDiff at construction time, writing the transfer list and
  data files under a temp dir (self.path), then offers Write*Script()
  helpers that emit the matching verification/update edify commands.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    # Default to the highest blockimgdiff version the target supports.
    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    """Maximum number of bytes BlockImageDiff needs stashed while patching."""
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    """Emits the update commands (and optional post-install verification)."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emits pre-update verification, with recovery attempt for version >= 4."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    """Emits verification of the fully-updated image, including extended blocks."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Adds transfer list / data files to output_zip and emits the update call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quailty 9 almost triples the compression time but doesn't
    # further reduce the size too much. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      # Use the module logger rather than print, for consistency with the
      # rest of this file's output.
      logger.info("Compressing %s.new.dat with brotli", self.partition)
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    """Returns the hex sha1 of the given block ranges read from source."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # Must be bytes: sha1().update() rejects str on Python 3 (b'\x00' is
    # also valid Python 2).
    zero_block = b'\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
3597
3598
# Expose these two classes to support vendor-specific scripts
DataImage = images.DataImage
EmptyImage = images.EmptyImage


# map recovery.fstab's fs_types to mount/format "partition types";
# every supported fs_type here maps to the "EMMC" partition type.
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC",
    "erofs": "EMMC"
}
3612
3613
def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """Returns (partition_type, device) for the given mount point.

  Prefer GetTypeAndDeviceExpr whenever possible; this helper is kept only
  for backwards compatibility. Unless check_no_slot is explicitly set to
  False, it asserts that the fstab entry has no slotselect option.

  Raises:
    KeyError: If info carries no fstab, or mount_point isn't in it.
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError
  entry = fstab[mount_point]
  if check_no_slot:
    assert not entry.slotselect, \
        "Use GetTypeAndDeviceExpr instead"
  return (PARTITION_TYPES[entry.fs_type], entry.device)
3628
3629
def GetTypeAndDeviceExpr(mount_point, info):
  """Returns the partition type plus an edify device expression.

  The second element of the returned tuple is an edify expression that
  evaluates to the block device at runtime (wrapped in add_slot_suffix()
  for slot-selected entries).

  Raises:
    KeyError: If info carries no fstab, or mount_point isn't in it.
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError
  entry = fstab[mount_point]
  device_expr = '"%s"' % entry.device
  if entry.slotselect:
    device_expr = 'add_slot_suffix(%s)' % device_expr
  return (PARTITION_TYPES[entry.fs_type], device_expr)
3643
3644
def GetEntryForDevice(fstab, device):
  """
  Returns:
    The first entry in fstab whose device is the given value.
  """
  if not fstab:
    return None
  return next(
      (fstab[mount_point] for mount_point in fstab
       if fstab[mount_point].device == device),
      None)
3656
3657
def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Returns:
    The decoded certificate bytes.
  """
  collecting = False
  b64_chunks = []
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    elif collecting:
      b64_chunks.append(line)
    elif "--BEGIN CERTIFICATE--" in line:
      # Start collecting on the line *after* the BEGIN marker.
      collecting = True
  return base64.b64decode("".join(b64_chunks))
3677
3678
def ExtractPublicKey(cert):
  """Dumps the PEM-encoded public key from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the given filename followed by '-out',
  # openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
  # stdout instead.
  command = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
  proc = Run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderrdata = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
  return pubkey
3701
3702
def ExtractAvbPublicKey(avbtool, key):
  """Extracts the AVB public key from the given public or private key.

  Args:
    avbtool: The AVB tool to use.
    key: The input key file, which should be PEM-encoded public or private key.

  Returns:
    The path to the extracted AVB public key file.
  """
  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
  extract_cmd = [avbtool, 'extract_public_key', "--key", key,
                 "--output", output]
  RunAndCheckOutput(extract_cmd)
  return output
3717
3718
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which is
  identical for the two, so the resulting patch should be efficient. Add it to
  the output zip, along with a shell script that is run from init.rc on first
  boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File objects for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    # In this case, the output sink is rooted at VENDOR
    recovery_img_path = "etc/recovery.img"
    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
    sh_dir = "bin"
  else:
    # In this case the output sink is rooted at SYSTEM
    recovery_img_path = "vendor/etc/recovery.img"
    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
    sh_dir = "vendor/bin"

  if full_recovery_image:
    # Ship the whole recovery image; no patch needs to be computed.
    output_sink(recovery_img_path, recovery_img.data)

  else:
    system_root_image = info_dict.get("system_root_image") == "true"
    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
    path = os.path.join(input_dir, recovery_resource_dat_path)
    # With system-root-image, boot and recovery images will have mismatching
    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
    # to handle such a case.
    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        # Feed the recovery resources to imgdiff as a bonus file, shrinking
        # the patch; the matching --bonus flag is passed to applypatch below.
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
                                              check_no_slot=False)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
                                                      check_no_slot=False)
  except KeyError:
    # No fstab entries for boot/recovery: nothing to install on first boot.
    return

  # Build the install-recovery.sh contents that init runs on first boot.
  if full_recovery_image:

    # Note that we use /vendor to refer to the recovery resources. This will
    # work for a separate vendor partition mounted at /vendor or a
    # /system/vendor subdirectory on the system partition, for which init will
    # create a symlink from /vendor to /system/vendor.

    sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
          --flash /vendor/etc/recovery.img \\
          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
          --patch /vendor/recovery-from-boot.p \\
          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
       'recovery_type': recovery_type,
       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
  sh_location = os.path.join(sh_dir, "install-recovery.sh")

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())
3841
3842
class DynamicPartitionUpdate(object):
  """Holds the group assignment and BlockDifference of one dynamic partition."""

  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    """Size in bytes of the source image; 0 when there is no block diff."""
    bd = self.block_difference
    return DynamicPartitionUpdate._GetSparseImageSize(bd.src) if bd else 0

  @property
  def tgt_size(self):
    """Size in bytes of the target image; 0 when there is no block diff."""
    bd = self.block_difference
    return DynamicPartitionUpdate._GetSparseImageSize(bd.tgt) if bd else 0

  @staticmethod
  def _GetSparseImageSize(img):
    # A missing image contributes zero bytes.
    return img.blocksize * img.total_blocks if img else 0
3868
3869
class DynamicGroupUpdate(object):
  """Tracks the maximum-size change of one dynamic partition group."""

  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist. 0: no size limits.
    self.src_size = src_size
    self.tgt_size = tgt_size
3875
3876
class DynamicPartitionsDifference(object):
  """Computes and emits the dynamic-partition metadata update between builds.

  Cross-checks the per-partition BlockDifference objects against the
  dynamic_partition_list / super_*_partition_list entries of the target
  (and, for incrementals, source) info dicts, then produces an ordered
  op list consumed by update_dynamic_partitions() at install time.
  """

  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    # A full OTA (no source build) wipes all existing groups first.
    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    # Each partition may appear at most once in block_diffs.
    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    # Record which group each partition belongs to, on both sides.
    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    # Sanity-check: the set of partitions with a target (resp. source) image
    # must match the build's dynamic_partition_list exactly.
    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()

  def WriteScript(self, script, output_zip, write_verify_script=False):
    """Emits the edify commands for all partition diffs plus metadata ops."""
    script.Comment('--- Start patching dynamic partitions ---')
    # Shrinking partitions are patched before the metadata update; the
    # remaining partitions are patched after it (see second loop below).
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')

  def _Compute(self):
    # Builds self._op_list. The op order matters: removals and shrinks come
    # first to free super-partition space, then group grows/adds, then
    # partition adds/grows and moves into their final groups.
    self._op_list = list()

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %s' % (p, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))
4060
4061
def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
  """
  Get build.prop from ramdisk within the boot image

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 or
      minigzip format.
    ramdisk_format: RamdiskFormat.LZ4 (default) or RamdiskFormat.GZ; any
      other value is rejected.

  Return:
    An extracted file that stores properties in the boot image, or None when
    the boot image has no ramdisk, the ramdisk format is unsupported, no
    build.prop is found at a known location, or unpacking fails.
  """
  tmp_dir = MakeTempDir('boot_', suffix='.img')
  try:
    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
                      boot_img, '--out', tmp_dir])
    ramdisk = os.path.join(tmp_dir, 'ramdisk')
    if not os.path.isfile(ramdisk):
      # Fixed log text: this helper extracts build props in general, not
      # just the timestamp.
      logger.warning('Unable to get boot image build props: no ramdisk in boot')
      return None
    uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
    if ramdisk_format == RamdiskFormat.LZ4:
      RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
    elif ramdisk_format == RamdiskFormat.GZ:
      # Stream the compressed ramdisk through minigzip to decompress it.
      with open(ramdisk, 'rb') as input_stream:
        with open(uncompressed_ramdisk, 'wb') as output_stream:
          p2 = Run(['minigzip', '-d'], stdin=input_stream.fileno(),
                   stdout=output_stream.fileno())
          p2.wait()
    else:
      logger.error('Only support lz4 or minigzip ramdisk format.')
      return None

    abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
    extracted_ramdisk = MakeTempDir('extracted_ramdisk')
    # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
    # the host environment.
    RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
                      cwd=extracted_ramdisk)

    for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
      prop_file = os.path.join(extracted_ramdisk, search_path)
      if os.path.isfile(prop_file):
        return prop_file
      logger.warning('Unable to get boot image build props: no %s in ramdisk',
                     search_path)

    return None

  except ExternalError as e:
    logger.warning('Unable to get boot image build props: %s', e)
    return None
4112
4113
def GetBootImageTimestamp(boot_img):
  """Extracts the UTC build timestamp from the ramdisk of a boot image.

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 format.

  Return:
    An integer timestamp (ro.bootimage.build.date.utc), or None when the
    image has an unknown format or the property cannot be read. Unexpected
    errors propagate as exceptions.
  """
  prop_file = GetBootImageBuildProp(boot_img)
  if not prop_file:
    return None

  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
  if props is None:
    return None

  try:
    timestamp = props.GetProp('ro.bootimage.build.date.utc')
  except ExternalError as e:
    logger.warning('Unable to get boot image timestamp: %s', e)
    return None

  if timestamp:
    return int(timestamp)
  logger.warning(
      'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
  return None
4145
4146
def IsSparseImage(filepath):
  """Returns True iff |filepath| exists and starts with the Android sparse
  image magic number.

  Magic for android sparse image format:
  https://source.android.com/devices/bootloader/images
  """
  if not os.path.exists(filepath):
    return False
  with open(filepath, 'rb') as image:
    magic = image.read(4)
  return magic == b'\x3A\xFF\x26\xED'
4154
4155
def ParseUpdateEngineConfig(path: str):
  """Parse the update_engine config stored in file `path`
  Args
    path: Path to update_engine_config.txt file in target_files

  Returns
    A tuple of (major, minor) version number . E.g. (2, 8)

  Raises
    ValueError: if either version field is missing from the file.
  """
  # update_engine_config.txt is only supposed to contain two lines,
  # PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION. 1024 should be more than
  # sufficient. If the length is more than that, something is wrong.
  with open(path, "r") as fp:
    data = fp.read(1024)

  def _extract(key):
    # Each field looks like KEY=<decimal>; reject the file if it is absent.
    match = re.search(r"%s=(\d+)" % key, data)
    if not match:
      raise ValueError(
          f"{path} is an invalid update_engine config, missing {key} {data}")
    return int(match.group(1))

  return (_extract("PAYLOAD_MAJOR_VERSION"),
          _extract("PAYLOAD_MINOR_VERSION"))