• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (C) 2020 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import itertools
17import logging
18import os
19import shutil
20import struct
21import zipfile
22
23import ota_metadata_pb2
24import common
25import fnmatch
26from common import (ZipDelete, DoesInputFileContain, ReadBytesFromInputFile, OPTIONS, MakeTempFile,
27                    ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
28                    SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps,
29                    GetRamdiskFormat, ParseUpdateEngineConfig)
30from payload_signer import PayloadSigner
31
32
logger = logging.getLogger(__name__)

# Defaults for the global OTA options shared via common.OPTIONS. The
# command-line front end overrides these before the helpers below run.
OPTIONS.no_signing = False
OPTIONS.force_non_ab = False
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.key_passwords = {}
OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.output_metadata_path = None
OPTIONS.boot_variable_file = None

# ZIP entry names for the legacy text metadata and the protobuf metadata.
METADATA_NAME = 'META-INF/com/android/metadata'
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
# Entries pulled out of a target-files zip when a full unzip is unnecessary.
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*',
                 'RADIO/*', '*/build.prop', '*/default.prop', '*/build.default', "*/etc/vintf/*"]
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"


# Key is the compression algorithm, value is minimum API level required to
# use this compression algorithm for VABC OTA on device.
VABC_COMPRESSION_PARAM_SUPPORT = {
    "gz": 31,
    "brotli": 31,
    "none": 31,
    # lz4 support is added in Android U
    "lz4": 34,
    # zstd support is added in Android V
    "zstd": 35,
}
64
65
def FinalizeMetadata(metadata, input_file, output_file, needed_property_files=None, package_key=None, pw=None):
  """Finalizes the metadata and signs an A/B OTA package.

  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
  that contains the offsets and sizes for the ZIP entries. An example
  property-files string is as follows.

    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"

  OTA server can pass down this string, in addition to the package URL, to the
  system update client. System update client can then fetch individual ZIP
  entries (ZIP_STORED) directly at the given offset of the URL.

  Args:
    metadata: The OtaMetadata proto for the package (its property_files map is
        filled in by this function).
    input_file: The input ZIP filename that doesn't contain the package METADATA
        entry yet.
    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles' to be generated. Default is [AbOtaPropertyFiles(), StreamingPropertyFiles()]
    package_key: The key used to sign this OTA package
    pw: Password for the package_key
  """
  # No key means the package is copied out unsigned.
  no_signing = package_key is None

  if needed_property_files is None:
    # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
    # all the info of the latter. However, system updaters and OTA servers need to
    # take time to switch to the new flag. We keep both of the flags for
    # P-timeframe, and will remove StreamingPropertyFiles in later release.
    needed_property_files = (
        AbOtaPropertyFiles(),
        StreamingPropertyFiles(),
    )

  def ComputeAllPropertyFiles(input_file, needed_property_files):
    """First pass: record placeholder property-files strings, rewrite the
    metadata entries, and (unless signing is disabled) return a
    preliminarily-signed copy of the package."""
    # Write the current metadata entry with placeholders.
    with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Compute(
            input_zip)

    ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME], True)
    with zipfile.ZipFile(input_file, 'a', allowZip64=True) as output_zip:
      WriteMetadata(metadata, output_zip)

    if no_signing:
      return input_file

    prelim_signing = MakeTempFile(suffix='.zip')
    SignOutput(input_file, prelim_signing, package_key, pw)
    return prelim_signing

  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
    """Second pass: replace placeholders with real offset/size values, padded
    to the length reserved during the first pass."""
    with zipfile.ZipFile(prelim_signing, 'r', allowZip64=True) as prelim_signing_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Finalize(
            prelim_signing_zip,
            len(metadata.property_files[property_files.name]))

  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the ZIP
  # entries, as well as padding the entry headers. We do a preliminary signing
  # (with an incomplete metadata entry) to allow that to happen. Then compute
  # the ZIP entry offsets, write back the final metadata and do the final
  # signing.
  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
  try:
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
  except PropertyFiles.InsufficientSpaceException:
    # Even with the preliminary signing, the entry orders may change
    # dramatically, which leads to insufficiently reserved space during the
    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing works, based on the already ordered ZIP entries, to
    # address the issue.
    prelim_signing = ComputeAllPropertyFiles(
        prelim_signing, needed_property_files)
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)

  # Replace the METADATA entry.
  ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
  with zipfile.ZipFile(prelim_signing, 'a', allowZip64=True) as output_zip:
    WriteMetadata(metadata, output_zip)

  # Re-sign the package after updating the metadata entry.
  if no_signing:
    logger.info(f"Signing disabled for output file {output_file}")
    shutil.copy(prelim_signing, output_file)
  else:
    logger.info(
        f"Signing the output file {output_file} with key {package_key}")
    SignOutput(prelim_signing, output_file, package_key, pw)

  # Reopen the final signed zip to double check the streaming metadata.
  with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
    for property_files in needed_property_files:
      property_files.Verify(
          output_zip, metadata.property_files[property_files.name].strip())

  # If requested, dump the metadata to a separate file.
  output_metadata_path = OPTIONS.output_metadata_path
  if output_metadata_path:
    WriteMetadata(metadata, output_metadata_path)
168
def WriteMetadata(metadata_proto, output):
  """Writes the metadata to the zip archive or a file.

  Args:
    metadata_proto: The metadata protobuf for the package.
    output: A ZipFile object or a string of the output file path. If a string
      path is given, the metadata in the protobuf format will be written to
      {output}.pb, e.g. ota_metadata.pb
  """

  # Render the legacy key=value text form, sorted for deterministic output.
  legacy_pairs = sorted(BuildLegacyOtaMetadata(metadata_proto).items())
  legacy_metadata = "".join("%s=%s\n" % pair for pair in legacy_pairs)
  serialized_proto = metadata_proto.SerializeToString()

  if isinstance(output, zipfile.ZipFile):
    # Both entries must be stored uncompressed so their offsets/sizes can be
    # referenced by the property-files strings.
    for entry_name, payload in ((METADATA_PROTO_NAME, serialized_proto),
                                (METADATA_NAME, legacy_metadata)):
      ZipWriteStr(output, entry_name, payload,
                  compress_type=zipfile.ZIP_STORED)
    return

  with open('{}.pb'.format(output), 'wb') as f:
    f.write(serialized_proto)
  with open(output, 'w') as f:
    f.write(legacy_metadata)
193
194
def UpdateDeviceState(device_state, build_info, boot_variable_values,
                      is_post_build):
  """Update the fields of the DeviceState proto with build info.

  Args:
    device_state: The DeviceState proto (pre- or post-condition) to fill in.
    build_info: The BuildInfo instance for the corresponding build.
    boot_variable_values: Dict mapping boot variable names to the list of
        values they may take at runtime.
    is_post_build: True when |device_state| describes the target (post) build;
        SDK level, security patch level and timestamp are only recorded for
        the post build.
  """

  def UpdatePartitionStates(partition_states):
    """Update the per-partition state according to its build.prop"""
    # Per-partition state is only recorded for A/B builds.
    if not build_info.is_ab:
      return
    build_info_set = ComputeRuntimeBuildInfos(build_info,
                                              boot_variable_values)
    assert "ab_partitions" in build_info.info_dict,\
        "ab_partitions property required for ab update."
    ab_partitions = set(build_info.info_dict.get("ab_partitions"))

    # delta_generator will error out on unused timestamps,
    # so only generate timestamps for dynamic partitions
    # used in OTA update.
    for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
      partition_prop = build_info.info_dict.get(
          '{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop
      if not partition_prop or not partition_prop.build_props:
        continue

      partition_state = partition_states.add()
      partition_state.partition_name = partition
      # Update the partition's runtime device names and fingerprints
      partition_devices = set()
      partition_fingerprints = set()
      for runtime_build_info in build_info_set:
        partition_devices.add(
            runtime_build_info.GetPartitionBuildProp('ro.product.device',
                                                     partition))
        partition_fingerprints.add(
            runtime_build_info.GetPartitionFingerprint(partition))

      # Sorted so the proto output is deterministic across runs.
      partition_state.device.extend(sorted(partition_devices))
      partition_state.build.extend(sorted(partition_fingerprints))

      # TODO(xunchang) set the boot image's version with kmi. Note the boot
      # image doesn't have a file map.
      partition_state.version = build_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)

  # TODO(xunchang), we can save a call to ComputeRuntimeBuildInfos.
  build_devices, build_fingerprints = \
      CalculateRuntimeDevicesAndFingerprints(build_info, boot_variable_values)
  device_state.device.extend(sorted(build_devices))
  device_state.build.extend(sorted(build_fingerprints))
  device_state.build_incremental = build_info.GetBuildProp(
      'ro.build.version.incremental')

  UpdatePartitionStates(device_state.partition_state)

  if is_post_build:
    device_state.sdk_level = build_info.GetBuildProp(
        'ro.build.version.sdk')
    device_state.security_patch_level = build_info.GetBuildProp(
        'ro.build.version.security_patch')
    # Use the actual post-timestamp, even for a downgrade case.
    device_state.timestamp = int(build_info.GetBuildProp('ro.build.date.utc'))
256
257
def GetPackageMetadata(target_info, source_info=None):
  """Generates and returns the metadata proto.

  It generates a ota_metadata protobuf that contains the info to be written
  into an OTA package (META-INF/com/android/metadata.pb). It also handles the
  detection of downgrade / data wipe based on the global options.

  Args:
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, or
        None if generating full OTA.

  Returns:
    A protobuf to be written into package metadata entry.
  """
  assert isinstance(target_info, BuildInfo)
  assert source_info is None or isinstance(source_info, BuildInfo)

  # Parse the optional boot-variable file; each value is a comma-separated
  # list of the values that boot variable may take at runtime.
  boot_variable_values = {}
  if OPTIONS.boot_variable_file:
    d = LoadDictionaryFromFile(OPTIONS.boot_variable_file)
    for key, values in d.items():
      boot_variable_values[key] = [val.strip() for val in values.split(',')]

  metadata_proto = ota_metadata_pb2.OtaMetadata()
  # TODO(xunchang) some fields, e.g. post-device isn't necessary. We can
  # consider skipping them if they aren't used by clients.
  UpdateDeviceState(metadata_proto.postcondition, target_info,
                    boot_variable_values, True)

  if target_info.is_ab and not OPTIONS.force_non_ab:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.AB
    metadata_proto.required_cache = 0
  else:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.BLOCK
    # cache requirement will be updated by the non-A/B codes.

  if OPTIONS.wipe_user_data:
    metadata_proto.wipe = True

  if OPTIONS.retrofit_dynamic_partitions:
    metadata_proto.retrofit_dynamic_partitions = True

  is_incremental = source_info is not None
  if is_incremental:
    UpdateDeviceState(metadata_proto.precondition, source_info,
                      boot_variable_values, False)
  else:
    # Full OTA: the precondition only constrains the device name(s), copied
    # from the postcondition.
    metadata_proto.precondition.device.extend(
        metadata_proto.postcondition.device)

  # Detect downgrades and set up downgrade flags accordingly.
  if is_incremental:
    HandleDowngradeMetadata(metadata_proto, target_info, source_info)

  return metadata_proto
314
315
def BuildLegacyOtaMetadata(metadata_proto):
  """Converts the metadata proto to a legacy metadata dict.

  This metadata dict is used to build the legacy metadata text file for
  backward compatibility. We won't add new keys to the legacy metadata format.
  If new information is needed, we should add it as a new field in OtaMetadata
  proto definition.
  """

  separator = '|'
  metadata_dict = {}

  # Map the enum-valued OTA type onto its legacy string spelling.
  type_names = {
      ota_metadata_pb2.OtaMetadata.AB: 'AB',
      ota_metadata_pb2.OtaMetadata.BLOCK: 'BLOCK',
  }
  if metadata_proto.type in type_names:
    metadata_dict['ota-type'] = type_names[metadata_proto.type]

  # Boolean proto fields become 'yes' markers; they are omitted when false.
  for enabled, legacy_key in (
      (metadata_proto.wipe, 'ota-wipe'),
      (metadata_proto.retrofit_dynamic_partitions,
       'ota-retrofit-dynamic-partitions'),
      (metadata_proto.downgrade, 'ota-downgrade')):
    if enabled:
      metadata_dict[legacy_key] = 'yes'

  metadata_dict['ota-required-cache'] = str(metadata_proto.required_cache)

  post = metadata_proto.postcondition
  metadata_dict['post-build'] = separator.join(post.build)
  metadata_dict['post-build-incremental'] = post.build_incremental
  metadata_dict['post-sdk-level'] = post.sdk_level
  metadata_dict['post-security-patch-level'] = post.security_patch_level
  metadata_dict['post-timestamp'] = str(post.timestamp)

  pre = metadata_proto.precondition
  metadata_dict['pre-device'] = separator.join(pre.device)
  # Pre-build info is only present for incremental updates.
  if pre.build:
    metadata_dict['pre-build'] = separator.join(pre.build)
    metadata_dict['pre-build-incremental'] = pre.build_incremental

  if metadata_proto.spl_downgrade:
    metadata_dict['spl-downgrade'] = 'yes'
  metadata_dict.update(metadata_proto.property_files)

  return metadata_dict
360
361
def HandleDowngradeMetadata(metadata_proto, target_info, source_info):
  """Validates the timestamps against the downgrade flags and records them.

  Raises RuntimeError when --downgrade is set without an actual timestamp
  downgrade, or when a downgrade is detected without the flag.
  """
  # Only incremental OTAs are allowed to reach here.
  assert OPTIONS.incremental_source is not None

  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
  # Timestamps are numeric strings; compare their integer values.
  is_downgrade = int(post_timestamp) < int(pre_timestamp)

  if OPTIONS.spl_downgrade:
    metadata_proto.spl_downgrade = True

  if OPTIONS.downgrade:
    if not is_downgrade:
      raise RuntimeError(
          "--downgrade or --override_timestamp specified but no downgrade "
          "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
    metadata_proto.downgrade = True
  elif is_downgrade:
    raise RuntimeError(
        "Downgrade detected based on timestamp check: pre: %s, post: %s. "
        "Need to specify --override_timestamp OR --downgrade to allow "
        "building the incremental." % (pre_timestamp, post_timestamp))
385
386
def ComputeRuntimeBuildInfos(default_build_info, boot_variable_values):
  """Returns a set of build info objects that may exist during runtime.

  Args:
    default_build_info: The BuildInfo built from the build-time properties.
    boot_variable_values: Dict mapping each boot variable name to the list of
        values it may take at runtime; may be empty or None.

  Returns:
    A set of BuildInfo objects, always containing |default_build_info|, plus
    one per combination of boot-variable values.
  """

  build_info_set = {default_build_info}
  if not boot_variable_values:
    return build_info_set

  # Calculate all possible combinations of the values for the boot variables.
  keys = boot_variable_values.keys()
  value_list = boot_variable_values.values()
  combinations = [dict(zip(keys, values))
                  for values in itertools.product(*value_list)]
  for placeholder_values in combinations:
    # Reload the info_dict as some build properties may change their values
    # based on the value of ro.boot* properties.
    info_dict = copy.deepcopy(default_build_info.info_dict)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      partition_prop_key = "{}.build.prop".format(partition)
      input_file = info_dict[partition_prop_key].input_file
      ramdisk = GetRamdiskFormat(info_dict)
      if isinstance(input_file, zipfile.ZipFile):
        # NOTE(review): the zip is reopened by filename rather than reusing
        # the handle — presumably the cached handle may be closed; confirm.
        with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
          info_dict[partition_prop_key] = \
              PartitionBuildProps.FromInputFile(input_zip, partition,
                                                placeholder_values,
                                                ramdisk)
      else:
        info_dict[partition_prop_key] = \
            PartitionBuildProps.FromInputFile(input_file, partition,
                                              placeholder_values,
                                              ramdisk)
    # Keep the top-level alias in sync with the reloaded system props.
    info_dict["build.prop"] = info_dict["system.build.prop"]
    build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))

  return build_info_set
422
423
def CalculateRuntimeDevicesAndFingerprints(default_build_info,
                                           boot_variable_values):
  """Returns a tuple of sets for runtime devices and fingerprints"""

  runtime_infos = ComputeRuntimeBuildInfos(default_build_info,
                                           boot_variable_values)
  device_names = {info.device for info in runtime_infos}
  fingerprints = {info.fingerprint for info in runtime_infos}
  return device_names, fingerprints
436
437
def GetZipEntryOffset(zfp, entry_info):
  """Get offset to the beginning of a particular zip entry's payload.

  Args:
    zfp: zipfile.ZipFile
    entry_info: zipfile.ZipInfo

  Returns:
    (offset, size) tuple
  """
  # Don't use len(entry_info.extra): that reflects the *central directory*
  # copy of the extra field, while the local file header may carry a
  # different size. Parse the local header directly instead.
  fp = zfp.fp  # the underlying file object of the ZipFile
  fp.seek(entry_info.header_offset)
  header_fields = struct.unpack(zipfile.structFileHeader,
                                fp.read(zipfile.sizeFileHeader))
  # The local header's last two fields are the filename length and the
  # extra-field length; the payload starts right after both.
  name_len, extra_len = header_fields[-2], header_fields[-1]
  payload_offset = (entry_info.header_offset + zipfile.sizeFileHeader
                    + name_len + extra_len)
  return (payload_offset, entry_info.file_size)
465
466
class PropertyFiles(object):
  """A class that computes the property-files string for an OTA package.

  A property-files string is a comma-separated string that contains the
  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
  can be fetched directly with the package URL along with the offset/size info.
  These strings can be used for streaming A/B OTAs, or allowing an updater to
  download package metadata entry directly, without paying the cost of
  downloading entire package.

  Computing the final property-files string requires two passes. Because doing
  the whole package signing (with signapk.jar) will possibly reorder the ZIP
  entries, which may in turn invalidate earlier computed ZIP entry offset/size
  values.

  This class provides functions to be called for each pass. The general flow is
  as follows.

    property_files = PropertyFiles()
    # The first pass, which writes placeholders before doing initial signing.
    property_files.Compute()
    SignOutput()

    # The second pass, by replacing the placeholders with actual data.
    property_files.Finalize()
    SignOutput()

  And the caller can additionally verify the final result.

    property_files.Verify()
  """

  def __init__(self):
    # name: the key under metadata.property_files. required/optional: ZIP
    # entry names to include in the string. Subclasses are expected to set
    # all three.
    self.name = None
    self.required = ()
    self.optional = ()

  def Compute(self, input_zip):
    """Computes and returns a property-files string with placeholders.

    We reserve extra space for the offset and size of the metadata entry itself,
    although we don't know the final values until the package gets signed.

    Args:
      input_zip: The input ZIP file.

    Returns:
      A string with placeholders for the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
    """
    return self.GetPropertyFilesString(input_zip, reserve_space=True)

  class InsufficientSpaceException(Exception):
    # Raised by Finalize() when the final string does not fit into the space
    # reserved by Compute().
    pass

  def Finalize(self, input_zip, reserved_length):
    """Finalizes a property-files string with actual METADATA offset/size info.

    The input ZIP file has been signed, with the ZIP entries in the desired
    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
    the ZIP entry offsets and construct the property-files string with actual
    data. Note that during this process, we must pad the property-files string
    to the reserved length, so that the METADATA entry size remains the same.
    Otherwise the entries' offsets and sizes may change again.

    Args:
      input_zip: The input ZIP file.
      reserved_length: The reserved length of the property-files string during
          the call to Compute(). The final string must be no more than this
          size.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379  ".

    Raises:
      InsufficientSpaceException: If the reserved length is insufficient to hold
          the final string.
    """
    result = self.GetPropertyFilesString(input_zip, reserve_space=False)
    if len(result) > reserved_length:
      raise self.InsufficientSpaceException(
          'Insufficient reserved space: reserved={}, actual={}'.format(
              reserved_length, len(result)))

    # Pad with spaces so the entry size stays exactly |reserved_length|.
    result += ' ' * (reserved_length - len(result))
    return result

  def Verify(self, input_zip, expected):
    """Verifies the input ZIP file contains the expected property-files string.

    Args:
      input_zip: The input ZIP file.
      expected: The property-files string that's computed from Finalize().

    Raises:
      AssertionError: On finding a mismatch.
    """
    actual = self.GetPropertyFilesString(input_zip)
    assert actual == expected, \
        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)

  def GetPropertyFilesString(self, zip_file, reserve_space=False):
    """
    Constructs the property-files string per request.

    Args:
      zip_file: The input ZIP file.
      reserve_space: Whether to reserve placeholder space for the metadata
          entries instead of emitting their actual offset/size.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:     ".
    """

    def ComputeEntryOffsetSize(name):
      """Computes the zip entry offset and size."""
      info = zip_file.getinfo(name)
      (offset, size) = GetZipEntryOffset(zip_file, info)
      return '%s:%d:%d' % (os.path.basename(name), offset, size)

    tokens = []
    # Subclass-provided tokens (entries without a real ZIP entry) come first.
    tokens.extend(self._GetPrecomputed(zip_file))
    for entry in self.required:
      tokens.append(ComputeEntryOffsetSize(entry))
    for entry in self.optional:
      if entry in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(entry))

    # 'META-INF/com/android/metadata' is required. We don't know its actual
    # offset and length (as well as the values for other entries). So we reserve
    # 15-byte as a placeholder ('offset:length'), which is sufficient to cover
    # the space for metadata entry. Because 'offset' allows a max of 10-digit
    # (i.e. ~9 GiB), with a max of 4-digit for the length. Note that all the
    # reserved space serves the metadata entry only.
    if reserve_space:
      tokens.append('metadata:' + ' ' * 15)
      tokens.append('metadata.pb:' + ' ' * 15)
    else:
      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
      if METADATA_PROTO_NAME in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))

    return ','.join(tokens)

  def _GetPrecomputed(self, input_zip):
    """Computes the additional tokens to be included into the property-files.

    This applies to tokens without actual ZIP entries, such as
    payload_metadata.bin. We want to expose the offset/size to updaters, so
    that they can download the payload metadata directly with the info.

    Args:
      input_zip: The input zip file.

    Returns:
      A list of strings (tokens) to be added to the property-files string.
    """
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return []
628
629
def SignOutput(temp_zip_name, output_zip_name, package_key=None, pw=None):
  """Whole-file signs |temp_zip_name| into |output_zip_name|.

  Falls back to the global OPTIONS for the key and its password when they
  are not supplied explicitly.
  """
  key = OPTIONS.package_key if package_key is None else package_key
  password = pw
  if password is None and OPTIONS.key_passwords:
    password = OPTIONS.key_passwords[key]

  SignFile(temp_zip_name, output_zip_name, key, password,
           whole_file=True)
638
639
def ConstructOtaApexInfo(target_zip, source_file=None):
  """If applicable, add the source version to the apex info."""

  def _ReadApexInfo(input_zip):
    if not DoesInputFileContain(input_zip, "META/apex_info.pb"):
      logger.warning("target_file doesn't contain apex_info.pb %s", input_zip)
      return None
    return ReadBytesFromInputFile(input_zip, "META/apex_info.pb")

  target_apex_string = _ReadApexInfo(target_zip)
  # Return early if the target apex info doesn't exist or is empty, or if
  # there is no source build to merge versions from.
  if not target_apex_string or not source_file:
    return target_apex_string

  source_apex_string = _ReadApexInfo(source_file)
  if not source_apex_string:
    return target_apex_string

  source_proto = ota_metadata_pb2.ApexMetadata()
  source_proto.ParseFromString(source_apex_string)
  source_versions = {}
  for apex in source_proto.apex_info:
    source_versions[apex.package_name] = apex.version

  # If the apex package is available in the source build, initialize the source
  # apex version.
  target_proto = ota_metadata_pb2.ApexMetadata()
  target_proto.ParseFromString(target_apex_string)
  for target_apex in target_proto.apex_info:
    version = source_versions.get(target_apex.package_name)
    if version is not None:
      target_apex.source_version = version

  return target_proto.SerializeToString()
677
678
def IsLz4diffCompatible(source_file: str, target_file: str):
  """Check whether lz4diff versions in two builds are compatible

  Args:
    source_file: Path to source build's target_file.zip
    target_file: Path to target build's target_file.zip

  Returns:
    bool true if and only if lz4diff versions are compatible
  """
  if None in (source_file, target_file):
    return False
  # Right now we enable lz4diff as long as source build has liblz4.so.
  # In the future we might introduce version system to lz4diff as well.
  if zipfile.is_zipfile(source_file):
    with zipfile.ZipFile(source_file, "r") as zfp:
      return "META/liblz4.so" in zfp.namelist()
  assert os.path.isdir(source_file)
  return os.path.exists(os.path.join(source_file, "META", "liblz4.so"))
699
700
def IsZucchiniCompatible(source_file: str, target_file: str):
  """Check whether zucchini versions in two builds are compatible

  Args:
    source_file: Path to source build's target_file.zip
    target_file: Path to target build's target_file.zip

  Returns:
    bool true if and only if zucchini versions are compatible
  """
  if source_file is None or target_file is None:
    return False
  assert os.path.exists(source_file)
  assert os.path.exists(target_file)

  assert zipfile.is_zipfile(source_file) or os.path.isdir(source_file)
  assert zipfile.is_zipfile(target_file) or os.path.isdir(target_file)
  _ZUCCHINI_CONFIG_ENTRY_NAME = "META/zucchini_config.txt"

  def _ReadEntry(path, entry):
    """Reads |entry| from a .zip file or an extracted dir; None if absent."""
    if zipfile.is_zipfile(path):
      with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
        if entry in zfp.namelist():
          return zfp.read(entry).decode()
      return None
    entry_path = os.path.join(path, entry)
    if os.path.exists(entry_path):
      with open(entry_path, "r") as fp:
        return fp.read()
    return None

  source_entry = _ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
  target_entry = _ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
  # Normalize to bool: previously a missing/empty config could leak a
  # non-bool falsy value ("" or False) to callers despite the documented
  # bool return type.
  return bool(source_entry and target_entry and source_entry == target_entry)
735
736
def ExtractTargetFiles(path: str):
  """Returns a directory holding the extracted target files.

  |path| is returned unchanged when it is already a directory; otherwise the
  OTA-relevant entries of the zip are unpacked into a fresh temp dir.
  """
  if not os.path.isdir(path):
    extracted_dir = common.MakeTempDir("target_files")
    common.UnzipToDir(path, extracted_dir, UNZIP_PATTERN + [""])
    return extracted_dir
  logger.info("target files %s is already extracted", path)
  return path
744
745
def LocatePartitionPath(target_files_dir: str, partition: str, allow_empty):
  """Returns the path of |partition|'s image under RADIO/ or IMAGES/.

  RADIO/ takes precedence over IMAGES/. Returns "" when the image is absent
  and |allow_empty| is truthy; raises common.ExternalError otherwise.
  """
  for subdir in ("RADIO", "IMAGES"):
    candidate = os.path.join(target_files_dir, subdir, partition + ".img")
    if os.path.exists(candidate):
      return candidate
  if allow_empty:
    return ""
  raise common.ExternalError(
      "Partition {} not found in target files {}".format(partition, target_files_dir))
757
758
def GetPartitionImages(target_files_dir: str, ab_partitions, allow_empty=True):
  """Returns a ':'-joined list of image paths for |ab_partitions|."""
  assert os.path.isdir(target_files_dir)
  image_paths = (LocatePartitionPath(target_files_dir, name, allow_empty)
                 for name in ab_partitions)
  return ":".join(image_paths)
762
763
def LocatePartitionMap(target_files_dir: str, partition: str):
  """Returns the RADIO/<partition>.map path if it exists, else ""."""
  candidate = os.path.join(target_files_dir, "RADIO", partition + ".map")
  return candidate if os.path.exists(candidate) else ""
769
770
def GetPartitionMaps(target_files_dir: str, ab_partitions):
  """Returns a colon-separated list of .map paths ("" for missing maps)."""
  assert os.path.isdir(target_files_dir)
  return ":".join(
      LocatePartitionMap(target_files_dir, partition)
      for partition in ab_partitions)
774
775
class PayloadGenerator(object):
  """Manages the creation and the signing of an A/B OTA Payload."""

  # Archive names used when packing the payload into the OTA zip.
  PAYLOAD_BIN = 'payload.bin'
  PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'

  def __init__(self, secondary=False, wipe_user_data=False, minor_version=None, is_partial_update=False):
    """Initializes a Payload instance.

    Args:
      secondary: Whether it's generating a secondary payload (default: False).
          A secondary payload is packed under secondary/ and gets
          SWITCH_SLOT_ON_REBOOT=0 appended to its properties.
      wipe_user_data: Whether to append POWERWASH=1 to the payload properties
          (default: False).
      minor_version: If set, overrides the minor version parsed from
          META/update_engine_config.txt (default: None).
      is_partial_update: Whether to pass --is_partial_update=true to
          delta_generator (default: False).
    """
    # Path to the generated (and later, signed) payload; set by Generate()/Sign().
    self.payload_file = None
    # Path to the dumped properties file; set by WriteToZip().
    self.payload_properties = None
    self.secondary = secondary
    self.wipe_user_data = wipe_user_data
    self.minor_version = minor_version
    self.is_partial_update = is_partial_update

  def _Run(self, cmd):  # pylint: disable=no-self-use
    # Don't pipe (buffer) the output if verbose is set. Let
    # brillo_update_payload write to stdout/stderr directly, so its progress can
    # be monitored.
    if OPTIONS.verbose:
      common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
    else:
      common.RunAndCheckOutput(cmd)

  def Generate(self, target_file, source_file=None, additional_args=None):
    """Generates a payload from the given target-files zip(s).

    Runs the delta_generator binary over the (extracted) target files and
    stores the resulting unsigned payload path in self.payload_file.

    Args:
      target_file: The filename of the target build target-files zip.
      source_file: The filename of the source build target-files zip; or None if
          generating a full OTA.
      additional_args: A list of additional args that should be passed to
          delta_generator binary; or None.
    """
    if additional_args is None:
      additional_args = []

    payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
    target_dir = ExtractTargetFiles(target_file)
    cmd = ["delta_generator",
           "--out_file", payload_file]
    # The set of A/B partitions to include comes from the target build.
    with open(os.path.join(target_dir, "META", "ab_partitions.txt")) as fp:
      ab_partitions = fp.read().strip().split("\n")
    cmd.extend(["--partition_names", ":".join(ab_partitions)])
    # New (target) images are mandatory: allow_empty=False raises if missing.
    cmd.extend(
        ["--new_partitions", GetPartitionImages(target_dir, ab_partitions, False)])
    cmd.extend(
        ["--new_mapfiles", GetPartitionMaps(target_dir, ab_partitions)])
    if source_file is not None:
      # Incremental OTA: also feed the old (source) images. Missing source
      # images are tolerated (allow_empty=True).
      source_dir = ExtractTargetFiles(source_file)
      cmd.extend(
          ["--old_partitions", GetPartitionImages(source_dir, ab_partitions, True)])
      cmd.extend(
          ["--old_mapfiles", GetPartitionMaps(source_dir, ab_partitions)])

      if OPTIONS.disable_fec_computation:
        cmd.extend(["--disable_fec_computation=true"])
      if OPTIONS.disable_verity_computation:
        cmd.extend(["--disable_verity_computation=true"])
    postinstall_config = os.path.join(
        target_dir, "META", "postinstall_config.txt")

    if os.path.exists(postinstall_config):
      cmd.extend(["--new_postinstall_config_file", postinstall_config])
    dynamic_partition_info = os.path.join(
        target_dir, "META", "dynamic_partitions_info.txt")

    if os.path.exists(dynamic_partition_info):
      cmd.extend(["--dynamic_partition_info_file", dynamic_partition_info])

    major_version, minor_version = ParseUpdateEngineConfig(
        os.path.join(target_dir, "META", "update_engine_config.txt"))
    # For incremental OTAs the versions from the source build override the
    # ones parsed from the target build above.
    if source_file:
      major_version, minor_version = ParseUpdateEngineConfig(
          os.path.join(source_dir, "META", "update_engine_config.txt"))
    # An explicit minor_version passed to the constructor wins over both.
    if self.minor_version:
      minor_version = self.minor_version
    cmd.extend(["--major_version", str(major_version)])
    # minor_version is only passed for incremental or partial updates.
    if source_file is not None or self.is_partial_update:
      cmd.extend(["--minor_version", str(minor_version)])
    if self.is_partial_update:
      cmd.extend(["--is_partial_update=true"])
    cmd.extend(additional_args)
    self._Run(cmd)

    self.payload_file = payload_file
    self.payload_properties = None

  def Sign(self, payload_signer):
    """Generates and signs the hashes of the payload and metadata.

    On success, self.payload_file is replaced with the signed payload.

    Args:
      payload_signer: A PayloadSigner() instance that serves the signing work.

    Raises:
      AssertionError: On any failure when calling brillo_update_payload script.
    """
    assert isinstance(payload_signer, PayloadSigner)

    # 1. Generate hashes of the payload and metadata files.
    payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
    metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
    cmd = ["brillo_update_payload", "hash",
           "--unsigned_payload", self.payload_file,
           "--signature_size", str(payload_signer.maximum_signature_size),
           "--metadata_hash_file", metadata_sig_file,
           "--payload_hash_file", payload_sig_file]
    self._Run(cmd)

    # 2. Sign the hashes.
    signed_payload_sig_file = payload_signer.SignHashFile(payload_sig_file)
    signed_metadata_sig_file = payload_signer.SignHashFile(metadata_sig_file)

    # 3. Insert the signatures back into the payload file.
    signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
                                              suffix=".bin")
    cmd = ["brillo_update_payload", "sign",
           "--unsigned_payload", self.payload_file,
           "--payload", signed_payload_file,
           "--signature_size", str(payload_signer.maximum_signature_size),
           "--metadata_signature_file", signed_metadata_sig_file,
           "--payload_signature_file", signed_payload_sig_file]
    self._Run(cmd)

    self.payload_file = signed_payload_file

  def WriteToZip(self, output_zip):
    """Writes the payload and its properties to the given zip.

    Args:
      output_zip: The output ZipFile instance.
    """
    assert self.payload_file is not None
    # 4. Dump the signed payload properties.
    properties_file = common.MakeTempFile(prefix="payload-properties-",
                                          suffix=".txt")
    cmd = ["brillo_update_payload", "properties",
           "--payload", self.payload_file,
           "--properties_file", properties_file]
    self._Run(cmd)

    # A secondary payload must not switch the active slot.
    if self.secondary:
      with open(properties_file, "a") as f:
        f.write("SWITCH_SLOT_ON_REBOOT=0\n")

    if self.wipe_user_data:
      with open(properties_file, "a") as f:
        f.write("POWERWASH=1\n")

    self.payload_properties = properties_file

    if self.secondary:
      payload_arcname = PayloadGenerator.SECONDARY_PAYLOAD_BIN
      payload_properties_arcname = PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT
    else:
      payload_arcname = PayloadGenerator.PAYLOAD_BIN
      payload_properties_arcname = PayloadGenerator.PAYLOAD_PROPERTIES_TXT

    # Add the signed payload file and properties into the zip. In order to
    # support streaming, we pack them as ZIP_STORED. So these entries can be
    # read directly with the offset and length pairs.
    common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
                    compress_type=zipfile.ZIP_STORED)
    common.ZipWrite(output_zip, self.payload_properties,
                    arcname=payload_properties_arcname,
                    compress_type=zipfile.ZIP_STORED)
948
949
class StreamingPropertyFiles(PropertyFiles):
  """A subclass for computing the property-files for streaming A/B OTAs."""

  def __init__(self):
    super(StreamingPropertyFiles, self).__init__()
    self.name = 'ota-streaming-property-files'
    # payload.bin and payload_properties.txt must exist.
    self.required = ('payload.bin', 'payload_properties.txt')
    self.optional = (
        # apex_info.pb isn't directly used in the update flow
        'apex_info.pb',
        # care_map is available only if dm-verity is enabled.
        'care_map.pb',
        'care_map.txt',
        # compatibility.zip is available only if target supports Treble.
        'compatibility.zip',
    )
970
971
class AbOtaPropertyFiles(StreamingPropertyFiles):
  """The property-files for A/B OTA that includes payload_metadata.bin info.

  Since P, we expose one more token (aka property-file), in addition to the ones
  for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
  doesn't exist as a separate ZIP entry, but can be used to verify if the
  payload can be applied on the given device.

  For backward compatibility, we keep both of the 'ota-streaming-property-files'
  and the newly added 'ota-property-files' in P. The new token will only be
  available in 'ota-property-files'.
  """

  def __init__(self):
    super(AbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'

  def _GetPrecomputed(self, input_zip):
    """Returns the precomputed token for the virtual payload_metadata.bin entry."""
    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
    return ['payload_metadata.bin:{}:{}'.format(offset, size)]

  @staticmethod
  def _GetPayloadMetadataOffsetAndSize(input_zip):
    """Computes the offset and size of the payload metadata for a given package.

    (From system/update_engine/update_metadata.proto)
    A delta update file contains all the deltas needed to update a system from
    one specific version to another specific version. The update format is
    represented by this struct pseudocode:

    struct delta_update_file {
      char magic[4] = "CrAU";
      uint64 file_format_version;
      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest

      // Only present if format_version > 1:
      uint32 metadata_signature_size;

      // The Bzip2 compressed DeltaArchiveManifest
      char manifest[manifest_size];

      // The signature of the metadata (from the beginning of the payload up to
      // this location, not including the signature itself). This is a
      // serialized Signatures message.
      char metadata_signature_message[metadata_signature_size];

      // Data blobs for files, no specific format. The specific offset
      // and length of each data blob is recorded in the DeltaArchiveManifest.
      struct {
        char data[];
      } blobs[];

      // These two are not signed:
      uint64 payload_signatures_message_size;
      char payload_signatures_message[];
    };

    'payload-metadata.bin' contains all the bytes from the beginning of the
    payload, till the end of 'metadata_signature_message'.
    """
    payload_info = input_zip.getinfo('payload.bin')
    (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info)

    # Read the underlying raw zipfile at specified offset
    payload_fp = input_zip.fp
    payload_fp.seek(payload_offset)
    header_bin = payload_fp.read(24)

    # network byte order (big-endian)
    # Fields: (magic, file_format_version, manifest_size,
    # metadata_signature_size) -- 4 + 8 + 8 + 4 = 24 bytes.
    header = struct.unpack("!IQQL", header_bin)

    # 'CrAU'
    magic = header[0]
    assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \
        .format(magic, payload_offset)

    manifest_size = header[2]
    metadata_signature_size = header[3]
    # Metadata spans the fixed header plus the manifest and its signature.
    metadata_total = 24 + manifest_size + metadata_signature_size
    assert metadata_total <= payload_size

    return (payload_offset, metadata_total)
1055
1056
def Fnmatch(filename, patterns):
  """Returns True if |filename| matches any of the glob |patterns|.

  Args:
    filename: Path string to test.
    patterns: An iterable of fnmatch-style glob patterns.
  """
  # Fixed misspelled parameter name ("pattersn"); use a generator so any()
  # can short-circuit on the first match.
  return any(fnmatch.fnmatch(filename, pat) for pat in patterns)
1059
1060
def CopyTargetFilesDir(input_dir):
  """Copies the OTA-relevant subset of |input_dir| into a fresh temp dir.

  IMAGES/ and META/ are copied wholesale; in addition, any *.prop,
  prop.default or etc/vintf file matching UNZIP_PATTERN is copied while
  preserving its relative path.
  """
  output_dir = common.MakeTempDir("target_files")
  # The two directories that are always needed, copied in full.
  for subdir in ("IMAGES", "META"):
    shutil.copytree(os.path.join(input_dir, subdir),
                    os.path.join(output_dir, subdir), dirs_exist_ok=True)
  # Pick up the scattered prop / vintf files covered by UNZIP_PATTERN.
  for (dirpath, _, filenames) in os.walk(input_dir):
    for filename in filenames:
      src = os.path.join(dirpath, filename)
      rel = src.removeprefix(input_dir).removeprefix("/")
      if not Fnmatch(rel, UNZIP_PATTERN):
        continue
      is_prop = filename.endswith(".prop") or filename == "prop.default"
      if is_prop or "/etc/vintf/" in rel:
        dst = os.path.join(output_dir, rel)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        shutil.copy(src, dst)
  return output_dir
1079