# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

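"""Merges multiple partial OTA payloads into a single OTA package.

Example invocation (package names and key path are hypothetical):

  merge_ota.py ota_system.zip ota_vendor.zip \
      --output merged_ota.zip --package_key path/to/releasekey
"""
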
import argparse
import logging
import os
import struct
import sys
import tempfile
import zipfile
from typing import BinaryIO, List

import care_map_pb2
import common
import update_payload
from ota_metadata_pb2 import OtaMetadata
from ota_signing_utils import AddSigningArgumentParse
from ota_utils import PayloadGenerator, METADATA_PROTO_NAME, FinalizeMetadata
from payload_signer import PayloadSigner
from update_metadata_pb2 import (DeltaArchiveManifest, DynamicPartitionGroup,
                                 DynamicPartitionMetadata)
from update_payload import Payload

logger = logging.getLogger(__name__)

CARE_MAP_ENTRY = "care_map.pb"
APEX_INFO_ENTRY = "apex_info.pb"


def WriteDataBlob(payload: Payload, outfp: BinaryIO, read_size=1024*64):
  """Copies a payload's data blob to outfp in read_size-byte chunks."""
  for i in range(0, payload.total_data_length, read_size):
    blob = payload.ReadDataBlob(
        i, min(i + read_size, payload.total_data_length) - i)
    outfp.write(blob)


def ConcatBlobs(payloads: List[Payload], outfp: BinaryIO):
  """Writes the data blobs of all payloads to outfp, back to back."""
  for payload in payloads:
    WriteDataBlob(payload, outfp)


def TotalDataLength(partitions):
  """Returns the total byte length of the data blob referenced by partitions.

  Scans operations from the end, since the last data-carrying operation ends
  exactly at the end of the blob.
  """
  for partition in reversed(partitions):
    for op in reversed(partition.operations):
      if op.data_length > 0:
        return op.data_offset + op.data_length
  return 0
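# Illustrative example: if the last data-carrying operation has
# data_offset == 4096 and data_length == 1024, the data blob is
# 4096 + 1024 == 5120 bytes long, which is what the reversed scan above
# returns.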


def ExtendPartitionUpdates(partitions, new_partitions):
  """Appends new_partitions to partitions, rebasing their data offsets."""
  prefix_blob_length = TotalDataLength(partitions)
  partitions.extend(new_partitions)
  for part in partitions[-len(new_partitions):]:
    for op in part.operations:
      if op.HasField("data_length") and op.data_length != 0:
        op.data_offset += prefix_blob_length
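# Rebasing example (illustrative numbers): if the existing blob is 5120 bytes
# long, an appended operation that previously read its data at offset 0 now
# reads it at offset 5120, matching the blob layout produced by ConcatBlobs().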


class DuplicatePartitionError(ValueError):
  pass


def MergeDynamicPartitionGroups(groups: List[DynamicPartitionGroup], new_groups: List[DynamicPartitionGroup]):
  new_groups = {new_group.name: new_group for new_group in new_groups}
  for group in groups:
    if group.name not in new_groups:
      continue
    new_group = new_groups[group.name]
    common_partitions = set(group.partition_names).intersection(
        set(new_group.partition_names))
    if len(common_partitions) != 0:
      raise DuplicatePartitionError(
          f"Old group and new group must not share partitions, {group.partition_names} vs. {new_group.partition_names}, common partitions: {common_partitions}")
    group.partition_names.extend(new_group.partition_names)
    group.size = max(new_group.size, group.size)
    del new_groups[group.name]
  for new_group in new_groups.values():
    groups.append(new_group)
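# Illustrative merge (names and sizes are made up):
#   group_a {size: 4 GiB, partition_names: [system]}
# merged with
#   group_a {size: 6 GiB, partition_names: [vendor]}
# yields
#   group_a {size: 6 GiB, partition_names: [system, vendor]},
# i.e. partition lists are concatenated and the larger group size wins.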


def MergeDynamicPartitionMetadata(metadata: DynamicPartitionMetadata, new_metadata: DynamicPartitionMetadata):
  MergeDynamicPartitionGroups(metadata.groups, new_metadata.groups)
  metadata.snapshot_enabled &= new_metadata.snapshot_enabled
  metadata.vabc_enabled &= new_metadata.vabc_enabled
  assert metadata.vabc_compression_param == new_metadata.vabc_compression_param, f"{metadata.vabc_compression_param} vs. {new_metadata.vabc_compression_param}"
  metadata.cow_version = max(metadata.cow_version, new_metadata.cow_version)


def MergeManifests(payloads: List[Payload]) -> DeltaArchiveManifest:
  if len(payloads) == 0:
    return None
  if len(payloads) == 1:
    return payloads[0].manifest

  output_manifest = DeltaArchiveManifest()
  output_manifest.block_size = payloads[0].manifest.block_size
  output_manifest.partial_update = True
  output_manifest.dynamic_partition_metadata.snapshot_enabled = payloads[
      0].manifest.dynamic_partition_metadata.snapshot_enabled
  output_manifest.dynamic_partition_metadata.vabc_enabled = payloads[
      0].manifest.dynamic_partition_metadata.vabc_enabled
  output_manifest.dynamic_partition_metadata.vabc_compression_param = payloads[
      0].manifest.dynamic_partition_metadata.vabc_compression_param
  apex_info = {}
  for payload in payloads:
    manifest = payload.manifest
    assert manifest.block_size == output_manifest.block_size
    output_manifest.minor_version = max(
        output_manifest.minor_version, manifest.minor_version)
    output_manifest.max_timestamp = max(
        output_manifest.max_timestamp, manifest.max_timestamp)
    # Collect apex_info entries keyed by package name, so that duplicates
    # across payloads are deduplicated (last one wins) before the sorted
    # write-out below.
    for apex in manifest.apex_info:
      apex_info[apex.package_name] = apex
    ExtendPartitionUpdates(output_manifest.partitions, manifest.partitions)
    try:
      MergeDynamicPartitionMetadata(
          output_manifest.dynamic_partition_metadata, manifest.dynamic_partition_metadata)
    except DuplicatePartitionError:
      logger.error(
          "OTA %s shares partitions with one of the previous OTAs", payload.name)
      raise

  for apex_name in sorted(apex_info.keys()):
    # append() rather than extend(): apex_info[apex_name] is a single message.
    output_manifest.apex_info.append(apex_info[apex_name])

  return output_manifest
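# Note: partial_update is set above because each input is a partial OTA that
# updates only a subset of partitions; the merged payload remains a partial
# update from update_engine's perspective.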


def MergePayloads(payloads: List[Payload]):
  with tempfile.NamedTemporaryFile(prefix="payload_blob") as tmpfile:
    ConcatBlobs(payloads, tmpfile)


def MergeCareMap(paths: List[str]):
  """Merges care_map.pb entries from all packages; returns empty bytes if none."""
  care_map = care_map_pb2.CareMap()
  for path in paths:
    with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
      if CARE_MAP_ENTRY in zfp.namelist():
        care_map_bytes = zfp.read(CARE_MAP_ENTRY)
        partial_care_map = care_map_pb2.CareMap()
        partial_care_map.ParseFromString(care_map_bytes)
        care_map.partitions.extend(partial_care_map.partitions)
  if len(care_map.partitions) == 0:
    return b""
  return care_map.SerializeToString()
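# The merged care map simply concatenates the per-package partition entries;
# this relies on the inputs not updating the same partition twice, which
# CheckDuplicatePartitions() enforces for dynamic partitions.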


def WriteHeaderAndManifest(manifest: DeltaArchiveManifest, fp: BinaryIO):
  __MAGIC = b"CrAU"
  __MAJOR_VERSION = 2
  manifest_bytes = manifest.SerializeToString()
  fp.write(struct.pack(">4sQQL", __MAGIC,
           __MAJOR_VERSION, len(manifest_bytes), 0))
  fp.write(manifest_bytes)
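# Header layout written above (big-endian ">4sQQL"):
#   4s  magic "CrAU"
#   Q   major version (2)
#   Q   manifest size in bytes
#   L   metadata signature size (0: the unsigned payload carries no
#       metadata signature yet)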


def AddOtaMetadata(input_ota, metadata_ota, output_ota, package_key, pw):
  """Finalizes input_ota into output_ota, using build metadata from metadata_ota."""
  with zipfile.ZipFile(metadata_ota, 'r') as zfp:
    metadata = OtaMetadata()
    metadata.ParseFromString(zfp.read(METADATA_PROTO_NAME))
    FinalizeMetadata(metadata, input_ota, output_ota,
                     package_key=package_key, pw=pw)
    return output_ota


def CheckOutput(output_ota):
  payload = update_payload.Payload(output_ota)
  payload.CheckOpDataHash()


def CheckDuplicatePartitions(payloads: List[Payload]):
  partition_to_ota = {}
  for payload in payloads:
    for group in payload.manifest.dynamic_partition_metadata.groups:
      for part in group.partition_names:
        if part in partition_to_ota:
          raise DuplicatePartitionError(
              f"OTA {partition_to_ota[part].name} and {payload.name} both update partition {part}")
        partition_to_ota[part] = payload


def ApexInfo(file_paths):
  if len(file_paths) > 1:
    logger.info("More than one target file specified, will ignore "
                "apex_info.pb (if any)")
    return None
  with zipfile.ZipFile(file_paths[0], "r", allowZip64=True) as zfp:
    if APEX_INFO_ENTRY in zfp.namelist():
      return zfp.read(APEX_INFO_ENTRY)
  return None
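# apex_info.pb is only propagated in the single-package (re-signing) case
# described in the --help epilog; when merging several OTAs there is no
# single apex_info.pb that describes the combined package.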


def main(argv):
  parser = argparse.ArgumentParser(description='Merge multiple partial OTAs')
  parser.add_argument('packages', type=str, nargs='+',
                      help='Paths to OTA packages to merge')
  parser.add_argument('--output', type=str,
                      help='Path to the output merged OTA', required=True)
  parser.add_argument('--metadata_ota', type=str,
                      help='Output zip will use build metadata from this OTA package; '
                           'if unspecified, the last OTA package in the merge list is used')
  parser.add_argument('-v', action="store_true",
                      help="Enable verbose logging", dest="verbose")
  AddSigningArgumentParse(parser)

  parser.epilog = ('This tool can also be used to re-sign a regular OTA. For a single regular OTA, '
                   'apex_info.pb will be written to the output. When merging multiple OTAs, '
                   'apex_info.pb will not be written.')
  args = parser.parse_args(argv[1:])
  file_paths = args.packages

  common.OPTIONS.verbose = args.verbose
  if args.verbose:
    logger.setLevel(logging.INFO)

  logger.info(args)
  if args.search_path:
    common.OPTIONS.search_path = args.search_path

  metadata_ota = args.packages[-1]
  if args.metadata_ota is not None:
    metadata_ota = args.metadata_ota
    assert os.path.exists(metadata_ota)

  payloads = [Payload(path) for path in file_paths]

  CheckDuplicatePartitions(payloads)

  merged_manifest = MergeManifests(payloads)

  # Get signing keys
  key_passwords = common.GetKeyPasswords([args.package_key])

  apex_info_bytes = ApexInfo(file_paths)

  with tempfile.NamedTemporaryFile() as unsigned_payload:
    WriteHeaderAndManifest(merged_manifest, unsigned_payload)
    ConcatBlobs(payloads, unsigned_payload)
    unsigned_payload.flush()

    generator = PayloadGenerator()
    generator.payload_file = unsigned_payload.name
    logger.info("Payload size: %d", os.path.getsize(generator.payload_file))

    if args.package_key:
      logger.info("Signing payload...")
      # TODO: remove OPTIONS when no longer used as fallback in payload_signer
      common.OPTIONS.payload_signer_args = None
      common.OPTIONS.payload_signer_maximum_signature_size = None
      signer = PayloadSigner(args.package_key, args.private_key_suffix,
                             key_passwords[args.package_key],
                             payload_signer=args.payload_signer,
                             payload_signer_args=args.payload_signer_args,
                             payload_signer_maximum_signature_size=args.payload_signer_maximum_signature_size)
      generator.Sign(signer)

    logger.info("Payload size: %d", os.path.getsize(generator.payload_file))

    logger.info("Writing to %s", args.output)

    with tempfile.NamedTemporaryFile(prefix="signed_ota", suffix=".zip") as signed_ota:
      with zipfile.ZipFile(signed_ota, "w") as zfp:
        generator.WriteToZip(zfp)
        care_map_bytes = MergeCareMap(args.packages)
        if care_map_bytes:
          common.ZipWriteStr(zfp, CARE_MAP_ENTRY, care_map_bytes)
        if apex_info_bytes:
          logger.info("Writing %s", APEX_INFO_ENTRY)
          common.ZipWriteStr(zfp, APEX_INFO_ENTRY, apex_info_bytes)
      AddOtaMetadata(signed_ota.name, metadata_ota,
                     args.output, args.package_key, key_passwords[args.package_key])
  return 0


if __name__ == '__main__':
  logging.basicConfig()
  sys.exit(main(sys.argv))