• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#
2# Copyright (C) 2013 The Android Open Source Project
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#      http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15#
16
17"""Verifying the integrity of a Chrome OS update payload.
18
19This module is used internally by the main Payload class for verifying the
20integrity of an update payload. The interface for invoking the checks is as
21follows:
22
23  checker = PayloadChecker(payload)
24  checker.Run(...)
25"""
26
27from __future__ import print_function
28
29import array
30import base64
31import collections
32import hashlib
33import itertools
34import os
35import subprocess
36
37from update_payload import common
38from update_payload import error
39from update_payload import format_utils
40from update_payload import histogram
41from update_payload import update_metadata_pb2
42
43
44#
45# Constants.
46#
47
# Names of the individual checks that callers may disable by passing them in
# the checker's disabled_tests argument.
_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
_CHECK_PAYLOAD_SIG = 'payload-sig'
# Public tuple enumerating every check that may be disabled.
CHECKS_TO_DISABLE = (
    _CHECK_DST_PSEUDO_EXTENTS,
    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
    _CHECK_PAYLOAD_SIG,
)

# Payload type tags: a 'full' payload writes partitions from scratch, a
# 'delta' payload patches existing source partitions.
_TYPE_FULL = 'full'
_TYPE_DELTA = 'delta'

# Fallback filesystem/payload block size (bytes) when the caller passes 0.
_DEFAULT_BLOCK_SIZE = 4096

# Default public key used for verifying payload signatures, expected to live
# in the same directory as this module.
_DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem'
_DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
                                         _DEFAULT_PUBKEY_BASE_NAME)

# Supported minor version map to payload types allowed to be using them.
# Minor version 0 is full payloads only; all later versions are delta-only.
_SUPPORTED_MINOR_VERSIONS = {
    0: (_TYPE_FULL,),
    1: (_TYPE_DELTA,),
    2: (_TYPE_DELTA,),
    3: (_TYPE_DELTA,),
    4: (_TYPE_DELTA,),
    5: (_TYPE_DELTA,),
    6: (_TYPE_DELTA,),
}

# 2 GiB. NOTE(review): presumably the usable partition size assumed for
# older delta payloads — its use is not visible in this chunk; confirm.
_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
78
79#
80# Helper functions.
81#
82
83def _IsPowerOfTwo(val):
84  """Returns True iff val is a power of two."""
85  return val > 0 and (val & (val - 1)) == 0
86
87
88def _AddFormat(format_func, value):
89  """Adds a custom formatted representation to ordinary string representation.
90
91  Args:
92    format_func: A value formatter.
93    value: Value to be formatted and returned.
94
95  Returns:
96    A string 'x (y)' where x = str(value) and y = format_func(value).
97  """
98  ret = str(value)
99  formatted_str = format_func(value)
100  if formatted_str:
101    ret += ' (%s)' % formatted_str
102  return ret
103
104
def _AddHumanReadableSize(size):
  """Adds a human readable representation to a byte size value.

  Args:
    size: A byte count.

  Returns:
    A string 'size (human-readable)' as produced by _AddFormat using
    format_utils.BytesToHumanReadable as the formatter.
  """
  return _AddFormat(format_utils.BytesToHumanReadable, size)
108
109
110#
111# Payload report generator.
112#
113
class _PayloadReport(object):
  """A payload report generator.

  A report is essentially a sequence of nodes, which represent data points. It
  is initialized to have a "global", untitled section. A node may be a
  sub-report itself.
  """

  # Report nodes: Field, sub-report, section.
  class Node(object):
    """A report node interface."""

    @staticmethod
    def _Indent(indent, line):
      """Indents a line by a given indentation amount.

      Args:
        indent: The indentation amount.
        line: The line content (string).

      Returns:
        The properly indented line (string).
      """
      # '%*s' with an empty string argument emits exactly `indent` spaces.
      return '%*s%s' % (indent, '', line)

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Generates the report lines for this node.

      Args:
        base_indent: Base indentation for each line.
        sub_indent: Additional indentation for sub-nodes.
        curr_section: The current report section object.

      Returns:
        A pair consisting of a list of properly indented report lines and a new
        current section object.
      """
      raise NotImplementedError

  class FieldNode(Node):
    """A field report node, representing a (name, value) pair."""

    def __init__(self, name, value, linebreak, indent):
      super(_PayloadReport.FieldNode, self).__init__()
      # name may be empty/None, in which case only the value is rendered.
      self.name = name
      # value is arbitrary; it is rendered via str() at generation time.
      self.value = value
      # linebreak: whether the value starts on a line of its own.
      self.linebreak = linebreak
      # indent: extra indentation applied to the value's lines.
      self.indent = indent

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Generates a properly formatted 'name : value' entry."""
      report_output = ''
      if self.name:
        # Pad the name so the ':' separators align across the section.
        report_output += self.name.ljust(curr_section.max_field_name_len) + ' :'
      value_lines = str(self.value).splitlines()
      if self.linebreak and self.name:
        # Value rendered on its own line(s), each indented by self.indent.
        report_output += '\n' + '\n'.join(
            ['%*s%s' % (self.indent, '', line) for line in value_lines])
      else:
        if self.name:
          report_output += ' '
        report_output += '%*s' % (self.indent, '')
        # Continuation lines align with the first character of the value.
        cont_line_indent = len(report_output)
        # NOTE(review): assumes str(self.value) yields at least one line; an
        # empty value would raise IndexError here — confirm against callers.
        indented_value_lines = [value_lines[0]]
        indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line)
                                     for line in value_lines[1:]])
        report_output += '\n'.join(indented_value_lines)

      # Apply base indentation and terminate every line with a newline.
      report_lines = [self._Indent(base_indent, line + '\n')
                      for line in report_output.split('\n')]
      return report_lines, curr_section

  class SubReportNode(Node):
    """A sub-report node, representing a nested report."""

    def __init__(self, title, report):
      super(_PayloadReport.SubReportNode, self).__init__()
      self.title = title
      # report is a nested _PayloadReport instance.
      self.report = report

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Recurse with indentation."""
      report_lines = [self._Indent(base_indent, self.title + ' =>\n')]
      report_lines.extend(self.report.GenerateLines(base_indent + sub_indent,
                                                    sub_indent))
      # A sub-report does not change the enclosing report's current section.
      return report_lines, curr_section

  class SectionNode(Node):
    """A section header node."""

    def __init__(self, title=None):
      super(_PayloadReport.SectionNode, self).__init__()
      self.title = title
      # Longest field name added to this section; used for value alignment.
      self.max_field_name_len = 0

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Dump a title line, return self as the (new) current section."""
      report_lines = []
      if self.title:
        report_lines.append(self._Indent(base_indent,
                                         '=== %s ===\n' % self.title))
      return report_lines, self

  def __init__(self):
    # Ordered sequence of Node objects making up the report.
    self.report = []
    # Reports start out in an untitled "global" section.
    self.last_section = self.global_section = self.SectionNode()
    self.is_finalized = False

  def GenerateLines(self, base_indent, sub_indent):
    """Generates the lines in the report, properly indented.

    Args:
      base_indent: The indentation used for root-level report lines.
      sub_indent: The indentation offset used for sub-reports.

    Returns:
      A list of indented report lines.
    """
    report_lines = []
    curr_section = self.global_section
    for node in self.report:
      # Each node may replace the current section (SectionNode returns self).
      node_report_lines, curr_section = node.GenerateLines(
          base_indent, sub_indent, curr_section)
      report_lines.extend(node_report_lines)

    return report_lines

  def Dump(self, out_file, base_indent=0, sub_indent=2):
    """Dumps the report to a file.

    Args:
      out_file: File object to output the content to.
      base_indent: Base indentation for report lines.
      sub_indent: Added indentation for sub-reports.
    """
    report_lines = self.GenerateLines(base_indent, sub_indent)
    # Flag non-empty reports dumped before Finalize() was called.
    if report_lines and not self.is_finalized:
      report_lines.append('(incomplete report)\n')

    for line in report_lines:
      out_file.write(line)

  def AddField(self, name, value, linebreak=False, indent=0):
    """Adds a field/value pair to the payload report.

    Args:
      name: The field's name.
      value: The field's value.
      linebreak: Whether the value should be printed on a new line.
      indent: Amount of extra indent for each line of the value.
    """
    assert not self.is_finalized
    # Track the longest field name so values in this section line up.
    if name and self.last_section.max_field_name_len < len(name):
      self.last_section.max_field_name_len = len(name)
    self.report.append(self.FieldNode(name, value, linebreak, indent))

  def AddSubReport(self, title):
    """Adds and returns a sub-report with a title."""
    assert not self.is_finalized
    sub_report = self.SubReportNode(title, type(self)())
    self.report.append(sub_report)
    # Return the nested report object so callers can populate it directly.
    return sub_report.report

  def AddSection(self, title):
    """Adds a new section title."""
    assert not self.is_finalized
    # Subsequent AddField calls align against this new section.
    self.last_section = self.SectionNode(title)
    self.report.append(self.last_section)

  def Finalize(self):
    """Seals the report, marking it as complete."""
    self.is_finalized = True
286
287
288#
289# Payload verification.
290#
291
292class PayloadChecker(object):
293  """Checking the integrity of an update payload.
294
295  This is a short-lived object whose purpose is to isolate the logic used for
296  verifying the integrity of an update payload.
297  """
298
299  def __init__(self, payload, assert_type=None, block_size=0,
300               allow_unhashed=False, disabled_tests=()):
301    """Initialize the checker.
302
303    Args:
304      payload: The payload object to check.
305      assert_type: Assert that payload is either 'full' or 'delta' (optional).
306      block_size: Expected filesystem / payload block size (optional).
307      allow_unhashed: Allow operations with unhashed data blobs.
308      disabled_tests: Sequence of tests to disable.
309    """
310    if not payload.is_init:
311      raise ValueError('Uninitialized update payload.')
312
313    # Set checker configuration.
314    self.payload = payload
315    self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE
316    if not _IsPowerOfTwo(self.block_size):
317      raise error.PayloadError(
318          'Expected block (%d) size is not a power of two.' % self.block_size)
319    if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
320      raise error.PayloadError('Invalid assert_type value (%r).' %
321                               assert_type)
322    self.payload_type = assert_type
323    self.allow_unhashed = allow_unhashed
324
325    # Disable specific tests.
326    self.check_dst_pseudo_extents = (
327        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
328    self.check_move_same_src_dst_block = (
329        _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
330    self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
331
332    # Reset state; these will be assigned when the manifest is checked.
333    self.sigs_offset = 0
334    self.sigs_size = 0
335    self.old_part_info = {}
336    self.new_part_info = {}
337    self.new_fs_sizes = collections.defaultdict(int)
338    self.old_fs_sizes = collections.defaultdict(int)
339    self.minor_version = None
340    self.major_version = None
341
342  @staticmethod
343  def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
344                 msg_name=None, linebreak=False, indent=0):
345    """Adds an element from a protobuf message to the payload report.
346
347    Checks to see whether a message contains a given element, and if so adds
348    the element value to the provided report. A missing mandatory element
349    causes an exception to be raised.
350
351    Args:
352      msg: The message containing the element.
353      name: The name of the element.
354      report: A report object to add the element name/value to.
355      is_mandatory: Whether or not this element must be present.
356      is_submsg: Whether this element is itself a message.
357      convert: A function for converting the element value for reporting.
358      msg_name: The name of the message object (for error reporting).
359      linebreak: Whether the value report should induce a line break.
360      indent: Amount of indent used for reporting the value.
361
362    Returns:
363      A pair consisting of the element value and the generated sub-report for
364      it (if the element is a sub-message, None otherwise). If the element is
365      missing, returns (None, None).
366
367    Raises:
368      error.PayloadError if a mandatory element is missing.
369    """
370    element_result = collections.namedtuple('element_result', ['msg', 'report'])
371
372    if not msg.HasField(name):
373      if is_mandatory:
374        raise error.PayloadError('%smissing mandatory %s %r.' %
375                                 (msg_name + ' ' if msg_name else '',
376                                  'sub-message' if is_submsg else 'field',
377                                  name))
378      return element_result(None, None)
379
380    value = getattr(msg, name)
381    if is_submsg:
382      return element_result(value, report and report.AddSubReport(name))
383    else:
384      if report:
385        report.AddField(name, convert(value), linebreak=linebreak,
386                        indent=indent)
387      return element_result(value, None)
388
389  @staticmethod
390  def _CheckRepeatedElemNotPresent(msg, field_name, msg_name):
391    """Checks that a repeated element is not specified in the message.
392
393    Args:
394      msg: The message containing the element.
395      field_name: The name of the element.
396      msg_name: The name of the message object (for error reporting).
397
398    Raises:
399      error.PayloadError if the repeated element is present or non-empty.
400    """
401    if getattr(msg, field_name, None):
402      raise error.PayloadError('%sfield %r not empty.' %
403                               (msg_name + ' ' if msg_name else '', field_name))
404
405  @staticmethod
406  def _CheckElemNotPresent(msg, field_name, msg_name):
407    """Checks that an element is not specified in the message.
408
409    Args:
410      msg: The message containing the element.
411      field_name: The name of the element.
412      msg_name: The name of the message object (for error reporting).
413
414    Raises:
415      error.PayloadError if the repeated element is present.
416    """
417    if msg.HasField(field_name):
418      raise error.PayloadError('%sfield %r exists.' %
419                               (msg_name + ' ' if msg_name else '', field_name))
420
421  @staticmethod
422  def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
423                           linebreak=False, indent=0):
424    """Adds a mandatory field; returning first component from _CheckElem."""
425    return PayloadChecker._CheckElem(msg, field_name, report, True, False,
426                                     convert=convert, msg_name=msg_name,
427                                     linebreak=linebreak, indent=indent)[0]
428
429  @staticmethod
430  def _CheckOptionalField(msg, field_name, report, convert=str,
431                          linebreak=False, indent=0):
432    """Adds an optional field; returning first component from _CheckElem."""
433    return PayloadChecker._CheckElem(msg, field_name, report, False, False,
434                                     convert=convert, linebreak=linebreak,
435                                     indent=indent)[0]
436
437  @staticmethod
438  def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name):
439    """Adds a mandatory sub-message; wrapper for _CheckElem."""
440    return PayloadChecker._CheckElem(msg, submsg_name, report, True, True,
441                                     msg_name)
442
443  @staticmethod
444  def _CheckOptionalSubMsg(msg, submsg_name, report):
445    """Adds an optional sub-message; wrapper for _CheckElem."""
446    return PayloadChecker._CheckElem(msg, submsg_name, report, False, True)
447
448  @staticmethod
449  def _CheckPresentIff(val1, val2, name1, name2, obj_name):
450    """Checks that val1 is None iff val2 is None.
451
452    Args:
453      val1: first value to be compared.
454      val2: second value to be compared.
455      name1: name of object holding the first value.
456      name2: name of object holding the second value.
457      obj_name: Name of the object containing these values.
458
459    Raises:
460      error.PayloadError if assertion does not hold.
461    """
462    if None in (val1, val2) and val1 is not val2:
463      present, missing = (name1, name2) if val2 is None else (name2, name1)
464      raise error.PayloadError('%r present without %r%s.' %
465                               (present, missing,
466                                ' in ' + obj_name if obj_name else ''))
467
468  @staticmethod
469  def _CheckPresentIffMany(vals, name, obj_name):
470    """Checks that a set of vals and names imply every other element.
471
472    Args:
473      vals: The set of values to be compared.
474      name: The name of the objects holding the corresponding value.
475      obj_name: Name of the object containing these values.
476
477    Raises:
478      error.PayloadError if assertion does not hold.
479    """
480    if any(vals) and not all(vals):
481      raise error.PayloadError('%r is not present in all values%s.' %
482                               (name, ' in ' + obj_name if obj_name else ''))
483
484  @staticmethod
485  def _Run(cmd, send_data=None):
486    """Runs a subprocess, returns its output.
487
488    Args:
489      cmd: Sequence of command-line argument for invoking the subprocess.
490      send_data: Data to feed to the process via its stdin.
491
492    Returns:
493      A tuple containing the stdout and stderr output of the process.
494    """
495    run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
496                                   stdout=subprocess.PIPE)
497    try:
498      result = run_process.communicate(input=send_data)
499    finally:
500      exit_code = run_process.wait()
501
502    if exit_code:
503      raise RuntimeError('Subprocess %r failed with code %r.' %
504                         (cmd, exit_code))
505
506    return result
507
508  @staticmethod
509  def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
510    """Verifies an actual hash against a signed one.
511
512    Args:
513      sig_data: The raw signature data.
514      pubkey_file_name: Public key used for verifying signature.
515      actual_hash: The actual hash digest.
516      sig_name: Signature name for error reporting.
517
518    Raises:
519      error.PayloadError if signature could not be verified.
520    """
521    if len(sig_data) != 256:
522      raise error.PayloadError(
523          '%s: signature size (%d) not as expected (256).' %
524          (sig_name, len(sig_data)))
525    signed_data, _ = PayloadChecker._Run(
526        ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
527        send_data=sig_data)
528
529    if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32:
530      raise error.PayloadError('%s: unexpected signed data length (%d).' %
531                               (sig_name, len(signed_data)))
532
533    if not signed_data.startswith(common.SIG_ASN1_HEADER):
534      raise error.PayloadError('%s: not containing standard ASN.1 prefix.' %
535                               sig_name)
536
537    signed_hash = signed_data[len(common.SIG_ASN1_HEADER):]
538    if signed_hash != actual_hash:
539      raise error.PayloadError(
540          '%s: signed hash (%s) different from actual (%s).' %
541          (sig_name, common.FormatSha256(signed_hash),
542           common.FormatSha256(actual_hash)))
543
544  @staticmethod
545  def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
546                            block_name=None):
547    """Checks that a given length fits given block space.
548
549    This ensures that the number of blocks allocated is appropriate for the
550    length of the data residing in these blocks.
551
552    Args:
553      length: The actual length of the data.
554      num_blocks: The number of blocks allocated for it.
555      block_size: The size of each block in bytes.
556      length_name: Name of length (used for error reporting).
557      block_name: Name of block (used for error reporting).
558
559    Raises:
560      error.PayloadError if the aforementioned invariant is not satisfied.
561    """
562    # Check: length <= num_blocks * block_size.
563    if length > num_blocks * block_size:
564      raise error.PayloadError(
565          '%s (%d) > num %sblocks (%d) * block_size (%d).' %
566          (length_name, length, block_name or '', num_blocks, block_size))
567
568    # Check: length > (num_blocks - 1) * block_size.
569    if length <= (num_blocks - 1) * block_size:
570      raise error.PayloadError(
571          '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' %
572          (length_name, length, block_name or '', num_blocks - 1, block_size))
573
574  def _CheckManifestMinorVersion(self, report):
575    """Checks the payload manifest minor_version field.
576
577    Args:
578      report: The report object to add to.
579
580    Raises:
581      error.PayloadError if any of the checks fail.
582    """
583    self.minor_version = self._CheckOptionalField(self.payload.manifest,
584                                                  'minor_version', report)
585    if self.minor_version in _SUPPORTED_MINOR_VERSIONS:
586      if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]:
587        raise error.PayloadError(
588            'Minor version %d not compatible with payload type %s.' %
589            (self.minor_version, self.payload_type))
590    elif self.minor_version is None:
591      raise error.PayloadError('Minor version is not set.')
592    else:
593      raise error.PayloadError('Unsupported minor version: %d' %
594                               self.minor_version)
595
  def _CheckManifest(self, report, part_sizes=None):
    """Checks the payload manifest.

    Verifies the manifest's block size, signature fields and partition info
    messages, resolves whether the payload is full or delta (populating
    self.payload_type, self.{old,new}_part_info and self.{old,new}_fs_sizes),
    and validates the minor version against the resolved payload type.

    Args:
      report: A report object to add to.
      part_sizes: Map of partition label to partition size in bytes.
        NOTE(review): leaving this as None raises TypeError below
        (defaultdict(int, None)); callers appear to always pass a map —
        confirm.

    Raises:
      error.PayloadError if any of the checks fail.
    """
    self.major_version = self.payload.header.version

    # Missing labels default to size 0, which disables the fit checks below.
    part_sizes = collections.defaultdict(int, part_sizes)
    manifest = self.payload.manifest
    report.AddSection('manifest')

    # Check: block_size must exist and match the expected value.
    actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
                                                  report, 'manifest')
    if actual_block_size != self.block_size:
      raise error.PayloadError('Block_size (%d) not as expected (%d).' %
                               (actual_block_size, self.block_size))

    # Check: signatures_offset <==> signatures_size.
    self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
                                                report)
    self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size',
                                              report)
    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                          'signatures_offset', 'signatures_size', 'manifest')

    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
      # Major version 1: fixed kernel/rootfs partition info fields.
      for real_name, proto_name in common.CROS_PARTITIONS:
        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
            manifest, 'old_%s_info' % proto_name, report)
        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
            manifest, 'new_%s_info' % proto_name, report, 'manifest')

      # Check: old_kernel_info <==> old_rootfs_info.
      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
                            self.old_part_info[common.ROOTFS].msg,
                            'old_kernel_info', 'old_rootfs_info', 'manifest')
    else:
      # Newer major versions: a repeated `partitions` field instead.
      for part in manifest.partitions:
        name = part.partition_name
        self.old_part_info[name] = self._CheckOptionalSubMsg(
            part, 'old_partition_info', report)
        self.new_part_info[name] = self._CheckMandatorySubMsg(
            part, 'new_partition_info', report, 'manifest.partitions')

      # Check: Old-style partition infos should not be specified.
      for _, part in common.CROS_PARTITIONS:
        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')

      # Check: If old_partition_info is specified anywhere, it must be
      # specified everywhere.
      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
                                'manifest.partitions')

    # Presence of any old-partition info marks the payload as a delta.
    is_delta = any(part and part.msg for part in self.old_part_info.values())
    if is_delta:
      # Assert/mark delta payload.
      if self.payload_type == _TYPE_FULL:
        raise error.PayloadError(
            'Apparent full payload contains old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_DELTA

      # NOTE: iteritems() is Python 2 only; this module predates a py3 port.
      for part, (msg, part_report) in self.old_part_info.iteritems():
        # Check: {size, hash} present in old_{kernel,rootfs}_info.
        field = 'old_%s_info' % part
        self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
                                                            part_report, field)
        self._CheckMandatoryField(msg, 'hash', part_report, field,
                                  convert=common.FormatSha256)

        # Check: old_{kernel,rootfs} size must fit in respective partition.
        # The chained comparison only fires when a positive size is known.
        if self.old_fs_sizes[part] > part_sizes[part] > 0:
          raise error.PayloadError(
              'Old %s content (%d) exceed partition size (%d).' %
              (part, self.old_fs_sizes[part], part_sizes[part]))
    else:
      # Assert/mark full payload.
      if self.payload_type == _TYPE_DELTA:
        raise error.PayloadError(
            'Apparent delta payload missing old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_FULL

    # Check: new_{kernel,rootfs}_info present; contains {size, hash}.
    for part, (msg, part_report) in self.new_part_info.iteritems():
      field = 'new_%s_info' % part
      self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
                                                          part_report, field)
      self._CheckMandatoryField(msg, 'hash', part_report, field,
                                convert=common.FormatSha256)

      # Check: new_{kernel,rootfs} size must fit in respective partition.
      if self.new_fs_sizes[part] > part_sizes[part] > 0:
        raise error.PayloadError(
            'New %s content (%d) exceed partition size (%d).' %
            (part, self.new_fs_sizes[part], part_sizes[part]))

    # Check: minor_version makes sense for the payload type. This check should
    # run after the payload type has been set.
    self._CheckManifestMinorVersion(report)
706
707  def _CheckLength(self, length, total_blocks, op_name, length_name):
708    """Checks whether a length matches the space designated in extents.
709
710    Args:
711      length: The total length of the data.
712      total_blocks: The total number of blocks in extents.
713      op_name: Operation name (for error reporting).
714      length_name: Length name (for error reporting).
715
716    Raises:
717      error.PayloadError is there a problem with the length.
718    """
719    # Check: length is non-zero.
720    if length == 0:
721      raise error.PayloadError('%s: %s is zero.' % (op_name, length_name))
722
723    # Check that length matches number of blocks.
724    self._CheckBlocksFitLength(length, total_blocks, self.block_size,
725                               '%s: %s' % (op_name, length_name))
726
727  def _CheckExtents(self, extents, usable_size, block_counters, name,
728                    allow_pseudo=False, allow_signature=False):
729    """Checks a sequence of extents.
730
731    Args:
732      extents: The sequence of extents to check.
733      usable_size: The usable size of the partition to which the extents apply.
734      block_counters: Array of counters corresponding to the number of blocks.
735      name: The name of the extent block.
736      allow_pseudo: Whether or not pseudo block numbers are allowed.
737      allow_signature: Whether or not the extents are used for a signature.
738
739    Returns:
740      The total number of blocks in the extents.
741
742    Raises:
743      error.PayloadError if any of the entailed checks fails.
744    """
745    total_num_blocks = 0
746    for ex, ex_name in common.ExtentIter(extents, name):
747      # Check: Mandatory fields.
748      start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
749                                                        None, ex_name)
750      num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
751                                                       ex_name)
752      end_block = start_block + num_blocks
753
754      # Check: num_blocks > 0.
755      if num_blocks == 0:
756        raise error.PayloadError('%s: extent length is zero.' % ex_name)
757
758      if start_block != common.PSEUDO_EXTENT_MARKER:
759        # Check: Make sure we're within the partition limit.
760        if usable_size and end_block * self.block_size > usable_size:
761          raise error.PayloadError(
762              '%s: extent (%s) exceeds usable partition size (%d).' %
763              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
764
765        # Record block usage.
766        for i in xrange(start_block, end_block):
767          block_counters[i] += 1
768      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
769        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
770        # signature operation (in which case there has to be exactly one).
771        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
772
773      total_num_blocks += num_blocks
774
775    return total_num_blocks
776
777  def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
778    """Specific checks for REPLACE/REPLACE_BZ/REPLACE_XZ operations.
779
780    Args:
781      op: The operation object from the manifest.
782      data_length: The length of the data blob associated with the operation.
783      total_dst_blocks: Total number of blocks in dst_extents.
784      op_name: Operation name for error reporting.
785
786    Raises:
787      error.PayloadError if any check fails.
788    """
789    # Check: Does not contain src extents.
790    if op.src_extents:
791      raise error.PayloadError('%s: contains src_extents.' % op_name)
792
793    # Check: Contains data.
794    if data_length is None:
795      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
796
797    if op.type == common.OpType.REPLACE:
798      PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
799                                           self.block_size,
800                                           op_name + '.data_length', 'dst')
801    else:
802      # Check: data_length must be smaller than the allotted dst blocks.
803      if data_length >= total_dst_blocks * self.block_size:
804        raise error.PayloadError(
805            '%s: data_length (%d) must be less than allotted dst block '
806            'space (%d * %d).' %
807            (op_name, data_length, total_dst_blocks, self.block_size))
808
809  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
810                          total_dst_blocks, op_name):
811    """Specific checks for MOVE operations.
812
813    Args:
814      op: The operation object from the manifest.
815      data_offset: The offset of a data blob for the operation.
816      total_src_blocks: Total number of blocks in src_extents.
817      total_dst_blocks: Total number of blocks in dst_extents.
818      op_name: Operation name for error reporting.
819
820    Raises:
821      error.PayloadError if any check fails.
822    """
823    # Check: No data_{offset,length}.
824    if data_offset is not None:
825      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
826
827    # Check: total_src_blocks == total_dst_blocks.
828    if total_src_blocks != total_dst_blocks:
829      raise error.PayloadError(
830          '%s: total src blocks (%d) != total dst blocks (%d).' %
831          (op_name, total_src_blocks, total_dst_blocks))
832
833    # Check: For all i, i-th src block index != i-th dst block index.
834    i = 0
835    src_extent_iter = iter(op.src_extents)
836    dst_extent_iter = iter(op.dst_extents)
837    src_extent = dst_extent = None
838    src_idx = src_num = dst_idx = dst_num = 0
839    while i < total_src_blocks:
840      # Get the next source extent, if needed.
841      if not src_extent:
842        try:
843          src_extent = src_extent_iter.next()
844        except StopIteration:
845          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
846                                   (op_name, i, total_src_blocks))
847        src_idx = src_extent.start_block
848        src_num = src_extent.num_blocks
849
850      # Get the next dest extent, if needed.
851      if not dst_extent:
852        try:
853          dst_extent = dst_extent_iter.next()
854        except StopIteration:
855          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
856                                   (op_name, i, total_dst_blocks))
857        dst_idx = dst_extent.start_block
858        dst_num = dst_extent.num_blocks
859
860      # Check: start block is not 0. See crbug/480751; there are still versions
861      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
862      # so we need to fail payloads that try to MOVE to/from block 0.
863      if src_idx == 0 or dst_idx == 0:
864        raise error.PayloadError(
865            '%s: MOVE operation cannot have extent with start block 0' %
866            op_name)
867
868      if self.check_move_same_src_dst_block and src_idx == dst_idx:
869        raise error.PayloadError(
870            '%s: src/dst block number %d is the same (%d).' %
871            (op_name, i, src_idx))
872
873      advance = min(src_num, dst_num)
874      i += advance
875
876      src_idx += advance
877      src_num -= advance
878      if src_num == 0:
879        src_extent = None
880
881      dst_idx += advance
882      dst_num -= advance
883      if dst_num == 0:
884        dst_extent = None
885
886    # Make sure we've exhausted all src/dst extents.
887    if src_extent:
888      raise error.PayloadError('%s: excess src blocks.' % op_name)
889    if dst_extent:
890      raise error.PayloadError('%s: excess dst blocks.' % op_name)
891
892  def _CheckZeroOperation(self, op, op_name):
893    """Specific checks for ZERO operations.
894
895    Args:
896      op: The operation object from the manifest.
897      op_name: Operation name for error reporting.
898
899    Raises:
900      error.PayloadError if any check fails.
901    """
902    # Check: Does not contain src extents, data_length and data_offset.
903    if op.src_extents:
904      raise error.PayloadError('%s: contains src_extents.' % op_name)
905    if op.data_length:
906      raise error.PayloadError('%s: contains data_length.' % op_name)
907    if op.data_offset:
908      raise error.PayloadError('%s: contains data_offset.' % op_name)
909
910  def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
911    """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
912       operations.
913
914    Args:
915      op: The operation.
916      data_length: The length of the data blob associated with the operation.
917      total_dst_blocks: Total number of blocks in dst_extents.
918      op_name: Operation name for error reporting.
919
920    Raises:
921      error.PayloadError if any check fails.
922    """
923    # Check: data_{offset,length} present.
924    if data_length is None:
925      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
926
927    # Check: data_length is strictly smaller than the allotted dst blocks.
928    if data_length >= total_dst_blocks * self.block_size:
929      raise error.PayloadError(
930          '%s: data_length (%d) must be smaller than allotted dst space '
931          '(%d * %d = %d).' %
932          (op_name, data_length, total_dst_blocks, self.block_size,
933           total_dst_blocks * self.block_size))
934
935    # Check the existence of src_length and dst_length for legacy bsdiffs.
936    if (op.type == common.OpType.BSDIFF or
937        (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
938      if not op.HasField('src_length') or not op.HasField('dst_length'):
939        raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
940    else:
941      if op.HasField('src_length') or op.HasField('dst_length'):
942        raise error.PayloadError('%s: unneeded {src,dst}_length.' % op_name)
943
944  def _CheckSourceCopyOperation(self, data_offset, total_src_blocks,
945                                total_dst_blocks, op_name):
946    """Specific checks for SOURCE_COPY.
947
948    Args:
949      data_offset: The offset of a data blob for the operation.
950      total_src_blocks: Total number of blocks in src_extents.
951      total_dst_blocks: Total number of blocks in dst_extents.
952      op_name: Operation name for error reporting.
953
954    Raises:
955      error.PayloadError if any check fails.
956    """
957    # Check: No data_{offset,length}.
958    if data_offset is not None:
959      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
960
961    # Check: total_src_blocks == total_dst_blocks.
962    if total_src_blocks != total_dst_blocks:
963      raise error.PayloadError(
964          '%s: total src blocks (%d) != total dst blocks (%d).' %
965          (op_name, total_src_blocks, total_dst_blocks))
966
967  def _CheckAnySourceOperation(self, op, total_src_blocks, op_name):
968    """Specific checks for SOURCE_* operations.
969
970    Args:
971      op: The operation object from the manifest.
972      total_src_blocks: Total number of blocks in src_extents.
973      op_name: Operation name for error reporting.
974
975    Raises:
976      error.PayloadError if any check fails.
977    """
978    # Check: total_src_blocks != 0.
979    if total_src_blocks == 0:
980      raise error.PayloadError('%s: no src blocks in a source op.' % op_name)
981
982    # Check: src_sha256_hash present in minor version >= 3.
983    if self.minor_version >= 3 and op.src_sha256_hash is None:
984      raise error.PayloadError('%s: source hash missing.' % op_name)
985
  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
                      new_block_counters, old_usable_size, new_usable_size,
                      prev_data_offset, allow_signature, blob_hash_counts):
    """Checks a single update operation.

    Validates the operation's extents and data blob (offset/length/hash),
    then dispatches to the type-specific check appropriate for the payload's
    minor version.

    Args:
      op: The operation object.
      op_name: Operation name string for error reporting.
      is_last: Whether this is the last operation in the sequence.
      old_block_counters: Arrays of block read counters.
      new_block_counters: Arrays of block write counters.
      old_usable_size: The overall usable size for src data in bytes.
      new_usable_size: The overall usable size for dst data in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this may be a signature operation.
      blob_hash_counts: Counters for hashed/unhashed blobs.

    Returns:
      The amount of data blob associated with the operation.

    Raises:
      error.PayloadError if any check has failed.
    """
    # Check extents. Source extents may contain pseudo-extents, hence
    # allow_pseudo=True.
    total_src_blocks = self._CheckExtents(
        op.src_extents, old_usable_size, old_block_counters,
        op_name + '.src_extents', allow_pseudo=True)
    # A signature blob is carried by a final REPLACE operation; only such an
    # operation may have a signature pseudo-extent on the dst side.
    allow_signature_in_extents = (allow_signature and is_last and
                                  op.type == common.OpType.REPLACE)
    total_dst_blocks = self._CheckExtents(
        op.dst_extents, new_usable_size, new_block_counters,
        op_name + '.dst_extents',
        allow_pseudo=(not self.check_dst_pseudo_extents),
        allow_signature=allow_signature_in_extents)

    # Check: data_offset present <==> data_length present.
    data_offset = self._CheckOptionalField(op, 'data_offset', None)
    data_length = self._CheckOptionalField(op, 'data_length', None)
    self._CheckPresentIff(data_offset, data_length, 'data_offset',
                          'data_length', op_name)

    # Check: At least one dst_extent.
    if not op.dst_extents:
      raise error.PayloadError('%s: dst_extents is empty.' % op_name)

    # Check {src,dst}_length, if present.
    if op.HasField('src_length'):
      self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length')
    if op.HasField('dst_length'):
      self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length')

    if op.HasField('data_sha256_hash'):
      blob_hash_counts['hashed'] += 1

      # Check: Operation carries data.
      if data_offset is None:
        raise error.PayloadError(
            '%s: data_sha256_hash present but no data_{offset,length}.' %
            op_name)

      # Check: Hash verifies correctly. The blob is re-read from the payload
      # and its SHA-256 compared against the manifest's claim.
      actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
                                                             data_length))
      if op.data_sha256_hash != actual_hash.digest():
        raise error.PayloadError(
            '%s: data_sha256_hash (%s) does not match actual hash (%s).' %
            (op_name, common.FormatSha256(op.data_sha256_hash),
             common.FormatSha256(actual_hash.digest())))
    elif data_offset is not None:
      # Blob present but unhashed: allowed only for signature blobs or when
      # unhashed operations are explicitly permitted.
      if allow_signature_in_extents:
        blob_hash_counts['signature'] += 1
      elif self.allow_unhashed:
        blob_hash_counts['unhashed'] += 1
      else:
        raise error.PayloadError('%s: unhashed operation not allowed.' %
                                 op_name)

    if data_offset is not None:
      # Check: Contiguous use of data section. Each blob must start exactly
      # where the previous one ended.
      if data_offset != prev_data_offset:
        raise error.PayloadError(
            '%s: data offset (%d) not matching amount used so far (%d).' %
            (op_name, data_offset, prev_data_offset))

    # Type-specific checks. Each operation type is additionally gated on the
    # payload minor (or major) version that introduced it.
    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif (op.type == common.OpType.REPLACE_XZ and
          (self.minor_version >= 3 or
           self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.MOVE and self.minor_version == 1:
      self._CheckMoveOperation(op, data_offset, total_src_blocks,
                               total_dst_blocks, op_name)
    elif op.type == common.OpType.ZERO and self.minor_version >= 4:
      self._CheckZeroOperation(op, op_name)
    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
      self._CheckSourceCopyOperation(data_offset, total_src_blocks,
                                     total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.BROTLI_BSDIFF and self.minor_version >= 4:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.PUFFDIFF and self.minor_version >= 5:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    else:
      raise error.PayloadError(
          'Operation %s (type %d) not allowed in minor version %d' %
          (op_name, op.type, self.minor_version))
    return data_length if data_length is not None else 0
1102
1103  def _SizeToNumBlocks(self, size):
1104    """Returns the number of blocks needed to contain a given byte size."""
1105    return (size + self.block_size - 1) / self.block_size
1106
1107  def _AllocBlockCounters(self, total_size):
1108    """Returns a freshly initialized array of block counters.
1109
1110    Note that the generated array is not portable as is due to byte-ordering
1111    issues, hence it should not be serialized.
1112
1113    Args:
1114      total_size: The total block size in bytes.
1115
1116    Returns:
1117      An array of unsigned short elements initialized to zero, one for each of
1118      the blocks necessary for containing the partition.
1119    """
1120    return array.array('H',
1121                       itertools.repeat(0, self._SizeToNumBlocks(total_size)))
1122
  def _CheckOperations(self, operations, report, base_name, old_fs_size,
                       new_fs_size, old_usable_size, new_usable_size,
                       prev_data_offset, allow_signature):
    """Checks a sequence of update operations.

    Checks each operation individually, accumulates per-type counts and blob
    sizes plus block read/write multiplicities, and adds the resulting
    statistics to the report.

    Args:
      operations: The sequence of operations to check.
      report: The report object to add to.
      base_name: The name of the operation block.
      old_fs_size: The old filesystem size in bytes.
      new_fs_size: The new filesystem size in bytes.
      old_usable_size: The overall usable size of the old partition in bytes.
      new_usable_size: The overall usable size of the new partition in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this sequence may contain signature operations.

    Returns:
      The total data blob size used.

    Raises:
      error.PayloadError if any of the checks fails.
    """
    # The total size of data blobs used by operations scanned thus far.
    total_data_used = 0
    # Counts of specific operation types. Doubles as the set of valid types.
    op_counts = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.REPLACE_XZ: 0,
        common.OpType.MOVE: 0,
        common.OpType.ZERO: 0,
        common.OpType.BSDIFF: 0,
        common.OpType.SOURCE_COPY: 0,
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.PUFFDIFF: 0,
        common.OpType.BROTLI_BSDIFF: 0,
    }
    # Total blob sizes for each operation type. Only types that can carry a
    # data blob are listed.
    op_blob_totals = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.REPLACE_XZ: 0,
        # MOVE operations don't have blobs.
        common.OpType.BSDIFF: 0,
        # SOURCE_COPY operations don't have blobs.
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.PUFFDIFF: 0,
        common.OpType.BROTLI_BSDIFF: 0,
    }
    # Counts of hashed vs unhashed operations.
    blob_hash_counts = {
        'hashed': 0,
        'unhashed': 0,
    }
    # A 'signature' bucket exists only where a signature op may appear.
    if allow_signature:
      blob_hash_counts['signature'] = 0

    # Allocate old and new block counters. A zero old_fs_size (full update /
    # new partition) means there is no source to read, hence no read counters.
    old_block_counters = (self._AllocBlockCounters(old_usable_size)
                          if old_fs_size else None)
    new_block_counters = self._AllocBlockCounters(new_usable_size)

    # Process and verify each operation.
    op_num = 0
    for op, op_name in common.OperationIter(operations, base_name):
      op_num += 1

      # Check: Type is valid.
      if op.type not in op_counts.keys():
        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
      op_counts[op.type] += 1

      # Only the last operation may be a signature op; data blobs must be
      # laid out contiguously, starting at prev_data_offset.
      is_last = op_num == len(operations)
      curr_data_used = self._CheckOperation(
          op, op_name, is_last, old_block_counters, new_block_counters,
          old_usable_size, new_usable_size,
          prev_data_offset + total_data_used, allow_signature,
          blob_hash_counts)
      if curr_data_used:
        op_blob_totals[op.type] += curr_data_used
        total_data_used += curr_data_used

    # Report totals and breakdown statistics.
    report.AddField('total operations', op_num)
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_counts,
                                          key_names=common.OpType.NAMES),
        indent=1)
    report.AddField('total blobs', sum(blob_hash_counts.values()))
    report.AddField(None,
                    histogram.Histogram.FromCountDict(blob_hash_counts),
                    indent=1)
    report.AddField('total blob size', _AddHumanReadableSize(total_data_used))
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_blob_totals,
                                          formatter=_AddHumanReadableSize,
                                          key_names=common.OpType.NAMES),
        indent=1)

    # Report read/write histograms.
    if old_block_counters:
      report.AddField('block read hist',
                      histogram.Histogram.FromKeyList(old_block_counters),
                      linebreak=True, indent=1)

    # Only blocks within the new filesystem proper are expected to be
    # written; counters beyond that are excluded from the histogram.
    new_write_hist = histogram.Histogram.FromKeyList(
        new_block_counters[:self._SizeToNumBlocks(new_fs_size)])
    report.AddField('block write hist', new_write_hist, linebreak=True,
                    indent=1)

    # Check: Full update must write each dst block once.
    if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
      raise error.PayloadError(
          '%s: not all blocks written exactly once during full update.' %
          base_name)

    return total_data_used
1242
  def _CheckSignatures(self, report, pubkey_file_name):
    """Checks a payload's signature block.

    Parses the Signatures protobuf at (sigs_offset, sigs_size), validates its
    structure, and verifies each signature against the hash of all payload
    data preceding the signature blob.

    Args:
      report: The report object to add to.
      pubkey_file_name: Public key used for verifying signatures.

    Raises:
      error.PayloadError if any check fails.
    """
    sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size)
    sigs = update_metadata_pb2.Signatures()
    sigs.ParseFromString(sigs_raw)
    report.AddSection('signatures')

    # Check: At least one signature present.
    if not sigs.signatures:
      raise error.PayloadError('Signature block is empty.')

    # The signature blob is attached to whichever operation sequence comes
    # last in the payload (kernel ops, if present, follow rootfs ops).
    last_ops_section = (self.payload.manifest.kernel_install_operations or
                        self.payload.manifest.install_operations)

    # Only major version 1 has the fake signature OP at the end.
    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
      fake_sig_op = last_ops_section[-1]
      # Check: signatures_{offset,size} must match the last (fake) operation.
      if not (fake_sig_op.type == common.OpType.REPLACE and
              self.sigs_offset == fake_sig_op.data_offset and
              self.sigs_size == fake_sig_op.data_length):
        raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
                                 ' match last operation (%d+%d).' %
                                 (self.sigs_offset, self.sigs_size,
                                  fake_sig_op.data_offset,
                                  fake_sig_op.data_length))

    # Compute the checksum of all data up to signature blob.
    # TODO(garnold) we're re-reading the whole data section into a string
    # just to compute the checksum; instead, we could do it incrementally as
    # we read the blobs one-by-one, under the assumption that we're reading
    # them in order (which currently holds). This should be reconsidered.
    payload_hasher = self.payload.manifest_hasher.copy()
    common.Read(self.payload.payload_file, self.sigs_offset,
                offset=self.payload.data_offset, hasher=payload_hasher)

    for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
      sig_report = report.AddSubReport(sig_name)

      # Check: Signature contains mandatory fields.
      self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
      self._CheckMandatoryField(sig, 'data', None, sig_name)
      sig_report.AddField('data len', len(sig.data))

      # Check: Signatures pertains to actual payload hash.
      if sig.version == 1:
        self._CheckSha256Signature(sig.data, pubkey_file_name,
                                   payload_hasher.digest(), sig_name)
      else:
        raise error.PayloadError('Unknown signature version (%d).' %
                                 sig.version)
1294
1295  def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0,
1296          part_sizes=None, report_out_file=None):
1297    """Checker entry point, invoking all checks.
1298
1299    Args:
1300      pubkey_file_name: Public key used for signature verification.
1301      metadata_sig_file: Metadata signature, if verification is desired.
1302      metadata_size: Metadata size, if verification is desired.
1303      part_sizes: Mapping of partition label to size in bytes (default: infer
1304        based on payload type and version or filesystem).
1305      report_out_file: File object to dump the report to.
1306
1307    Raises:
1308      error.PayloadError if payload verification failed.
1309    """
1310    if not pubkey_file_name:
1311      pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME
1312
1313    report = _PayloadReport()
1314
1315    # Get payload file size.
1316    self.payload.payload_file.seek(0, 2)
1317    payload_file_size = self.payload.payload_file.tell()
1318    self.payload.ResetFile()
1319
1320    try:
1321      # Check metadata_size (if provided).
1322      if metadata_size and self.payload.metadata_size != metadata_size:
1323        raise error.PayloadError('Invalid payload metadata size in payload(%d) '
1324                                 'vs given(%d)' % (self.payload.metadata_size,
1325                                                   metadata_size))
1326
1327      # Check metadata signature (if provided).
1328      if metadata_sig_file:
1329        metadata_sig = base64.b64decode(metadata_sig_file.read())
1330        self._CheckSha256Signature(metadata_sig, pubkey_file_name,
1331                                   self.payload.manifest_hasher.digest(),
1332                                   'metadata signature')
1333
1334      # Part 1: Check the file header.
1335      report.AddSection('header')
1336      # Check: Payload version is valid.
1337      if self.payload.header.version not in (1, 2):
1338        raise error.PayloadError('Unknown payload version (%d).' %
1339                                 self.payload.header.version)
1340      report.AddField('version', self.payload.header.version)
1341      report.AddField('manifest len', self.payload.header.manifest_len)
1342
1343      # Part 2: Check the manifest.
1344      self._CheckManifest(report, part_sizes)
1345      assert self.payload_type, 'payload type should be known by now'
1346
1347      manifest = self.payload.manifest
1348
1349      # Part 3: Examine partition operations.
1350      install_operations = []
1351      if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
1352        # partitions field should not ever exist in major version 1 payloads
1353        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
1354
1355        install_operations.append((common.ROOTFS, manifest.install_operations))
1356        install_operations.append((common.KERNEL,
1357                                   manifest.kernel_install_operations))
1358
1359      else:
1360        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
1361                                          'manifest')
1362        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
1363                                          'manifest')
1364
1365        for update in manifest.partitions:
1366          install_operations.append((update.partition_name, update.operations))
1367
1368      total_blob_size = 0
1369      for part, operations in install_operations:
1370        report.AddSection('%s operations' % part)
1371
1372        new_fs_usable_size = self.new_fs_sizes[part]
1373        old_fs_usable_size = self.old_fs_sizes[part]
1374
1375        if part_sizes.get(part, None):
1376          new_fs_usable_size = old_fs_usable_size = part_sizes[part]
1377        # Infer the usable partition size when validating rootfs operations:
1378        # - If rootfs partition size was provided, use that.
1379        # - Otherwise, if this is an older delta (minor version < 2), stick with
1380        #   a known constant size. This is necessary because older deltas may
1381        #   exceed the filesystem size when moving data blocks around.
1382        # - Otherwise, use the encoded filesystem size.
1383        elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \
1384            self.minor_version in (None, 1):
1385          new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
1386
1387        # TODO(garnold)(chromium:243559) only default to the filesystem size if
1388        # no explicit size provided *and* the partition size is not embedded in
1389        # the payload; see issue for more details.
1390        total_blob_size += self._CheckOperations(
1391            operations, report, '%s_install_operations' % part,
1392            self.old_fs_sizes[part], self.new_fs_sizes[part],
1393            old_fs_usable_size, new_fs_usable_size, total_blob_size,
1394            (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
1395             and part == common.KERNEL))
1396
1397      # Check: Operations data reach the end of the payload file.
1398      used_payload_size = self.payload.data_offset + total_blob_size
1399      # Major versions 2 and higher have a signature at the end, so it should be
1400      # considered in the total size of the image.
1401      if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
1402          self.sigs_size):
1403        used_payload_size += self.sigs_size
1404
1405      if used_payload_size != payload_file_size:
1406        raise error.PayloadError(
1407            'Used payload size (%d) different from actual file size (%d).' %
1408            (used_payload_size, payload_file_size))
1409
1410      # Part 4: Handle payload signatures message.
1411      if self.check_payload_sig and self.sigs_size:
1412        self._CheckSignatures(report, pubkey_file_name)
1413
1414      # Part 5: Summary.
1415      report.AddSection('summary')
1416      report.AddField('update type', self.payload_type)
1417
1418      report.Finalize()
1419    finally:
1420      if report_out_file:
1421        report.Dump(report_out_file)
1422