#!/usr/bin/env python

# Copyright 2016, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""Command-line tool for partitioning Brillo images."""


import argparse
import bisect
import copy
import json
import math
import numbers
import os
import struct
import sys
import uuid
import zlib

# Python 2.6 required for modern exception syntax
if sys.hexversion < 0x02060000:
  print >> sys.stderr, "Python 2.6 or newer is required."
  sys.exit(1)

# Keywords used in JSON files.
JSON_KEYWORD_SETTINGS = 'settings'
JSON_KEYWORD_SETTINGS_AB_SUFFIXES = 'ab_suffixes'
JSON_KEYWORD_SETTINGS_DISK_SIZE = 'disk_size'
JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT = 'disk_alignment'
JSON_KEYWORD_SETTINGS_DISK_GUID = 'disk_guid'
JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN = 'partitions_offset_begin'
JSON_KEYWORD_PARTITIONS = 'partitions'
JSON_KEYWORD_PARTITIONS_LABEL = 'label'
JSON_KEYWORD_PARTITIONS_OFFSET = 'offset'
JSON_KEYWORD_PARTITIONS_SIZE = 'size'
JSON_KEYWORD_PARTITIONS_GROW = 'grow'
JSON_KEYWORD_PARTITIONS_GUID = 'guid'
JSON_KEYWORD_PARTITIONS_TYPE_GUID = 'type_guid'
JSON_KEYWORD_PARTITIONS_FLAGS = 'flags'
JSON_KEYWORD_PARTITIONS_PERSIST = 'persist'
JSON_KEYWORD_PARTITIONS_IGNORE = 'ignore'
JSON_KEYWORD_PARTITIONS_AB = 'ab'
JSON_KEYWORD_PARTITIONS_AB_EXPANDED = 'ab_expanded'
JSON_KEYWORD_PARTITIONS_POSITION = 'position'
JSON_KEYWORD_AUTO = 'auto'
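
# A minimal input file using these keywords might look like the following
# (an illustrative sketch only, not a shipped example; sizes use the
# ParseSize() syntax defined below):
#
#   {
#     "settings": {"disk_size": "4 GiB"},
#     "partitions": [
#       {"label": "boot", "size": "32 MiB", "type_guid": "brillo_boot"},
#       {"label": "system", "ab": true, "size": "512 MiB"},
#       {"label": "userdata", "grow": true}
#     ]
#   }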

# Possible values for the --type option of the query_partition
# sub-command.
QUERY_PARTITION_TYPES = ['size',
                         'offset',
                         'guid',
                         'type_guid',
                         'flags',
                         'persist']

BPT_VERSION_MAJOR = 1
BPT_VERSION_MINOR = 0

DISK_SECTOR_SIZE = 512

GPT_NUM_LBAS = 33

GPT_MIN_PART_NUM = 1
GPT_MAX_PART_NUM = 128

KNOWN_TYPE_GUIDS = {
    'brillo_boot': 'bb499290-b57e-49f6-bf41-190386693794',
    'brillo_bootloader': '4892aeb3-a45f-4c5f-875f-da3303c0795c',
    'brillo_system': '0f2778c4-5cc1-4300-8670-6c88b7e57ed6',
    'brillo_odm': 'e99d84d7-2c1b-44cf-8c58-effae2dc2558',
    'brillo_oem': 'aa3434b2-ddc3-4065-8b1a-18e99ea15cb7',
    'brillo_userdata': '0bb7e6ed-4424-49c0-9372-7fbab465ab4c',
    'brillo_misc': '6b2378b0-0fbc-4aa9-a4f6-4d6e17281c47',
    'brillo_vbmeta': 'b598858a-5fe3-418e-b8c4-824b41f4adfc',
    'brillo_vendor_specific': '314f99d5-b2bf-4883-8d03-e2f2ce507d6a',
    'linux_fs': '0fc63daf-8483-4772-8e79-3d69d8477de4',
    'ms_basic_data': 'ebd0a0a2-b9e5-4433-87c0-68b6b72699c7',
    'efi_system': 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b'
}


def RoundToMultiple(number, size, round_down=False):
  """Rounds a number up (or down) to the nearest multiple of another number.

  Args:
    number: The number to round.
    size: The multiple to round to.
    round_down: If True, the number will be rounded down.

  Returns:
    If |number| is a multiple of |size|, returns |number|, otherwise
    returns |number| + |size| - |remainder| (if |round_down| is False) or
    |number| - |remainder| (if |round_down| is True). Always returns
    an integer.
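
  Example (illustrative; the values follow from the rule above):
    >>> RoundToMultiple(1000, 512)
    1024
    >>> RoundToMultiple(1000, 512, round_down=True)
    512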
  """
  remainder = number % size
  if remainder == 0:
    return int(number)
  if round_down:
    return int(number - remainder)
  return int(number + size - remainder)


def ParseNumber(arg):
  """Number parser.

  If |arg| is an integer, that value is returned. Otherwise int(arg, 0)
  is returned.

  This function is suitable for use in the |type| parameter of
  |ArgumentParser|'s add_argument() function. An improvement over just
  using type=int is that this function supports numbers in other
  bases, e.g. "0x1234".

  Arguments:
    arg: Argument (int or string) to parse.

  Returns:
    The parsed value, as an integer.

  Raises:
    ValueError: If the argument could not be parsed.
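
  Example (illustrative; the values follow from the rules above):
    >>> ParseNumber('0x1234')
    4660
    >>> ParseNumber(42)
    42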
  """
  if isinstance(arg, numbers.Integral):
    return arg
  return int(arg, 0)


def ParseGuid(arg):
  """Parser for RFC 4122 GUIDs.

  Arguments:
    arg: The argument, as a string.

  Returns:
    UUID in hyphenated format.

  Raises:
    ValueError: If the given string cannot be parsed.
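
  Example (illustrative; note the GUID is normalized to lowercase):
    >>> ParseGuid('01234567-89AB-CDEF-0123-456789ABCDEF')
    '01234567-89ab-cdef-0123-456789abcdef'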
  """
  return str(uuid.UUID(arg))


def ParseSize(arg):
  """Parser for size strings with decimal and binary unit support.

  This supports both integers and strings.

  Arguments:
    arg: The value (int or string) to parse.

  Returns:
    The parsed size in bytes as an integer.

  Raises:
    ValueError: If the given string cannot be parsed.
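
  Example (illustrative; the values follow from the unit table below):
    >>> ParseSize('4 KiB')
    4096
    >>> ParseSize('4 kB')
    4000
    >>> ParseSize('1.5 MiB')
    1572864
    >>> ParseSize('0x200')
    512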
  """
  if isinstance(arg, numbers.Integral):
    return arg

  ws_index = arg.find(' ')
  if ws_index != -1:
    num = float(arg[0:ws_index])
    factor = 1
    if arg.endswith('KiB'):
      factor = 1024
    elif arg.endswith('MiB'):
      factor = 1024*1024
    elif arg.endswith('GiB'):
      factor = 1024*1024*1024
    elif arg.endswith('TiB'):
      factor = 1024*1024*1024*1024
    elif arg.endswith('PiB'):
      factor = 1024*1024*1024*1024*1024
    elif arg.endswith('kB'):
      factor = 1000
    elif arg.endswith('MB'):
      factor = 1000*1000
    elif arg.endswith('GB'):
      factor = 1000*1000*1000
    elif arg.endswith('TB'):
      factor = 1000*1000*1000*1000
    elif arg.endswith('PB'):
      factor = 1000*1000*1000*1000*1000
    else:
      raise ValueError('Cannot parse string "{}"'.format(arg))
    # Always return an integer, rounding up if the value isn't integral.
    value = int(math.ceil(num*factor))
  else:
    value = int(arg, 0)
  return value


class ImageChunk(object):
  """Data structure used for representing chunks in Android sparse files.

  Attributes:
    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
    chunk_offset: Offset in the sparse file where this chunk begins.
    output_offset: Offset in de-sparsified file where output begins.
    output_size: Number of bytes in output.
    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
  """

  FORMAT = '<2H2I'
  TYPE_RAW = 0xcac1
  TYPE_FILL = 0xcac2
  TYPE_DONT_CARE = 0xcac3
  TYPE_CRC32 = 0xcac4
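  # The 12-byte chunk header packed with FORMAT is, in order: chunk type
  # (one of the TYPE_* values above), a reserved field, chunk size in
  # blocks, and total chunk size in bytes (header plus data).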

  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
               input_offset, fill_data):
    """Initializes an ImageChunk object.

    Arguments:
      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
      chunk_offset: Offset in the sparse file where this chunk begins.
      output_offset: Offset in de-sparsified file.
      output_size: Number of bytes in output.
      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
      fill_data: Blob with data to fill if TYPE_FILL otherwise None.

    Raises:
      ValueError: If data is not well-formed.
    """
    self.chunk_type = chunk_type
    self.chunk_offset = chunk_offset
    self.output_offset = output_offset
    self.output_size = output_size
    self.input_offset = input_offset
    self.fill_data = fill_data
    # Check invariants.
    if self.chunk_type == self.TYPE_RAW:
      if self.fill_data is not None:
        raise ValueError('RAW chunk cannot have fill_data set.')
      if not self.input_offset:
        raise ValueError('RAW chunk must have input_offset set.')
    elif self.chunk_type == self.TYPE_FILL:
      if self.fill_data is None:
        raise ValueError('FILL chunk must have fill_data set.')
      if self.input_offset:
        raise ValueError('FILL chunk cannot have input_offset set.')
    elif self.chunk_type == self.TYPE_DONT_CARE:
      if self.fill_data is not None:
        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
      if self.input_offset:
        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
    else:
      raise ValueError('Invalid chunk type')


class ImageHandler(object):
  """Abstraction for image I/O with support for Android sparse images.

  This class provides an interface for working with image files that
  may be in the Android Sparse Image format. When an instance is
  constructed, we test whether the file is an Android sparse file. If
  so, operations are performed by interpreting the sparse format;
  otherwise they operate directly on the file. Either way, the
  operations have the same semantics.

  For reading, this interface mimics a file object - it has seek(),
  tell(), and read() methods. For writing, only truncation
  (truncate()) and appending (append_raw(), append_fill(), and
  append_dont_care()) are supported. Additionally, data can only
  be written in units of the block size.

  Attributes:
    is_sparse: Whether the file being operated on is sparse.
    block_size: The block size, typically 4096.
    image_size: The size of the unsparsified file.
  """
  # See system/core/libsparse/sparse_format.h for details.
  MAGIC = 0xed26ff3a
  HEADER_FORMAT = '<I4H4I'
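  # Per the unpacking in _read_header() below, HEADER_FORMAT covers, in
  # order: magic, major and minor version, file header size, chunk header
  # size, block size, total blocks, total chunks, and a checksum field.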

  # This is the format and offset of just the |total_chunks| and
  # |total_blocks| fields.
  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16

  def __init__(self, image_filename):
    """Initializes an image handler.

    Arguments:
      image_filename: The name of the file to operate on.

    Raises:
      ValueError: If data in the file is invalid.
    """
    self._image_filename = image_filename
    self._read_header()

  def _read_header(self):
    """Initializes internal data structures used for reading the file.

    This may be called multiple times and is typically called after
    modifying the file (e.g. appending, truncation).

    Raises:
      ValueError: If data in the file is invalid.
    """
    self.is_sparse = False
    self.block_size = 4096
    self._file_pos = 0
    self._image = open(self._image_filename, 'r+b')
    self._image.seek(0, os.SEEK_END)
    self.image_size = self._image.tell()

    self._image.seek(0, os.SEEK_SET)
    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
    if len(header_bin) < struct.calcsize(self.HEADER_FORMAT):
      # Not a sparse image, our job here is done.
      return
    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
     block_size, self._num_total_blocks, self._num_total_chunks,
     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
    if magic != self.MAGIC:
      # Not a sparse image, our job here is done.
      return
    if not (major_version == 1 and minor_version == 0):
      raise ValueError('Encountered sparse image format version {}.{} but '
                       'only 1.0 is supported'.format(major_version,
                                                      minor_version))
    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
      raise ValueError('Unexpected file_hdr_sz value {}.'.
                       format(file_hdr_sz))
    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
                       format(chunk_hdr_sz))

    self.block_size = block_size

    # Build a list of chunks by parsing the file.
    self._chunks = []

    # Find the smallest offset where only "Don't care" chunks
    # follow. This will be the size of the content in the sparse
    # image.
    offset = 0
    output_offset = 0
    for _ in xrange(1, self._num_total_chunks + 1):
      chunk_offset = self._image.tell()

      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
                                                          header_bin)
      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)

      if chunk_type == ImageChunk.TYPE_RAW:
        if data_sz != (chunk_sz * self.block_size):
          raise ValueError('Raw chunk input size ({}) does not match output '
                           'size ({})'.
                           format(data_sz, chunk_sz*self.block_size))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       self._image.tell(),
                                       None))
        self._image.read(data_sz)

      elif chunk_type == ImageChunk.TYPE_FILL:
        if data_sz != 4:
          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
                           'has {}'.format(data_sz))
        fill_data = self._image.read(4)
        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       fill_data))
      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
        if data_sz != 0:
          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
                           format(data_sz))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       None))
      elif chunk_type == ImageChunk.TYPE_CRC32:
        if data_sz != 4:
          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
                           'this has {}'.format(data_sz))
        self._image.read(4)
      else:
        raise ValueError('Unknown chunk type {}'.format(chunk_type))

      offset += chunk_sz
      output_offset += chunk_sz * self.block_size

    # Record where the sparse data ends.
    self._sparse_end = self._image.tell()

    # Now that we've traversed all chunks, sanity check.
    if self._num_total_blocks != offset:
      raise ValueError('The header said we should have {} output blocks, '
                       'but we saw {}'.format(self._num_total_blocks, offset))
    junk_len = len(self._image.read())
    if junk_len > 0:
      raise ValueError('There were {} bytes of extra data at the end of the '
                       'file.'.format(junk_len))

    # Assign |image_size|.
    self.image_size = output_offset

    # This is used when bisecting in read() to find the initial slice.
    self._chunk_output_offsets = [i.output_offset for i in self._chunks]

    self.is_sparse = True

  def _update_chunks_and_blocks(self):
    """Helper function to update the image header.

    The |total_blocks| and |total_chunks| fields in the header will be
    set to the values of the |_num_total_blocks| and |_num_total_chunks|
    attributes.
    """
    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
                                  self._num_total_blocks,
                                  self._num_total_chunks))

  def append_dont_care(self, num_bytes):
    """Appends a DONT_CARE chunk to the sparse file.

    The given number of bytes must be a multiple of the block size.

    Arguments:
      num_bytes: Size in number of bytes of the DONT_CARE chunk.
    """
    assert num_bytes % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # This is more efficient than writing NUL bytes since it'll add
      # a hole on file systems that support sparse files (native
      # sparse, not Android sparse).
      self._image.truncate(self._image.tell() + num_bytes)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += num_bytes / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_DONT_CARE,
                                  0,  # Reserved
                                  num_bytes / self.block_size,
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._read_header()

  def append_raw(self, data):
    """Appends a RAW chunk to the sparse file.

    The length of the given data must be a multiple of the block size.

    Arguments:
      data: Data to append.
    """
    assert len(data) % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(data)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += len(data) / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_RAW,
                                  0,  # Reserved
                                  len(data) / self.block_size,
                                  len(data) +
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(data)
    self._read_header()

  def append_fill(self, fill_data, size):
    """Appends a fill chunk to the sparse file.

    The total length of the fill data must be a multiple of the block size.

    Arguments:
      fill_data: Fill data to append - must be four bytes.
      size: Number of bytes to fill - must be a multiple of four and the
            block size.
    """
    assert len(fill_data) == 4
    assert size % 4 == 0
    assert size % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(fill_data * (size/4))
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += size / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_FILL,
                                  0,  # Reserved
                                  size / self.block_size,
                                  4 + struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(fill_data)
    self._read_header()

  def seek(self, offset):
    """Sets the cursor position for reading from the unsparsified file.

    Arguments:
      offset: Offset to seek to from the beginning of the file.
    """
    self._file_pos = offset

  def read(self, size):
    """Reads data from the unsparsified file.

    This method may return fewer than |size| bytes of data if the end
    of the file was encountered.

    The file cursor for reading is advanced by the number of bytes
    read.

    Arguments:
      size: Number of bytes to read.

    Returns:
      The data.
    """
    if not self.is_sparse:
      self._image.seek(self._file_pos)
      data = self._image.read(size)
      self._file_pos += len(data)
      return data

    # Iterate over all chunks.
    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
                                    self._file_pos) - 1
    data = bytearray()
    to_go = size
    while to_go > 0:
      chunk = self._chunks[chunk_idx]
      chunk_pos_offset = self._file_pos - chunk.output_offset
      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)

      if chunk.chunk_type == ImageChunk.TYPE_RAW:
        self._image.seek(chunk.input_offset + chunk_pos_offset)
        data.extend(self._image.read(chunk_pos_to_go))
      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
        # Tile the four-byte fill pattern so it covers the requested
        # range regardless of alignment, then slice out what's needed.
        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
        offset_mod = chunk_pos_offset % len(chunk.fill_data)
        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
      else:
        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
        data.extend('\0' * chunk_pos_to_go)

      to_go -= chunk_pos_to_go
      self._file_pos += chunk_pos_to_go
      chunk_idx += 1
      # Generate partial read in case of EOF.
      if chunk_idx >= len(self._chunks):
        break

    return data

  def tell(self):
    """Returns the file cursor position for reading the unsparsified file.

    Returns:
      The file cursor position for reading.
    """
    return self._file_pos

  def truncate(self, size):
    """Truncates the unsparsified file.

    Arguments:
      size: Desired size of the unsparsified file.

    Raises:
      ValueError: If desired size isn't a multiple of the block size.
    """
    if not self.is_sparse:
      self._image.truncate(size)
      self._read_header()
      return

    if size % self.block_size != 0:
      raise ValueError('Cannot truncate to a size which is not a multiple '
                       'of the block size')

    if size == self.image_size:
      # Trivial case where there's nothing to do.
      return
    elif size < self.image_size:
      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
      chunk = self._chunks[chunk_idx]
      if chunk.output_offset != size:
        # Truncation in the middle of a chunk - need to keep the chunk
        # and modify it.
        chunk_idx_for_update = chunk_idx + 1
        num_to_keep = size - chunk.output_offset
        assert num_to_keep % self.block_size == 0
        if chunk.chunk_type == ImageChunk.TYPE_RAW:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
          data_sz = num_to_keep
        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + 4)
          data_sz = 4
        else:
          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
          data_sz = 0
        chunk_sz = num_to_keep/self.block_size
        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
        self._image.seek(chunk.chunk_offset)
        self._image.write(struct.pack(ImageChunk.FORMAT,
                                      chunk.chunk_type,
                                      0,  # Reserved
                                      chunk_sz,
                                      total_sz))
        chunk.output_size = num_to_keep
      else:
        # Truncation at a chunk boundary.
        truncate_at = chunk.chunk_offset
        chunk_idx_for_update = chunk_idx

      self._num_total_chunks = chunk_idx_for_update
      self._num_total_blocks = 0
      for i in range(0, chunk_idx_for_update):
        self._num_total_blocks += self._chunks[i].output_size / self.block_size
      self._update_chunks_and_blocks()
      self._image.truncate(truncate_at)

      # We've modified the file so re-read all data.
      self._read_header()
    else:
      # Truncating to grow - just add a DONT_CARE section.
      self.append_dont_care(size - self.image_size)

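# A minimal ImageHandler usage sketch (illustrative only; assumes
# 'disk.img' is an existing writable image, sparse or not):
#
#   ih = ImageHandler('disk.img')
#   ih.seek(0)
#   first_block = ih.read(ih.block_size)
#   ih.append_dont_care(ih.block_size)  # grow the image by one block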

class GuidGenerator(object):
  """An interface for obtaining strings that are GUIDs.

  To facilitate unit testing, this abstraction is used instead of
  using the uuid module directly.
  """

  def dispense_guid(self, partition_number):
    """Dispenses a GUID.

    Arguments:
      partition_number: The partition number or 0 if requesting a GUID
                        for the whole disk.

    Returns:
      An RFC 4122 compliant GUID, as a string.
    """
    return str(uuid.uuid4())


class Partition(object):
  """Object representing a partition.

  Attributes:
    label: The partition label.
    offset: Offset of the partition on the disk, or None.
    size: Size of the partition or None if not specified.
    grow: True if partition has been requested to use all remaining space.
    guid: Instance GUID (RFC 4122 compliant) as a string or None or 'auto'
          if it should be automatically generated.
    type_guid: Type GUID (RFC 4122 compliant) as a string or a known type
               from the |KNOWN_TYPE_GUIDS| map.
    flags: GUID flags.
    persist: If true, sets bit 0 of flags indicating that this partition should
             not be deleted by the bootloader.
    ab: If True, the partition is an A/B partition.
    ab_expanded: If True, the A/B partitions have been generated.
    ignore: If True, the partition should not be included in the final output.
    position: The requested position of the partition or 0 if it doesn't matter.
  """

  def __init__(self):
    """Initializer method."""
    self.label = ''
    self.offset = None
    self.size = None
    self.grow = False
    self.guid = None
    self.type_guid = None
    self.flags = 0
    self.persist = False
    self.ab = False
    self.ab_expanded = False
    self.ignore = False
    self.position = 0

  def add_info(self, pobj):
    """Adds information to the partition.

    Arguments:
      pobj: A JSON object with information about the partition.
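
    Example (illustrative; '512 MiB' is parsed by ParseSize() above):
      >>> p = Partition()
      >>> p.add_info({'label': 'system', 'size': '512 MiB', 'ab': True})
      >>> (p.label, p.size, p.ab)
      ('system', 536870912, True)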
    """
    self.label = pobj[JSON_KEYWORD_PARTITIONS_LABEL]
    value = pobj.get(JSON_KEYWORD_PARTITIONS_OFFSET)
    if value is not None:
      self.offset = ParseSize(value)
    value = pobj.get(JSON_KEYWORD_PARTITIONS_SIZE)
    if value is not None:
      self.size = ParseSize(value)
    value = pobj.get(JSON_KEYWORD_PARTITIONS_GROW)
    if value is not None:
      self.grow = value
    value = pobj.get(JSON_KEYWORD_PARTITIONS_AB)
    if value is not None:
      self.ab = value
    value = pobj.get(JSON_KEYWORD_PARTITIONS_AB_EXPANDED)
    if value is not None:
      self.ab_expanded = value
    value = pobj.get(JSON_KEYWORD_PARTITIONS_GUID)
    if value is not None:
      self.guid = value
    value = pobj.get(JSON_KEYWORD_PARTITIONS_IGNORE)
    if value is not None:
      self.ignore = value
    value = pobj.get(JSON_KEYWORD_PARTITIONS_TYPE_GUID)
    if value is not None:
      self.type_guid = str.lower(str(value))
      if self.type_guid in KNOWN_TYPE_GUIDS:
        self.type_guid = KNOWN_TYPE_GUIDS[self.type_guid]
    value = pobj.get(JSON_KEYWORD_PARTITIONS_FLAGS)
    if value is not None:
      self.flags = ParseNumber(value)
    value = pobj.get(JSON_KEYWORD_PARTITIONS_PERSIST)
    if value is not None:
      self.persist = value
      if value:
        self.flags = self.flags | 0x1
    value = pobj.get(JSON_KEYWORD_PARTITIONS_POSITION)
    if value is not None:
      self.position = ParseNumber(value)

  def expand_guid(self, guid_generator, partition_number):
    """Assign instance GUID and type GUID if required.

    Arguments:
      guid_generator: A GuidGenerator object.
      partition_number: The partition number, starting from 1.
    """
    if not self.guid or self.guid == JSON_KEYWORD_AUTO:
      self.guid = guid_generator.dispense_guid(partition_number)
    if not self.type_guid:
      self.type_guid = KNOWN_TYPE_GUIDS['brillo_vendor_specific']

  def validate(self):
    """Sanity checks data in object."""

    try:
      _ = uuid.UUID(str(self.guid))
    except ValueError:
      raise ValueError('The string "{}" is not a valid GPT instance GUID on '
                       'partition with label "{}".'.format(
                           str(self.guid), self.label))

    try:
      _ = uuid.UUID(str(self.type_guid))
    except ValueError:
      raise ValueError('The string "{}" is not a valid GPT type GUID on '
                       'partition with label "{}".'.format(
                           str(self.type_guid), self.label))

    if not self.size:
      if not self.grow:
        raise ValueError('Size can only be unset if "grow" is True.')

  def cmp(self, other):
    """Comparison method."""
    self_position = self.position
    if self_position == 0:
      self_position = GPT_MAX_PART_NUM
    other_position = other.position
    if other_position == 0:
      other_position = GPT_MAX_PART_NUM
    return cmp(self_position, other_position)


class Settings(object):
  """An object for holding settings.

  Attributes:
    ab_suffixes: A list of A/B suffixes to use.
    disk_size: An integer with the disk size in bytes.
    partitions_offset_begin: An integer with the offset (in bytes) at
                             which partitions begin.
    disk_alignment: The alignment to use for partitions.
    disk_guid: The GUID to use for the disk or None or 'auto' if
               automatically generated.
  """

  def __init__(self):
    """Initializer with defaults."""
    self.ab_suffixes = ['_a', '_b']
    self.disk_size = None
    self.partitions_offset_begin = 0
    self.disk_alignment = 4096
    self.disk_guid = JSON_KEYWORD_AUTO


class BptError(Exception):
  """Application-specific errors.

  These errors represent issues for which a stack-trace should not be
  presented.

  Attributes:
    message: Error message.
  """

  def __init__(self, message):
    Exception.__init__(self, message)


class BptParsingError(BptError):
  """Represents an error with an input file.

  Attributes:
    message: Error message.
    filename: Name of the file that caused an error.
  """

  def __init__(self, filename, message):
    self.filename = filename
    BptError.__init__(self, message)


class Bpt(object):
  """Business logic for bpttool command-line tool."""

  def _read_json(self, input_files, ab_collapse=True):
    """Parses a stack of JSON files into suitable data structures.

    The order of files matters as later files can modify partitions
    declared in earlier files.

    Arguments:
      input_files: An ordered list of open files.
      ab_collapse: If True, collapse A/B partitions.

    Returns:
      A tuple where the first element is a list of Partition objects
      and the second element is a Settings object.

    Raises:
      BptParsingError: If an input file has an error.
    """
    partitions = []
    settings = Settings()

    # Read all input files and merge partitions and settings.
    for f in input_files:
      try:
        obj = json.loads(f.read())
      except ValueError as e:
        # Unfortunately we can't easily get the line number where the
        # error occurred.
        raise BptParsingError(f.name, e.message)

      sobj = obj.get(JSON_KEYWORD_SETTINGS)
      if sobj:
        ab_suffixes = sobj.get(JSON_KEYWORD_SETTINGS_AB_SUFFIXES)
        if ab_suffixes:
          settings.ab_suffixes = ab_suffixes
        disk_size = sobj.get(JSON_KEYWORD_SETTINGS_DISK_SIZE)
        if disk_size:
          settings.disk_size = ParseSize(disk_size)
        partitions_offset_begin = sobj.get(
            JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN)
        if partitions_offset_begin:
          settings.partitions_offset_begin = ParseSize(partitions_offset_begin)
        disk_alignment = sobj.get(JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT)
        if disk_alignment:
          settings.disk_alignment = ParseSize(disk_alignment)
        disk_guid = sobj.get(JSON_KEYWORD_SETTINGS_DISK_GUID)
        if disk_guid:
          settings.disk_guid = disk_guid

      pobjs = obj.get(JSON_KEYWORD_PARTITIONS)
      if pobjs:
        for pobj in pobjs:
          if ab_collapse and pobj.get(JSON_KEYWORD_PARTITIONS_AB_EXPANDED):
            # If we encounter an expanded partition, unexpand it. This
            # is to make it possible to use output-JSON (from this tool)
            # and stack it with an input-JSON file that e.g. specifies
            # size='256 GiB' for the 'system' partition.
            label = pobj[JSON_KEYWORD_PARTITIONS_LABEL]
            if label.endswith(settings.ab_suffixes[0]):
              # Modify first A/B copy so it doesn't have the trailing suffix.
              new_len = len(label) - len(settings.ab_suffixes[0])
              pobj[JSON_KEYWORD_PARTITIONS_LABEL] = label[0:new_len]
              pobj[JSON_KEYWORD_PARTITIONS_AB_EXPANDED] = False
              pobj[JSON_KEYWORD_PARTITIONS_GUID] = JSON_KEYWORD_AUTO
            else:
              # Skip other A/B copies.
              continue
          # Find or create a partition.
          p = None
          for candidate in partitions:
            if candidate.label == pobj[JSON_KEYWORD_PARTITIONS_LABEL]:
              p = candidate
              break
          if not p:
            p = Partition()
            partitions.append(p)
          p.add_info(pobj)

    return partitions, settings

  def _generate_json(self, partitions, settings):
    """Generate a string with JSON representing partitions and settings.

    Arguments:
      partitions: A list of Partition objects.
      settings: A Settings object.

    Returns:
      A JSON string.
    """
    suffixes_str = '['
    for n in range(0, len(settings.ab_suffixes)):
      if n != 0:
        suffixes_str += ', '
      suffixes_str += '"{}"'.format(settings.ab_suffixes[n])
    suffixes_str += ']'

    ret = ('{{\n'
           '  "' + JSON_KEYWORD_SETTINGS + '": {{\n'
           '    "' + JSON_KEYWORD_SETTINGS_AB_SUFFIXES + '": {},\n'
           '    "' + JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN + '": {},\n'
           '    "' + JSON_KEYWORD_SETTINGS_DISK_SIZE + '": {},\n'
           '    "' + JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT + '": {},\n'
           '    "' + JSON_KEYWORD_SETTINGS_DISK_GUID + '": "{}"\n'
           '  }},\n'
           '  "' + JSON_KEYWORD_PARTITIONS + '": [\n').format(
               suffixes_str,
               settings.partitions_offset_begin,
               settings.disk_size,
               settings.disk_alignment,
               settings.disk_guid)

    for n in range(0, len(partitions)):
      p = partitions[n]
      ret += ('    {{\n'
              '      "' + JSON_KEYWORD_PARTITIONS_LABEL + '": "{}",\n'
              '      "' + JSON_KEYWORD_PARTITIONS_OFFSET + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_SIZE + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_GROW + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_GUID + '": "{}",\n'
              '      "' + JSON_KEYWORD_PARTITIONS_TYPE_GUID + '": "{}",\n'
              '      "' + JSON_KEYWORD_PARTITIONS_FLAGS + '": "{:#018x}",\n'
              '      "' + JSON_KEYWORD_PARTITIONS_PERSIST + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_IGNORE + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_AB + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_AB_EXPANDED + '": {},\n'
              '      "' + JSON_KEYWORD_PARTITIONS_POSITION + '": {}\n'
              '    }}{}\n').format(p.label,
                                   p.offset,
                                   p.size,
                                   'true' if p.grow else 'false',
                                   p.guid,
                                   p.type_guid,
                                   p.flags,
                                   'true' if p.persist else 'false',
                                   'true' if p.ignore else 'false',
                                   'true' if p.ab else 'false',
                                   'true' if p.ab_expanded else 'false',
                                   p.position,
                                   '' if n == len(partitions) - 1 else ',')
    ret += ('  ]\n'
            '}\n')
    return ret

  def _lba_to_chs(self, lba):
    """Converts LBA to CHS.

    Arguments:
      lba: The sector number to convert.

    Returns:
      A list of three integers with the CHS address encoded the way
      it's expected in an MBR partition table.
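
    Example (illustrative; per the encoding below, LBA 1 is cylinder 0,
    head 0, sector 1):
      >>> Bpt()._lba_to_chs(1)
      [0, 1, 0]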
    """
    # See https://en.wikipedia.org/wiki/Cylinder-head-sector
    num_heads = 255
    num_sectors = 63
    # If LBA isn't going to fit in CHS, return maximum CHS values.
    max_lba = 255*num_heads*num_sectors
    if lba > max_lba:
      return [255, 255, 255]
    c = lba / (num_heads*num_sectors)
    h = (lba / num_sectors) % num_heads
    s = lba % num_sectors
    return [h, (((c>>8) & 0x03)<<6) | (s & 0x3f), c & 0xff]

  def _generate_protective_mbr(self, settings):
    """Generate Protective MBR.

    Arguments:
      settings: A Settings object.

    Returns:
      A string with the binary protective MBR (512 bytes).
    """
    # See https://en.wikipedia.org/wiki/Master_boot_record for MBR layout.
    #
    # The first partition entry starts at offset 446 (0x1be).
    lba_start = 1
    lba_end = settings.disk_size/DISK_SECTOR_SIZE - 1
    start_chs = self._lba_to_chs(lba_start)
    end_chs = self._lba_to_chs(lba_end)
    pmbr = struct.pack('<446s'     # Bootloader code
                       'B'         # Status.
                       'BBB'       # CHS start.
                       'B'         # Partition type.
                       'BBB'       # CHS end.
                       'I'         # LBA of partition start.
                       'I'         # Number of sectors in partition.
                       '48x'       # Padding to get to offset 510 (0x1fe).
                       'BB',       # Boot signature.
                       '\xfa\xeb\xfe', # cli ; jmp $ (x86)
                       0x00,
                       start_chs[0], start_chs[1], start_chs[2],
                       0xee,       # MBR Partition Type: GPT protective MBR.
                       end_chs[0], end_chs[1], end_chs[2],
                       1,          # LBA start
                       lba_end,
                       0x55, 0xaa)
    return pmbr

  def _generate_gpt(self, partitions, settings, primary=True):
    """Generate GUID Partition Table.

    Arguments:
      partitions: A list of Partition objects.
      settings: A Settings object.
      primary: True to generate primary GPT, False to generate secondary.

    Returns:
      A string with the binary GUID Partition Table (33*512 bytes).
    """
    # See https://en.wikipedia.org/wiki/GUID_Partition_Table for the
    # GPT layout.

    disk_num_lbas = settings.disk_size/DISK_SECTOR_SIZE
    if primary:
      current_lba = 1
      other_lba = disk_num_lbas - 1
      partitions_lba = 2
    else:
      current_lba = disk_num_lbas - 1
      other_lba = 1
      partitions_lba = disk_num_lbas - GPT_NUM_LBAS
    first_usable_lba = GPT_NUM_LBAS + 1
    last_usable_lba = disk_num_lbas - GPT_NUM_LBAS - 1

    part_array = []
    for p in partitions:
      part_array.append(struct.pack(
          '<16s'    # Partition type GUID.
          '16s'     # Partition instance GUID.
          'QQ'      # First and last LBA.
          'Q'       # Flags.
          '72s',    # Name (36 UTF-16LE code units).
          uuid.UUID(p.type_guid).get_bytes_le(),
          uuid.UUID(p.guid).get_bytes_le(),
          p.offset/DISK_SECTOR_SIZE,
          (p.offset + p.size)/DISK_SECTOR_SIZE - 1,
          p.flags,
          p.label.encode(encoding='utf-16le')))

    part_array.append(((128 - len(partitions))*128) * '\0')
    part_array_str = ''.join(part_array)

    partitions_crc32 = zlib.crc32(part_array_str) % (1<<32)

    header_crc32 = 0
    while True:
      header = struct.pack(
          '<8s'    # Signature.
          '4B'     # Version.
          'I'      # Header size.
          'I'      # CRC32 (must be zero during calculation).
          'I'      # Reserved (must be zero).
          'QQ'     # Current and Other LBA.
          'QQ'     # First and last usable LBA.
          '16s'    # Disk GUID.
          'Q'      # Starting LBA of array of partitions.
          'I'      # Number of partitions.
          'I'      # Partition entry size, in bytes.
          'I'      # CRC32 of partition array.
          '420x',  # Padding to get to 512 bytes.
          'EFI PART',
          0x00, 0x00, 0x01, 0x00,
          92,
          header_crc32,
          0x00000000,
          current_lba, other_lba,
          first_usable_lba, last_usable_lba,
          uuid.UUID(settings.disk_guid).get_bytes_le(),
          partitions_lba,
          128,
          128,
          partitions_crc32)
      if header_crc32 != 0:
        break
      header_crc32 = zlib.crc32(header[0:92]) % (1<<32)

    if primary:
      return header + part_array_str
    else:
      return part_array_str + header

  def _generate_gpt_bin(self, partitions, settings):
    """Generate a bytearray representing partitions and settings.

    The blob will have three partition tables, laid out one after
    another: 1) Protective MBR (512 bytes); 2) Primary GPT (33*512
    bytes); and 3) Secondary GPT (33*512 bytes).

    The total size will be 34,304 bytes.

    Arguments:
      partitions: A list of Partition objects.
      settings: A Settings object.

    Returns:
      A bytearray() object.
    """
    protective_mbr = self._generate_protective_mbr(settings)
    primary_gpt = self._generate_gpt(partitions, settings)
    secondary_gpt = self._generate_gpt(partitions, settings, primary=False)
    ret = protective_mbr + primary_gpt + secondary_gpt
    return ret

  def _validate_disk_partitions(self, partitions, disk_size):
    """Checks that a list of partitions has assigned offsets and fits on
       a disk of the given size.

    This function checks partition offsets and sizes to see if they may
    fit on a disk image.

    Arguments:
      partitions: A list of Partition objects.
      disk_size: Integer size of disk image.

    Raises:
      BptError: If a checked condition is not satisfied.
    """
    for p in partitions:
      if not p.offset or p.offset < (GPT_NUM_LBAS + 1)*DISK_SECTOR_SIZE:
        raise BptError('Partition with label "{}" has no offset.'
                       .format(p.label))
      if not p.size or p.size < 0:
        raise BptError('Partition with label "{}" has no size.'
                       .format(p.label))
      if (p.offset + p.size) > (disk_size - GPT_NUM_LBAS*DISK_SECTOR_SIZE):
        raise BptError('Partition with label "{}" exceeds the disk '
                       'image size.'.format(p.label))

  def make_table(self,
                 inputs,
                 ab_suffixes=None,
                 partitions_offset_begin=None,
                 disk_size=None,
                 disk_alignment=None,
                 disk_guid=None,
                 guid_generator=None):
    """Implementation of the 'make_table' command.

    This function takes a list of input partition definition files,
    flattens them, expands A/B partitions, grows partitions, and lays
    out partitions according to alignment constraints.

    Arguments:
      inputs: List of JSON files to parse.
      ab_suffixes: List of the A/B suffixes (as a comma-separated string)
                   to use or None to not override.
      partitions_offset_begin: Offset (in bytes) at which partitions
                               begin or None to not override.
      disk_size: Size of disk or None to not override.
      disk_alignment: Disk alignment or None to not override.
      disk_guid: Disk GUID as a string or None to not override.
      guid_generator: A GuidGenerator or None to use the default.

    Returns:
      A tuple where the first element is a JSON string for the resulting
      partitions and the second element is the binary partition tables.

    Raises:
      BptParsingError: If an input file has an error.
      BptError: If another application-specific error occurs.
    """
    partitions, settings = self._read_json(inputs)

    # Command-line arguments override anything specified in input
    # files.
    if disk_size:
      settings.disk_size = int(math.ceil(disk_size))
    if disk_alignment:
      settings.disk_alignment = int(disk_alignment)
    if partitions_offset_begin:
      settings.partitions_offset_begin = int(partitions_offset_begin)
    if ab_suffixes:
      settings.ab_suffixes = ab_suffixes.split(',')
    if disk_guid:
      settings.disk_guid = disk_guid

    if not guid_generator:
      guid_generator = GuidGenerator()

    # We need to know the disk size. Also round it down to ensure it's
    # a multiple of the sector size.
    if not settings.disk_size:
      raise BptError('Disk size not specified. Use --disk_size option '
                     'or specify it in an input file.\n')
    settings.disk_size = RoundToMultiple(settings.disk_size,
                                         DISK_SECTOR_SIZE,
                                         round_down=True)

    # Alignment must be divisible by disk sector size.
    if settings.disk_alignment % DISK_SECTOR_SIZE != 0:
      raise BptError(
          'Disk alignment size of {} is not divisible by {}.\n'.format(
              settings.disk_alignment, DISK_SECTOR_SIZE))

    if settings.partitions_offset_begin != 0:
      # The offset at which partitions begin must be divisible by the
      # disk alignment.
      if settings.partitions_offset_begin % settings.disk_alignment != 0:
        raise BptError(
            'Partitions offset begin size of {} '
            'is not divisible by {}.\n'.format(
                settings.partitions_offset_begin, settings.disk_alignment))
      settings.partitions_offset_begin = max(
          settings.partitions_offset_begin,
          DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
      settings.partitions_offset_begin = RoundToMultiple(
          settings.partitions_offset_begin, settings.disk_alignment)

    # Expand A/B partitions and skip ignored partitions.
    expanded_partitions = []
    for p in partitions:
      if p.ignore:
        continue
      if p.ab and not p.ab_expanded:
        p.ab_expanded = True
        for suffix in settings.ab_suffixes:
          new_p = copy.deepcopy(p)
          new_p.label += suffix
          expanded_partitions.append(new_p)
      else:
        expanded_partitions.append(p)
    partitions = expanded_partitions

    # Expand Disk GUID if needed.
    if not settings.disk_guid or settings.disk_guid == JSON_KEYWORD_AUTO:
      settings.disk_guid = guid_generator.dispense_guid(0)

    # Sort according to 'position' attribute.
    partitions = sorted(partitions, cmp=lambda x, y: x.cmp(y))

    # Automatically generate GUIDs if the GUID is unset or set to
    # 'auto'. Also validate the rest of the fields.
    part_no = 1
    for p in partitions:
      p.expand_guid(guid_generator, part_no)
      p.validate()
      part_no += 1

    # Identify the partition to grow and lay out partitions, ignoring
    # the one to grow. This way we can figure out how much space is left.
    #
    # Right now we only support a single 'grow' partition but we could
    # support more in the future by splitting up the available bytes
    # between them.
    grow_part = None
    # Minimal start offset: DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS).
    offset = max(settings.partitions_offset_begin,
                 DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
    for p in partitions:
      if p.grow:
        if grow_part:
          raise BptError('Only a single partition can be automatically '
                         'grown.\n')
        grow_part = p
      else:
        # Ensure size is a multiple of DISK_SECTOR_SIZE by rounding up
        # (user may specify it as e.g. "1.5 GB" which is not divisible
        # by 512).
        p.size = RoundToMultiple(p.size, DISK_SECTOR_SIZE)
        # Align offset to disk alignment.
        offset = RoundToMultiple(offset, settings.disk_alignment)
        offset += p.size

    # After laying out (respecting alignment) all non-grow
    # partitions, check that the given disk size is big enough.
    if offset > settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS:
      raise BptError('Disk size of {} bytes is too small for partitions '
                     'totaling {} bytes.\n'.format(
                         settings.disk_size, offset))

    # If we have a grow partition, it'll start at the next available
    # aligned offset and we can calculate its size as follows.
    if grow_part:
      offset = RoundToMultiple(offset, settings.disk_alignment)
      grow_part.size = RoundToMultiple(
          settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS - offset,
          settings.disk_alignment,
          round_down=True)
      if grow_part.size < DISK_SECTOR_SIZE:
        raise BptError('Not enough space for partition "{}" to be '
                       'automatically grown.\n'.format(grow_part.label))

    # Now we can assign partition start offsets for all partitions,
    # including the grow partition.
    # Minimal start offset: DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS).
    offset = max(settings.partitions_offset_begin,
                 DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
    for p in partitions:
      # Align offset.
      offset = RoundToMultiple(offset, settings.disk_alignment)
      p.offset = offset
      offset += p.size
    assert offset <= settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS

    json_str = self._generate_json(partitions, settings)

    gpt_bin = self._generate_gpt_bin(partitions, settings)

    return json_str, gpt_bin

  def make_disk_image(self, output, bpt, images, allow_empty_partitions=False):
    """Implementation of the 'make_disk_image' command.

    This function takes a list of partition images and a bpt file and
    creates a raw disk image with a protective MBR, primary and
    secondary GPT, and content for each partition as specified.

    Arguments:
      output: Output file where disk image is to be written to.
      bpt: BPT JSON file to parse.
      images: List of partition image paths to be combined (as specified by
              bpt). Each element is of the form
              'PARTITION_NAME:/PATH/TO/PARTITION_IMAGE'.
      allow_empty_partitions: If True, partitions defined in |bpt| need not
                              be present in |images|. Otherwise an exception
                              is thrown if a partition is referenced in |bpt|
                              but not in |images|.

    Raises:
      BptParsingError: If an image file has an error.
      BptError: If another application-specific error occurs.
    """
    # Generate partition list and settings.
    partitions, settings = self._read_json([bpt], ab_collapse=False)

    # Validate partition sizes and offsets.
    self._validate_disk_partitions(partitions, settings.disk_size)

    # Sort according to 'offset' attribute.
    partitions = sorted(partitions, cmp=lambda x, y: cmp(x.offset, y.offset))

    # Create necessary tables.
    protective_mbr = self._generate_protective_mbr(settings)
    primary_gpt = self._generate_gpt(partitions, settings)
    secondary_gpt = self._generate_gpt(partitions, settings, primary=False)

    # Start at offset 0 for the MBR and primary GPT.
    output.seek(0)
    output.write(protective_mbr)
    output.write(primary_gpt)

    # Create mapping of partition name to partition image file.
    image_file_names = {}
    try:
      for name_path in images:
        name, path = name_path.split(":")
        image_file_names[name] = path
    except ValueError:
      raise BptParsingError(name_path, 'Bad image argument {}.'.format(
          name_path))

    # Read each image and insert it at the correct offset.
    for p in partitions:
      if p.label not in image_file_names:
        if allow_empty_partitions:
          continue
        else:
          raise BptParsingError(bpt.name, 'No content specified for partition'
                                ' with label {}'.format(p.label))

      input_image = ImageHandler(image_file_names[p.label])
      output.seek(p.offset)
      partition_blob = input_image.read(p.size)
      output.write(partition_blob)

    # Put the secondary GPT at the end of the disk.
    output.seek(settings.disk_size - len(secondary_gpt))
    output.write(secondary_gpt)

1445  def query_partition(self, input_file, part_label, query_type, ab_collapse):
1446    """Implementation of the 'query_partition' command.
1447
1448    This reads the partition definition file given by |input_file| and
1449    returns information of type |query_type| for the partition with
1450    label |part_label|.
1451
1452    Arguments:
1453      input_file: A JSON file to parse.
1454      part_label: Label of partition to query information about.
1455      query_type: The information to query, see |QUERY_PARTITION_TYPES|.
1456      ab_collapse: If True, collapse A/B partitions.
1457
1458    Returns:
1459      The requested information as a string or None if there is no
1460      partition with the given label.
1461
1462    Raises:
1463      BptParsingError: If an input file has an error.
      BptError: If another application-specific error occurs.
    """

    partitions, _ = self._read_json([input_file], ab_collapse)

    part = None
    for p in partitions:
      if p.label == part_label:
        part = p
        break

    if not part:
      return None

    value = part.__dict__.get(query_type)
    # Print out flags as a hex-value.
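    # e.g. a flags value of 4 prints as '0x0000000000000004'; the format
    # width 18 covers the '0x' prefix plus 16 zero-padded hex digits.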
    if query_type == 'flags':
      return '{:#018x}'.format(value)
    return str(value)


class BptTool(object):
  """Object for bpttool command-line tool."""

  def __init__(self):
    """Initializer method."""
    self.bpt = Bpt()

  def run(self, argv):
    """Command-line processor.

    Arguments:
      argv: Pass sys.argv from main.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands')

    sub_parser = subparsers.add_parser(
        'version',
        help='Prints version of bpttool.')
    sub_parser.set_defaults(func=self.version)

    sub_parser = subparsers.add_parser(
        'make_table',
        help='Lays out partitions and creates partition table.')
    sub_parser.add_argument('--input',
                            help='Path to partition definition file.',
                            type=argparse.FileType('r'),
                            action='append')
    sub_parser.add_argument('--ab_suffixes',
                            help='Set or override A/B suffixes.')
    sub_parser.add_argument('--partitions_offset_begin',
                            help='Set or override the offset at which '
                                 'partitions begin.',
                            type=ParseSize)
    sub_parser.add_argument('--disk_size',
                            help='Set or override disk size.',
                            type=ParseSize)
    sub_parser.add_argument('--disk_alignment',
                            help='Set or override disk alignment.',
                            type=ParseSize)
    sub_parser.add_argument('--disk_guid',
                            help='Set or override disk GUID.',
                            type=ParseGuid)
    sub_parser.add_argument('--output_json',
                            help='JSON output file name.',
                            type=argparse.FileType('w'))
    sub_parser.add_argument('--output_gpt',
                            help='Output file name for the protective MBR '
                                 'and primary/secondary GPT.',
                            type=argparse.FileType('w'))
    sub_parser.set_defaults(func=self.make_table)
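    # Example 'make_table' invocation (hypothetical file names):
    #   bpttool make_table --input partitions.bpt \
    #       --output_json disk.bpt --output_gpt disk_gpt.bin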

    sub_parser = subparsers.add_parser(
        'make_disk_image',
        help='Creates a disk image loaded with partitions.')
    sub_parser.add_argument('--output',
                            help='Path to image output.',
                            type=argparse.FileType('w'),
                            required=True)
    sub_parser.add_argument('--input',
                            help='Path to bpt file input.',
                            type=argparse.FileType('r'),
                            required=True)
    sub_parser.add_argument('--image',
                            help='Partition name and path to image file.',
                            metavar='PARTITION_NAME:PATH',
                            action='append')
    sub_parser.add_argument('--allow_empty_partitions',
                            help='Allow skipping partitions in bpt file.',
                            action='store_true')
    sub_parser.set_defaults(func=self.make_disk_image)

    sub_parser = subparsers.add_parser(
        'query_partition',
        help='Looks up information about a partition.')
    sub_parser.add_argument('--input',
                            help='Path to partition definition file.',
                            type=argparse.FileType('r'),
                            required=True)
    sub_parser.add_argument('--label',
                            help='Label of partition to look up.',
                            required=True)
    sub_parser.add_argument('--ab_collapse',
                            help='Collapse A/B partitions.',
                            action='store_true')
    sub_parser.add_argument('--type',
                            help='Type of information to look up.',
                            choices=QUERY_PARTITION_TYPES,
                            required=True)
    sub_parser.set_defaults(func=self.query_partition)
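    # Example 'query_partition' invocation (hypothetical file name and
    # label):
    #   bpttool query_partition --input disk.bpt --label system_a --type size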

    args = parser.parse_args(argv[1:])
    args.func(args)

  def version(self, _):
    """Implements the 'version' sub-command."""
    print '{}.{}'.format(BPT_VERSION_MAJOR, BPT_VERSION_MINOR)

  def query_partition(self, args):
    """Implements the 'query_partition' sub-command."""
    try:
      result = self.bpt.query_partition(args.input,
                                        args.label,
                                        args.type,
                                        args.ab_collapse)
    except BptParsingError as e:
      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
      sys.exit(1)
    except BptError as e:
      sys.stderr.write('{}\n'.format(e.message))
      sys.exit(1)

    if not result:
      sys.stderr.write('No partition with label "{}".\n'.format(args.label))
      sys.exit(1)

    print result

  def make_table(self, args):
    """Implements the 'make_table' sub-command."""
    if not args.input:
      sys.stderr.write('Option --input must be given at least once.\n')
      sys.exit(1)

    try:
      (json_str, gpt_bin) = self.bpt.make_table(args.input, args.ab_suffixes,
                                                args.partitions_offset_begin,
                                                args.disk_size,
                                                args.disk_alignment,
                                                args.disk_guid)
    except BptParsingError as e:
      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
      sys.exit(1)
    except BptError as e:
      sys.stderr.write('{}\n'.format(e.message))
      sys.exit(1)

    if args.output_json:
      args.output_json.write(json_str)
    if args.output_gpt:
      args.output_gpt.write(gpt_bin)

  def make_disk_image(self, args):
    """Implements the 'make_disk_image' sub-command."""
    if not args.input:
      sys.stderr.write('Option --input is required.\n')
      sys.exit(1)
    if not args.output:
      sys.stderr.write('Option --output is required.\n')
      sys.exit(1)

    try:
      self.bpt.make_disk_image(args.output,
                               args.input,
                               args.image,
                               args.allow_empty_partitions)
    except BptParsingError as e:
      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
      sys.exit(1)
    except BptError as e:
      sys.stderr.write('{}\n'.format(e.message))
      sys.exit(1)

if __name__ == '__main__':
  tool = BptTool()
  tool.run(sys.argv)