#!/usr/bin/env python
#
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import bisect
import logging
import os
import struct
import threading
from hashlib import sha1

import rangelib

logger = logging.getLogger(__name__)


class SparseImage(object):
33  """Wraps a sparse image file into an image object.
34
35  Wraps a sparse image file (and optional file map and clobbered_blocks) into
36  an image object suitable for passing to BlockImageDiff. file_map contains
37  the mapping between files and their blocks. clobbered_blocks contains the set
38  of blocks that should be always written to the target regardless of the old
39  contents (i.e. copying instead of patching). clobbered_blocks should be in
40  the form of a string like "0" or "0 1-5 8".
41  """
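  # A minimal usage sketch (the image and map file names are hypothetical):
  #
  #   simg = SparseImage("system.img", file_map_fn="system.map")
  #   for name, ranges in simg.file_map.items():
  #     print(name, simg.RangeSha1(ranges))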
  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True, allow_shared_blocks=False,
               hashtree_info_generator=None):
    self.simg_f = f = open(simg_fn, mode)

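    # Sparse image header ("<I4H4I", 28 bytes, little-endian): magic (u32),
    # major and minor version (u16 each), file header size (u16), chunk
    # header size (u16), block size (u32), total output blocks (u32), and
    # total input chunks (u32).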
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    logger.info(
        "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
        blk_sz, total_chunks)

    if not build_map:
      assert not hashtree_info_generator, \
        "Cannot generate the hashtree info without building the offset map."
      return

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

    for i in range(total_chunks):
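      # Chunk header ("<2H2I", 12 bytes): chunk type (u16), a reserved field
      # (u16, skipped here), chunk size in output blocks (u32), and total
      # chunk size in bytes including this 12-byte header (u32).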
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

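      # Raw chunk (0xCAC1): chunk_sz blocks of literal data follow.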
      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

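      # Fill chunk (0xCAC2): a 4-byte pattern to be repeated across chunk_sz
      # blocks; only the pattern is stored in the file.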
      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

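      # Don't-care chunk (0xCAC3): chunk_sz blocks for which no data is
      # stored in the file.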
      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        # Fills the don't-care data ranges with zeros.
        # TODO(xunchang) pass the care_map to the hashtree info generator.
        if hashtree_info_generator:
          fill_data = b'\x00' * 4
          # To compute the verity hashtree on the device, we need to write
          # zeros explicitly to the don't-care ranges, because these ranges
          # may contain non-zero data from the previous build.
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, None, fill_data))

        pos += chunk_sz

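      # CRC32 chunk (0xCAC4): carries a checksum over the image; not
      # supported by this parser.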
      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.generator_lock = threading.Lock()

    self.care_map = rangelib.RangeSet(care_data)
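    # Starting block of each chunk, kept sorted so a block number can be
    # mapped back to its chunk with bisect.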
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    self.hashtree_info = None
    if hashtree_info_generator:
      self.hashtree_info = hashtree_info_generator.Generate(self)

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                            allow_shared_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk
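    # Fill chunk record ("<2H3I"): type 0xCAC2, reserved 0, chunk_sz of
    # `blocks`, total_sz of 16 bytes (12-byte header + 4-byte fill value),
    # then the 4-byte fill value itself.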
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return list(self._GetRangeData(ranges))

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    A lock protects the generator so that two instances of it cannot
    run on the same object simultaneously."""

    f = self.simg_f
    with self.generator_lock:
      for s, e in ranges:
        to_read = e-s
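        # offset_index holds each chunk's starting block; bisect_right finds
        # the chunk that contains block s.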
        idx = bisect.bisect_right(self.offset_index, s) - 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

        # For the first chunk we may be starting partway through it.
        remain = chunk_len - (s - chunk_start)
        this_read = min(remain, to_read)
        if filepos is not None:
          p = filepos + ((s - chunk_start) * self.blocksize)
          f.seek(p, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
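          # fill_data is 4 bytes, so repeat it blocksize / 4 times per block.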
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

        while to_read > 0:
          # Continue with following chunks if this range spans multiple chunks.
          idx += 1
          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
          this_read = min(chunk_len, to_read)
          if filepos is not None:
            f.seek(filepos, os.SEEK_SET)
            yield f.read(this_read * self.blocksize)
          else:
            yield fill_data * (this_read * (self.blocksize >> 2))
          to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
    """Loads the given block map file.

    Args:
      fn: The filename of the block map file.
      clobbered_blocks: A RangeSet instance for the clobbered blocks.
      allow_shared_blocks: Whether having shared blocks is allowed.
    """
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges_text = line.rstrip().split(None, 1)
        raw_ranges = rangelib.RangeSet.parse(ranges_text)

        # Note: e2fsdroid records holes in the extent tree as "0" blocks.
        # This causes confusion because clobbered_blocks always includes
        # the superblock (physical block #0). Since the 0 blocks here do
        # not represent actual physical blocks, remove them from the set.
        ranges = raw_ranges.subtract(rangelib.RangeSet("0"))
        # b/150334561: we need to preserve the monotonic property of the raw
        # range. Otherwise, the validation script will read the blocks in the
        # wrong order when pulling files from the image.
        ranges.monotonic = raw_ranges.monotonic
        ranges.extra['text_str'] = ranges_text

        if allow_shared_blocks:
          # Find the shared blocks that have been claimed by others. If any
          # are found, tag the entry so that we can skip applying imgdiff on
          # this file.
          shared_blocks = ranges.subtract(remaining)
          if shared_blocks:
            non_shared = ranges.subtract(shared_blocks)
            if not non_shared:
              continue

            # Put the non-shared RangeSet as the value in the block map, which
            # has a copy of the original RangeSet.
            non_shared.extra['uses_shared_blocks'] = ranges
            ranges = non_shared

        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)
    if self.hashtree_info:
      remaining = remaining.subtract(self.hashtree_info.hashtree_range)

    # For all the remaining blocks in the care_map (i.e., those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = b'\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map,
    # so the whole system image will be treated as a single file. But due to
    # an unknown bug, the updater will be killed by OOM when writing the
    # patched image back to flash (observed on lenok-userdebug MEA49). Until
    # there is a real fix, we evenly divide the non-zero blocks into smaller
    # groups. Note that each block contributes a start/end pair to the list
    # below, so a group caps at MAX_BLOCKS_PER_GROUP / 2 = 512 blocks (2 MiB
    # with a 4 KiB block size).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks
    if self.hashtree_info:
      out["__HASHTREE"] = self.hashtree_info.hashtree_range

  def ResetFileMap(self):
    """Throws away the file map and treats the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}


def GetImagePartitionSize(img):
  try:
    simg = SparseImage(img, build_map=False)
    return simg.blocksize * simg.total_blocks
  except ValueError:
    return os.path.getsize(img)


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('image')
  parser.add_argument('--get_partition_size', action='store_true',
                      help='Return partition size of the image')
  args = parser.parse_args()
  if args.get_partition_size:
    print(GetImagePartitionSize(args.image))