#!/usr/bin/env python
#
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import struct

FAT_TABLE_START = 0x200
DEL_MARKER = 0xe5
ESCAPE_DEL_MARKER = 0x05

ATTRIBUTE_READ_ONLY = 0x1
ATTRIBUTE_HIDDEN = 0x2
ATTRIBUTE_SYSTEM = 0x4
ATTRIBUTE_VOLUME_LABEL = 0x8
ATTRIBUTE_SUBDIRECTORY = 0x10
ATTRIBUTE_ARCHIVE = 0x20
ATTRIBUTE_DEVICE = 0x40

LFN_ATTRIBUTES = \
    ATTRIBUTE_VOLUME_LABEL | \
    ATTRIBUTE_SYSTEM | \
    ATTRIBUTE_HIDDEN | \
    ATTRIBUTE_READ_ONLY
LFN_ATTRIBUTES_BYTE = struct.pack("B", LFN_ATTRIBUTES)

MAX_CLUSTER_ID = 0x7FFF
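# Note: LFN_ATTRIBUTES works out to 0x0F; directory entries whose attribute
# byte has all of these bits set are parsed as long-file-name (LFN) records
# rather than regular dentries. FAT table values above MAX_CLUSTER_ID are
# treated as end-of-chain markers throughout this script (allocate() writes
# 0xFFFF as the chain terminator).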

def read_le_short(f):
  "Read a little-endian 2-byte integer from the given file-like object"
  return struct.unpack("<H", f.read(2))[0]

def read_le_long(f):
  "Read a little-endian 4-byte integer from the given file-like object"
  return struct.unpack("<L", f.read(4))[0]

def read_byte(f):
  "Read a 1-byte integer from the given file-like object"
  return struct.unpack("B", f.read(1))[0]

def skip_bytes(f, n):
  "Fast-forward the given file-like object by n bytes"
  f.seek(n, os.SEEK_CUR)

def skip_short(f):
  "Fast-forward the given file-like object 2 bytes"
  skip_bytes(f, 2)

def skip_byte(f):
  "Fast-forward the given file-like object 1 byte"
  skip_bytes(f, 1)

def rewind_bytes(f, n):
  "Rewind the given file-like object n bytes"
  skip_bytes(f, -n)

def rewind_short(f):
  "Rewind the given file-like object 2 bytes"
  rewind_bytes(f, 2)

class fake_file(object):
  """
  Interface for python file-like objects that we use to manipulate the image.
  Inheritors must have an idx member which indicates the file pointer, and a
  size member which indicates the total file size.
  """

  def seek(self, amount, direction=0):
    "Implementation of seek from python's file-like object interface."
    if direction == os.SEEK_CUR:
      self.idx += amount
    elif direction == os.SEEK_END:
      self.idx = self.size - amount
    else:
      self.idx = amount

    if self.idx < 0:
      self.idx = 0
    if self.idx > self.size:
      self.idx = self.size

class fat_file(fake_file):
  """
  A file inside of our fat image. The file may or may not have a dentry, and
  if it does, this object knows nothing about it. All we see is a valid
  cluster chain.
  """

  def __init__(self, fs, cluster, size=None):
    """
    fs: The fat() object for the image this file resides in.
    cluster: The first cluster of data for this file.
    size: The size of this file. If not given, we use the total length of the
          cluster chain that starts from the cluster argument.
    """
    self.fs = fs
    self.start_cluster = cluster
    self.size = size

    if self.size is None:
      self.size = fs.get_chain_size(cluster)

    self.idx = 0

  def read(self, size):
    "Read method for pythonic file-like interface."
    if self.idx + size > self.size:
      size = self.size - self.idx
    got = self.fs.read_file(self.start_cluster, self.idx, size)
    self.idx += len(got)
    return got

  def write(self, data):
    "Write method for pythonic file-like interface."
    self.fs.write_file(self.start_cluster, self.idx, data)
    self.idx += len(data)

    if self.idx > self.size:
      self.size = self.idx

def shorten(name, index):
  """
  Create a file short name from the given long name (with the extension already
  removed). The index argument gives a disambiguating integer to work into the
  name to avoid collisions.
  """
  name = "".join(name.split('.')).upper()
  postfix = "~" + str(index)
  return name[:8 - len(postfix)] + postfix
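# For illustration with a hypothetical name: shorten("longfilename", 1) keeps
# the first six characters and appends the disambiguator, giving "LONGFI~1".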

class fat_dir(object):
  "A directory in our fat filesystem."

  def __init__(self, backing):
    """
    backing: A file-like object from which we can read dentry info. Should have
    an fs member allowing us to get to the underlying image.
    """
    self.backing = backing
    self.dentries = []
    to_read = self.backing.size / 32

    self.backing.seek(0)

    while to_read > 0:
      (dent, consumed) = self.backing.fs.read_dentry(self.backing)
      to_read -= consumed

      if dent:
        self.dentries.append(dent)

  def __str__(self):
    return "\n".join([str(x) for x in self.dentries]) + "\n"

  def add_dentry(self, attributes, shortname, ext, longname, first_cluster,
      size):
    """
    Add a new dentry to this directory.
    attributes: Attribute flags for this dentry. See the ATTRIBUTE_ constants
                above.
    shortname: Short name of this file. Up to 8 characters, no dots.
    ext: Extension for this file. Up to 3 characters, no dots.
    longname: The long name for this file, with extension. Largely unrestricted.
    first_cluster: The first cluster in the cluster chain holding the contents
                   of this file.
    size: The size of this file. Set to 0 for subdirectories.
    """
    new_dentry = dentry(self.backing.fs, attributes, shortname, ext,
        longname, first_cluster, size)
    new_dentry.commit(self.backing)
    self.dentries.append(new_dentry)
    return new_dentry

  def make_short_name(self, name):
    """
    Given a long file name, return an 8.3 short name as a tuple. The name is
    engineered not to collide with the other short names in this folder.
    """
    parts = name.rsplit('.', 1)

    if len(parts) == 1:
      parts.append('')

    name = parts[0]
    ext = parts[1].upper()

    index = 1
    shortened = shorten(name, index)

    for dent in self.dentries:
      assert dent.longname != name, "File must not exist"
      if dent.shortname == shortened:
        index += 1
        shortened = shorten(name, index)

    if len(name) <= 8 and len(ext) <= 3 and not '.' in name:
      return (name.upper().ljust(8), ext.ljust(3))

    return (shortened.ljust(8), ext[:3].ljust(3))
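  # For illustration with hypothetical names: on an empty directory,
  # make_short_name("document1.txt") returns ("DOCUME~1", "TXT"), while
  # "KERNEL" already fits 8.3 and comes back as ("KERNEL  ", "   ").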

  def new_file(self, name, data=None):
    """
    Add a new regular file to this directory.
    name: The name of the new file.
    data: The contents of the new file. Given as a file-like object.
    """
    size = 0
    if data:
      data.seek(0, os.SEEK_END)
      size = data.tell()

    # Empty files shouldn't have any clusters assigned.
    chunk = self.backing.fs.allocate(size) if size > 0 else 0
    (shortname, ext) = self.make_short_name(name)
    self.add_dentry(0, shortname, ext, name, chunk, size)

    if data is None:
      return

    data_file = fat_file(self.backing.fs, chunk, size)
    data.seek(0)
    data_file.write(data.read())

  def open_subdirectory(self, name):
    """
    Open a subdirectory of this directory with the given name. If the
    subdirectory doesn't exist, a new one is created instead.
    Returns a fat_dir().
    """
    for dent in self.dentries:
      if dent.longname == name:
        return dent.open_directory()

    chunk = self.backing.fs.allocate(1)
    (shortname, ext) = self.make_short_name(name)
    new_dentry = self.add_dentry(ATTRIBUTE_SUBDIRECTORY, shortname,
            ext, name, chunk, 0)
    result = new_dentry.open_directory()

    parent_cluster = 0

    # The root directory's backing file has no start_cluster, so '..' in a
    # directory created directly under the root points at cluster 0.
    if hasattr(self.backing, 'start_cluster'):
      parent_cluster = self.backing.start_cluster

    # Every new subdirectory gets '.' and '..' entries pointing at itself and
    # at its parent.
    result.add_dentry(ATTRIBUTE_SUBDIRECTORY, '.', '', '', chunk, 0)
    result.add_dentry(ATTRIBUTE_SUBDIRECTORY, '..', '', '', parent_cluster, 0)

    return result

def lfn_checksum(name_data):
  """
  Given the characters of an 8.3 file name (concatenated *without* the dot),
  compute the one-byte checksum that must appear in the corresponding long
  file name entries.
  """
  assert len(name_data) == 11, "Name data should be exactly 11 characters"
  name_data = struct.unpack("B" * 11, name_data)

  result = 0

  for char in name_data:
    # Rotate the running sum right by one bit, then add the next character,
    # keeping only the low byte.
    last_bit = (result & 1) << 7
    result = (result >> 1) | last_bit
    result += char
    result = result & 0xFF

  return struct.pack("B", result)

class dentry(object):
  "A directory entry"
  def __init__(self, fs, attributes, shortname, ext, longname,
      first_cluster, size):
    """
    fs: The fat() object for the image we're stored in.
    attributes: The attribute flags for this dentry. See the ATTRIBUTE_ flags
                above.
    shortname: The short name stored in this dentry. Up to 8 characters, no
               dots.
    ext: The file extension stored in this dentry. Up to 3 characters, no
         dots.
    longname: The long file name stored in this dentry.
    first_cluster: The first cluster in the cluster chain backing the file
                   this dentry points to.
    size: Size of the file this dentry points to. 0 for subdirectories.
    """
    self.fs = fs
    self.attributes = attributes
    self.shortname = shortname
    self.ext = ext
    self.longname = longname
    self.first_cluster = first_cluster
    self.size = size

  def name(self):
    "A friendly text file name for this dentry."
    if self.longname:
      return self.longname

    if not self.ext or len(self.ext) == 0:
      return self.shortname

    return self.shortname + "." + self.ext

  def __str__(self):
    return self.name() + " (" + str(self.size) + \
      " bytes @ " + str(self.first_cluster) + ")"

  def is_directory(self):
    "Return whether this dentry points to a directory."
    return (self.attributes & ATTRIBUTE_SUBDIRECTORY) != 0

  def open_file(self):
    "Open the target of this dentry if it is a regular file."
    assert not self.is_directory(), "Cannot open directory as file"
    return fat_file(self.fs, self.first_cluster, self.size)

  def open_directory(self):
    "Open the target of this dentry if it is a directory."
    assert self.is_directory(), "Cannot open file as directory"
    return fat_dir(fat_file(self.fs, self.first_cluster))

  def longname_records(self, checksum):
    """
    Get the longname records necessary to store this dentry's long name,
    packed as a series of 32-byte strings.
    """
    if self.longname is None:
      return []
    if len(self.longname) == 0:
      return []

    encoded_long_name = self.longname.encode('utf-16-le')
    long_name_padding = "\0" * (26 - (len(encoded_long_name) % 26))
    padded_long_name = encoded_long_name + long_name_padding

    chunks = [padded_long_name[i:i+26] for i in range(0,
      len(padded_long_name), 26)]
    records = []
    sequence_number = 1

    for c in chunks:
      sequence_byte = struct.pack("B", sequence_number)
      sequence_number += 1
      record = sequence_byte + c[:10] + LFN_ATTRIBUTES_BYTE + "\0" + \
          checksum + c[10:22] + "\0\0" + c[22:]
      records.append(record)

    last = records.pop()
    last_seq = struct.unpack("B", last[0])[0]
    last_seq = last_seq | 0x40
    last = struct.pack("B", last_seq) + last[1:]
    records.append(last)
    records.reverse()

    return records
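  # Each 32-byte LFN record built above is laid out as: sequence number (1),
  # name chars 1-5 (10), attribute byte 0x0F (1), type (1), short-name
  # checksum (1), name chars 6-11 (12), first-cluster field of zero (2), and
  # name chars 12-13 (4). The 0x40 bit marks the final (highest-numbered)
  # record, and the records are returned in reverse order so they end up
  # written immediately before the short entry.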

  def commit(self, f):
    """
    Write this dentry into the given file-like object,
    which is assumed to contain a FAT directory.
    """
    f.seek(0)
    padded_short_name = self.shortname.ljust(8)
    padded_ext = self.ext.ljust(3)
    name_data = padded_short_name + padded_ext
    longname_record_data = self.longname_records(lfn_checksum(name_data))
    record = struct.pack("<11sBBBHHHHHHHL",
        name_data,
        self.attributes,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        self.first_cluster,
        self.size)
    entry = "".join(longname_record_data + [record])

    record_count = len(longname_record_data) + 1

    found_count = 0
    while found_count < record_count:
      record = f.read(32)

      if record is None or len(record) != 32:
        # We reached the EOF, so we need to extend the file with a new cluster.
        f.write("\0" * self.fs.bytes_per_cluster)
        f.seek(-self.fs.bytes_per_cluster, os.SEEK_CUR)
        record = f.read(32)

      marker = struct.unpack("B", record[0])[0]

      if marker == DEL_MARKER or marker == 0:
        found_count += 1
      else:
        found_count = 0

    f.seek(-(record_count * 32), os.SEEK_CUR)
    f.write(entry)
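  # commit() above scans the directory 32 bytes at a time for a run of
  # record_count slots that are free (first byte 0x00) or deleted (0xE5),
  # growing the directory by one zeroed cluster whenever it hits EOF, then
  # rewinds over the run and writes the LFN records plus the short entry.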

class root_dentry_file(fake_file):
  """
  File-like object for the root directory. The root directory isn't stored in a
  normal file, so we can't use a normal fat_file object to create a view of it.
  """
  def __init__(self, fs):
    self.fs = fs
    self.idx = 0
    self.size = fs.root_entries * 32

  def read(self, count):
    f = self.fs.f
    f.seek(self.fs.data_start() + self.idx)

    if self.idx + count > self.size:
      count = self.size - self.idx

    ret = f.read(count)
    self.idx += len(ret)
    return ret

  def write(self, data):
    f = self.fs.f
    f.seek(self.fs.data_start() + self.idx)

    if self.idx + len(data) > self.size:
      data = data[:self.size - self.idx]

    f.write(data)
    self.idx += len(data)
    if self.idx > self.size:
      self.size = self.idx

class fat(object):
  "A FAT image"

  def __init__(self, path):
    """
    path: Path to an image file containing a FAT file system.
    """
    f = open(path, "r+b")

    self.f = f

    f.seek(0xb)
    bytes_per_sector = read_le_short(f)
    sectors_per_cluster = read_byte(f)

    self.bytes_per_cluster = bytes_per_sector * sectors_per_cluster

    reserved_sectors = read_le_short(f)
    assert reserved_sectors == 1, \
        "Can only handle FAT with 1 reserved sector"

    fat_count = read_byte(f)
    assert fat_count == 2, "Can only handle FAT with 2 tables"

    self.root_entries = read_le_short(f)

    skip_short(f) # Image size. Sort of. Useless field.
    skip_byte(f) # Media type. We don't care.

    self.fat_size = read_le_short(f) * bytes_per_sector
    self.root = fat_dir(root_dentry_file(self))

  def data_start(self):
    """
    Index of the first byte after the FAT tables.
    """
    return FAT_TABLE_START + self.fat_size * 2
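  # Layout assumed by this script: a single reserved/boot sector
  # (FAT_TABLE_START is hard-coded to 0x200, i.e. a 512-byte sector), two FAT
  # copies of fat_size bytes each, the root directory of root_entries 32-byte
  # slots, and then the data area, whose clusters are numbered starting at 2.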

  def get_chain_size(self, head_cluster):
    """
    Return how many total bytes are in the cluster chain rooted at the given
    cluster.
    """
    if head_cluster == 0:
      return 0

    f = self.f
    f.seek(FAT_TABLE_START + head_cluster * 2)

    cluster_count = 0

    while head_cluster <= MAX_CLUSTER_ID:
      cluster_count += 1
      head_cluster = read_le_short(f)
      f.seek(FAT_TABLE_START + head_cluster * 2)

    return cluster_count * self.bytes_per_cluster

  def read_dentry(self, f=None):
    """
    Read and decode a dentry from the given file-like object at its current
    seek position.
    """
    f = f or self.f
    attributes = None

    consumed = 1

    lfn_entries = {}

    while True:
      skip_bytes(f, 11)
      attributes = read_byte(f)
      rewind_bytes(f, 12)

      if attributes & LFN_ATTRIBUTES != LFN_ATTRIBUTES:
        break

      consumed += 1

      seq = read_byte(f)
      chars = f.read(10)
      skip_bytes(f, 3) # Attribute, type, and checksum bytes.
      chars += f.read(12)
      skip_short(f) # First-cluster field, always zero for LFN records.
      chars += f.read(4)

      chars = unicode(chars, "utf-16-le").encode("utf-8")

      lfn_entries[seq] = chars

    ind = read_byte(f)

    if ind == 0 or ind == DEL_MARKER:
      skip_bytes(f, 31)
      return (None, consumed)

    if ind == ESCAPE_DEL_MARKER:
      ind = DEL_MARKER

    ind = str(unichr(ind))

    if ind == '.':
      skip_bytes(f, 31)
      return (None, consumed)

    shortname = ind + f.read(7).rstrip()
    ext = f.read(3).rstrip()
    skip_bytes(f, 15) # Assorted flags, ctime/atime/mtime, etc.
    first_cluster = read_le_short(f)
    size = read_le_long(f)

    lfn = lfn_entries.items()
    lfn.sort(key=lambda x: x[0])
    lfn = reduce(lambda x, y: x + y[1], lfn, "")

    if len(lfn) == 0:
      lfn = None
    else:
      lfn = lfn.split('\0', 1)[0]

    return (dentry(self, attributes, shortname, ext, lfn, first_cluster,
      size), consumed)
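  # read_dentry() collects any LFN records preceding a short entry into
  # lfn_entries keyed by sequence number, then reassembles them in order and
  # truncates the name at the first NUL; free, deleted, and '.'/'..' entries
  # come back as (None, consumed).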

  def read_file(self, head_cluster, start_byte, size):
    """
    Read from a given FAT file.
    head_cluster: The first cluster in the file.
    start_byte: How many bytes into the file to begin the read.
    size: How many bytes to read.
    """
    f = self.f

    assert size >= 0, "Can't read a negative amount"
    if size == 0:
      return ""

    got_data = ""

    while True:
      size_now = size
      if start_byte + size > self.bytes_per_cluster:
        size_now = self.bytes_per_cluster - start_byte

      if start_byte < self.bytes_per_cluster:
        size -= size_now

        cluster_bytes_from_root = (head_cluster - 2) * \
            self.bytes_per_cluster
        bytes_from_root = cluster_bytes_from_root + start_byte
        bytes_from_data_start = bytes_from_root + self.root_entries * 32

        f.seek(self.data_start() + bytes_from_data_start)
        line = f.read(size_now)
        got_data += line

        if size == 0:
          return got_data

      start_byte -= self.bytes_per_cluster

      if start_byte < 0:
        start_byte = 0

      f.seek(FAT_TABLE_START + head_cluster * 2)
      assert head_cluster <= MAX_CLUSTER_ID, "Out-of-bounds read"
      head_cluster = read_le_short(f)
      assert head_cluster > 0, "Read free cluster"

    return got_data

  def write_cluster_entry(self, entry):
    """
    Write a cluster entry to the FAT table. Assumes our backing file is
    already positioned at the correct entry in the first FAT table.
    """
    f = self.f
    f.write(struct.pack("<H", entry))
    skip_bytes(f, self.fat_size - 2)
    f.write(struct.pack("<H", entry))
    rewind_bytes(f, self.fat_size)
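  # The entry is mirrored into both FAT copies (which sit fat_size bytes
  # apart), and the final rewind leaves the file position two bytes past where
  # it started, so consecutive calls fill consecutive table entries.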

  def allocate(self, amount):
    """
    Allocate a new cluster chain big enough to hold at least the given amount
    of bytes.
    """
    assert amount > 0, "Must allocate a non-zero amount."

    f = self.f
    f.seek(FAT_TABLE_START + 4)

    current = None
    current_size = 0
    free_zones = {}

    pos = 2
    while pos < self.fat_size / 2:
      data = read_le_short(f)

      if data == 0 and current is not None:
        current_size += 1
      elif data == 0:
        current = pos
        current_size = 1
      elif current is not None:
        free_zones[current] = current_size
        current = None

      pos += 1

    if current is not None:
      free_zones[current] = current_size

    free_zones = free_zones.items()
    free_zones.sort(key=lambda x: x[1])

    grabbed_zones = []
    grabbed = 0

    while grabbed < amount and len(free_zones) > 0:
      zone = free_zones.pop()
      grabbed += zone[1] * self.bytes_per_cluster
      grabbed_zones.append(zone)

    if grabbed < amount:
      return None

    excess = (grabbed - amount) / self.bytes_per_cluster

    grabbed_zones[-1] = (grabbed_zones[-1][0],
        grabbed_zones[-1][1] - excess)

    out = None
    grabbed_zones.reverse()

    for cluster, size in grabbed_zones:
      entries = range(cluster + 1, cluster + size)
      entries.append(out or 0xFFFF)
      out = cluster
      f.seek(FAT_TABLE_START + cluster * 2)
      for entry in entries:
        self.write_cluster_entry(entry)

    return out
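  # Allocation strategy of allocate() above: scan the FAT for runs of free
  # (zero) entries, grab the largest runs until enough clusters are covered,
  # trim the excess clusters off the last run grabbed, then link the runs from
  # back to front so each run ends by pointing at the previously linked run
  # (or at the 0xFFFF end-of-chain marker), and return the head cluster.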

  def extend_cluster(self, cluster, amount):
    """
    Given a cluster which is the *last* cluster in a chain, extend it to hold
    at least `amount` more bytes.
    """
    if amount == 0:
      return
    f = self.f
    entry_offset = FAT_TABLE_START + cluster * 2
    f.seek(entry_offset)
    assert read_le_short(f) == 0xFFFF, "Extending from middle of chain"

    return_cluster = self.allocate(amount)
    f.seek(entry_offset)
    self.write_cluster_entry(return_cluster)
    return return_cluster

  def write_file(self, head_cluster, start_byte, data):
    """
    Write to a given FAT file.

    head_cluster: The first cluster in the file.
    start_byte: How many bytes into the file to begin the write.
    data: The data to write.
    """
    f = self.f
    last_offset = start_byte + len(data)
    current_offset = 0
    current_cluster = head_cluster

    while current_offset < last_offset:
      # Write everything that falls in the cluster starting at current_offset.
      data_begin = max(0, current_offset - start_byte)
      data_end = min(len(data),
                     current_offset + self.bytes_per_cluster - start_byte)
      if data_end > data_begin:
        cluster_file_offset = (self.data_start() + self.root_entries * 32 +
                               (current_cluster - 2) * self.bytes_per_cluster)
        f.seek(cluster_file_offset + max(0, start_byte - current_offset))
        f.write(data[data_begin:data_end])

      # Advance to the next cluster in the chain or get a new cluster if needed.
      current_offset += self.bytes_per_cluster
      if last_offset > current_offset:
        f.seek(FAT_TABLE_START + current_cluster * 2)
        next_cluster = read_le_short(f)
        if next_cluster > MAX_CLUSTER_ID:
          next_cluster = self.extend_cluster(current_cluster, len(data))
        current_cluster = next_cluster
        assert current_cluster > 0, "Cannot write free cluster"


def add_item(directory, item):
  """
  Copy a file into the given FAT directory. If the path given is a directory,
  copy recursively.
  directory: fat_dir to copy the file into.
  item: Path of the local file to copy.
  """
  if os.path.isdir(item):
    base = os.path.basename(item)
    if len(base) == 0:
      base = os.path.basename(item[:-1])
    sub = directory.open_subdirectory(base)
    for next_item in sorted(os.listdir(item)):
      add_item(sub, os.path.join(item, next_item))
  else:
    with open(item, 'rb') as f:
      directory.new_file(os.path.basename(item), f)

if __name__ == "__main__":
  if len(sys.argv) < 3:
    print("Usage: fat16copy.py <image> <file> [<file> ...]")
    print("Files are copied into the root of the image.")
    print("Directories are copied recursively")
    sys.exit(1)

  root = fat(sys.argv[1]).root

  for p in sys.argv[2:]:
    add_item(root, p)

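# Example invocation (hypothetical paths):
#   python fat16copy.py boot.img kernel ramdisk.img cmdline/
# copies "kernel" and "ramdisk.img" into the root of boot.img and copies the
# directory "cmdline/" into it recursively.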