Lines matching refs: self
81 def seek(self, amount, direction=0):
84 self.idx += amount
86 self.idx = self.size - amount
88 self.idx = amount
90 if self.idx < 0:
91 self.idx = 0
92 if self.idx > self.size:
93 self.idx = self.size
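The branch conditions between lines 84 and 88 are not part of this excerpt; a minimal sketch of how the clamping logic above likely fits together, assuming os.SEEK_CUR / os.SEEK_END style direction flags:

import os

def seek(self, amount, direction=0):
    # Inferred structure: relative (line 84), from-end (line 86), or absolute
    # (line 88) positioning, then clamp the cursor into [0, size] (lines 90-93).
    if direction == os.SEEK_CUR:
        self.idx += amount
    elif direction == os.SEEK_END:
        self.idx = self.size - amount
    else:
        self.idx = amount
    if self.idx < 0:
        self.idx = 0
    if self.idx > self.size:
        self.idx = self.size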
102 def __init__(self, fs, cluster, size=None):
109 self.fs = fs
110 self.start_cluster = cluster
111 self.size = size
113 if self.size is None:
114 self.size = fs.get_chain_size(cluster)
116 self.idx = 0
118 def read(self, size):
120 if self.idx + size > self.size:
121 size = self.size - self.idx
122 got = self.fs.read_file(self.start_cluster, self.idx, size)
123 self.idx += len(got)
126 def write(self, data):
128 self.fs.write_file(self.start_cluster, self.idx, data)
129 self.idx += len(data)
131 if self.idx > self.size:
132 self.size = self.idx
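Lines 102-132 describe a small file-like handle over a cluster chain: the size defaults to the chain length (line 114), read() is clamped to that size (lines 120-121), and write() grows the size when it passes the old end (lines 131-132). A hypothetical usage sketch; the image path and cluster number are made up:

fs = fat("fat16.img")   # open an image (constructor excerpted at line 453)
f = fat_file(fs, 5)     # wrap cluster chain 5; size comes from get_chain_size()
f.write("hello")        # forwarded to fs.write_file(); may grow f.size
f.seek(0)
print(f.read(5))        # forwarded to fs.read_file(); never reads past f.size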
147 def __init__(self, backing):
152 self.backing = backing
153 self.dentries = []
154 to_read = self.backing.size / 32
156 self.backing.seek(0)
159 (dent, consumed) = self.backing.fs.read_dentry(self.backing)
163 self.dentries.append(dent)
165 def __str__(self):
166 return "\n".join([str(x) for x in self.dentries]) + "\n"
168 def add_dentry(self, attributes, shortname, ext, longname, first_cluster,
181 new_dentry = dentry(self.backing.fs, attributes, shortname, ext,
183 new_dentry.commit(self.backing)
184 self.dentries.append(new_dentry)
187 def make_short_name(self, name):
203 for dent in self.dentries:
214 def new_file(self, name, data=None):
226 chunk = self.backing.fs.allocate(size) if size > 0 else 0
227 (shortname, ext) = self.make_short_name(name)
228 self.add_dentry(0, shortname, ext, name, chunk, size)
233 data_file = fat_file(self.backing.fs, chunk, size)
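new_file() strings the directory helpers together: allocate a cluster chain only when there is data to store (line 226), derive an 8.3 short name (line 227), append a dentry (line 228), then wrap the chain in a fat_file for the payload (line 233). A hypothetical call; the file name is made up, and how the optional data argument is measured is not part of this excerpt:

fs = fat("fat16.img")
fs.root.new_file("HELLO.TXT")   # empty file: size 0, so chunk stays 0 (line 226)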
237 def open_subdirectory(self, name):
243 for dent in self.dentries:
247 chunk = self.backing.fs.allocate(1)
248 (shortname, ext) = self.make_short_name(name)
249 new_dentry = self.add_dentry(ATTRIBUTE_SUBDIRECTORY, shortname,
255 if hasattr(self.backing, 'start_cluster'):
256 parent_cluster = self.backing.start_cluster
284 def __init__(self, fs, attributes, shortname, ext, longname,
299 self.fs = fs
300 self.attributes = attributes
301 self.shortname = shortname
302 self.ext = ext
303 self.longname = longname
304 self.first_cluster = first_cluster
305 self.size = size
307 def name(self):
309 if self.longname:
310 return self.longname
312 if not self.ext or len(self.ext) == 0:
313 return self.shortname
315 return self.shortname + "." + self.ext
317 def __str__(self):
318 return self.name() + " (" + str(self.size) + \
319 " bytes @ " + str(self.first_cluster) + ")"
321 def is_directory(self):
323 return (self.attributes & ATTRIBUTE_SUBDIRECTORY) != 0
325 def open_file(self):
327 assert not self.is_directory(), "Cannot open directory as file"
328 return fat_file(self.fs, self.first_cluster, self.size)
330 def open_directory(self):
332 assert self.is_directory(), "Cannot open file as directory"
333 return fat_dir(fat_file(self.fs, self.first_cluster))
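The dentry accessors at lines 307-333 are enough to walk a tree. A hypothetical traversal over the dentries list that fat_dir builds at line 153:

def walk(directory, prefix=""):
    # Recursively print every entry reachable from a fat_dir.
    for dent in directory.dentries:
        if dent.name() in (".", ".."):   # skip self/parent links, if present
            continue
        print(prefix + dent.name())
        if dent.is_directory():
            walk(dent.open_directory(), prefix + dent.name() + "/")

walk(fat("fat16.img").root)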
335 def longname_records(self, checksum):
340 if self.longname is None:
342 if len(self.longname) == 0:
345 encoded_long_name = self.longname.encode('utf-16-le')
370 def commit(self, f):
376 padded_short_name = self.shortname.ljust(8)
377 padded_ext = self.ext.ljust(3)
379 longname_record_data = self.longname_records(lfn_checksum(name_data))
382 self.attributes,
391 self.first_cluster,
392 self.size)
403 f.write("\0" * self.fs.bytes_per_cluster)
404 f.seek(-self.fs.bytes_per_cluster, os.SEEK_CUR)
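commit() pads the short name to 8+3 characters (lines 376-377) and feeds the result to lfn_checksum (line 379, where name_data is presumably the padded name plus extension). That helper is not shown in this excerpt; a sketch of the standard VFAT long-file-name checksum it most likely implements:

def lfn_checksum(name_data):
    # Standard VFAT checksum: rotate right one bit, add the next byte, over the
    # 11 bytes of the space-padded "NAME    EXT" short name.
    s = 0
    for ch in name_data:
        s = (((s & 1) << 7) + (s >> 1) + ord(ch)) & 0xFF
    return s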
422 def __init__(self, fs):
423 self.fs = fs
424 self.idx = 0
425 self.size = fs.root_entries * 32
427 def read(self, count):
428 f = self.fs.f
429 f.seek(self.fs.data_start() + self.idx)
431 if self.idx + count > self.size:
432 count = self.size - self.idx
435 self.idx += len(ret)
438 def write(self, data):
439 f = self.fs.f
440 f.seek(self.fs.data_start() + self.idx)
442 if self.idx + len(data) > self.size:
443 data = data[:self.size - self.idx]
446 self.idx += len(data)
447 if self.idx > self.size:
448 self.size = self.idx
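read() and write() here mirror the fat_file interface (an idx cursor, clamping against size, growing size on writes past the end), so fat_dir can sit on either backing object; the seek() excerpted at lines 81-93 appears to serve both, since fat_dir calls backing.seek(0) at line 156 whichever backing it wraps. A hypothetical helper illustrating that shared surface:

def copy_through(backing, data):
    # Works with either backing object: write at the start, rewind, read back.
    backing.seek(0)
    backing.write(data)
    backing.seek(0)
    return backing.read(len(data))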
453 def __init__(self, path):
459 self.f = f
465 self.bytes_per_cluster = bytes_per_sector * sectors_per_cluster
474 self.root_entries = read_le_short(f)
479 self.fat_size = read_le_short(f) * bytes_per_sector
480 self.root = fat_dir(root_dentry_file(self))
482 def data_start(self):
486 return FAT_TABLE_START + self.fat_size * 2
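data_start() places the directory and data area right after two copies of the FAT (line 486); FAT_TABLE_START is presumably the byte offset of the first FAT copy. The implied image layout, as a sketch:

# byte 0                       reserved area (boot sector, BPB)
# FAT_TABLE_START              FAT copy 1 (fat_size bytes)
# FAT_TABLE_START + fat_size   FAT copy 2 (fat_size bytes)
# data_start()                 root directory entries (root_entries * 32 bytes,
#                              lines 429 and 600), followed by the cluster data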
488 def get_chain_size(self, head_cluster):
496 f = self.f
506 return cluster_count * self.bytes_per_cluster
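get_chain_size() walks the FAT chain from head_cluster and converts the cluster count to bytes (line 506); the walk itself is elided from this excerpt. A sketch of the usual FAT16 chain walk, with the table lookup left as an assumed helper:

def chain_size_sketch(read_fat_entry, head_cluster, bytes_per_cluster):
    # read_fat_entry(n) is assumed to return the 16-bit FAT entry for cluster n.
    cluster = head_cluster
    cluster_count = 0
    while cluster < 0xFFF8:              # 0xFFF8-0xFFFF mark end of chain on FAT16
        cluster_count += 1
        cluster = read_fat_entry(cluster)
    return cluster_count * bytes_per_cluster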
508 def read_dentry(self, f=None):
513 f = f or self.f
571 return (dentry(self, attributes, shortname, ext, lfn, first_cluster,
574 def read_file(self, head_cluster, start_byte, size):
581 f = self.f
591 if start_byte + size > self.bytes_per_cluster:
592 size_now = self.bytes_per_cluster - start_byte
594 if start_byte < self.bytes_per_cluster:
598 self.bytes_per_cluster
600 bytes_from_data_start = bytes_from_root + self.root_entries * 32
602 f.seek(self.data_start() + bytes_from_data_start)
609 start_byte -= self.bytes_per_cluster
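Lines 598-602 and 732-733 share one address calculation: cluster numbering starts at 2, and cluster data begins after the root directory entry area (root_entries * 32 bytes) that follows data_start(). The same arithmetic as a standalone helper (the name is hypothetical):

def cluster_byte_offset(fs, cluster):
    # Byte offset of the given cluster inside the image, per lines 732-733.
    bytes_from_root = (cluster - 2) * fs.bytes_per_cluster   # clusters start at 2
    return fs.data_start() + fs.root_entries * 32 + bytes_from_root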
621 def write_cluster_entry(self, entry):
626 f = self.f
628 skip_bytes(f, self.fat_size - 2)
630 rewind_bytes(f, self.fat_size)
632 def allocate(self, amount):
639 f = self.f
647 while pos < self.fat_size / 2:
672 grabbed += zone[1] * self.bytes_per_cluster
678 excess = (grabbed - amount) / self.bytes_per_cluster
692 self.write_cluster_entry(entry)
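allocate() scans the 16-bit FAT entries (fat_size / 2 of them, line 647) for free runs, tallies what it grabbed in bytes (line 672), trims any excess clusters (line 678), and writes the updated entries back through write_cluster_entry (line 692). A much-simplified sketch of the same idea, working in clusters rather than bytes and holding the FAT as an in-memory list:

def allocate_sketch(fat_entries, clusters_needed):
    # fat_entries: list of 16-bit FAT values indexed by cluster number; 0 means free.
    # Returns the head cluster of a newly linked chain, or None if it cannot fit.
    if clusters_needed <= 0:
        return None
    free = [n for n, v in enumerate(fat_entries) if v == 0 and n >= 2]
    if len(free) < clusters_needed:
        return None
    chain = free[:clusters_needed]
    for cur, nxt in zip(chain, chain[1:]):
        fat_entries[cur] = nxt           # link each cluster to the next one
    fat_entries[chain[-1]] = 0xFFFF      # end-of-chain marker
    return chain[0]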
696 def extend_cluster(self, cluster, amount):
703 f = self.f
708 return_cluster = self.allocate(amount)
710 self.write_cluster_entry(return_cluster)
713 def write_file(self, head_cluster, start_byte, data):
721 f = self.f
730 current_offset + self.bytes_per_cluster - start_byte)
732 cluster_file_offset = (self.data_start() + self.root_entries * 32 +
733 (current_cluster - 2) * self.bytes_per_cluster)
738 current_offset += self.bytes_per_cluster
743 next_cluster = self.extend_cluster(current_cluster, len(data))
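Putting the excerpts together, the intended top-level flow appears to be: open an image, take fat_dir handles, and let new_file() and open_subdirectory() manage clusters and dentries. A hypothetical end-to-end example; the image path and names are made up:

fs = fat("fat16.img")                     # parse geometry, FAT size, root (lines 453-480)
logs = fs.root.open_subdirectory("LOGS")  # reuse an existing entry or create a new one
logs.new_file("BOOT.TXT")                 # short name + dentry, clusters only if data given
print(fs.root)                            # fat_dir.__str__ prints one dentry per line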