1""" 2Python implementation of the io module. 3""" 4 5import os 6import abc 7import codecs 8import errno 9import stat 10import sys 11# Import _thread instead of threading to reduce startup cost 12from _thread import allocate_lock as Lock 13if sys.platform in {'win32', 'cygwin'}: 14 from msvcrt import setmode as _setmode 15else: 16 _setmode = None 17 18import io 19from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END) 20 21valid_seek_flags = {0, 1, 2} # Hardwired values 22if hasattr(os, 'SEEK_HOLE') : 23 valid_seek_flags.add(os.SEEK_HOLE) 24 valid_seek_flags.add(os.SEEK_DATA) 25 26# open() uses st_blksize whenever we can 27DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes 28 29# NOTE: Base classes defined here are registered with the "official" ABCs 30# defined in io.py. We don't use real inheritance though, because we don't want 31# to inherit the C implementations. 32 33# Rebind for compatibility 34BlockingIOError = BlockingIOError 35 36 37def open(file, mode="r", buffering=-1, encoding=None, errors=None, 38 newline=None, closefd=True, opener=None): 39 40 r"""Open file and return a stream. Raise OSError upon failure. 41 42 file is either a text or byte string giving the name (and the path 43 if the file isn't in the current working directory) of the file to 44 be opened or an integer file descriptor of the file to be 45 wrapped. (If a file descriptor is given, it is closed when the 46 returned I/O object is closed, unless closefd is set to False.) 47 48 mode is an optional string that specifies the mode in which the file is 49 opened. It defaults to 'r' which means open for reading in text mode. Other 50 common values are 'w' for writing (truncating the file if it already 51 exists), 'x' for exclusive creation of a new file, and 'a' for appending 52 (which on some Unix systems, means that all writes append to the end of the 53 file regardless of the current seek position). In text mode, if encoding is 54 not specified the encoding used is platform dependent. (For reading and 55 writing raw bytes use binary mode and leave encoding unspecified.) The 56 available modes are: 57 58 ========= =============================================================== 59 Character Meaning 60 --------- --------------------------------------------------------------- 61 'r' open for reading (default) 62 'w' open for writing, truncating the file first 63 'x' create a new file and open it for writing 64 'a' open for writing, appending to the end of the file if it exists 65 'b' binary mode 66 't' text mode (default) 67 '+' open a disk file for updating (reading and writing) 68 'U' universal newline mode (deprecated) 69 ========= =============================================================== 70 71 The default mode is 'rt' (open for reading text). For binary random 72 access, the mode 'w+b' opens and truncates the file to 0 bytes, while 73 'r+b' opens the file without truncation. The 'x' mode implies 'w' and 74 raises an `FileExistsError` if the file already exists. 75 76 Python distinguishes between files opened in binary and text modes, 77 even when the underlying operating system doesn't. Files opened in 78 binary mode (appending 'b' to the mode argument) return contents as 79 bytes objects without any decoding. In text mode (the default, or when 80 't' is appended to the mode argument), the contents of the file are 81 returned as strings, the bytes having been first decoded using a 82 platform-dependent encoding or using the specified encoding if given. 

    'U' mode is deprecated and will raise an exception in future versions
    of Python.  It has no effect in Python 3.  Use newline to control
    universal newlines mode.

    buffering is an optional integer used to set the buffering policy.
    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
    line buffering (only usable in text mode), and an integer > 1 to indicate
    the size of a fixed-size chunk buffer.  When no buffering argument is
    given, the default buffering policy works as follows:

    * Binary files are buffered in fixed-size chunks; the size of the buffer
      is chosen using a heuristic trying to determine the underlying device's
      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
      On many systems, the buffer will typically be 4096 or 8192 bytes long.

    * "Interactive" text files (files for which isatty() returns True)
      use line buffering.  Other text files use the policy described above
      for binary files.

    encoding is the str name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed.  See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.

    newline is a string controlling how universal newlines works (it only
    applies to text mode). It can be None, '', '\n', '\r', and '\r\n'.  It works
    as follows:

    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
      these are translated into '\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\n' characters written are translated to
      the given string.

    closefd is a bool.  If closefd is False, the underlying file descriptor will
    be kept open when the file is closed.  This does not work when a file name is
    given and must be True in that case.

    The newly created file is non-inheritable.

    A custom opener can be used by passing a callable as *opener*. The
    underlying file descriptor for the file object is then obtained by calling
    *opener* with (*file*, *flags*). *opener* must return an open file
    descriptor (passing os.open as *opener* results in functionality similar to
    passing None).

    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    if not isinstance(file, int):
        file = os.fspath(file)
    if not isinstance(file, (str, bytes, int)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, str):
        raise TypeError("invalid mode: %r" % mode)
    if not isinstance(buffering, int):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, str):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, str):
        raise TypeError("invalid errors: %r" % errors)
    modes = set(mode)
    if modes - set("axrwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    creating = "x" in modes
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if creating or writing or appending or updating:
            raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
        import warnings
        warnings.warn("'U' mode is deprecated",
                      DeprecationWarning, 2)
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if creating + reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (creating or reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    raw = FileIO(file,
                 (creating and "x" or "") +
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd, opener=opener)
    result = raw
    try:
        line_buffering = False
        if buffering == 1 or buffering < 0 and raw.isatty():
            buffering = -1
            line_buffering = True
        if buffering < 0:
            buffering = DEFAULT_BUFFER_SIZE
            try:
                bs = os.fstat(raw.fileno()).st_blksize
            except (OSError, AttributeError):
                pass
            else:
                if bs > 1:
                    buffering = bs
        if buffering < 0:
            raise ValueError("invalid buffering size")
        if buffering == 0:
            if binary:
                return result
            raise ValueError("can't have unbuffered text I/O")
        if updating:
            buffer = BufferedRandom(raw, buffering)
        elif creating or writing or appending:
            buffer = BufferedWriter(raw, buffering)
        elif reading:
            buffer = BufferedReader(raw, buffering)
        else:
            raise ValueError("unknown mode: %r" % mode)
        result = buffer
        if binary:
            return result
        text = TextIOWrapper(buffer, encoding, errors, newline,
                             line_buffering)
        result = text
        text.mode = mode
        return result
    except:
        result.close()
        raise
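

# Illustrative sketch, not part of the module API: typical calls to the open()
# function documented above.  The file name "example.txt" is hypothetical, and
# the helper is only defined, never called, so importing the module stays
# side-effect free.
def _open_usage_example(path="example.txt"):
    # Text mode (the default): returns a TextIOWrapper; newline translation applies.
    with open(path, "w", encoding="utf-8") as f:
        f.write("spam\n")
    # Binary mode: returns a BufferedReader; bytes come back undecoded.
    with open(path, "rb") as f:
        data = f.read()
    # buffering=0 disables buffering and is only allowed in binary mode.
    with open(path, "rb", buffering=0) as raw:
        first = raw.read(4)
    return data, first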


class DocDescriptor:
    """Helper for builtins.open.__doc__
    """
    def __get__(self, obj, typ):
        return (
            "open(file, mode='r', buffering=-1, encoding=None, "
                 "errors=None, newline=None, closefd=True)\n\n" +
            open.__doc__)

class OpenWrapper:
    """Wrapper for builtins.open

    Trick so that open won't become a bound method when stored
    as a class variable (as dbm.dumb does).

    See initstdio() in Python/pylifecycle.c.
    """
    __doc__ = DocDescriptor()

    def __new__(cls, *args, **kwargs):
        return open(*args, **kwargs)


# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
    UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
    class UnsupportedOperation(OSError, ValueError):
        pass


class IOBase(metaclass=abc.ABCMeta):

    """The abstract base class for all I/O classes, acting on streams of
    bytes. There is no public constructor.

    This class provides dummy implementations for many methods that
    derived classes can override selectively; the default implementations
    represent a file that cannot be read, written or seeked.

    Even though IOBase does not declare read, readinto, or write because
    their signatures will vary, implementations and clients should
    consider those methods part of the interface. Also, implementations
    may raise UnsupportedOperation when operations they do not support are
    called.

    The basic type used for binary data read from or written to a file is
    bytes. Other bytes-like objects are accepted as method arguments too. In
    some cases (such as readinto), a writable object is required. Text I/O
    classes work with str data.

    Note that calling any method (even inquiries) on a closed stream is
    undefined. Implementations may raise OSError in this case.

    IOBase (and its subclasses) support the iterator protocol, meaning
    that an IOBase object can be iterated over yielding the lines in a
    stream.

    IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:

    with open('spam.txt', 'r') as fp:
        fp.write('Spam and eggs!')
    """

    ### Internal ###

    def _unsupported(self, name):
        """Internal: raise an OSError exception for unsupported operations."""
        raise UnsupportedOperation("%s.%s() not supported" %
                                   (self.__class__.__name__, name))

    ### Positioning ###

    def seek(self, pos, whence=0):
        """Change stream position.

        Change the stream position to byte offset pos. Argument pos is
        interpreted relative to the position indicated by whence.  Values
        for whence are ints:

        * 0 -- start of stream (the default); offset should be zero or positive
        * 1 -- current stream position; offset may be negative
        * 2 -- end of stream; offset is usually negative
        Some operating systems / file systems could provide additional values.

        Return an int indicating the new absolute position.
        """
        self._unsupported("seek")

    def tell(self):
        """Return an int indicating the current stream position."""
        return self.seek(0, 1)

    def truncate(self, pos=None):
        """Truncate file to size bytes.

        Size defaults to the current IO position as reported by tell().  Return
        the new size.
        """
        self._unsupported("truncate")

    ### Flush and close ###

    def flush(self):
        """Flush write buffers, if applicable.

        This is not implemented for read-only and non-blocking streams.
        """
        self._checkClosed()
        # XXX Should this return the number of bytes written???

    __closed = False

    def close(self):
        """Flush and close the IO object.

        This method has no effect if the file is already closed.
        """
        if not self.__closed:
            try:
                self.flush()
            finally:
                self.__closed = True

    def __del__(self):
        """Destructor.  Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail.  Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    ### Inquiries ###

    def seekable(self):
        """Return a bool indicating whether object supports random access.

        If False, seek(), tell() and truncate() will raise OSError.
        This method may need to do a test seek().
        """
        return False

    def _checkSeekable(self, msg=None):
        """Internal: raise UnsupportedOperation if file is not seekable
        """
        if not self.seekable():
            raise UnsupportedOperation("File or stream is not seekable."
                                       if msg is None else msg)

    def readable(self):
        """Return a bool indicating whether object was opened for reading.

        If False, read() will raise OSError.
        """
        return False

    def _checkReadable(self, msg=None):
        """Internal: raise UnsupportedOperation if file is not readable
        """
        if not self.readable():
            raise UnsupportedOperation("File or stream is not readable."
                                       if msg is None else msg)

    def writable(self):
        """Return a bool indicating whether object was opened for writing.

        If False, write() and truncate() will raise OSError.
        """
        return False

    def _checkWritable(self, msg=None):
        """Internal: raise UnsupportedOperation if file is not writable
        """
        if not self.writable():
            raise UnsupportedOperation("File or stream is not writable."
                                       if msg is None else msg)

    @property
    def closed(self):
        """closed: bool.  True iff the file has been closed.

        For backwards compatibility, this is a property, not a predicate.
        """
        return self.__closed

    def _checkClosed(self, msg=None):
        """Internal: raise a ValueError if file is closed
        """
        if self.closed:
            raise ValueError("I/O operation on closed file."
                             if msg is None else msg)

    ### Context manager ###

    def __enter__(self):  # That's a forward reference
        """Context management protocol.  Returns self (an instance of IOBase)."""
        self._checkClosed()
        return self

    def __exit__(self, *args):
        """Context management protocol.  Calls close()"""
        self.close()

    ### Lower-level APIs ###

    # XXX Should these be present even if unimplemented?

    def fileno(self):
        """Returns underlying file descriptor (an int) if one exists.

        An OSError is raised if the IO object does not use a file descriptor.
        """
        self._unsupported("fileno")

    def isatty(self):
        """Return a bool indicating whether this is an 'interactive' stream.

        Return False if it can't be determined.
        """
        self._checkClosed()
        return False

    ### Readline[s] and writelines ###

    def readline(self, size=-1):
        r"""Read and return a line of bytes from the stream.

        If size is specified, at most size bytes will be read.
        Size should be an int.

        The line terminator is always b'\n' for binary files; for text
        files, the newlines argument to open can be used to select the line
        terminator(s) recognized.
        """
        # For backwards compatibility, a (slowish) readline().
        if hasattr(self, "peek"):
            def nreadahead():
                readahead = self.peek(1)
                if not readahead:
                    return 1
                n = (readahead.find(b"\n") + 1) or len(readahead)
                if size >= 0:
                    n = min(n, size)
                return n
        else:
            def nreadahead():
                return 1
        if size is None:
            size = -1
        else:
            try:
                size_index = size.__index__
            except AttributeError:
                raise TypeError(f"{size!r} is not an integer")
            else:
                size = size_index()
        res = bytearray()
        while size < 0 or len(res) < size:
            b = self.read(nreadahead())
            if not b:
                break
            res += b
            if res.endswith(b"\n"):
                break
        return bytes(res)

    def __iter__(self):
        self._checkClosed()
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def readlines(self, hint=None):
        """Return a list of lines from the stream.

        hint can be specified to control the number of lines read: no more
        lines will be read if the total size (in bytes/characters) of all
        lines so far exceeds hint.
        """
        if hint is None or hint <= 0:
            return list(self)
        n = 0
        lines = []
        for line in self:
            lines.append(line)
            n += len(line)
            if n >= hint:
                break
        return lines

    def writelines(self, lines):
        self._checkClosed()
        for line in lines:
            self.write(line)

io.IOBase.register(IOBase)


class RawIOBase(IOBase):

    """Base class for raw binary I/O."""

    # The read() method is implemented by calling readinto(); derived
    # classes that want to support read() only need to implement
    # readinto() as a primitive operation.  In general, readinto() can be
    # more efficient than read().

    # (It would be tempting to also provide an implementation of
    # readinto() in terms of read(), in case the latter is a more suitable
    # primitive operation, but that would lead to nasty recursion in case
    # a subclass doesn't implement either.)

    def read(self, size=-1):
        """Read and return up to size bytes, where size is an int.

        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if size is None:
            size = -1
        if size < 0:
            return self.readall()
        b = bytearray(size.__index__())
        n = self.readinto(b)
        if n is None:
            return None
        del b[n:]
        return bytes(b)

    def readall(self):
        """Read until EOF, using multiple read() calls."""
        res = bytearray()
        while True:
            data = self.read(DEFAULT_BUFFER_SIZE)
            if not data:
                break
            res += data
        if res:
            return bytes(res)
        else:
            # b'' or None
            return data

    def readinto(self, b):
        """Read bytes into a pre-allocated bytes-like object b.

        Returns an int representing the number of bytes read (0 for EOF), or
        None if the object is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b):
        """Write the given buffer to the IO stream.

        Returns the number of bytes written, which may be less than the
        length of b in bytes.
        """
        self._unsupported("write")

io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
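

# Illustrative sketch, not part of the module API: the pattern described in the
# comments above -- a raw stream only needs to supply readinto(); read() and
# readall() then come for free from RawIOBase.  The class and its fixed data
# are hypothetical.
class _RepeatingRawIO(RawIOBase):
    """Raw stream that serves a fixed byte pattern a limited number of times."""

    def __init__(self, pattern=b"abc", repeats=3):
        self._data = pattern * repeats
        self._pos = 0

    def readable(self):
        return True

    def readinto(self, b):
        # Copy as many pending bytes as fit into the caller's buffer.
        chunk = self._data[self._pos:self._pos + len(b)]
        b[:len(chunk)] = chunk
        self._pos += len(chunk)
        return len(chunk)

# _RepeatingRawIO().read(4) == b'abca'; read() with no argument drains the rest.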


class BufferedIOBase(IOBase):

    """Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, size=-1):
        """Read and return up to size bytes, where size is an int.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first).  But for
        interactive raw streams (XXX and for pipes?), at most one raw
        read will be issued, and a short result does not imply that
        EOF is imminent.

        Returns an empty bytes array on EOF.

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def read1(self, size=-1):
        """Read up to size bytes with at most one read() system call,
        where size is an int.
        """
        self._unsupported("read1")

    def readinto(self, b):
        """Read bytes into a pre-allocated bytes-like object b.

        Like read(), this may issue multiple reads to the underlying raw
        stream, unless the latter is 'interactive'.

        Returns an int representing the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """

        return self._readinto(b, read1=False)

    def readinto1(self, b):
        """Read bytes into buffer *b*, using at most one system call

        Returns an int representing the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """

        return self._readinto(b, read1=True)

    def _readinto(self, b, read1):
        if not isinstance(b, memoryview):
            b = memoryview(b)
        b = b.cast('B')

        if read1:
            data = self.read1(len(b))
        else:
            data = self.read(len(b))
        n = len(data)

        b[:n] = data

        return n

    def write(self, b):
        """Write the given bytes buffer to the IO stream.

        Return the number of bytes written, which is always the length of b
        in bytes.

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")

    def detach(self):
        """
        Separate the underlying raw stream from the buffer and return it.

        After the raw stream has been detached, the buffer is in an unusable
        state.
        """
        self._unsupported("detach")

io.BufferedIOBase.register(BufferedIOBase)
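

# Illustrative sketch, not part of the module API: the readinto() contract
# documented above, seen from the caller's side -- the caller owns the buffer,
# the stream fills it and reports how many bytes landed in it.  The helper is
# hypothetical and only defined, never called.
def _readinto_usage_example(stream, chunk_size=DEFAULT_BUFFER_SIZE):
    """Drain *stream* with readinto() and return the total number of bytes."""
    buf = bytearray(chunk_size)
    view = memoryview(buf)
    total = 0
    while True:
        n = stream.readinto(view)
        if not n:           # 0 means EOF for a buffered stream
            break
        total += n          # buf[:n] holds the bytes just read
    return total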


class _BufferedIOMixin(BufferedIOBase):

    """A mixin implementation of BufferedIOBase with an underlying raw stream.

    This passes most requests on to the underlying raw stream.  It
    does *not* provide implementations of read(), readinto() or
    write().
    """

    def __init__(self, raw):
        self._raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        new_position = self.raw.seek(pos, whence)
        if new_position < 0:
            raise OSError("seek() returned an invalid position")
        return new_position

    def tell(self):
        pos = self.raw.tell()
        if pos < 0:
            raise OSError("tell() returned an invalid position")
        return pos

    def truncate(self, pos=None):
        # Flush the stream.  We're mixing buffered I/O with lower-level I/O,
        # and a flush may be necessary to synch both views of the current
        # file state.
        self.flush()

        if pos is None:
            pos = self.tell()
        # XXX: Should seek() be used, instead of passing the position
        # XXX  directly to truncate?
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        if self.closed:
            raise ValueError("flush on closed file")
        self.raw.flush()

    def close(self):
        if self.raw is not None and not self.closed:
            try:
                # may raise BlockingIOError or BrokenPipeError etc
                self.flush()
            finally:
                self.raw.close()

    def detach(self):
        if self.raw is None:
            raise ValueError("raw stream already detached")
        self.flush()
        raw = self._raw
        self._raw = None
        return raw

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    @property
    def raw(self):
        return self._raw

    @property
    def closed(self):
        return self.raw.closed

    @property
    def name(self):
        return self.raw.name

    @property
    def mode(self):
        return self.raw.mode

    def __getstate__(self):
        raise TypeError("can not serialize a '{0}' object"
                        .format(self.__class__.__name__))

    def __repr__(self):
        modname = self.__class__.__module__
        clsname = self.__class__.__qualname__
        try:
            name = self.name
        except Exception:
            return "<{}.{}>".format(modname, clsname)
        else:
            return "<{}.{} name={!r}>".format(modname, clsname, name)

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()


class BytesIO(BufferedIOBase):

    """Buffered I/O implementation using an in-memory bytes buffer."""

    def __init__(self, initial_bytes=None):
        buf = bytearray()
        if initial_bytes is not None:
            buf += initial_bytes
        self._buffer = buf
        self._pos = 0

    def __getstate__(self):
        if self.closed:
            raise ValueError("__getstate__ on closed file")
        return self.__dict__.copy()

    def getvalue(self):
        """Return the bytes value (contents) of the buffer
        """
        if self.closed:
            raise ValueError("getvalue on closed file")
        return bytes(self._buffer)

    def getbuffer(self):
        """Return a readable and writable view of the buffer.
        """
        if self.closed:
            raise ValueError("getbuffer on closed file")
        return memoryview(self._buffer)

    def close(self):
        self._buffer.clear()
        super().close()

    def read(self, size=-1):
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        else:
            try:
                size_index = size.__index__
            except AttributeError:
                raise TypeError(f"{size!r} is not an integer")
            else:
                size = size_index()
        if size < 0:
            size = len(self._buffer)
        if len(self._buffer) <= self._pos:
            return b""
        newpos = min(len(self._buffer), self._pos + size)
        b = self._buffer[self._pos : newpos]
        self._pos = newpos
        return bytes(b)

    def read1(self, size=-1):
        """This is the same as read.
        """
        return self.read(size)

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with memoryview(b) as view:
            n = view.nbytes  # Size of any bytes-like object
        if n == 0:
            return 0
        pos = self._pos
        if pos > len(self._buffer):
            # Inserts null bytes between the current end of the file
            # and the new write position.
            padding = b'\x00' * (pos - len(self._buffer))
            self._buffer += padding
        self._buffer[pos:pos + n] = b
        self._pos += n
        return n

    def seek(self, pos, whence=0):
        if self.closed:
            raise ValueError("seek on closed file")
        try:
            pos_index = pos.__index__
        except AttributeError:
            raise TypeError(f"{pos!r} is not an integer")
        else:
            pos = pos_index()
        if whence == 0:
            if pos < 0:
                raise ValueError("negative seek position %r" % (pos,))
            self._pos = pos
        elif whence == 1:
            self._pos = max(0, self._pos + pos)
        elif whence == 2:
            self._pos = max(0, len(self._buffer) + pos)
        else:
            raise ValueError("unsupported whence value")
        return self._pos

    def tell(self):
        if self.closed:
            raise ValueError("tell on closed file")
        return self._pos

    def truncate(self, pos=None):
        if self.closed:
            raise ValueError("truncate on closed file")
        if pos is None:
            pos = self._pos
        else:
            try:
                pos_index = pos.__index__
            except AttributeError:
                raise TypeError(f"{pos!r} is not an integer")
            else:
                pos = pos_index()
            if pos < 0:
                raise ValueError("negative truncate position %r" % (pos,))
        del self._buffer[pos:]
        return pos

    def readable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def writable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def seekable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True
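

# Illustrative sketch, not part of the module API: BytesIO as an in-memory
# binary file -- write, rewind, read back, and patch the underlying buffer
# through getbuffer().  Only defined, never called.
def _bytesio_usage_example():
    buf = BytesIO(b"head")
    buf.seek(0, 2)                  # jump to the end (whence=2)
    buf.write(b" and tail")
    buf.seek(0)                     # rewind to the start
    data = buf.read()               # b'head and tail'
    with buf.getbuffer() as view:   # writable view over the same bytes
        view[0:4] = b"HEAD"
    return data, buf.getvalue()     # (b'head and tail', b'HEAD and tail')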


class BufferedReader(_BufferedIOMixin):

    """BufferedReader(raw[, buffer_size])

    A buffer for a readable, sequential RawIOBase object.

    The constructor creates a BufferedReader for the given readable raw
    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
    is used.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        if not raw.readable():
            raise OSError('"raw" argument must be readable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._reset_read_buf()
        self._read_lock = Lock()

    def readable(self):
        return self.raw.readable()

    def _reset_read_buf(self):
        self._read_buf = b""
        self._read_pos = 0

    def read(self, size=None):
        """Read size bytes.

        Returns exactly size bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode. If size is negative, read until EOF or until read() would
        block.
        """
        if size is not None and size < -1:
            raise ValueError("invalid number of bytes to read")
        with self._read_lock:
            return self._read_unlocked(size)

    def _read_unlocked(self, n=None):
        nodata_val = b""
        empty_values = (b"", None)
        buf = self._read_buf
        pos = self._read_pos

        # Special case for when the number of bytes to read is unspecified.
        if n is None or n == -1:
            self._reset_read_buf()
            if hasattr(self.raw, 'readall'):
                chunk = self.raw.readall()
                if chunk is None:
                    return buf[pos:] or None
                else:
                    return buf[pos:] + chunk
            chunks = [buf[pos:]]  # Strip the consumed bytes.
            current_size = 0
            while True:
                # Read until EOF or until read() would block.
                chunk = self.raw.read()
                if chunk in empty_values:
                    nodata_val = chunk
                    break
                current_size += len(chunk)
                chunks.append(chunk)
            return b"".join(chunks) or nodata_val

        # The number of bytes to read is specified, return at most n bytes.
        avail = len(buf) - pos  # Length of the available buffered data.
        if n <= avail:
            # Fast path: the data to read is fully buffered.
            self._read_pos += n
            return buf[pos:pos+n]
        # Slow path: read from the stream until enough bytes are read,
        # or until an EOF occurs or until read() would block.
        chunks = [buf[pos:]]
        wanted = max(self.buffer_size, n)
        while avail < n:
            chunk = self.raw.read(wanted)
            if chunk in empty_values:
                nodata_val = chunk
                break
            avail += len(chunk)
            chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
        # read() would have blocked.
        n = min(n, avail)
        out = b"".join(chunks)
        self._read_buf = out[n:]  # Save the extra data in the buffer.
        self._read_pos = 0
        return out[:n] if out else nodata_val

    def peek(self, size=0):
        """Returns buffered bytes without advancing the position.

        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it.  We never return more
        than self.buffer_size.
        """
        with self._read_lock:
            return self._peek_unlocked(size)

    def _peek_unlocked(self, n=0):
        want = min(n, self.buffer_size)
        have = len(self._read_buf) - self._read_pos
        if have < want or have <= 0:
            to_read = self.buffer_size - have
            current = self.raw.read(to_read)
            if current:
                self._read_buf = self._read_buf[self._read_pos:] + current
                self._read_pos = 0
        return self._read_buf[self._read_pos:]

    def read1(self, size=-1):
        """Reads up to size bytes, with at most one read() system call."""
        # Returns up to size bytes.  If at least one byte is buffered, we
        # only return buffered bytes.  Otherwise, we do one raw read.
        if size < 0:
            size = self.buffer_size
        if size == 0:
            return b""
        with self._read_lock:
            self._peek_unlocked(1)
            return self._read_unlocked(
                min(size, len(self._read_buf) - self._read_pos))

    # Implementing readinto() and readinto1() is not strictly necessary (we
    # could rely on the base class that provides an implementation in terms of
    # read() and read1()).  We do it anyway to keep the _pyio implementation
    # similar to the io implementation (which implements the methods for
    # performance reasons).
    def _readinto(self, buf, read1):
        """Read data into *buf* with at most one system call."""

        # Need to create a memoryview object of type 'b', otherwise
        # we may not be able to assign bytes to it, and slicing it
        # would create a new object.
        if not isinstance(buf, memoryview):
            buf = memoryview(buf)
        if buf.nbytes == 0:
            return 0
        buf = buf.cast('B')

        written = 0
        with self._read_lock:
            while written < len(buf):

                # First try to read from internal buffer
                avail = min(len(self._read_buf) - self._read_pos, len(buf))
                if avail:
                    buf[written:written+avail] = \
                        self._read_buf[self._read_pos:self._read_pos+avail]
                    self._read_pos += avail
                    written += avail
                    if written == len(buf):
                        break

                # If remaining space in caller's buffer is larger than
                # internal buffer, read directly into caller's buffer
                if len(buf) - written > self.buffer_size:
                    n = self.raw.readinto(buf[written:])
                    if not n:
                        break  # eof
                    written += n

                # Otherwise refill internal buffer - unless we're
                # in read1 mode and already got some data
                elif not (read1 and written):
                    if not self._peek_unlocked(1):
                        break  # eof

                # In readinto1 mode, return as soon as we have some data
                if read1 and written:
                    break

        return written

    def tell(self):
        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        with self._read_lock:
            if whence == 1:
                pos -= len(self._read_buf) - self._read_pos
            pos = _BufferedIOMixin.seek(self, pos, whence)
            self._reset_read_buf()
            return pos
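

# Illustrative sketch, not part of the module API: the BufferedReader interface
# defined above.  Wrapping a BytesIO as the "raw" stream is purely for the sake
# of a self-contained example; in normal use the raw object is a FileIO created
# by open().  Only defined, never called.
def _buffered_reader_usage_example():
    reader = BufferedReader(BytesIO(b"alpha\nbeta\n"), buffer_size=16)
    head = reader.peek(5)        # buffered bytes; the position does not move
    first = reader.readline()    # b'alpha\n'
    rest = reader.read()         # b'beta\n'
    return head, first, rest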


class BufferedWriter(_BufferedIOMixin):

    """A buffer for a writeable sequential RawIO object.

    The constructor creates a BufferedWriter for the given writeable raw
    stream.  If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        if not raw.writable():
            raise OSError('"raw" argument must be writable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._write_buf = bytearray()
        self._write_lock = Lock()

    def writable(self):
        return self.raw.writable()

    def write(self, b):
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with self._write_lock:
            if self.closed:
                raise ValueError("write to closed file")
            # XXX we can implement some more tricks to try and avoid
            # partial writes
            if len(self._write_buf) > self.buffer_size:
                # We're full, so let's pre-flush the buffer.  (This may
                # raise BlockingIOError with characters_written == 0.)
                self._flush_unlocked()
            before = len(self._write_buf)
            self._write_buf.extend(b)
            written = len(self._write_buf) - before
            if len(self._write_buf) > self.buffer_size:
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    if len(self._write_buf) > self.buffer_size:
                        # We've hit the buffer_size. We have to accept a partial
                        # write and cut back our buffer.
                        overage = len(self._write_buf) - self.buffer_size
                        written -= overage
                        self._write_buf = self._write_buf[:self.buffer_size]
                    raise BlockingIOError(e.errno, e.strerror, written)
            return written

    def truncate(self, pos=None):
        with self._write_lock:
            self._flush_unlocked()
            if pos is None:
                pos = self.raw.tell()
            return self.raw.truncate(pos)

    def flush(self):
        with self._write_lock:
            self._flush_unlocked()

    def _flush_unlocked(self):
        if self.closed:
            raise ValueError("flush on closed file")
        while self._write_buf:
            try:
                n = self.raw.write(self._write_buf)
            except BlockingIOError:
                raise RuntimeError("self.raw should implement RawIOBase: it "
                                   "should not raise BlockingIOError")
            if n is None:
                raise BlockingIOError(
                    errno.EAGAIN,
                    "write could not complete without blocking", 0)
            if n > len(self._write_buf) or n < 0:
                raise OSError("write() returned incorrect number of bytes")
            del self._write_buf[:n]

    def tell(self):
        return _BufferedIOMixin.tell(self) + len(self._write_buf)

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        with self._write_lock:
            self._flush_unlocked()
            return _BufferedIOMixin.seek(self, pos, whence)

    def close(self):
        with self._write_lock:
            if self.raw is None or self.closed:
                return
        # We have to release the lock and call self.flush() (which will
        # probably just re-take the lock) in case flush has been overridden in
        # a subclass or the user set self.flush to something. This is the same
        # behavior as the C implementation.
        try:
            # may raise BlockingIOError or BrokenPipeError etc
            self.flush()
        finally:
            with self._write_lock:
                self.raw.close()
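

# Illustrative sketch, not part of the module API: the buffering behaviour of
# BufferedWriter described above.  A BytesIO stands in for the raw stream so
# the example is self-contained.  Only defined, never called.
def _buffered_writer_usage_example():
    sink = BytesIO()
    writer = BufferedWriter(sink, buffer_size=8)
    writer.write(b"abc")             # small write: stays in the internal buffer
    before_flush = sink.getvalue()   # b'' -- nothing has reached the raw stream yet
    writer.flush()                   # push the buffered bytes out
    after_flush = sink.getvalue()    # b'abc'
    return before_flush, after_flush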


class BufferedRWPair(BufferedIOBase):

    """A buffered reader and writer object together.

    A buffered reader object and buffered writer object put together to
    form a sequential IO object that can read and write. This is typically
    used with a socket or two-way pipe.

    reader and writer are RawIOBase objects that are readable and
    writeable respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    # XXX The usefulness of this (compared to having two separate IO
    # objects) is questionable.

    def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
        """Constructor.

        The arguments are two RawIO instances.
        """
        if not reader.readable():
            raise OSError('"reader" argument must be readable.')

        if not writer.writable():
            raise OSError('"writer" argument must be writable.')

        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size)

    def read(self, size=-1):
        if size is None:
            size = -1
        return self.reader.read(size)

    def readinto(self, b):
        return self.reader.readinto(b)

    def write(self, b):
        return self.writer.write(b)

    def peek(self, size=0):
        return self.reader.peek(size)

    def read1(self, size=-1):
        return self.reader.read1(size)

    def readinto1(self, b):
        return self.reader.readinto1(b)

    def readable(self):
        return self.reader.readable()

    def writable(self):
        return self.writer.writable()

    def flush(self):
        return self.writer.flush()

    def close(self):
        try:
            self.writer.close()
        finally:
            self.reader.close()

    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()

    @property
    def closed(self):
        return self.writer.closed


class BufferedRandom(BufferedWriter, BufferedReader):

    """A buffered interface to random access streams.

    The constructor creates a reader and writer for a seekable stream,
    raw, given in the first argument. If the buffer_size is omitted it
    defaults to DEFAULT_BUFFER_SIZE.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        raw._checkSeekable()
        BufferedReader.__init__(self, raw, buffer_size)
        BufferedWriter.__init__(self, raw, buffer_size)

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        self.flush()
        if self._read_buf:
            # Undo read ahead.
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
        # First do the raw seek, then empty the read buffer, so that
        # if the raw seek fails, we don't lose buffered data forever.
        pos = self.raw.seek(pos, whence)
        with self._read_lock:
            self._reset_read_buf()
        if pos < 0:
            raise OSError("seek() returned invalid position")
        return pos

    def tell(self):
        if self._write_buf:
            return BufferedWriter.tell(self)
        else:
            return BufferedReader.tell(self)

    def truncate(self, pos=None):
        if pos is None:
            pos = self.tell()
        # Use seek to flush the read buffer.
        return BufferedWriter.truncate(self, pos)

    def read(self, size=None):
        if size is None:
            size = -1
        self.flush()
        return BufferedReader.read(self, size)

    def readinto(self, b):
        self.flush()
        return BufferedReader.readinto(self, b)

    def peek(self, size=0):
        self.flush()
        return BufferedReader.peek(self, size)

    def read1(self, size=-1):
        self.flush()
        return BufferedReader.read1(self, size)

    def readinto1(self, b):
        self.flush()
        return BufferedReader.readinto1(self, b)

    def write(self, b):
        if self._read_buf:
            # Undo readahead
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
                self._reset_read_buf()
        return BufferedWriter.write(self, b)
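

# Illustrative sketch, not part of the module API: BufferedRandom combining the
# reader and writer halves over one seekable stream.  As above, a BytesIO
# stands in for the raw stream only to keep the example self-contained.
# Only defined, never called.
def _buffered_random_usage_example():
    stream = BufferedRandom(BytesIO(b"0123456789"), buffer_size=16)
    prefix = stream.read(4)          # b'0123'
    stream.seek(0)                   # rewind; pending read-ahead is discarded
    stream.write(b"ABCD")            # overwrite the first four bytes
    stream.seek(0)
    return prefix, stream.read()     # (b'0123', b'ABCD456789')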


class FileIO(RawIOBase):
    _fd = -1
    _created = False
    _readable = False
    _writable = False
    _appending = False
    _seekable = None
    _closefd = True

    def __init__(self, file, mode='r', closefd=True, opener=None):
        """Open a file.  The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
        writing, exclusive creation or appending.  The file will be created if it
        doesn't exist when opened for writing or appending; it will be truncated
        when opened for writing.  A FileExistsError will be raised if it already
        exists when opened for creating.  Opening a file for creating implies
        writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
        to allow simultaneous reading and writing. A custom opener can be used by
        passing a callable as *opener*. The underlying file descriptor for the file
        object is then obtained by calling opener with (*name*, *flags*).
        *opener* must return an open file descriptor (passing os.open as *opener*
        results in functionality similar to passing None).
        """
        if self._fd >= 0:
            # Have to close the existing file first.
            try:
                if self._closefd:
                    os.close(self._fd)
            finally:
                self._fd = -1

        if isinstance(file, float):
            raise TypeError('integer argument expected, got float')
        if isinstance(file, int):
            fd = file
            if fd < 0:
                raise ValueError('negative file descriptor')
        else:
            fd = -1

        if not isinstance(mode, str):
            raise TypeError('invalid mode: %s' % (mode,))
        if not set(mode) <= set('xrwab+'):
            raise ValueError('invalid mode: %s' % (mode,))
        if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
            raise ValueError('Must have exactly one of create/read/write/append '
                             'mode and at most one plus')

        if 'x' in mode:
            self._created = True
            self._writable = True
            flags = os.O_EXCL | os.O_CREAT
        elif 'r' in mode:
            self._readable = True
            flags = 0
        elif 'w' in mode:
            self._writable = True
            flags = os.O_CREAT | os.O_TRUNC
        elif 'a' in mode:
            self._writable = True
            self._appending = True
            flags = os.O_APPEND | os.O_CREAT

        if '+' in mode:
            self._readable = True
            self._writable = True

        if self._readable and self._writable:
            flags |= os.O_RDWR
        elif self._readable:
            flags |= os.O_RDONLY
        else:
            flags |= os.O_WRONLY

        flags |= getattr(os, 'O_BINARY', 0)

        noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
                          getattr(os, 'O_CLOEXEC', 0))
        flags |= noinherit_flag

        owned_fd = None
        try:
            if fd < 0:
                if not closefd:
                    raise ValueError('Cannot use closefd=False with file name')
                if opener is None:
                    fd = os.open(file, flags, 0o666)
                else:
                    fd = opener(file, flags)
                    if not isinstance(fd, int):
                        raise TypeError('expected integer from opener')
                    if fd < 0:
                        raise OSError('Negative file descriptor')
                owned_fd = fd
                if not noinherit_flag:
                    os.set_inheritable(fd, False)

            self._closefd = closefd
            fdfstat = os.fstat(fd)
            try:
                if stat.S_ISDIR(fdfstat.st_mode):
                    raise IsADirectoryError(errno.EISDIR,
                                            os.strerror(errno.EISDIR), file)
            except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
                # don't exist.
                pass
            self._blksize = getattr(fdfstat, 'st_blksize', 0)
            if self._blksize <= 1:
                self._blksize = DEFAULT_BUFFER_SIZE

            if _setmode:
                # don't translate newlines (\r\n <=> \n)
                _setmode(fd, os.O_BINARY)

            self.name = file
            if self._appending:
                # For consistent behaviour, we explicitly seek to the
                # end of file (otherwise, it might be done only on the
                # first write()).
                os.lseek(fd, 0, SEEK_END)
        except:
            if owned_fd is not None:
                os.close(owned_fd)
            raise
        self._fd = fd

    def __del__(self):
        if self._fd >= 0 and self._closefd and not self.closed:
            import warnings
            warnings.warn('unclosed file %r' % (self,), ResourceWarning,
                          stacklevel=2, source=self)
            self.close()

    def __getstate__(self):
        raise TypeError("cannot serialize '%s' object" % self.__class__.__name__)

    def __repr__(self):
        class_name = '%s.%s' % (self.__class__.__module__,
                                self.__class__.__qualname__)
        if self.closed:
            return '<%s [closed]>' % class_name
        try:
            name = self.name
        except AttributeError:
            return ('<%s fd=%d mode=%r closefd=%r>' %
                    (class_name, self._fd, self.mode, self._closefd))
        else:
            return ('<%s name=%r mode=%r closefd=%r>' %
                    (class_name, name, self.mode, self._closefd))

    def _checkReadable(self):
        if not self._readable:
            raise UnsupportedOperation('File not open for reading')

    def _checkWritable(self, msg=None):
        if not self._writable:
            raise UnsupportedOperation('File not open for writing')

    def read(self, size=None):
        """Read at most size bytes, returned as bytes.

        Only makes one system call, so less data may be returned than requested.
        In non-blocking mode, returns None if no data is available.
        Return an empty bytes object at EOF.
        """
        self._checkClosed()
        self._checkReadable()
        if size is None or size < 0:
            return self.readall()
        try:
            return os.read(self._fd, size)
        except BlockingIOError:
            return None

    def readall(self):
        """Read all data from the file, returned as bytes.

        In non-blocking mode, returns as much as is immediately available,
        or None if no data is available.  Return an empty bytes object at EOF.
        """
        self._checkClosed()
        self._checkReadable()
        bufsize = DEFAULT_BUFFER_SIZE
        try:
            pos = os.lseek(self._fd, 0, SEEK_CUR)
            end = os.fstat(self._fd).st_size
            if end >= pos:
                bufsize = end - pos + 1
        except OSError:
            pass

        result = bytearray()
        while True:
            if len(result) >= bufsize:
                bufsize = len(result)
                bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
            n = bufsize - len(result)
            try:
                chunk = os.read(self._fd, n)
            except BlockingIOError:
                if result:
                    break
                return None
            if not chunk:  # reached the end of the file
                break
            result += chunk

        return bytes(result)

    def readinto(self, b):
        """Same as RawIOBase.readinto()."""
        m = memoryview(b).cast('B')
        data = self.read(len(m))
        n = len(data)
        m[:n] = data
        return n

    def write(self, b):
        """Write bytes b to file, return number written.

        Only makes one system call, so not all of the data may be written.
        The number of bytes actually written is returned.  In non-blocking mode,
        returns None if the write would block.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return os.write(self._fd, b)
        except BlockingIOError:
            return None

    def seek(self, pos, whence=SEEK_SET):
        """Move to new file position.

        Argument offset is a byte count.  Optional argument whence defaults to
        SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
        are SEEK_CUR or 1 (move relative to current position, positive or negative),
        and SEEK_END or 2 (move relative to end of file, usually negative, although
        many platforms allow seeking beyond the end of a file).

        Note that not all file objects are seekable.
        """
        if isinstance(pos, float):
            raise TypeError('an integer is required')
        self._checkClosed()
        return os.lseek(self._fd, pos, whence)

    def tell(self):
        """tell() -> int.  Current file position.

        Can raise OSError for non seekable files."""
        self._checkClosed()
        return os.lseek(self._fd, 0, SEEK_CUR)

    def truncate(self, size=None):
        """Truncate the file to at most size bytes.

        Size defaults to the current file position, as returned by tell().
        The current file position is changed to the value of size.
        """
        self._checkClosed()
        self._checkWritable()
        if size is None:
            size = self.tell()
        os.ftruncate(self._fd, size)
        return size

    def close(self):
        """Close the file.

        A closed file cannot be used for further I/O operations.  close() may be
        called more than once without error.
        """
        if not self.closed:
            try:
                if self._closefd:
                    os.close(self._fd)
            finally:
                super().close()

    def seekable(self):
        """True if file supports random-access."""
        self._checkClosed()
        if self._seekable is None:
            try:
                self.tell()
            except OSError:
                self._seekable = False
            else:
                self._seekable = True
        return self._seekable

    def readable(self):
        """True if file was opened in a read mode."""
        self._checkClosed()
        return self._readable

    def writable(self):
        """True if file was opened in a write mode."""
        self._checkClosed()
        return self._writable

    def fileno(self):
        """Return the underlying file descriptor (an integer)."""
        self._checkClosed()
        return self._fd

    def isatty(self):
        """True if the file is connected to a TTY device."""
        self._checkClosed()
        return os.isatty(self._fd)

    @property
    def closefd(self):
        """True if the file descriptor will be closed by close()."""
        return self._closefd

    @property
    def mode(self):
        """String giving the file mode"""
        if self._created:
            if self._readable:
                return 'xb+'
            else:
                return 'xb'
        elif self._appending:
            if self._readable:
                return 'ab+'
            else:
                return 'ab'
        elif self._readable:
            if self._writable:
                return 'rb+'
            else:
                return 'rb'
        else:
            return 'wb'
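

# Illustrative sketch, not part of the module API: FileIO, the unbuffered raw
# layer defined above.  A temporary file keeps the example self-contained; it
# is created and removed inside the helper, which is only defined, never called.
def _fileio_usage_example():
    import tempfile
    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        with FileIO(path, 'w') as raw:
            raw.write(b"raw bytes")  # a single os.write() call
        with FileIO(path, 'r') as raw:
            data = raw.readall()     # read to EOF
            mode = raw.mode          # 'rb' -- FileIO is always binary
        return data, mode
    finally:
        os.unlink(path)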


class TextIOBase(IOBase):

    """Base class for text I/O.

    This class provides a character and line based interface to stream
    I/O. There is no readinto method because Python's character strings
    are immutable. There is no public constructor.
    """

    def read(self, size=-1):
        """Read at most size characters from stream, where size is an int.

        Read from underlying buffer until we have size characters or we hit EOF.
        If size is negative or omitted, read until EOF.

        Returns a string.
        """
        self._unsupported("read")

    def write(self, s):
        """Write string s to stream and returning an int."""
        self._unsupported("write")

    def truncate(self, pos=None):
        """Truncate size to pos, where pos is an int."""
        self._unsupported("truncate")

    def readline(self):
        """Read until newline or EOF.

        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    def detach(self):
        """
        Separate the underlying buffer from the TextIOBase and return it.

        After the underlying buffer has been detached, the TextIO is in an
        unusable state.
        """
        self._unsupported("detach")

    @property
    def encoding(self):
        """Subclasses should override."""
        return None

    @property
    def newlines(self):
        """Line endings translated so far.

        Only line endings translated during reading are considered.

        Subclasses should override.
        """
        return None

    @property
    def errors(self):
        """Error setting of the decoder or encoder.

        Subclasses should override."""
        return None

io.TextIOBase.register(TextIOBase)


class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Codec used when reading a file in universal newlines mode.  It wraps
    another incremental decoder, translating \r\n and \r into \n.  It also
    records the types of newlines encountered.  When used with
    translate=False, it ensures that the newline sequence is returned in
    one piece.
    """
    def __init__(self, decoder, translate, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.translate = translate
        self.decoder = decoder
        self.seennl = 0
        self.pendingcr = False

    def decode(self, input, final=False):
        # decode input (with the eventual \r from a previous pass)
        if self.decoder is None:
            output = input
        else:
            output = self.decoder.decode(input, final=final)
        if self.pendingcr and (output or final):
            output = "\r" + output
            self.pendingcr = False

        # retain last \r even when not translating data:
        # then readline() is sure to get \r\n in one pass
        if output.endswith("\r") and not final:
            output = output[:-1]
            self.pendingcr = True

        # Record which newlines are read
        crlf = output.count('\r\n')
        cr = output.count('\r') - crlf
        lf = output.count('\n') - crlf
        self.seennl |= (lf and self._LF) | (cr and self._CR) \
                    | (crlf and self._CRLF)

        if self.translate:
            if crlf:
                output = output.replace("\r\n", "\n")
            if cr:
                output = output.replace("\r", "\n")

        return output

    def getstate(self):
        if self.decoder is None:
            buf = b""
            flag = 0
        else:
            buf, flag = self.decoder.getstate()
        flag <<= 1
        if self.pendingcr:
            flag |= 1
        return buf, flag

    def setstate(self, state):
        buf, flag = state
        self.pendingcr = bool(flag & 1)
        if self.decoder is not None:
            self.decoder.setstate((buf, flag >> 1))

    def reset(self):
        self.seennl = 0
        self.pendingcr = False
        if self.decoder is not None:
            self.decoder.reset()

    _LF = 1
    _CR = 2
    _CRLF = 4

    @property
    def newlines(self):
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n")
               )[self.seennl]
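

# Illustrative sketch, not part of the module API: IncrementalNewlineDecoder in
# translate mode, fed already-decoded text (decoder=None), with a chunk
# boundary that splits a '\r\n' pair.  Only defined, never called.
def _newline_decoder_usage_example():
    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
    first = dec.decode("one\r")                 # the trailing '\r' is held back
    second = dec.decode("\ntwo\rthree\n", final=True)
    # first == 'one', second == '\ntwo\nthree\n'
    return first, second, dec.newlines         # newlines == ('\r', '\n', '\r\n')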
)[self.seennl] 1908 1909 1910class TextIOWrapper(TextIOBase): 1911 1912 r"""Character and line based layer over a BufferedIOBase object, buffer. 1913 1914 encoding gives the name of the encoding that the stream will be 1915 decoded or encoded with. It defaults to locale.getpreferredencoding(False). 1916 1917 errors determines the strictness of encoding and decoding (see the 1918 codecs.register) and defaults to "strict". 1919 1920 newline can be None, '', '\n', '\r', or '\r\n'. It controls the 1921 handling of line endings. If it is None, universal newlines is 1922 enabled. With this enabled, on input, the lines endings '\n', '\r', 1923 or '\r\n' are translated to '\n' before being returned to the 1924 caller. Conversely, on output, '\n' is translated to the system 1925 default line separator, os.linesep. If newline is any other of its 1926 legal values, that newline becomes the newline when the file is read 1927 and it is returned untranslated. On output, '\n' is converted to the 1928 newline. 1929 1930 If line_buffering is True, a call to flush is implied when a call to 1931 write contains a newline character. 1932 """ 1933 1934 _CHUNK_SIZE = 2048 1935 1936 # The write_through argument has no effect here since this 1937 # implementation always writes through. The argument is present only 1938 # so that the signature can match the signature of the C version. 1939 def __init__(self, buffer, encoding=None, errors=None, newline=None, 1940 line_buffering=False, write_through=False): 1941 self._check_newline(newline) 1942 if encoding is None: 1943 try: 1944 encoding = os.device_encoding(buffer.fileno()) 1945 except (AttributeError, UnsupportedOperation): 1946 pass 1947 if encoding is None: 1948 try: 1949 import locale 1950 except ImportError: 1951 # Importing locale may fail if Python is being built 1952 encoding = "ascii" 1953 else: 1954 encoding = locale.getpreferredencoding(False) 1955 1956 if not isinstance(encoding, str): 1957 raise ValueError("invalid encoding: %r" % encoding) 1958 1959 if not codecs.lookup(encoding)._is_text_encoding: 1960 msg = ("%r is not a text encoding; " 1961 "use codecs.open() to handle arbitrary codecs") 1962 raise LookupError(msg % encoding) 1963 1964 if errors is None: 1965 errors = "strict" 1966 else: 1967 if not isinstance(errors, str): 1968 raise ValueError("invalid errors: %r" % errors) 1969 1970 self._buffer = buffer 1971 self._decoded_chars = '' # buffer for text returned from decoder 1972 self._decoded_chars_used = 0 # offset into _decoded_chars for read() 1973 self._snapshot = None # info for reconstructing decoder state 1974 self._seekable = self._telling = self.buffer.seekable() 1975 self._has_read1 = hasattr(self.buffer, 'read1') 1976 self._configure(encoding, errors, newline, 1977 line_buffering, write_through) 1978 1979 def _check_newline(self, newline): 1980 if newline is not None and not isinstance(newline, str): 1981 raise TypeError("illegal newline type: %r" % (type(newline),)) 1982 if newline not in (None, "", "\n", "\r", "\r\n"): 1983 raise ValueError("illegal newline value: %r" % (newline,)) 1984 1985 def _configure(self, encoding=None, errors=None, newline=None, 1986 line_buffering=False, write_through=False): 1987 self._encoding = encoding 1988 self._errors = errors 1989 self._encoder = None 1990 self._decoder = None 1991 self._b2cratio = 0.0 1992 1993 self._readuniversal = not newline 1994 self._readtranslate = newline is None 1995 self._readnl = newline 1996 self._writetranslate = newline != '' 1997 self._writenl = newline or os.linesep 
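        # Illustrative effect of the newline argument, following from the
        # assignments above:
        #   newline=None   -> read: \n, \r and \r\n all end lines and are
        #                     translated to \n; write: \n -> os.linesep
        #   newline=''     -> read: all three endings recognised, returned
        #                     untranslated; write: no translation
        #   newline='\r\n' -> read: lines end only at \r\n, returned as-is;
        #                     write: \n -> \r\n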
1998 1999 self._line_buffering = line_buffering 2000 self._write_through = write_through 2001 2002 # don't write a BOM in the middle of a file 2003 if self._seekable and self.writable(): 2004 position = self.buffer.tell() 2005 if position != 0: 2006 try: 2007 self._get_encoder().setstate(0) 2008 except LookupError: 2009 # Sometimes the encoder doesn't exist 2010 pass 2011 2012 # self._snapshot is either None, or a tuple (dec_flags, next_input) 2013 # where dec_flags is the second (integer) item of the decoder state 2014 # and next_input is the chunk of input bytes that comes next after the 2015 # snapshot point. We use this to reconstruct decoder states in tell(). 2016 2017 # Naming convention: 2018 # - "bytes_..." for integer variables that count input bytes 2019 # - "chars_..." for integer variables that count decoded characters 2020 2021 def __repr__(self): 2022 result = "<{}.{}".format(self.__class__.__module__, 2023 self.__class__.__qualname__) 2024 try: 2025 name = self.name 2026 except Exception: 2027 pass 2028 else: 2029 result += " name={0!r}".format(name) 2030 try: 2031 mode = self.mode 2032 except Exception: 2033 pass 2034 else: 2035 result += " mode={0!r}".format(mode) 2036 return result + " encoding={0!r}>".format(self.encoding) 2037 2038 @property 2039 def encoding(self): 2040 return self._encoding 2041 2042 @property 2043 def errors(self): 2044 return self._errors 2045 2046 @property 2047 def line_buffering(self): 2048 return self._line_buffering 2049 2050 @property 2051 def write_through(self): 2052 return self._write_through 2053 2054 @property 2055 def buffer(self): 2056 return self._buffer 2057 2058 def reconfigure(self, *, 2059 encoding=None, errors=None, newline=Ellipsis, 2060 line_buffering=None, write_through=None): 2061 """Reconfigure the text stream with new parameters. 2062 2063 This also flushes the stream. 
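        For example (illustrative):

        >>> t = TextIOWrapper(BytesIO(), encoding='ascii')
        >>> t.reconfigure(encoding='utf-8', line_buffering=True)
        >>> t.encoding, t.line_buffering
        ('utf-8', True)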
2064 """ 2065 if (self._decoder is not None 2066 and (encoding is not None or errors is not None 2067 or newline is not Ellipsis)): 2068 raise UnsupportedOperation( 2069 "It is not possible to set the encoding or newline of stream " 2070 "after the first read") 2071 2072 if errors is None: 2073 if encoding is None: 2074 errors = self._errors 2075 else: 2076 errors = 'strict' 2077 elif not isinstance(errors, str): 2078 raise TypeError("invalid errors: %r" % errors) 2079 2080 if encoding is None: 2081 encoding = self._encoding 2082 else: 2083 if not isinstance(encoding, str): 2084 raise TypeError("invalid encoding: %r" % encoding) 2085 2086 if newline is Ellipsis: 2087 newline = self._readnl 2088 self._check_newline(newline) 2089 2090 if line_buffering is None: 2091 line_buffering = self.line_buffering 2092 if write_through is None: 2093 write_through = self.write_through 2094 2095 self.flush() 2096 self._configure(encoding, errors, newline, 2097 line_buffering, write_through) 2098 2099 def seekable(self): 2100 if self.closed: 2101 raise ValueError("I/O operation on closed file.") 2102 return self._seekable 2103 2104 def readable(self): 2105 return self.buffer.readable() 2106 2107 def writable(self): 2108 return self.buffer.writable() 2109 2110 def flush(self): 2111 self.buffer.flush() 2112 self._telling = self._seekable 2113 2114 def close(self): 2115 if self.buffer is not None and not self.closed: 2116 try: 2117 self.flush() 2118 finally: 2119 self.buffer.close() 2120 2121 @property 2122 def closed(self): 2123 return self.buffer.closed 2124 2125 @property 2126 def name(self): 2127 return self.buffer.name 2128 2129 def fileno(self): 2130 return self.buffer.fileno() 2131 2132 def isatty(self): 2133 return self.buffer.isatty() 2134 2135 def write(self, s): 2136 'Write data, where s is a str' 2137 if self.closed: 2138 raise ValueError("write to closed file") 2139 if not isinstance(s, str): 2140 raise TypeError("can't write %s to text stream" % 2141 s.__class__.__name__) 2142 length = len(s) 2143 haslf = (self._writetranslate or self._line_buffering) and "\n" in s 2144 if haslf and self._writetranslate and self._writenl != "\n": 2145 s = s.replace("\n", self._writenl) 2146 encoder = self._encoder or self._get_encoder() 2147 # XXX What if we were just reading? 2148 b = encoder.encode(s) 2149 self.buffer.write(b) 2150 if self._line_buffering and (haslf or "\r" in s): 2151 self.flush() 2152 self._set_decoded_chars('') 2153 self._snapshot = None 2154 if self._decoder: 2155 self._decoder.reset() 2156 return length 2157 2158 def _get_encoder(self): 2159 make_encoder = codecs.getincrementalencoder(self._encoding) 2160 self._encoder = make_encoder(self._errors) 2161 return self._encoder 2162 2163 def _get_decoder(self): 2164 make_decoder = codecs.getincrementaldecoder(self._encoding) 2165 decoder = make_decoder(self._errors) 2166 if self._readuniversal: 2167 decoder = IncrementalNewlineDecoder(decoder, self._readtranslate) 2168 self._decoder = decoder 2169 return decoder 2170 2171 # The following three methods implement an ADT for _decoded_chars. 2172 # Text returned from the decoder is buffered here until the client 2173 # requests it by calling our read() or readline() method. 
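    # Illustrative walk-through of the ADT (hypothetical values):
    #   _set_decoded_chars('abcdef')      -> buffer 'abcdef', 0 chars used
    #   _get_decoded_chars(2) == 'ab'     -> 2 chars used
    #   _get_decoded_chars()  == 'cdef'   -> 6 chars used
    #   _rewind_decoded_chars(4)          -> back to 2 chars used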
2174 def _set_decoded_chars(self, chars): 2175 """Set the _decoded_chars buffer.""" 2176 self._decoded_chars = chars 2177 self._decoded_chars_used = 0 2178 2179 def _get_decoded_chars(self, n=None): 2180 """Advance into the _decoded_chars buffer.""" 2181 offset = self._decoded_chars_used 2182 if n is None: 2183 chars = self._decoded_chars[offset:] 2184 else: 2185 chars = self._decoded_chars[offset:offset + n] 2186 self._decoded_chars_used += len(chars) 2187 return chars 2188 2189 def _rewind_decoded_chars(self, n): 2190 """Rewind the _decoded_chars buffer.""" 2191 if self._decoded_chars_used < n: 2192 raise AssertionError("rewind decoded_chars out of bounds") 2193 self._decoded_chars_used -= n 2194 2195 def _read_chunk(self): 2196 """ 2197 Read and decode the next chunk of data from the BufferedReader. 2198 """ 2199 2200 # The return value is True unless EOF was reached. The decoded 2201 # string is placed in self._decoded_chars (replacing its previous 2202 # value). The entire input chunk is sent to the decoder, though 2203 # some of it may remain buffered in the decoder, yet to be 2204 # converted. 2205 2206 if self._decoder is None: 2207 raise ValueError("no decoder") 2208 2209 if self._telling: 2210 # To prepare for tell(), we need to snapshot a point in the 2211 # file where the decoder's input buffer is empty. 2212 2213 dec_buffer, dec_flags = self._decoder.getstate() 2214 # Given this, we know there was a valid snapshot point 2215 # len(dec_buffer) bytes ago with decoder state (b'', dec_flags). 2216 2217 # Read a chunk, decode it, and put the result in self._decoded_chars. 2218 if self._has_read1: 2219 input_chunk = self.buffer.read1(self._CHUNK_SIZE) 2220 else: 2221 input_chunk = self.buffer.read(self._CHUNK_SIZE) 2222 eof = not input_chunk 2223 decoded_chars = self._decoder.decode(input_chunk, eof) 2224 self._set_decoded_chars(decoded_chars) 2225 if decoded_chars: 2226 self._b2cratio = len(input_chunk) / len(self._decoded_chars) 2227 else: 2228 self._b2cratio = 0.0 2229 2230 if self._telling: 2231 # At the snapshot point, len(dec_buffer) bytes before the read, 2232 # the next input to be decoded is dec_buffer + input_chunk. 2233 self._snapshot = (dec_flags, dec_buffer + input_chunk) 2234 2235 return not eof 2236 2237 def _pack_cookie(self, position, dec_flags=0, 2238 bytes_to_feed=0, need_eof=0, chars_to_skip=0): 2239 # The meaning of a tell() cookie is: seek to position, set the 2240 # decoder flags to dec_flags, read bytes_to_feed bytes, feed them 2241 # into the decoder with need_eof as the EOF flag, then skip 2242 # chars_to_skip characters of the decoded result. For most simple 2243 # decoders, tell() will often just give a byte offset in the file. 2244 return (position | (dec_flags<<64) | (bytes_to_feed<<128) | 2245 (chars_to_skip<<192) | bool(need_eof)<<256) 2246 2247 def _unpack_cookie(self, bigint): 2248 rest, position = divmod(bigint, 1<<64) 2249 rest, dec_flags = divmod(rest, 1<<64) 2250 rest, bytes_to_feed = divmod(rest, 1<<64) 2251 need_eof, chars_to_skip = divmod(rest, 1<<64) 2252 return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip 2253 2254 def tell(self): 2255 if not self._seekable: 2256 raise UnsupportedOperation("underlying stream is not seekable") 2257 if not self._telling: 2258 raise OSError("telling position disabled by next() call") 2259 self.flush() 2260 position = self.buffer.tell() 2261 decoder = self._decoder 2262 if decoder is None or self._snapshot is None: 2263 if self._decoded_chars: 2264 # This should never happen. 
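                # (Decoded text is only buffered by _read_chunk() or seek(),
                # both of which record a snapshot while telling is enabled,
                # so reaching this branch indicates inconsistent state.)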
2265 raise AssertionError("pending decoded text") 2266 return position 2267 2268 # Skip backward to the snapshot point (see _read_chunk). 2269 dec_flags, next_input = self._snapshot 2270 position -= len(next_input) 2271 2272 # How many decoded characters have been used up since the snapshot? 2273 chars_to_skip = self._decoded_chars_used 2274 if chars_to_skip == 0: 2275 # We haven't moved from the snapshot point. 2276 return self._pack_cookie(position, dec_flags) 2277 2278 # Starting from the snapshot position, we will walk the decoder 2279 # forward until it gives us enough decoded characters. 2280 saved_state = decoder.getstate() 2281 try: 2282 # Fast search for an acceptable start point, close to our 2283 # current pos. 2284 # Rationale: calling decoder.decode() has a large overhead 2285 # regardless of chunk size; we want the number of such calls to 2286 # be O(1) in most situations (common decoders, non-crazy input). 2287 # Actually, it will be exactly 1 for fixed-size codecs (all 2288 # 8-bit codecs, also UTF-16 and UTF-32). 2289 skip_bytes = int(self._b2cratio * chars_to_skip) 2290 skip_back = 1 2291 assert skip_bytes <= len(next_input) 2292 while skip_bytes > 0: 2293 decoder.setstate((b'', dec_flags)) 2294 # Decode up to temptative start point 2295 n = len(decoder.decode(next_input[:skip_bytes])) 2296 if n <= chars_to_skip: 2297 b, d = decoder.getstate() 2298 if not b: 2299 # Before pos and no bytes buffered in decoder => OK 2300 dec_flags = d 2301 chars_to_skip -= n 2302 break 2303 # Skip back by buffered amount and reset heuristic 2304 skip_bytes -= len(b) 2305 skip_back = 1 2306 else: 2307 # We're too far ahead, skip back a bit 2308 skip_bytes -= skip_back 2309 skip_back = skip_back * 2 2310 else: 2311 skip_bytes = 0 2312 decoder.setstate((b'', dec_flags)) 2313 2314 # Note our initial start point. 2315 start_pos = position + skip_bytes 2316 start_flags = dec_flags 2317 if chars_to_skip == 0: 2318 # We haven't moved from the start point. 2319 return self._pack_cookie(start_pos, start_flags) 2320 2321 # Feed the decoder one byte at a time. As we go, note the 2322 # nearest "safe start point" before the current location 2323 # (a point where the decoder has nothing buffered, so seek() 2324 # can safely start from there and advance to this location). 2325 bytes_fed = 0 2326 need_eof = 0 2327 # Chars decoded since `start_pos` 2328 chars_decoded = 0 2329 for i in range(skip_bytes, len(next_input)): 2330 bytes_fed += 1 2331 chars_decoded += len(decoder.decode(next_input[i:i+1])) 2332 dec_buffer, dec_flags = decoder.getstate() 2333 if not dec_buffer and chars_decoded <= chars_to_skip: 2334 # Decoder buffer is empty, so this is a safe start point. 2335 start_pos += bytes_fed 2336 chars_to_skip -= chars_decoded 2337 start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 2338 if chars_decoded >= chars_to_skip: 2339 break 2340 else: 2341 # We didn't get enough decoded data; signal EOF to get more. 2342 chars_decoded += len(decoder.decode(b'', final=True)) 2343 need_eof = 1 2344 if chars_decoded < chars_to_skip: 2345 raise OSError("can't reconstruct logical file position") 2346 2347 # The returned cookie corresponds to the last safe start point. 
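            # (Illustrative, with hypothetical values: start_pos=10,
            # start_flags=1, bytes_fed=3, need_eof=0 and chars_to_skip=2
            # pack into the single integer 10 | 1<<64 | 3<<128 | 2<<192,
            # which _unpack_cookie() splits back into the same fields.)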
2348 return self._pack_cookie( 2349 start_pos, start_flags, bytes_fed, need_eof, chars_to_skip) 2350 finally: 2351 decoder.setstate(saved_state) 2352 2353 def truncate(self, pos=None): 2354 self.flush() 2355 if pos is None: 2356 pos = self.tell() 2357 return self.buffer.truncate(pos) 2358 2359 def detach(self): 2360 if self.buffer is None: 2361 raise ValueError("buffer is already detached") 2362 self.flush() 2363 buffer = self._buffer 2364 self._buffer = None 2365 return buffer 2366 2367 def seek(self, cookie, whence=0): 2368 def _reset_encoder(position): 2369 """Reset the encoder (merely useful for proper BOM handling)""" 2370 try: 2371 encoder = self._encoder or self._get_encoder() 2372 except LookupError: 2373 # Sometimes the encoder doesn't exist 2374 pass 2375 else: 2376 if position != 0: 2377 encoder.setstate(0) 2378 else: 2379 encoder.reset() 2380 2381 if self.closed: 2382 raise ValueError("tell on closed file") 2383 if not self._seekable: 2384 raise UnsupportedOperation("underlying stream is not seekable") 2385 if whence == 1: # seek relative to current position 2386 if cookie != 0: 2387 raise UnsupportedOperation("can't do nonzero cur-relative seeks") 2388 # Seeking to the current position should attempt to 2389 # sync the underlying buffer with the current position. 2390 whence = 0 2391 cookie = self.tell() 2392 if whence == 2: # seek relative to end of file 2393 if cookie != 0: 2394 raise UnsupportedOperation("can't do nonzero end-relative seeks") 2395 self.flush() 2396 position = self.buffer.seek(0, 2) 2397 self._set_decoded_chars('') 2398 self._snapshot = None 2399 if self._decoder: 2400 self._decoder.reset() 2401 _reset_encoder(position) 2402 return position 2403 if whence != 0: 2404 raise ValueError("unsupported whence (%r)" % (whence,)) 2405 if cookie < 0: 2406 raise ValueError("negative seek position %r" % (cookie,)) 2407 self.flush() 2408 2409 # The strategy of seek() is to go back to the safe start point 2410 # and replay the effect of read(chars_to_skip) from there. 2411 start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \ 2412 self._unpack_cookie(cookie) 2413 2414 # Seek back to the safe start point. 2415 self.buffer.seek(start_pos) 2416 self._set_decoded_chars('') 2417 self._snapshot = None 2418 2419 # Restore the decoder to its state from the safe start point. 2420 if cookie == 0 and self._decoder: 2421 self._decoder.reset() 2422 elif self._decoder or dec_flags or chars_to_skip: 2423 self._decoder = self._decoder or self._get_decoder() 2424 self._decoder.setstate((b'', dec_flags)) 2425 self._snapshot = (dec_flags, b'') 2426 2427 if chars_to_skip: 2428 # Just like _read_chunk, feed the decoder and save a snapshot. 2429 input_chunk = self.buffer.read(bytes_to_feed) 2430 self._set_decoded_chars( 2431 self._decoder.decode(input_chunk, need_eof)) 2432 self._snapshot = (dec_flags, input_chunk) 2433 2434 # Skip chars_to_skip of the decoded characters. 2435 if len(self._decoded_chars) < chars_to_skip: 2436 raise OSError("can't restore logical file position") 2437 self._decoded_chars_used = chars_to_skip 2438 2439 _reset_encoder(cookie) 2440 return cookie 2441 2442 def read(self, size=None): 2443 self._checkReadable() 2444 if size is None: 2445 size = -1 2446 else: 2447 try: 2448 size_index = size.__index__ 2449 except AttributeError: 2450 raise TypeError(f"{size!r} is not an integer") 2451 else: 2452 size = size_index() 2453 decoder = self._decoder or self._get_decoder() 2454 if size < 0: 2455 # Read everything. 
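            # (Anything already buffered in _decoded_chars is returned
            # first, followed by one final decode of the rest of the
            # stream; the snapshot is then dropped, so a later tell()
            # simply reports the byte position of the exhausted buffer.)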
2456 result = (self._get_decoded_chars() + 2457 decoder.decode(self.buffer.read(), final=True)) 2458 self._set_decoded_chars('') 2459 self._snapshot = None 2460 return result 2461 else: 2462 # Keep reading chunks until we have size characters to return. 2463 eof = False 2464 result = self._get_decoded_chars(size) 2465 while len(result) < size and not eof: 2466 eof = not self._read_chunk() 2467 result += self._get_decoded_chars(size - len(result)) 2468 return result 2469 2470 def __next__(self): 2471 self._telling = False 2472 line = self.readline() 2473 if not line: 2474 self._snapshot = None 2475 self._telling = self._seekable 2476 raise StopIteration 2477 return line 2478 2479 def readline(self, size=None): 2480 if self.closed: 2481 raise ValueError("read from closed file") 2482 if size is None: 2483 size = -1 2484 else: 2485 try: 2486 size_index = size.__index__ 2487 except AttributeError: 2488 raise TypeError(f"{size!r} is not an integer") 2489 else: 2490 size = size_index() 2491 2492 # Grab all the decoded text (we will rewind any extra bits later). 2493 line = self._get_decoded_chars() 2494 2495 start = 0 2496 # Make the decoder if it doesn't already exist. 2497 if not self._decoder: 2498 self._get_decoder() 2499 2500 pos = endpos = None 2501 while True: 2502 if self._readtranslate: 2503 # Newlines are already translated, only search for \n 2504 pos = line.find('\n', start) 2505 if pos >= 0: 2506 endpos = pos + 1 2507 break 2508 else: 2509 start = len(line) 2510 2511 elif self._readuniversal: 2512 # Universal newline search. Find any of \r, \r\n, \n 2513 # The decoder ensures that \r\n are not split in two pieces 2514 2515 # In C we'd look for these in parallel of course. 2516 nlpos = line.find("\n", start) 2517 crpos = line.find("\r", start) 2518 if crpos == -1: 2519 if nlpos == -1: 2520 # Nothing found 2521 start = len(line) 2522 else: 2523 # Found \n 2524 endpos = nlpos + 1 2525 break 2526 elif nlpos == -1: 2527 # Found lone \r 2528 endpos = crpos + 1 2529 break 2530 elif nlpos < crpos: 2531 # Found \n 2532 endpos = nlpos + 1 2533 break 2534 elif nlpos == crpos + 1: 2535 # Found \r\n 2536 endpos = crpos + 2 2537 break 2538 else: 2539 # Found \r 2540 endpos = crpos + 1 2541 break 2542 else: 2543 # non-universal 2544 pos = line.find(self._readnl) 2545 if pos >= 0: 2546 endpos = pos + len(self._readnl) 2547 break 2548 2549 if size >= 0 and len(line) >= size: 2550 endpos = size # reached length size 2551 break 2552 2553 # No line ending seen yet - get more data' 2554 while self._read_chunk(): 2555 if self._decoded_chars: 2556 break 2557 if self._decoded_chars: 2558 line += self._get_decoded_chars() 2559 else: 2560 # end of file 2561 self._set_decoded_chars('') 2562 self._snapshot = None 2563 return line 2564 2565 if size >= 0 and endpos > size: 2566 endpos = size # don't exceed size 2567 2568 # Rewind _decoded_chars to just after the line ending we found. 2569 self._rewind_decoded_chars(len(line) - endpos) 2570 return line[:endpos] 2571 2572 @property 2573 def newlines(self): 2574 return self._decoder.newlines if self._decoder else None 2575 2576 2577class StringIO(TextIOWrapper): 2578 """Text I/O implementation using an in-memory buffer. 2579 2580 The initial_value argument sets the value of object. The newline 2581 argument is like the one of TextIOWrapper's constructor. 
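    Example (illustrative):

    >>> s = StringIO("spam")
    >>> s.read()
    'spam'
    >>> s.write(" and eggs")
    9
    >>> s.getvalue()
    'spam and eggs'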
2582 """ 2583 2584 def __init__(self, initial_value="", newline="\n"): 2585 super(StringIO, self).__init__(BytesIO(), 2586 encoding="utf-8", 2587 errors="surrogatepass", 2588 newline=newline) 2589 # Issue #5645: make universal newlines semantics the same as in the 2590 # C version, even under Windows. 2591 if newline is None: 2592 self._writetranslate = False 2593 if initial_value is not None: 2594 if not isinstance(initial_value, str): 2595 raise TypeError("initial_value must be str or None, not {0}" 2596 .format(type(initial_value).__name__)) 2597 self.write(initial_value) 2598 self.seek(0) 2599 2600 def getvalue(self): 2601 self.flush() 2602 decoder = self._decoder or self._get_decoder() 2603 old_state = decoder.getstate() 2604 decoder.reset() 2605 try: 2606 return decoder.decode(self.buffer.getvalue(), final=True) 2607 finally: 2608 decoder.setstate(old_state) 2609 2610 def __repr__(self): 2611 # TextIOWrapper tells the encoding in its repr. In StringIO, 2612 # that's an implementation detail. 2613 return object.__repr__(self) 2614 2615 @property 2616 def errors(self): 2617 return None 2618 2619 @property 2620 def encoding(self): 2621 return None 2622 2623 def detach(self): 2624 # This doesn't make sense on StringIO. 2625 self._unsupported("detach") 2626
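# Note on the in-memory text class above: getvalue() decodes the whole
# underlying buffer, so it is independent of the current stream position
# (hypothetical session):
#
#     >>> s = StringIO("abc")
#     >>> s.read(1)
#     'a'
#     >>> s.getvalue()
#     'abc'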