use crate::error;
use crate::fmt;
use crate::io::{
    self, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE,
};
use crate::mem;
use crate::ptr;

/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a <code>[Vec]\<u8></code>.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// # Examples
///
/// Let's write the numbers one through ten to a [`TcpStream`]:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
/// for i in 0..10 {
///     stream.write(&[i+1]).unwrap();
/// }
/// ```
///
/// Because we're not buffering, we write each one in turn, incurring the
/// overhead of a system call per byte written. We can fix this with a
/// `BufWriter<W>`:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// for i in 0..10 {
///     stream.write(&[i+1]).unwrap();
/// }
/// stream.flush().unwrap();
/// ```
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
// HACK(#78696): can't use `crate` for associated items
/// [`TcpStream::write`]: super::super::super::net::TcpStream::write
/// [`TcpStream`]: crate::net::TcpStream
/// [`flush`]: BufWriter::flush
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufWriter<W: ?Sized + Write> {
    // The buffer. Avoid using this like a normal `Vec` in common code paths.
    // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
    // methods that require bounds checking or the like. This makes an enormous
    // difference to performance (we may want to stop using a `Vec` entirely).
    buf: Vec<u8>,
    // #30888: If the inner writer panics in a call to write, we don't want to
    // write the buffered data a second time in BufWriter's destructor. This
    // flag tells the Drop impl if it should skip the flush.
    panicked: bool,
    inner: W,
}

impl<W: Write> BufWriter<W> {
    /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KiB,
    /// but may change in the future.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: W) -> BufWriter<W> {
        BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Creates a new `BufWriter<W>` with at least the specified buffer capacity.
    ///
    /// # Examples
    ///
    /// Creating a `BufWriter<W>` with an internal buffer of at least a hundred bytes.
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
    /// let mut buffer = BufWriter::with_capacity(100, stream);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
        BufWriter { inner, buf: Vec::with_capacity(capacity), panicked: false }
    }

    /// Unwraps this `BufWriter<W>`, returning the underlying writer.
    ///
    /// The buffer is written out before returning the writer.
    ///
    /// # Errors
    ///
    /// An [`Err`] will be returned if an error occurs while flushing the buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // unwrap the TcpStream and flush the buffer
    /// let stream = buffer.into_inner().unwrap();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
        match self.flush_buf() {
            Err(e) => Err(IntoInnerError::new(self, e)),
            Ok(()) => Ok(self.into_parts().0),
        }
    }

    /// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but
    /// unwritten data.
    ///
    /// If the underlying writer panicked, it is not known what portion of the data was written.
    /// In this case, we return `WriterPanicked` for the buffered data (from which the buffer
    /// contents can still be recovered).
    ///
    /// `into_parts` makes no attempt to flush data and cannot fail.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::io::{BufWriter, Write};
    ///
    /// let mut buffer = [0u8; 10];
    /// let mut stream = BufWriter::new(buffer.as_mut());
    /// write!(stream, "too much data").unwrap();
    /// stream.flush().expect_err("it doesn't fit");
    /// let (recovered_writer, buffered_data) = stream.into_parts();
    /// assert_eq!(recovered_writer.len(), 0);
    /// assert_eq!(&buffered_data.unwrap(), b"ata");
    /// ```
    #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
    pub fn into_parts(mut self) -> (W, Result<Vec<u8>, WriterPanicked>) {
        let buf = mem::take(&mut self.buf);
        let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };

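        // `BufWriter` has a `Drop` impl, so `inner` cannot simply be moved out of
        // `self`; instead it is copied out with `ptr::read`, and `forget(self)`
        // below keeps the destructor from ever running on the duplicated value.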
        // SAFETY: forget(self) prevents double dropping inner
        let inner = unsafe { ptr::read(&self.inner) };
        mem::forget(self);

        (inner, buf)
    }
}

impl<W: ?Sized + Write> BufWriter<W> {
    /// Send data in our local buffer into the inner writer, looping as
    /// necessary until either it's all been sent or an error occurs.
    ///
    /// Because all the data in the buffer has been reported to our owner as
    /// "successfully written" (by returning nonzero success values from
    /// `write`), any 0-length writes from `inner` must be reported as i/o
    /// errors from this method.
    pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> {
        /// Helper struct to ensure the buffer is updated after all the writes
        /// are complete. It tracks the number of written bytes and drains them
        /// all from the front of the buffer when dropped.
        struct BufGuard<'a> {
            buffer: &'a mut Vec<u8>,
            written: usize,
        }

        impl<'a> BufGuard<'a> {
            fn new(buffer: &'a mut Vec<u8>) -> Self {
                Self { buffer, written: 0 }
            }

            /// The unwritten part of the buffer
            fn remaining(&self) -> &[u8] {
                &self.buffer[self.written..]
            }

            /// Flag some bytes as removed from the front of the buffer
            fn consume(&mut self, amt: usize) {
                self.written += amt;
            }

            /// true if all of the bytes have been written
            fn done(&self) -> bool {
                self.written >= self.buffer.len()
            }
        }

        impl Drop for BufGuard<'_> {
            fn drop(&mut self) {
                if self.written > 0 {
                    self.buffer.drain(..self.written);
                }
            }
        }

        let mut guard = BufGuard::new(&mut self.buf);
        while !guard.done() {
            self.panicked = true;
            let r = self.inner.write(guard.remaining());
            self.panicked = false;

            match r {
                Ok(0) => {
                    return Err(io::const_io_error!(
                        ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                }
                Ok(n) => guard.consume(n),
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }

    /// Buffer some data without flushing it, regardless of the size of the
    /// data. Writes as much as possible without exceeding capacity. Returns
    /// the number of bytes written.
    pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
        let available = self.spare_capacity();
        let amt_to_buffer = available.min(buf.len());

        // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
        unsafe {
            self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
        }

        amt_to_buffer
    }

    /// Gets a reference to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use the reference just like the original buffer
    /// let reference = buffer.get_ref();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use the reference just like the original buffer
    /// let reference = buffer.get_mut();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.inner
    }

    /// Returns a reference to the internally buffered data.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // See how many bytes are currently buffered
    /// let bytes_buffered = buf_writer.buffer().len();
    /// ```
    #[stable(feature = "bufreader_buffer", since = "1.37.0")]
    pub fn buffer(&self) -> &[u8] {
        &self.buf
    }

    /// Returns a mutable reference to the internal buffer.
    ///
    /// This can be used to write data directly into the buffer without triggering writes
    /// to the underlying writer.
    ///
    /// That the buffer is a `Vec` is an implementation detail.
    /// Callers should not modify the capacity as there currently is no public API to do so
    /// and thus any capacity changes would be unexpected by the user.
    pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buf
    }

    /// Returns the number of bytes the internal buffer can hold without flushing.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // Check the capacity of the inner buffer
    /// let capacity = buf_writer.capacity();
    /// // Calculate how many bytes can be written without flushing
    /// let without_flush = capacity - buf_writer.buffer().len();
    /// ```
    #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
    pub fn capacity(&self) -> usize {
        self.buf.capacity()
    }

    // Ensure this function does not get inlined into `write`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            self.panicked = true;
            let r = self.get_mut().write(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        }
    }

    // Ensure this function does not get inlined into `write_all`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write_all`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
        // Normally, `write_all` just calls `write` in a loop. We can do better
        // by calling `self.get_mut().write_all()` directly, which avoids
        // round trips through the buffer in the event of a series of partial
        // writes in some circumstances.

        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            self.panicked = true;
            let r = self.get_mut().write_all(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        }
    }

    // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
    // i.e., that input buffer length is less than or equal to spare capacity.
    #[inline]
    unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
        debug_assert!(buf.len() <= self.spare_capacity());
        let old_len = self.buf.len();
        let buf_len = buf.len();
        let src = buf.as_ptr();
        let dst = self.buf.as_mut_ptr().add(old_len);
        ptr::copy_nonoverlapping(src, dst, buf_len);
        self.buf.set_len(old_len + buf_len);
    }

    #[inline]
    fn spare_capacity(&self) -> usize {
        self.buf.capacity() - self.buf.len()
    }
}

#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
/// Error returned for the buffered data from `BufWriter::into_parts`, when the underlying
/// writer has previously panicked.  Contains the (possibly partly written) buffered data.
///
/// # Example
///
/// ```
/// use std::io::{self, BufWriter, Write};
/// use std::panic::{catch_unwind, AssertUnwindSafe};
///
/// struct PanickingWriter;
/// impl Write for PanickingWriter {
///   fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
///   fn flush(&mut self) -> io::Result<()> { panic!() }
/// }
///
/// let mut stream = BufWriter::new(PanickingWriter);
/// write!(stream, "some data").unwrap();
/// let result = catch_unwind(AssertUnwindSafe(|| {
///     stream.flush().unwrap()
/// }));
/// assert!(result.is_err());
/// let (recovered_writer, buffered_data) = stream.into_parts();
/// assert!(matches!(recovered_writer, PanickingWriter));
/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
/// ```
pub struct WriterPanicked {
    buf: Vec<u8>,
}

impl WriterPanicked {
    /// Returns the perhaps-unwritten data.  Some of this data may have been written by the
    /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea.
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
    pub fn into_inner(self) -> Vec<u8> {
        self.buf
    }

    const DESCRIPTION: &'static str =
        "BufWriter inner writer panicked, what data remains unwritten is not known";
}

#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
impl error::Error for WriterPanicked {
    #[allow(deprecated, deprecated_in_future)]
    fn description(&self) -> &str {
        Self::DESCRIPTION
    }
}

#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
impl fmt::Display for WriterPanicked {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", Self::DESCRIPTION)
    }
}

#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
impl fmt::Debug for WriterPanicked {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("WriterPanicked")
            .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
            .finish()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Write for BufWriter<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        } else {
            self.write_cold(buf)
        }
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_all_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        } else {
            self.write_all_cold(buf)
        }
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
        // to `write` and `write_all`. The performance benefits can be significant. See #79930.
        if self.get_ref().is_write_vectored() {
            // We have to handle the possibility that the total length of the buffers overflows
            // `usize` (even though this can only happen if multiple `IoSlice`s reference the
            // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
            // computation overflows, then surely the input cannot fit in our buffer, so we forward
            // to the inner writer's `write_vectored` method to let it handle it appropriately.
            let saturated_total_len =
                bufs.iter().fold(0usize, |acc, b| acc.saturating_add(b.len()));

            if saturated_total_len > self.spare_capacity() {
                // Flush if the total length of the input exceeds our buffer's spare capacity.
                // If we would have overflowed, this condition also holds, and we need to flush.
                self.flush_buf()?;
            }

            if saturated_total_len >= self.buf.capacity() {
                // Forward to our inner writer if the total length of the input is greater than or
                // equal to our buffer capacity. If we would have overflowed, this condition also
                // holds, and we punt to the inner writer.
                self.panicked = true;
                let r = self.get_mut().write_vectored(bufs);
                self.panicked = false;
                r
            } else {
                // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.

                // SAFETY: We checked whether or not the spare capacity was large enough above. If
                // it was, then we're safe already. If it wasn't, we flushed, making sufficient
                // room for any input <= the buffer size, which includes this input.
                unsafe {
                    bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
                };

                Ok(saturated_total_len)
            }
        } else {
            let mut iter = bufs.iter();
            let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
                // This is the first non-empty slice to write, so if it does
                // not fit in the buffer, we still get to flush and proceed.
                if buf.len() > self.spare_capacity() {
                    self.flush_buf()?;
                }
                if buf.len() >= self.buf.capacity() {
                    // The slice is at least as large as the buffering capacity,
                    // so it's better to write it directly, bypassing the buffer.
                    self.panicked = true;
                    let r = self.get_mut().write(buf);
                    self.panicked = false;
                    return r;
                } else {
                    // SAFETY: We checked whether or not the spare capacity was large enough above.
                    // If it was, then we're safe already. If it wasn't, we flushed, making
                    // sufficient room for any input <= the buffer size, which includes this input.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    buf.len()
                }
            } else {
                return Ok(0);
            };
            debug_assert!(total_written != 0);
            for buf in iter {
                if buf.len() <= self.spare_capacity() {
                    // SAFETY: safe by above conditional.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    // This cannot overflow `usize`. If we are here, we've written all of the bytes
                    // so far to our buffer, and we've ensured that we never exceed the buffer's
                    // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
                    total_written += buf.len();
                } else {
                    break;
                }
            }
            Ok(total_written)
        }
    }

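    // Returning `true` here is fine even when the inner writer has no vectored-write
    // support of its own: in that case `write_vectored` above falls back to filling
    // the in-memory buffer from the slices instead of forwarding them.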
    fn is_write_vectored(&self) -> bool {
        true
    }

    fn flush(&mut self) -> io::Result<()> {
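        // Write out our own buffered data first, then let the inner writer flush
        // any buffering it does itself.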
        self.flush_buf().and_then(|()| self.get_mut().flush())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> fmt::Debug for BufWriter<W>
where
    W: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("BufWriter")
            .field("writer", &&self.inner)
            .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
            .finish()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write + Seek> Seek for BufWriter<W> {
    /// Seek to the offset, in bytes, in the underlying writer.
    ///
    /// Seeking always writes out the internal buffer before seeking.
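    ///
    /// # Examples
    ///
    /// A small sketch using an in-memory `Cursor`: the buffered bytes are
    /// written out before the cursor is repositioned.
    ///
    /// ```
    /// use std::io::{BufWriter, Cursor, Seek, SeekFrom, Write};
    ///
    /// let mut writer = BufWriter::new(Cursor::new(Vec::new()));
    /// writer.write_all(b"hello").unwrap();
    ///
    /// // Seeking flushes "hello" into the cursor, then moves its position.
    /// writer.seek(SeekFrom::Start(0)).unwrap();
    /// assert_eq!(writer.get_ref().get_ref(), b"hello");
    /// assert_eq!(writer.get_ref().position(), 0);
    /// ```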
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.flush_buf()?;
        self.get_mut().seek(pos)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Drop for BufWriter<W> {
    fn drop(&mut self) {
        if !self.panicked {
            // dtors should not panic, so we ignore a failed flush
            let _r = self.flush_buf();
        }
    }
}