1 | use crate::error; |
2 | use crate::fmt; |
3 | use crate::io::{ |
4 | self, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE, |
5 | }; |
6 | use crate::mem; |
7 | use crate::ptr; |
8 | |
9 | /// Wraps a writer and buffers its output. |
10 | /// |
11 | /// It can be excessively inefficient to work directly with something that |
12 | /// implements [`Write`]. For example, every call to |
13 | /// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A |
14 | /// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying |
15 | /// writer in large, infrequent batches. |
16 | /// |
17 | /// `BufWriter<W>` can improve the speed of programs that make *small* and |
18 | /// *repeated* write calls to the same file or network socket. It does not |
19 | /// help when writing very large amounts at once, or writing just one or a few |
20 | /// times. It also provides no advantage when writing to a destination that is |
21 | /// in memory, like a <code>[Vec]\<u8></code>. |
22 | /// |
23 | /// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though |
24 | /// dropping will attempt to flush the contents of the buffer, any errors |
25 | /// that happen in the process of dropping will be ignored. Calling [`flush`] |
26 | /// ensures that the buffer is empty and thus dropping will not even attempt |
27 | /// file operations. |
28 | /// |
29 | /// # Examples |
30 | /// |
31 | /// Let's write the numbers one through ten to a [`TcpStream`]: |
32 | /// |
33 | /// ```no_run |
34 | /// use std::io::prelude::*; |
35 | /// use std::net::TcpStream; |
36 | /// |
/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
38 | /// |
39 | /// for i in 0..10 { |
40 | /// stream.write(&[i+1]).unwrap(); |
41 | /// } |
42 | /// ``` |
43 | /// |
44 | /// Because we're not buffering, we write each one in turn, incurring the |
45 | /// overhead of a system call per byte written. We can fix this with a |
46 | /// `BufWriter<W>`: |
47 | /// |
48 | /// ```no_run |
49 | /// use std::io::prelude::*; |
50 | /// use std::io::BufWriter; |
51 | /// use std::net::TcpStream; |
52 | /// |
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
54 | /// |
55 | /// for i in 0..10 { |
56 | /// stream.write(&[i+1]).unwrap(); |
57 | /// } |
58 | /// stream.flush().unwrap(); |
59 | /// ``` |
60 | /// |
61 | /// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped |
62 | /// together by the buffer and will all be written out in one system call when |
63 | /// the `stream` is flushed. |
64 | /// |
65 | /// [`TcpStream::write`]: crate::net::TcpStream::write |
66 | /// [`TcpStream`]: crate::net::TcpStream |
67 | /// [`flush`]: BufWriter::flush |
#[stable(feature = "rust1", since = "1.0.0")]
69 | pub struct BufWriter<W: ?Sized + Write> { |
70 | // The buffer. Avoid using this like a normal `Vec` in common code paths. |
71 | // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other |
72 | // methods that require bounds checking or the like. This makes an enormous |
73 | // difference to performance (we may want to stop using a `Vec` entirely). |
74 | buf: Vec<u8>, |
75 | // #30888: If the inner writer panics in a call to write, we don't want to |
76 | // write the buffered data a second time in BufWriter's destructor. This |
77 | // flag tells the Drop impl if it should skip the flush. |
78 | panicked: bool, |
79 | inner: W, |
80 | } |
81 | |
82 | impl<W: Write> BufWriter<W> { |
83 | /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KiB, |
84 | /// but may change in the future. |
85 | /// |
86 | /// # Examples |
87 | /// |
88 | /// ```no_run |
89 | /// use std::io::BufWriter; |
90 | /// use std::net::TcpStream; |
91 | /// |
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
95 | pub fn new(inner: W) -> BufWriter<W> { |
96 | BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner) |
97 | } |
98 | |
99 | /// Creates a new `BufWriter<W>` with at least the specified buffer capacity. |
100 | /// |
101 | /// # Examples |
102 | /// |
/// Creating a buffer with at least a hundred bytes of capacity.
104 | /// |
105 | /// ```no_run |
106 | /// use std::io::BufWriter; |
107 | /// use std::net::TcpStream; |
108 | /// |
/// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
/// let mut buffer = BufWriter::with_capacity(100, stream);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
113 | pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> { |
114 | BufWriter { inner, buf: Vec::with_capacity(capacity), panicked: false } |
115 | } |
116 | |
117 | /// Unwraps this `BufWriter<W>`, returning the underlying writer. |
118 | /// |
119 | /// The buffer is written out before returning the writer. |
120 | /// |
121 | /// # Errors |
122 | /// |
123 | /// An [`Err`] will be returned if an error occurs while flushing the buffer. |
124 | /// |
125 | /// # Examples |
126 | /// |
127 | /// ```no_run |
128 | /// use std::io::BufWriter; |
129 | /// use std::net::TcpStream; |
130 | /// |
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
132 | /// |
133 | /// // unwrap the TcpStream and flush the buffer |
134 | /// let stream = buffer.into_inner().unwrap(); |
135 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
137 | pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> { |
138 | match self.flush_buf() { |
139 | Err(e) => Err(IntoInnerError::new(self, e)), |
140 | Ok(()) => Ok(self.into_parts().0), |
141 | } |
142 | } |
143 | |
144 | /// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but |
145 | /// unwritten data. |
146 | /// |
147 | /// If the underlying writer panicked, it is not known what portion of the data was written. |
148 | /// In this case, we return `WriterPanicked` for the buffered data (from which the buffer |
149 | /// contents can still be recovered). |
150 | /// |
151 | /// `into_parts` makes no attempt to flush data and cannot fail. |
152 | /// |
153 | /// # Examples |
154 | /// |
155 | /// ``` |
156 | /// use std::io::{BufWriter, Write}; |
157 | /// |
158 | /// let mut buffer = [0u8; 10]; |
159 | /// let mut stream = BufWriter::new(buffer.as_mut()); |
/// write!(stream, "too much data").unwrap();
/// stream.flush().expect_err("it doesn't fit");
162 | /// let (recovered_writer, buffered_data) = stream.into_parts(); |
163 | /// assert_eq!(recovered_writer.len(), 0); |
/// assert_eq!(&buffered_data.unwrap(), b"ata");
165 | /// ``` |
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
167 | pub fn into_parts(mut self) -> (W, Result<Vec<u8>, WriterPanicked>) { |
168 | let buf = mem::take(&mut self.buf); |
169 | let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) }; |
170 | |
171 | // SAFETY: forget(self) prevents double dropping inner |
172 | let inner = unsafe { ptr::read(&self.inner) }; |
173 | mem::forget(self); |
174 | |
175 | (inner, buf) |
176 | } |
177 | } |
178 | |
179 | impl<W: ?Sized + Write> BufWriter<W> { |
180 | /// Send data in our local buffer into the inner writer, looping as |
181 | /// necessary until either it's all been sent or an error occurs. |
182 | /// |
183 | /// Because all the data in the buffer has been reported to our owner as |
184 | /// "successfully written" (by returning nonzero success values from |
185 | /// `write`), any 0-length writes from `inner` must be reported as i/o |
186 | /// errors from this method. |
187 | pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> { |
188 | /// Helper struct to ensure the buffer is updated after all the writes |
189 | /// are complete. It tracks the number of written bytes and drains them |
190 | /// all from the front of the buffer when dropped. |
191 | struct BufGuard<'a> { |
192 | buffer: &'a mut Vec<u8>, |
193 | written: usize, |
194 | } |
195 | |
196 | impl<'a> BufGuard<'a> { |
197 | fn new(buffer: &'a mut Vec<u8>) -> Self { |
198 | Self { buffer, written: 0 } |
199 | } |
200 | |
201 | /// The unwritten part of the buffer |
202 | fn remaining(&self) -> &[u8] { |
203 | &self.buffer[self.written..] |
204 | } |
205 | |
206 | /// Flag some bytes as removed from the front of the buffer |
207 | fn consume(&mut self, amt: usize) { |
208 | self.written += amt; |
209 | } |
210 | |
211 | /// true if all of the bytes have been written |
212 | fn done(&self) -> bool { |
213 | self.written >= self.buffer.len() |
214 | } |
215 | } |
216 | |
217 | impl Drop for BufGuard<'_> { |
218 | fn drop(&mut self) { |
219 | if self.written > 0 { |
220 | self.buffer.drain(..self.written); |
221 | } |
222 | } |
223 | } |
224 | |
225 | let mut guard = BufGuard::new(&mut self.buf); |
226 | while !guard.done() { |
227 | self.panicked = true; |
228 | let r = self.inner.write(guard.remaining()); |
229 | self.panicked = false; |
230 | |
231 | match r { |
232 | Ok(0) => { |
233 | return Err(io::const_io_error!( |
234 | ErrorKind::WriteZero, |
"failed to write the buffered data",
236 | )); |
237 | } |
238 | Ok(n) => guard.consume(n), |
239 | Err(ref e) if e.is_interrupted() => {} |
240 | Err(e) => return Err(e), |
241 | } |
242 | } |
243 | Ok(()) |
244 | } |
245 | |
246 | /// Buffer some data without flushing it, regardless of the size of the |
247 | /// data. Writes as much as possible without exceeding capacity. Returns |
248 | /// the number of bytes written. |
249 | pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize { |
250 | let available = self.spare_capacity(); |
251 | let amt_to_buffer = available.min(buf.len()); |
252 | |
253 | // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction. |
254 | unsafe { |
255 | self.write_to_buffer_unchecked(&buf[..amt_to_buffer]); |
256 | } |
257 | |
258 | amt_to_buffer |
259 | } |
260 | |
261 | /// Gets a reference to the underlying writer. |
262 | /// |
263 | /// # Examples |
264 | /// |
265 | /// ```no_run |
266 | /// use std::io::BufWriter; |
267 | /// use std::net::TcpStream; |
268 | /// |
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
270 | /// |
271 | /// // we can use reference just like buffer |
272 | /// let reference = buffer.get_ref(); |
273 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
275 | pub fn get_ref(&self) -> &W { |
276 | &self.inner |
277 | } |
278 | |
279 | /// Gets a mutable reference to the underlying writer. |
280 | /// |
281 | /// It is inadvisable to directly write to the underlying writer. |
282 | /// |
283 | /// # Examples |
284 | /// |
285 | /// ```no_run |
286 | /// use std::io::BufWriter; |
287 | /// use std::net::TcpStream; |
288 | /// |
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
290 | /// |
291 | /// // we can use reference just like buffer |
292 | /// let reference = buffer.get_mut(); |
293 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
295 | pub fn get_mut(&mut self) -> &mut W { |
296 | &mut self.inner |
297 | } |
298 | |
299 | /// Returns a reference to the internally buffered data. |
300 | /// |
301 | /// # Examples |
302 | /// |
303 | /// ```no_run |
304 | /// use std::io::BufWriter; |
305 | /// use std::net::TcpStream; |
306 | /// |
/// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
308 | /// |
309 | /// // See how many bytes are currently buffered |
310 | /// let bytes_buffered = buf_writer.buffer().len(); |
311 | /// ``` |
#[stable(feature = "bufreader_buffer", since = "1.37.0")]
313 | pub fn buffer(&self) -> &[u8] { |
314 | &self.buf |
315 | } |
316 | |
317 | /// Returns a mutable reference to the internal buffer. |
318 | /// |
/// This can be used to write data directly into the buffer without triggering writes
320 | /// to the underlying writer. |
321 | /// |
322 | /// That the buffer is a `Vec` is an implementation detail. |
323 | /// Callers should not modify the capacity as there currently is no public API to do so |
324 | /// and thus any capacity changes would be unexpected by the user. |
325 | pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> { |
326 | &mut self.buf |
327 | } |
328 | |
329 | /// Returns the number of bytes the internal buffer can hold without flushing. |
330 | /// |
331 | /// # Examples |
332 | /// |
333 | /// ```no_run |
334 | /// use std::io::BufWriter; |
335 | /// use std::net::TcpStream; |
336 | /// |
/// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
338 | /// |
339 | /// // Check the capacity of the inner buffer |
340 | /// let capacity = buf_writer.capacity(); |
341 | /// // Calculate how many bytes can be written without flushing |
342 | /// let without_flush = capacity - buf_writer.buffer().len(); |
343 | /// ``` |
#[stable(feature = "buffered_io_capacity", since = "1.46.0")]
345 | pub fn capacity(&self) -> usize { |
346 | self.buf.capacity() |
347 | } |
348 | |
349 | // Ensure this function does not get inlined into `write`, so that it |
350 | // remains inlineable and its common path remains as short as possible. |
351 | // If this function ends up being called frequently relative to `write`, |
352 | // it's likely a sign that the client is using an improperly sized buffer |
353 | // or their write patterns are somewhat pathological. |
#[cold]
#[inline(never)]
356 | fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> { |
357 | if buf.len() > self.spare_capacity() { |
358 | self.flush_buf()?; |
359 | } |
360 | |
361 | // Why not len > capacity? To avoid a needless trip through the buffer when the input |
362 | // exactly fills it. We'd just need to flush it to the underlying writer anyway. |
363 | if buf.len() >= self.buf.capacity() { |
364 | self.panicked = true; |
365 | let r = self.get_mut().write(buf); |
366 | self.panicked = false; |
367 | r |
368 | } else { |
369 | // Write to the buffer. In this case, we write to the buffer even if it fills it |
370 | // exactly. Doing otherwise would mean flushing the buffer, then writing this |
371 | // input to the inner writer, which in many cases would be a worse strategy. |
372 | |
373 | // SAFETY: There was either enough spare capacity already, or there wasn't and we |
374 | // flushed the buffer to ensure that there is. In the latter case, we know that there |
375 | // is because flushing ensured that our entire buffer is spare capacity, and we entered |
376 | // this block because the input buffer length is less than that capacity. In either |
377 | // case, it's safe to write the input buffer to our buffer. |
378 | unsafe { |
379 | self.write_to_buffer_unchecked(buf); |
380 | } |
381 | |
382 | Ok(buf.len()) |
383 | } |
384 | } |
385 | |
386 | // Ensure this function does not get inlined into `write_all`, so that it |
387 | // remains inlineable and its common path remains as short as possible. |
388 | // If this function ends up being called frequently relative to `write_all`, |
389 | // it's likely a sign that the client is using an improperly sized buffer |
390 | // or their write patterns are somewhat pathological. |
#[cold]
#[inline(never)]
393 | fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> { |
394 | // Normally, `write_all` just calls `write` in a loop. We can do better |
395 | // by calling `self.get_mut().write_all()` directly, which avoids |
396 | // round trips through the buffer in the event of a series of partial |
397 | // writes in some circumstances. |
398 | |
399 | if buf.len() > self.spare_capacity() { |
400 | self.flush_buf()?; |
401 | } |
402 | |
403 | // Why not len > capacity? To avoid a needless trip through the buffer when the input |
404 | // exactly fills it. We'd just need to flush it to the underlying writer anyway. |
405 | if buf.len() >= self.buf.capacity() { |
406 | self.panicked = true; |
407 | let r = self.get_mut().write_all(buf); |
408 | self.panicked = false; |
409 | r |
410 | } else { |
411 | // Write to the buffer. In this case, we write to the buffer even if it fills it |
412 | // exactly. Doing otherwise would mean flushing the buffer, then writing this |
413 | // input to the inner writer, which in many cases would be a worse strategy. |
414 | |
415 | // SAFETY: There was either enough spare capacity already, or there wasn't and we |
416 | // flushed the buffer to ensure that there is. In the latter case, we know that there |
417 | // is because flushing ensured that our entire buffer is spare capacity, and we entered |
418 | // this block because the input buffer length is less than that capacity. In either |
419 | // case, it's safe to write the input buffer to our buffer. |
420 | unsafe { |
421 | self.write_to_buffer_unchecked(buf); |
422 | } |
423 | |
424 | Ok(()) |
425 | } |
426 | } |
427 | |
428 | // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`, |
429 | // i.e., that input buffer length is less than or equal to spare capacity. |
#[inline]
431 | unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) { |
432 | debug_assert!(buf.len() <= self.spare_capacity()); |
433 | let old_len = self.buf.len(); |
434 | let buf_len = buf.len(); |
435 | let src = buf.as_ptr(); |
436 | let dst = self.buf.as_mut_ptr().add(old_len); |
437 | ptr::copy_nonoverlapping(src, dst, buf_len); |
438 | self.buf.set_len(old_len + buf_len); |
439 | } |
440 | |
#[inline]
442 | fn spare_capacity(&self) -> usize { |
443 | self.buf.capacity() - self.buf.len() |
444 | } |
445 | } |
446 | |
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
448 | /// Error returned for the buffered data from `BufWriter::into_parts`, when the underlying |
449 | /// writer has previously panicked. Contains the (possibly partly written) buffered data. |
450 | /// |
451 | /// # Example |
452 | /// |
453 | /// ``` |
454 | /// use std::io::{self, BufWriter, Write}; |
455 | /// use std::panic::{catch_unwind, AssertUnwindSafe}; |
456 | /// |
457 | /// struct PanickingWriter; |
458 | /// impl Write for PanickingWriter { |
459 | /// fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() } |
460 | /// fn flush(&mut self) -> io::Result<()> { panic!() } |
461 | /// } |
462 | /// |
463 | /// let mut stream = BufWriter::new(PanickingWriter); |
/// write!(stream, "some data").unwrap();
465 | /// let result = catch_unwind(AssertUnwindSafe(|| { |
466 | /// stream.flush().unwrap() |
467 | /// })); |
468 | /// assert!(result.is_err()); |
469 | /// let (recovered_writer, buffered_data) = stream.into_parts(); |
470 | /// assert!(matches!(recovered_writer, PanickingWriter)); |
/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
472 | /// ``` |
473 | pub struct WriterPanicked { |
474 | buf: Vec<u8>, |
475 | } |
476 | |
477 | impl WriterPanicked { |
478 | /// Returns the perhaps-unwritten data. Some of this data may have been written by the |
479 | /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea. |
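///
/// # Examples
///
/// An illustrative sketch, reusing the panicking-writer setup from the
/// type-level example above (`PanickingWriter` is a stand-in that panics on
/// every call):
///
/// ```
/// use std::io::{self, BufWriter, Write};
/// use std::panic::{catch_unwind, AssertUnwindSafe};
///
/// struct PanickingWriter;
/// impl Write for PanickingWriter {
///     fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
///     fn flush(&mut self) -> io::Result<()> { panic!() }
/// }
///
/// let mut stream = BufWriter::new(PanickingWriter);
/// write!(stream, "unsent bytes").unwrap();
/// // Flushing panics inside the inner writer; catch the panic and continue.
/// assert!(catch_unwind(AssertUnwindSafe(|| stream.flush())).is_err());
///
/// let (_writer, buffered) = stream.into_parts();
/// // The buffered (and possibly partially written) bytes are recoverable.
/// assert_eq!(buffered.unwrap_err().into_inner(), b"unsent bytes");
/// ```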
#[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
482 | pub fn into_inner(self) -> Vec<u8> { |
483 | self.buf |
484 | } |
485 | |
486 | const DESCRIPTION: &'static str = |
"BufWriter inner writer panicked, what data remains unwritten is not known";
488 | } |
489 | |
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
491 | impl error::Error for WriterPanicked { |
#[allow(deprecated, deprecated_in_future)]
493 | fn description(&self) -> &str { |
494 | Self::DESCRIPTION |
495 | } |
496 | } |
497 | |
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
499 | impl fmt::Display for WriterPanicked { |
500 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
write!(f, "{}", Self::DESCRIPTION)
502 | } |
503 | } |
504 | |
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
506 | impl fmt::Debug for WriterPanicked { |
507 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
f.debug_struct("WriterPanicked")
    .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
510 | .finish() |
511 | } |
512 | } |
513 | |
#[stable(feature = "rust1", since = "1.0.0")]
515 | impl<W: ?Sized + Write> Write for BufWriter<W> { |
#[inline]
517 | fn write(&mut self, buf: &[u8]) -> io::Result<usize> { |
518 | // Use < instead of <= to avoid a needless trip through the buffer in some cases. |
519 | // See `write_cold` for details. |
520 | if buf.len() < self.spare_capacity() { |
521 | // SAFETY: safe by above conditional. |
522 | unsafe { |
523 | self.write_to_buffer_unchecked(buf); |
524 | } |
525 | |
526 | Ok(buf.len()) |
527 | } else { |
528 | self.write_cold(buf) |
529 | } |
530 | } |
531 | |
#[inline]
533 | fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { |
534 | // Use < instead of <= to avoid a needless trip through the buffer in some cases. |
535 | // See `write_all_cold` for details. |
536 | if buf.len() < self.spare_capacity() { |
537 | // SAFETY: safe by above conditional. |
538 | unsafe { |
539 | self.write_to_buffer_unchecked(buf); |
540 | } |
541 | |
542 | Ok(()) |
543 | } else { |
544 | self.write_all_cold(buf) |
545 | } |
546 | } |
547 | |
548 | fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { |
549 | // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied |
550 | // to `write` and `write_all`. The performance benefits can be significant. See #79930. |
551 | if self.get_ref().is_write_vectored() { |
552 | // We have to handle the possibility that the total length of the buffers overflows |
553 | // `usize` (even though this can only happen if multiple `IoSlice`s reference the |
554 | // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the |
555 | // computation overflows, then surely the input cannot fit in our buffer, so we forward |
556 | // to the inner writer's `write_vectored` method to let it handle it appropriately. |
557 | let mut saturated_total_len: usize = 0; |
558 | |
559 | for buf in bufs { |
560 | saturated_total_len = saturated_total_len.saturating_add(buf.len()); |
561 | |
562 | if saturated_total_len > self.spare_capacity() && !self.buf.is_empty() { |
563 | // Flush if the total length of the input exceeds our buffer's spare capacity. |
564 | // If we would have overflowed, this condition also holds, and we need to flush. |
565 | self.flush_buf()?; |
566 | } |
567 | |
568 | if saturated_total_len >= self.buf.capacity() { |
569 | // Forward to our inner writer if the total length of the input is greater than or |
570 | // equal to our buffer capacity. If we would have overflowed, this condition also |
571 | // holds, and we punt to the inner writer. |
572 | self.panicked = true; |
573 | let r = self.get_mut().write_vectored(bufs); |
574 | self.panicked = false; |
575 | return r; |
576 | } |
577 | } |
578 | |
579 | // `saturated_total_len < self.buf.capacity()` implies that we did not saturate. |
580 | |
581 | // SAFETY: We checked whether or not the spare capacity was large enough above. If |
582 | // it was, then we're safe already. If it wasn't, we flushed, making sufficient |
583 | // room for any input <= the buffer size, which includes this input. |
584 | unsafe { |
585 | bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b)); |
586 | }; |
587 | |
588 | Ok(saturated_total_len) |
589 | } else { |
590 | let mut iter = bufs.iter(); |
591 | let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) { |
592 | // This is the first non-empty slice to write, so if it does |
593 | // not fit in the buffer, we still get to flush and proceed. |
594 | if buf.len() > self.spare_capacity() { |
595 | self.flush_buf()?; |
596 | } |
597 | if buf.len() >= self.buf.capacity() { |
598 | // The slice is at least as large as the buffering capacity, |
599 | // so it's better to write it directly, bypassing the buffer. |
600 | self.panicked = true; |
601 | let r = self.get_mut().write(buf); |
602 | self.panicked = false; |
603 | return r; |
604 | } else { |
605 | // SAFETY: We checked whether or not the spare capacity was large enough above. |
606 | // If it was, then we're safe already. If it wasn't, we flushed, making |
607 | // sufficient room for any input <= the buffer size, which includes this input. |
608 | unsafe { |
609 | self.write_to_buffer_unchecked(buf); |
610 | } |
611 | |
612 | buf.len() |
613 | } |
614 | } else { |
615 | return Ok(0); |
616 | }; |
617 | debug_assert!(total_written != 0); |
618 | for buf in iter { |
619 | if buf.len() <= self.spare_capacity() { |
620 | // SAFETY: safe by above conditional. |
621 | unsafe { |
622 | self.write_to_buffer_unchecked(buf); |
623 | } |
624 | |
625 | // This cannot overflow `usize`. If we are here, we've written all of the bytes |
626 | // so far to our buffer, and we've ensured that we never exceed the buffer's |
627 | // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`. |
628 | total_written += buf.len(); |
629 | } else { |
630 | break; |
631 | } |
632 | } |
633 | Ok(total_written) |
634 | } |
635 | } |
636 | |
637 | fn is_write_vectored(&self) -> bool { |
638 | true |
639 | } |
640 | |
641 | fn flush(&mut self) -> io::Result<()> { |
642 | self.flush_buf().and_then(|()| self.get_mut().flush()) |
643 | } |
644 | } |
645 | |
#[stable(feature = "rust1", since = "1.0.0")]
647 | impl<W: ?Sized + Write> fmt::Debug for BufWriter<W> |
648 | where |
649 | W: fmt::Debug, |
650 | { |
651 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { |
fmt.debug_struct("BufWriter")
    .field("writer", &&self.inner)
    .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
655 | .finish() |
656 | } |
657 | } |
658 | |
#[stable(feature = "rust1", since = "1.0.0")]
660 | impl<W: ?Sized + Write + Seek> Seek for BufWriter<W> { |
661 | /// Seek to the offset, in bytes, in the underlying writer. |
662 | /// |
663 | /// Seeking always writes out the internal buffer before seeking. |
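///
/// # Examples
///
/// An illustrative sketch: with an in-memory `Cursor` as the underlying
/// writer, the buffered bytes are written out before the seek takes effect.
///
/// ```
/// use std::io::{BufWriter, Cursor, Seek, SeekFrom, Write};
///
/// let mut writer: BufWriter<Cursor<Vec<u8>>> = BufWriter::new(Cursor::new(Vec::new()));
/// writer.write_all(b"hello").unwrap();
/// // Seeking flushes the buffered "hello" into the cursor first.
/// assert_eq!(writer.seek(SeekFrom::Start(0)).unwrap(), 0);
/// assert_eq!(writer.get_ref().get_ref(), b"hello");
/// ```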
664 | fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { |
665 | self.flush_buf()?; |
666 | self.get_mut().seek(pos) |
667 | } |
668 | } |
669 | |
#[stable(feature = "rust1", since = "1.0.0")]
671 | impl<W: ?Sized + Write> Drop for BufWriter<W> { |
672 | fn drop(&mut self) { |
673 | if !self.panicked { |
674 | // dtors should not panic, so we ignore a failed flush |
let _r = self.flush_buf();
676 | } |
677 | } |
678 | } |
679 | |