| | | |
|---|---|---|
| author | Ben Kimock <kimockb@gmail.com> | 2022-06-30 21:55:19 -0400 |
| committer | Ben Kimock <kimockb@gmail.com> | 2022-07-24 12:50:05 -0400 |
| commit | 761ddf3e7fe2dea4e0dc437ffca24be8e529852b (patch) | |
| tree | 1b4f19b40e110ef9b9285c007755c6623850e628 /library/std/src/io/buffered/bufreader/buffer.rs | |
| parent | b4151a41a0b275dee59ffbbc115e7bfc5be8a8c3 (diff) | |
Remove some redundant checks from BufReader
The implementation of BufReader contains a lot of redundant checks. While no single one of these checks is particularly expensive to execute, taken together they dramatically inhibit LLVM's ability to make subsequent optimizations.
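To make the idea concrete, here is a minimal sketch (not code from this patch; `window_checked` and `window_unchecked` are hypothetical helpers) of how re-validating bounds on every access compares with establishing an invariant once and then slicing without further checks:

```rust
/// Checked version: every call re-validates `pos <= cap <= buf.len()`, and the
/// compiler usually cannot prove those checks away because the invariant that
/// would justify removing them lives outside this function.
fn window_checked(buf: &[u8], pos: usize, cap: usize) -> &[u8] {
    &buf[pos..cap] // panics if pos > cap or cap > buf.len()
}

/// Same operation, but the invariant is asserted once up front, so the slicing
/// itself compiles down to plain pointer arithmetic.
fn window_unchecked(buf: &[u8], pos: usize, cap: usize) -> &[u8] {
    assert!(pos <= cap && cap <= buf.len());
    // SAFETY: the assert above establishes that `pos..cap` is in bounds.
    unsafe { buf.get_unchecked(pos..cap) }
}

fn main() {
    let data = *b"hello world";
    assert_eq!(window_checked(&data, 6, 11), b"world");
    assert_eq!(window_unchecked(&data, 6, 11), b"world");
}
```

In the patch below, the analogous invariant (`pos <= cap <= init <= capacity`) is maintained by the new `Buffer` type itself, which is what justifies the `get_unchecked` call in `Buffer::buffer`.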
Diffstat (limited to 'library/std/src/io/buffered/bufreader/buffer.rs')
| | | |
|---|---|---|
| -rw-r--r-- | library/std/src/io/buffered/bufreader/buffer.rs | 75 |
1 file changed, 75 insertions, 0 deletions
```diff
diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs
new file mode 100644
index 00000000000..92fe47745d9
--- /dev/null
+++ b/library/std/src/io/buffered/bufreader/buffer.rs
@@ -0,0 +1,75 @@
+use crate::cmp;
+use crate::io::{self, Read, ReadBuf};
+use crate::mem::MaybeUninit;
+
+pub struct Buffer {
+    buf: Box<[MaybeUninit<u8>]>,
+    pos: usize,
+    cap: usize,
+    init: usize,
+}
+
+impl Buffer {
+    pub fn with_capacity(capacity: usize) -> Self {
+        let buf = Box::new_uninit_slice(capacity);
+        Self { buf, pos: 0, cap: 0, init: 0 }
+    }
+
+    pub fn buffer(&self) -> &[u8] {
+        // SAFETY: self.cap is always <= self.init, so self.buf[self.pos..self.cap] is always init
+        // Additionally, both self.pos and self.cap are valid and self.cap >= self.pos, and
+        // that region is initialized because those are all invariants of this type.
+        unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.get_unchecked(self.pos..self.cap)) }
+    }
+
+    pub fn capacity(&self) -> usize {
+        self.buf.len()
+    }
+
+    pub fn cap(&self) -> usize {
+        self.cap
+    }
+
+    pub fn pos(&self) -> usize {
+        self.pos
+    }
+
+    pub fn discard_buffer(&mut self) {
+        self.pos = 0;
+        self.cap = 0;
+    }
+
+    pub fn consume(&mut self, amt: usize) {
+        self.pos = cmp::min(self.pos + amt, self.cap);
+    }
+
+    pub fn unconsume(&mut self, amt: usize) {
+        self.pos = self.pos.saturating_sub(amt);
+    }
+
+    pub fn fill_buf(&mut self, mut reader: impl Read) -> io::Result<&[u8]> {
+        // If we've reached the end of our internal buffer then we need to fetch
+        // some more data from the underlying reader.
+        // Branch using `>=` instead of the more correct `==`
+        // to tell the compiler that the pos..cap slice is always valid.
+        if self.pos >= self.cap {
+            debug_assert!(self.pos == self.cap);
+
+            let mut readbuf = ReadBuf::uninit(&mut self.buf);
+
+            // SAFETY: `self.init` is either 0 or set to `readbuf.initialized_len()`
+            // from the last time this function was called
+            unsafe {
+                readbuf.assume_init(self.init);
+            }
+
+            reader.read_buf(&mut readbuf)?;
+
+            self.cap = readbuf.filled_len();
+            self.init = readbuf.initialized_len();
+
+            self.pos = 0;
+        }
+        Ok(self.buffer())
+    }
+}
```
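The fill_buf/consume protocol that this internal `Buffer` type encapsulates mirrors the public `std::io::BufRead` interface, so the caller-side behavior can be demonstrated with public APIs alone; a small sketch (the new type itself is private to std, so this uses `BufReader` directly):

```rust
use std::io::{BufRead, BufReader};

fn main() -> std::io::Result<()> {
    // Any reader works; an in-memory byte slice keeps the example self-contained.
    let source: &[u8] = b"hello world";
    let mut reader = BufReader::with_capacity(4, source);

    let mut total = 0;
    loop {
        // fill_buf returns whatever is currently buffered, going back to the
        // underlying reader only once the buffer is exhausted; this is the
        // same `pos >= cap` refill condition used in the patch above.
        let available = reader.fill_buf()?.len();
        if available == 0 {
            break; // EOF: the source is drained and the buffer is empty
        }
        total += available;
        // Advance the read position past the bytes we used (the `pos` field).
        reader.consume(available);
    }
    assert_eq!(total, 11);
    Ok(())
}
```

The refill-only-when-empty behavior corresponds to the `self.pos >= self.cap` branch in `Buffer::fill_buf`: as long as buffered bytes remain, no call reaches the underlying reader.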
