From 341805288e8a055162bef64055a7962ecffbf103 Mon Sep 17 00:00:00 2001
From: Ruud van Asseldonk <dev@veniogames.com>
Date: Fri, 4 Nov 2016 00:20:11 +0100
Subject: Move small-copy optimization into copy_from_slice

Ultimately copy_from_slice is being a bottleneck, not io::Cursor::read.
It might be worthwhile to move the check here, so more places can
benefit from it.
---
 src/libstd/io/cursor.rs | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

(limited to 'src/libstd')

diff --git a/src/libstd/io/cursor.rs b/src/libstd/io/cursor.rs
index 9b50168a954..1b5023380a7 100644
--- a/src/libstd/io/cursor.rs
+++ b/src/libstd/io/cursor.rs
@@ -219,21 +219,9 @@ impl<T> io::Seek for Cursor<T> where T: AsRef<[u8]> {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> Read for Cursor<T> where T: AsRef<[u8]> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        // First check if the amount of bytes we want to read is small: the read
-        // in the else branch will end up calling `<&[u8] as Read>::read()`,
-        // which will copy the buffer using a memcopy. If we only want to read a
-        // single byte, then the overhead of the function call is significant.
-        let num_read = {
-            let mut inner_buf = self.fill_buf()?;
-            if buf.len() == 1 && inner_buf.len() > 0 {
-                buf[0] = inner_buf[0];
-                1
-            } else {
-                Read::read(&mut inner_buf, buf)?
-            }
-        };
-        self.pos += num_read as u64;
-        Ok(num_read)
+        let n = Read::read(&mut self.fill_buf()?, buf)?;
+        self.pos += n as u64;
+        Ok(n)
     }
 }
-- 
cgit 1.4.1-3-g733a5