author    Ruud van Asseldonk <dev@veniogames.com>  2016-11-04 00:20:11 +0100
committer Ruud van Asseldonk <dev@veniogames.com>  2016-11-30 11:09:29 +0100
commit    341805288e8a055162bef64055a7962ecffbf103 (patch)
tree      191dd0cfdcf9725d05c20c976b27008f811df2e0 /src/libstd/io
parent    cd7fade0a9c1c8762d2fba7c65c1b82e8d369711 (diff)
Move small-copy optimization into copy_from_slice
Ultimately copy_from_slice is the bottleneck, not io::Cursor::read.
It might be worthwhile to move the check there, so that more places
can benefit from it.
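
For context, a minimal sketch of what the length-one fast path could look
like once it lives in copy_from_slice. This is illustrative only: the free
function copy_from_slice_sketch and its signature are assumptions, not the
actual libcore code this commit refers to.

    use std::ptr;

    // Sketch: special-case single-element copies so they become a plain
    // assignment instead of a call into memcpy, whose per-call overhead
    // dominates for very small copies.
    fn copy_from_slice_sketch<T: Copy>(dst: &mut [T], src: &[T]) {
        assert_eq!(dst.len(), src.len(),
                   "destination and source slices have different lengths");
        if dst.len() == 1 {
            // Single-element fast path: no memcpy involved.
            dst[0] = src[0];
        } else {
            // General case: a bulk nonoverlapping copy.
            unsafe {
                ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), dst.len());
            }
        }
    }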
Diffstat (limited to 'src/libstd/io')
-rw-r--r--  src/libstd/io/cursor.rs | 18
1 file changed, 3 insertions(+), 15 deletions(-)
diff --git a/src/libstd/io/cursor.rs b/src/libstd/io/cursor.rs
index 9b50168a954..1b5023380a7 100644
--- a/src/libstd/io/cursor.rs
+++ b/src/libstd/io/cursor.rs
@@ -219,21 +219,9 @@ impl<T> io::Seek for Cursor<T> where T: AsRef<[u8]> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> Read for Cursor<T> where T: AsRef<[u8]> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        // First check if the amount of bytes we want to read is small: the read
-        // in the else branch will end up calling `<&[u8] as Read>::read()`,
-        // which will copy the buffer using a memcopy. If we only want to read a
-        // single byte, then the overhead of the function call is significant.
-        let num_read = {
-            let mut inner_buf = self.fill_buf()?;
-            if buf.len() == 1 && inner_buf.len() > 0 {
-                buf[0] = inner_buf[0];
-                1
-            } else {
-                Read::read(&mut inner_buf, buf)?
-            }
-        };
-        self.pos += num_read as u64;
-        Ok(num_read)
+        let n = Read::read(&mut self.fill_buf()?, buf)?;
+        self.pos += n as u64;
+        Ok(n)
     }
 }
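
As a usage note, this is the kind of call the patch affects: a single-byte
read on a Cursor, which previously hit the special case removed above and
now flows through <&[u8] as Read>::read and its slice copy. A minimal,
self-contained example:

    use std::io::{Cursor, Read};

    fn main() {
        let mut cursor = Cursor::new(vec![1u8, 2, 3]);
        let mut byte = [0u8; 1];
        // A one-byte read: after this patch it takes the generic slice
        // read path instead of the hand-written fast path in Cursor::read.
        cursor.read_exact(&mut byte).unwrap();
        assert_eq!(byte, [1]);
    }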