about summary refs log tree commit diff
path: root/src/libstd/io
diff options
context:
space:
mode:
authorRuud van Asseldonk <dev@veniogames.com>2016-11-12 15:58:58 +0100
committerRuud van Asseldonk <dev@veniogames.com>2016-11-30 11:09:29 +0100
commit3be2c3b3092e934bdc2db67d5bdcabd611deca9c (patch)
treec52d41e6d87f47378f9b93e5bd62b34c6e1172f6 /src/libstd/io
parent341805288e8a055162bef64055a7962ecffbf103 (diff)
downloadrust-3be2c3b3092e934bdc2db67d5bdcabd611deca9c.tar.gz
rust-3be2c3b3092e934bdc2db67d5bdcabd611deca9c.zip
Move small-copy optimization into <&[u8] as Read>
Based on the discussion in https://github.com/rust-lang/rust/pull/37573,
it is likely better to keep this limited to std::io, instead of
modifying a function which users expect to be a memcpy.
Diffstat (limited to 'src/libstd/io')
-rw-r--r--src/libstd/io/impls.rs22
1 file changed, 20 insertions, 2 deletions
diff --git a/src/libstd/io/impls.rs b/src/libstd/io/impls.rs
index 6b26c016638..f691289811b 100644
--- a/src/libstd/io/impls.rs
+++ b/src/libstd/io/impls.rs
@@ -157,7 +157,16 @@ impl<'a> Read for &'a [u8] {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         let amt = cmp::min(buf.len(), self.len());
         let (a, b) = self.split_at(amt);
-        buf[..amt].copy_from_slice(a);
+
+        // First check if the amount of bytes we want to read is small:
+        // `copy_from_slice` will generally expand to a call to `memcpy`, and
+        // for a single byte the overhead is significant.
+        if amt == 1 {
+            buf[0] = a[0];
+        } else {
+            buf[..amt].copy_from_slice(a);
+        }
+
         *self = b;
         Ok(amt)
     }
@@ -169,7 +178,16 @@ impl<'a> Read for &'a [u8] {
                                   "failed to fill whole buffer"));
         }
         let (a, b) = self.split_at(buf.len());
-        buf.copy_from_slice(a);
+
+        // First check if the amount of bytes we want to read is small:
+        // `copy_from_slice` will generally expand to a call to `memcpy`, and
+        // for a single byte the overhead is significant.
+        if buf.len() == 1 {
+            buf[0] = a[0];
+        } else {
+            buf.copy_from_slice(a);
+        }
+
         *self = b;
         Ok(())
     }