author     bors <bors@rust-lang.org>    2015-01-14 09:52:08 +0000
committer  bors <bors@rust-lang.org>    2015-01-14 09:52:08 +0000
commit     d52398ef8cd93c6089ceacb176ae0dbe213d301e (patch)
tree       08e4dfeec71c72dead90902cc43cf5c1a2a44ab7 /src/libstd
parent     3614e1de6cf7abc7754c23f93476bef0e2625e99 (diff)
parent     89f1848b556971a4278f2b2385137ca6c9e07094 (diff)
auto merge of #21076 : sfackler/rust/bufferedreader-undef, r=Gankro
The buffer is passed to the underlying reader, so uninitialized memory ==
sad times (the inner reader can observe undefined contents).

We might want to shrink the default buffer size as well. 64k is pretty
huge. Java uses 8k by default, and Go uses 4k for reference.

r? @alexcrichton 
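
For illustration only (not part of the commit), here is a minimal sketch in
modern Rust of the reader-side pattern this patch fixes: the buffer is handed
as a slice to the inner reader, which is free to inspect it before writing,
so it has to start out initialized. The type and method names (TinyBufReader,
fill_buf, with_capacity) are made up for this sketch; the zeroed vec! plays
the role of the repeat(0).take(cap).collect() call in the diff below.

use std::io::{self, Read};

// Sketch of a buffered reader whose buffer is visible to the inner reader.
struct TinyBufReader<R> {
    inner: R,
    buf: Vec<u8>, // zero-initialized up front, unlike the old set_len trick
    pos: usize,
    cap: usize,
}

impl<R: Read> TinyBufReader<R> {
    fn with_capacity(cap: usize, inner: R) -> TinyBufReader<R> {
        TinyBufReader {
            inner,
            // Analogous to `repeat(0).take(cap).collect()` in the patch.
            buf: vec![0; cap],
            pos: 0,
            cap: 0,
        }
    }

    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        if self.pos == self.cap {
            // The inner reader receives the whole slice and may look at it
            // before writing; if it were uninitialized memory, undefined
            // contents would leak into safe code.
            self.cap = self.inner.read(&mut self.buf)?;
            self.pos = 0;
        }
        Ok(&self.buf[self.pos..self.cap])
    }
}

fn main() -> io::Result<()> {
    let data: &[u8] = b"hello, buffered world";
    let mut r = TinyBufReader::with_capacity(8, data);
    println!("first fill: {:?}", r.fill_buf()?);
    Ok(())
}

With the buffer zeroed up front, the worst a misbehaving inner reader can do
is read zeros, rather than whatever happened to be in the freshly allocated
memory.
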
Diffstat (limited to 'src/libstd')
-rw-r--r--  src/libstd/io/buffered.rs  | 21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/src/libstd/io/buffered.rs b/src/libstd/io/buffered.rs
index 60e2641ed8c..8c38bc009cc 100644
--- a/src/libstd/io/buffered.rs
+++ b/src/libstd/io/buffered.rs
@@ -15,7 +15,7 @@
 use cmp;
 use fmt;
 use io::{Reader, Writer, Stream, Buffer, DEFAULT_BUF_SIZE, IoResult};
-use iter::{IteratorExt, ExactSizeIterator};
+use iter::{IteratorExt, ExactSizeIterator, repeat};
 use ops::Drop;
 use option::Option;
 use option::Option::{Some, None};
@@ -62,17 +62,11 @@ impl<R> fmt::Show for BufferedReader<R> where R: fmt::Show {
 impl<R: Reader> BufferedReader<R> {
     /// Creates a new `BufferedReader` with the specified buffer capacity
     pub fn with_capacity(cap: uint, inner: R) -> BufferedReader<R> {
-        // It's *much* faster to create an uninitialized buffer than it is to
-        // fill everything in with 0. This buffer is entirely an implementation
-        // detail and is never exposed, so we're safe to not initialize
-        // everything up-front. This allows creation of BufferedReader instances
-        // to be very cheap (large mallocs are not nearly as expensive as large
-        // callocs).
-        let mut buf = Vec::with_capacity(cap);
-        unsafe { buf.set_len(cap); }
         BufferedReader {
             inner: inner,
-            buf: buf,
+            // We can't use the same trick here as we do for BufferedWriter,
+            // since this memory is visible to the inner Reader.
+            buf: repeat(0).take(cap).collect(),
             pos: 0,
             cap: 0,
         }
@@ -166,7 +160,12 @@ impl<W> fmt::Show for BufferedWriter<W> where W: fmt::Show {
 impl<W: Writer> BufferedWriter<W> {
     /// Creates a new `BufferedWriter` with the specified buffer capacity
     pub fn with_capacity(cap: uint, inner: W) -> BufferedWriter<W> {
-        // See comments in BufferedReader for why this uses unsafe code.
+        // It's *much* faster to create an uninitialized buffer than it is to
+        // fill everything in with 0. This buffer is entirely an implementation
+        // detail and is never exposed, so we're safe to not initialize
+        // everything up-front. This allows creation of BufferedWriter instances
+        // to be very cheap (large mallocs are not nearly as expensive as large
+        // callocs).
         let mut buf = Vec::with_capacity(cap);
         unsafe { buf.set_len(cap); }
         BufferedWriter {
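
As a side note on the malloc-vs-calloc remark restored in the BufferedWriter
comment above: the sketch below is my own micro-benchmark in modern Rust, not
part of the commit (the constants and labels are made up). It compares
reserving capacity without touching the memory against zero-filling the same
amount, which is the cost the writer-side trick avoids.

use std::hint::black_box;
use std::time::Instant;

fn main() {
    const CAP: usize = 64 * 1024; // the 64k default discussed in the message
    const ITERS: usize = 10_000;

    // Allocate only; the memory is never written or read here.
    let t = Instant::now();
    for _ in 0..ITERS {
        let v: Vec<u8> = Vec::with_capacity(CAP);
        black_box(&v);
    }
    println!("with_capacity only: {:?}", t.elapsed());

    // Allocate and zero-fill, as the reader-side fix must do.
    let t = Instant::now();
    for _ in 0..ITERS {
        let v: Vec<u8> = vec![0; CAP];
        black_box(&v);
    }
    println!("zero-filled:        {:?}", t.elapsed());
}

Actual numbers depend heavily on the allocator: calloc-backed allocations can
hand back pre-zeroed pages, so the gap is not always large, but only the
writer-side buffer is safe to leave uninitialized, since it is never exposed
outside the type.
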