about summary refs log tree commit diff
path: root/src/libtest
diff options
context:
space:
mode:
author	Seiichi Uchida <seuchida@gmail.com>	2018-03-21 23:12:24 +0900
committer	Seiichi Uchida <seuchida@gmail.com>	2018-03-21 23:12:24 +0900
commit	75dcc61d3c1cce7b4428dd85bb588e99d9faf7a9 (patch)
tree	bbe9202d92ac35cc2636501c1a7e174b1a463209 /src/libtest
parent	c19264fa835a1eca86de4fd2e86a87b3919e57cf (diff)
download	rust-75dcc61d3c1cce7b4428dd85bb588e99d9faf7a9.tar.gz
	rust-75dcc61d3c1cce7b4428dd85bb588e99d9faf7a9.zip
Cargo fmt libtest
Diffstat (limited to 'src/libtest')
-rw-r--r--	src/libtest/formatters/json.rs	49
-rw-r--r--	src/libtest/formatters/pretty.rs	9
-rw-r--r--	src/libtest/formatters/terse.rs	9
-rw-r--r--	src/libtest/lib.rs	325
-rw-r--r--	src/libtest/stats.rs	23
5 files changed, 182 insertions(+), 233 deletions(-)
diff --git a/src/libtest/formatters/json.rs b/src/libtest/formatters/json.rs
index d323d50f702..89235d897bd 100644
--- a/src/libtest/formatters/json.rs
+++ b/src/libtest/formatters/json.rs
@@ -36,17 +36,12 @@ impl<T: Write> JsonFormatter<T> {
         if let Some(extras) = extra {
             self.write_message(&*format!(
                 r#"{{ "type": "{}", "name": "{}", "event": "{}", {} }}"#,
-                ty,
-                name,
-                evt,
-                extras
+                ty, name, evt, extras
             ))
         } else {
             self.write_message(&*format!(
                 r#"{{ "type": "{}", "name": "{}", "event": "{}" }}"#,
-                ty,
-                name,
-                evt
+                ty, name, evt
             ))
         }
     }
@@ -89,14 +84,12 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
                 self.write_event("test", desc.name.as_slice(), "failed", extra_data)
             }
 
-            TrFailedMsg(ref m) => {
-                self.write_event(
-                    "test",
-                    desc.name.as_slice(),
-                    "failed",
-                    Some(format!(r#""message": "{}""#, EscapedString(m))),
-                )
-            }
+            TrFailedMsg(ref m) => self.write_event(
+                "test",
+                desc.name.as_slice(),
+                "failed",
+                Some(format!(r#""message": "{}""#, EscapedString(m))),
+            ),
 
             TrIgnored => self.write_event("test", desc.name.as_slice(), "ignored", None),
 
@@ -116,13 +109,10 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
 
                 let line = format!(
                     "{{ \"type\": \"bench\", \
-                                \"name\": \"{}\", \
-                                \"median\": {}, \
-                                \"deviation\": {}{} }}",
-                    desc.name,
-                    median,
-                    deviation,
-                    mbps
+                     \"name\": \"{}\", \
+                     \"median\": {}, \
+                     \"deviation\": {}{} }}",
+                    desc.name, median, deviation, mbps
                 );
 
                 self.write_message(&*line)
@@ -138,16 +128,15 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
     }
 
     fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
-
         self.write_message(&*format!(
             "{{ \"type\": \"suite\", \
-            \"event\": \"{}\", \
-            \"passed\": {}, \
-            \"failed\": {}, \
-            \"allowed_fail\": {}, \
-            \"ignored\": {}, \
-            \"measured\": {}, \
-            \"filtered_out\": \"{}\" }}",
+             \"event\": \"{}\", \
+             \"passed\": {}, \
+             \"failed\": {}, \
+             \"allowed_fail\": {}, \
+             \"ignored\": {}, \
+             \"measured\": {}, \
+             \"filtered_out\": \"{}\" }}",
             if state.failed == 0 { "ok" } else { "failed" },
             state.passed,
             state.failed + state.allowed_fail,
diff --git a/src/libtest/formatters/pretty.rs b/src/libtest/formatters/pretty.rs
index f2064deefce..8e5fa00b5f2 100644
--- a/src/libtest/formatters/pretty.rs
+++ b/src/libtest/formatters/pretty.rs
@@ -196,8 +196,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
 
         self.write_plain(&format!(
             "test {} has been running for over {} seconds\n",
-            desc.name,
-            TEST_WARN_TIMEOUT_S
+            desc.name, TEST_WARN_TIMEOUT_S
         ))
     }
 
@@ -232,11 +231,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
         } else {
             format!(
                 ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
-                state.passed,
-                state.failed,
-                state.ignored,
-                state.measured,
-                state.filtered_out
+                state.passed, state.failed, state.ignored, state.measured, state.filtered_out
             )
         };
 
diff --git a/src/libtest/formatters/terse.rs b/src/libtest/formatters/terse.rs
index 88689485144..85286027d69 100644
--- a/src/libtest/formatters/terse.rs
+++ b/src/libtest/formatters/terse.rs
@@ -195,8 +195,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
     fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
         self.write_plain(&format!(
             "test {} has been running for over {} seconds\n",
-            desc.name,
-            TEST_WARN_TIMEOUT_S
+            desc.name, TEST_WARN_TIMEOUT_S
         ))
     }
 
@@ -231,11 +230,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
         } else {
             format!(
                 ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
-                state.passed,
-                state.failed,
-                state.ignored,
-                state.measured,
-                state.filtered_out
+                state.passed, state.failed, state.ignored, state.measured, state.filtered_out
             )
         };
 
diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs
index 59d701dd0fb..b8be1aeff17 100644
--- a/src/libtest/lib.rs
+++ b/src/libtest/lib.rs
@@ -30,10 +30,8 @@
 #![unstable(feature = "test", issue = "27812")]
 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
        html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
-       html_root_url = "https://doc.rust-lang.org/nightly/",
-       test(attr(deny(warnings))))]
+       html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
 #![deny(warnings)]
-
 #![feature(asm)]
 #![feature(fnbox)]
 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
@@ -43,10 +41,10 @@
 #![feature(termination_trait_lib)]
 
 extern crate getopts;
-extern crate term;
 #[cfg(any(unix, target_os = "cloudabi"))]
 extern crate libc;
 extern crate panic_unwind;
+extern crate term;
 
 pub use self::TestFn::*;
 pub use self::ColorConfig::*;
@@ -72,7 +70,7 @@ use std::process::Termination;
 use std::sync::mpsc::{channel, Sender};
 use std::sync::{Arc, Mutex};
 use std::thread;
-use std::time::{Instant, Duration};
+use std::time::{Duration, Instant};
 use std::borrow::Cow;
 use std::process;
 
@@ -81,16 +79,16 @@ const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in qu
 
 // to be used by rustc to compile tests in libtest
 pub mod test {
-    pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
-             TrFailedMsg, TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName,
-             DynTestName, DynTestFn, assert_test_result, run_test, test_main, test_main_static,
-             filter_tests, parse_opts, StaticBenchFn, ShouldPanic, Options};
+    pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
+             Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
+             StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
+             TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
 }
 
 pub mod stats;
 mod formatters;
 
-use formatters::{OutputFormatter, PrettyFormatter, TerseFormatter, JsonFormatter};
+use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
 
 // The name of a test. By convention this follows the rules for rust
 // paths; i.e. it should be a series of identifiers separated by double
@@ -255,7 +253,9 @@ pub struct Options {
 
 impl Options {
     pub fn new() -> Options {
-        Options { display_output: false }
+        Options {
+            display_output: false,
+        }
     }
 
     pub fn display_output(mut self, display_output: bool) -> Options {
@@ -272,7 +272,7 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
         Some(Err(msg)) => {
             eprintln!("error: {}", msg);
             process::exit(101);
-        },
+        }
         None => return,
     };
 
@@ -289,7 +289,7 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
             Err(e) => {
                 eprintln!("error: io error when listing tests: {:?}", e);
                 process::exit(101);
-            },
+            }
         }
     }
 }
@@ -306,18 +306,14 @@ pub fn test_main_static(tests: &[TestDescAndFn]) {
     let owned_tests = tests
         .iter()
         .map(|t| match t.testfn {
-            StaticTestFn(f) => {
-                TestDescAndFn {
-                    testfn: StaticTestFn(f),
-                    desc: t.desc.clone(),
-                }
-            }
-            StaticBenchFn(f) => {
-                TestDescAndFn {
-                    testfn: StaticBenchFn(f),
-                    desc: t.desc.clone(),
-                }
-            }
+            StaticTestFn(f) => TestDescAndFn {
+                testfn: StaticTestFn(f),
+                desc: t.desc.clone(),
+            },
+            StaticBenchFn(f) => TestDescAndFn {
+                testfn: StaticBenchFn(f),
+                desc: t.desc.clone(),
+            },
             _ => panic!("non-static tests passed to test::test_main_static"),
         })
         .collect();
@@ -397,34 +393,34 @@ fn optgroups() -> getopts::Options {
             "",
             "logfile",
             "Write logs to the specified file instead \
-                                of stdout",
+             of stdout",
             "PATH",
         )
         .optflag(
             "",
             "nocapture",
             "don't capture stdout/stderr of each \
-                                   task, allow printing directly",
+             task, allow printing directly",
         )
         .optopt(
             "",
             "test-threads",
             "Number of threads used for running tests \
-                                     in parallel",
+             in parallel",
             "n_threads",
         )
         .optmulti(
             "",
             "skip",
             "Skip tests whose names contain FILTER (this flag can \
-                               be used multiple times)",
+             be used multiple times)",
             "FILTER",
         )
         .optflag(
             "q",
             "quiet",
             "Display one character per test instead of one line. \
-                                Alias to --format=terse",
+             Alias to --format=terse",
         )
         .optflag(
             "",
@@ -516,8 +512,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     if let Some(opt) = matches.opt_str("Z") {
         if !is_nightly() {
             return Some(Err(
-                "the option `Z` is only accepted on the nightly compiler"
-                    .into(),
+                "the option `Z` is only accepted on the nightly compiler".into(),
             ));
         }
 
@@ -562,19 +557,17 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     }
 
     let test_threads = match matches.opt_str("test-threads") {
-        Some(n_str) => {
-            match n_str.parse::<usize>() {
-                Ok(0) => return Some(Err(format!("argument for --test-threads must not be 0"))),
-                Ok(n) => Some(n),
-                Err(e) => {
-                    return Some(Err(format!(
-                        "argument for --test-threads must be a number > 0 \
-                                             (error: {})",
-                        e
-                    )))
-                }
+        Some(n_str) => match n_str.parse::<usize>() {
+            Ok(0) => return Some(Err(format!("argument for --test-threads must not be 0"))),
+            Ok(n) => Some(n),
+            Err(e) => {
+                return Some(Err(format!(
+                    "argument for --test-threads must be a number > 0 \
+                     (error: {})",
+                    e
+                )))
             }
-        }
+        },
         None => None,
     };
 
@@ -586,7 +579,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         Some(v) => {
             return Some(Err(format!(
                 "argument for --color must be auto, always, or never (was \
-                                     {})",
+                 {})",
                 v
             )))
         }
@@ -599,8 +592,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         Some("json") => {
             if !allow_unstable {
                 return Some(Err(
-                    "The \"json\" format is only accepted on the nightly compiler"
-                        .into(),
+                    "The \"json\" format is only accepted on the nightly compiler".into(),
                 ));
             }
             OutputFormat::Json
@@ -609,7 +601,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         Some(v) => {
             return Some(Err(format!(
                 "argument for --format must be pretty, terse, or json (was \
-                                     {})",
+                 {})",
                 v
             )))
         }
@@ -811,8 +803,7 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res
                 ntest += 1;
                 "test"
             }
-            StaticBenchFn(..) |
-            DynBenchFn(..) => {
+            StaticBenchFn(..) | DynBenchFn(..) => {
                 nbench += 1;
                 "benchmark"
             }
@@ -834,7 +825,8 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res
             writeln!(output, "")?;
         }
 
-        writeln!(output,
+        writeln!(
+            output,
             "{}, {}",
             plural(ntest, "test"),
             plural(nbench, "benchmark")
@@ -851,7 +843,6 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
         st: &mut ConsoleTestState,
         out: &mut OutputFormatter,
     ) -> io::Result<()> {
-
         match (*event).clone() {
             TeFiltered(ref filtered_tests) => {
                 st.total = filtered_tests.len();
@@ -989,8 +980,7 @@ fn use_color(opts: &TestOpts) -> bool {
     }
 }
 
-#[cfg(any(target_os = "cloudabi",
-          target_os = "redox",
+#[cfg(any(target_os = "cloudabi", target_os = "redox",
           all(target_arch = "wasm32", not(target_os = "emscripten"))))]
 fn stdout_isatty() -> bool {
     // FIXME: Implement isatty on Redox
@@ -1089,10 +1079,12 @@ where
         let now = Instant::now();
         let timed_out = running_tests
             .iter()
-            .filter_map(|(desc, timeout)| if &now >= timeout {
-                Some(desc.clone())
-            } else {
-                None
+            .filter_map(|(desc, timeout)| {
+                if &now >= timeout {
+                    Some(desc.clone())
+                } else {
+                    None
+                }
             })
             .collect();
         for test in &timed_out {
@@ -1174,12 +1166,10 @@ fn get_concurrency() -> usize {
             let opt_n: Option<usize> = s.parse().ok();
             match opt_n {
                 Some(n) if n > 0 => n,
-                _ => {
-                    panic!(
-                        "RUST_TEST_THREADS is `{}`, should be a positive integer.",
-                        s
-                    )
-                }
+                _ => panic!(
+                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
+                    s
+                ),
             }
         }
         Err(..) => num_cpus(),
@@ -1223,20 +1213,15 @@ fn get_concurrency() -> usize {
         1
     }
 
-    #[cfg(any(target_os = "android",
-              target_os = "cloudabi",
-              target_os = "emscripten",
-              target_os = "fuchsia",
-              target_os = "ios",
-              target_os = "linux",
-              target_os = "macos",
-              target_os = "solaris"))]
+    #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
+              target_os = "fuchsia", target_os = "ios", target_os = "linux",
+              target_os = "macos", target_os = "solaris"))]
     fn num_cpus() -> usize {
         unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
     }
 
     #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
-                target_os = "netbsd"))]
+              target_os = "netbsd"))]
     fn num_cpus() -> usize {
         use std::ptr;
 
@@ -1308,26 +1293,28 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
     // Remove tests that don't match the test filter
     filtered = match opts.filter {
         None => filtered,
-        Some(ref filter) => {
-            filtered
-                .into_iter()
-                .filter(|test| if opts.filter_exact {
+        Some(ref filter) => filtered
+            .into_iter()
+            .filter(|test| {
+                if opts.filter_exact {
                     test.desc.name.as_slice() == &filter[..]
                 } else {
                     test.desc.name.as_slice().contains(&filter[..])
-                })
-                .collect()
-        }
+                }
+            })
+            .collect(),
     };
 
     // Skip tests that match any of the skip filters
     filtered = filtered
         .into_iter()
         .filter(|t| {
-            !opts.skip.iter().any(|sf| if opts.filter_exact {
-                t.desc.name.as_slice() == &sf[..]
-            } else {
-                t.desc.name.as_slice().contains(&sf[..])
+            !opts.skip.iter().any(|sf| {
+                if opts.filter_exact {
+                    t.desc.name.as_slice() == &sf[..]
+                } else {
+                    t.desc.name.as_slice().contains(&sf[..])
+                }
             })
         })
         .collect();
@@ -1354,31 +1341,23 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
     };
 
     // Sort the tests alphabetically
-    filtered.sort_by(|t1, t2| {
-        t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())
-    });
+    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
 
     filtered
 }
 
 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
     // convert benchmarks to tests, if we're not benchmarking them
-    tests.into_iter().map(|x| {
-        let testfn = match x.testfn {
-            DynBenchFn(bench) => {
-                DynTestFn(Box::new(move || {
-                    bench::run_once(|b| {
-                        __rust_begin_short_backtrace(|| bench.run(b))
-                    })
-                }))
-            }
-            StaticBenchFn(benchfn) => {
-                DynTestFn(Box::new(move || {
-                    bench::run_once(|b| {
-                        __rust_begin_short_backtrace(|| benchfn(b))
-                    })
-                }))
-            }
+    tests
+        .into_iter()
+        .map(|x| {
+            let testfn = match x.testfn {
+                DynBenchFn(bench) => DynTestFn(Box::new(move || {
+                    bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
+                })),
+                StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
+                    bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+                })),
                 f => f,
             };
             TestDescAndFn {
@@ -1395,22 +1374,22 @@ pub fn run_test(
     test: TestDescAndFn,
     monitor_ch: Sender<MonitorMsg>,
 ) {
-
     let TestDescAndFn { desc, testfn } = test;
 
-    let ignore_because_panic_abort = cfg!(target_arch = "wasm32") &&
-        !cfg!(target_os = "emscripten") &&
-        desc.should_panic != ShouldPanic::No;
+    let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
+        && desc.should_panic != ShouldPanic::No;
 
     if force_ignore || desc.ignore || ignore_because_panic_abort {
         monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
         return;
     }
 
-    fn run_test_inner(desc: TestDesc,
-                      monitor_ch: Sender<MonitorMsg>,
-                      nocapture: bool,
-                      testfn: Box<FnBox() + Send>) {
+    fn run_test_inner(
+        desc: TestDesc,
+        monitor_ch: Sender<MonitorMsg>,
+        nocapture: bool,
+        testfn: Box<FnBox() + Send>,
+    ) {
         // Buffer for capturing standard I/O
         let data = Arc::new(Mutex::new(Vec::new()));
         let data2 = data.clone();
@@ -1440,7 +1419,6 @@ pub fn run_test(
                 .unwrap();
         };
 
-
         // If the platform is single-threaded we're just going to run
         // the test synchronously, regardless of the concurrency
         // level.
@@ -1455,27 +1433,25 @@ pub fn run_test(
 
     match testfn {
         DynBenchFn(bencher) => {
-            ::bench::benchmark(desc,
-                                monitor_ch,
-                                opts.nocapture,
-                                |harness| bencher.run(harness));
+            ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+                bencher.run(harness)
+            });
         }
         StaticBenchFn(benchfn) => {
-            ::bench::benchmark(desc,
-                                monitor_ch,
-                                opts.nocapture,
-                                |harness| (benchfn.clone())(harness));
+            ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+                (benchfn.clone())(harness)
+            });
         }
         DynTestFn(f) => {
-            let cb = move || {
-                __rust_begin_short_backtrace(f)
-            };
+            let cb = move || __rust_begin_short_backtrace(f);
             run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
         }
-        StaticTestFn(f) => {
-            run_test_inner(desc, monitor_ch, opts.nocapture,
-                           Box::new(move || __rust_begin_short_backtrace(f)))
-        }
+        StaticTestFn(f) => run_test_inner(
+            desc,
+            monitor_ch,
+            opts.nocapture,
+            Box::new(move || __rust_begin_short_backtrace(f)),
+        ),
     }
 }
 
@@ -1487,8 +1463,7 @@ fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
 
 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
     match (&desc.should_panic, task_result) {
-        (&ShouldPanic::No, Ok(())) |
-        (&ShouldPanic::Yes, Err(_)) => TrOk,
+        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
         (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
             if err.downcast_ref::<String>()
                 .map(|e| &**e)
@@ -1545,7 +1520,6 @@ impl MetricMap {
     }
 }
 
-
 // Benchmarking
 
 /// A function that is opaque to the optimizer, to allow benchmarks to
@@ -1566,7 +1540,6 @@ pub fn black_box<T>(dummy: T) -> T {
     dummy
 }
 
-
 impl Bencher {
     /// Callback for benchmark functions to run in their body.
     pub fn iter<T, F>(&mut self, mut inner: F)
@@ -1605,7 +1578,6 @@ where
     return ns_from_dur(start.elapsed());
 }
 
-
 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
 where
     F: FnMut() -> T,
@@ -1649,8 +1621,8 @@ where
 
         // If we've run for 100ms and seem to have converged to a
         // stable median.
-        if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
-            summ.median - summ5.median < summ5.median_abs_dev
+        if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
+            && summ.median - summ5.median < summ5.median_abs_dev
         {
             return summ5;
         }
@@ -1680,7 +1652,7 @@ pub mod bench {
     use std::io;
     use std::sync::{Arc, Mutex};
     use stats;
-    use super::{Bencher, BenchSamples, BenchMode, Sink, MonitorMsg, TestDesc, Sender, TestResult};
+    use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
 
     pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
     where
@@ -1711,7 +1683,8 @@ pub mod bench {
             io::set_panic(panicio);
         };
 
-        let test_result = match result { //bs.bench(f) {
+        let test_result = match result {
+            //bs.bench(f) {
             Ok(Some(ns_iter_summ)) => {
                 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
                 let mb_s = bs.bytes * 1000 / ns_iter;
@@ -1732,9 +1705,7 @@ pub mod bench {
                 };
                 TestResult::TrBench(bs)
             }
-            Err(_) => {
-                TestResult::TrFailed
-            }
+            Err(_) => TestResult::TrFailed,
         };
 
         let stdout = data.lock().unwrap().to_vec();
@@ -1756,9 +1727,9 @@ pub mod bench {
 
 #[cfg(test)]
 mod tests {
-    use test::{TrFailed, TrFailedMsg, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc,
-               TestDescAndFn, TestOpts, run_test, MetricMap, StaticTestName, DynTestName,
-               DynTestFn, ShouldPanic};
+    use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
+               StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
+               TrIgnored, TrOk};
     use std::sync::mpsc::channel;
     use bench;
     use Bencher;
@@ -1904,25 +1875,26 @@ mod tests {
         opts.run_tests = true;
         opts.run_ignored = true;
 
-        let tests =
-            vec![TestDescAndFn {
-                             desc: TestDesc {
-                                 name: StaticTestName("1"),
-                                 ignore: true,
-                                 should_panic: ShouldPanic::No,
-                                 allow_fail: false,
-                             },
-                             testfn: DynTestFn(Box::new(move || {})),
-                         },
-                         TestDescAndFn {
-                             desc: TestDesc {
-                                 name: StaticTestName("2"),
-                                 ignore: false,
-                                 should_panic: ShouldPanic::No,
-                                 allow_fail: false,
-                             },
-                             testfn: DynTestFn(Box::new(move || {})),
-                         }];
+        let tests = vec![
+            TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName("1"),
+                    ignore: true,
+                    should_panic: ShouldPanic::No,
+                    allow_fail: false,
+                },
+                testfn: DynTestFn(Box::new(move || {})),
+            },
+            TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName("2"),
+                    ignore: false,
+                    should_panic: ShouldPanic::No,
+                    allow_fail: false,
+                },
+                testfn: DynTestFn(Box::new(move || {})),
+            },
+        ];
         let filtered = filter_tests(&opts, tests);
 
         assert_eq!(filtered.len(), 1);
@@ -1935,17 +1907,16 @@ mod tests {
         fn tests() -> Vec<TestDescAndFn> {
             vec!["base", "base::test", "base::test1", "base::test2"]
                 .into_iter()
-                .map(|name| {
-                    TestDescAndFn {
-                        desc: TestDesc {
-                            name: StaticTestName(name),
-                            ignore: false,
-                            should_panic: ShouldPanic::No,
-                            allow_fail: false,
-                        },
-                        testfn: DynTestFn(Box::new(move || {}))
-                    }
-                }).collect()
+                .map(|name| TestDescAndFn {
+                    desc: TestDesc {
+                        name: StaticTestName(name),
+                        ignore: false,
+                        should_panic: ShouldPanic::No,
+                        allow_fail: false,
+                    },
+                    testfn: DynTestFn(Box::new(move || {})),
+                })
+                .collect()
         }
 
         let substr = filter_tests(
@@ -2127,10 +2098,7 @@ mod tests {
             allow_fail: false,
         };
 
-        ::bench::benchmark(desc,
-                            tx,
-                            true,
-                            f);
+        ::bench::benchmark(desc, tx, true, f);
         rx.recv().unwrap();
     }
 
@@ -2149,10 +2117,7 @@ mod tests {
             allow_fail: false,
         };
 
-        ::bench::benchmark(desc,
-                            tx,
-                            true,
-                            f);
+        ::bench::benchmark(desc, tx, true, f);
         rx.recv().unwrap();
     }
 }
diff --git a/src/libtest/stats.rs b/src/libtest/stats.rs
index e22fdf77fc1..ddb5dcf2a1c 100644
--- a/src/libtest/stats.rs
+++ b/src/libtest/stats.rs
@@ -279,7 +279,6 @@ impl Stats for [f64] {
     }
 }
 
-
 // Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using
 // linear interpolation. If samples are not sorted, return nonsensical value.
 fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
@@ -304,7 +303,6 @@ fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
     lo + (hi - lo) * d
 }
 
-
 /// Winsorize a set of samples, replacing values above the `100-pct` percentile
 /// and below the `pct` percentile with those percentiles themselves. This is a
 /// way of minimizing the effect of outliers, at the cost of biasing the sample.
@@ -338,15 +336,18 @@ mod tests {
     use std::io;
 
     macro_rules! assert_approx_eq {
-        ($a:expr, $b:expr) => ({
+        ($a: expr, $b: expr) => {{
             let (a, b) = (&$a, &$b);
-            assert!((*a - *b).abs() < 1.0e-6,
-                    "{} is not approximately equal to {}", *a, *b);
-        })
+            assert!(
+                (*a - *b).abs() < 1.0e-6,
+                "{} is not approximately equal to {}",
+                *a,
+                *b
+            );
+        }};
     }
 
     fn check(samples: &[f64], summ: &Summary) {
-
         let summ2 = Summary::new(samples);
 
         let mut w = io::sink();
@@ -911,14 +912,18 @@ mod bench {
 
     #[bench]
     pub fn sum_three_items(b: &mut Bencher) {
-        b.iter(|| { [1e20f64, 1.5f64, -1e20f64].sum(); })
+        b.iter(|| {
+            [1e20f64, 1.5f64, -1e20f64].sum();
+        })
     }
     #[bench]
     pub fn sum_many_f64(b: &mut Bencher) {
         let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
         let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();
 
-        b.iter(|| { v.sum(); })
+        b.iter(|| {
+            v.sum();
+        })
     }
 
     #[bench]