Diffstat (limited to 'library/test/src'):

 library/test/src/bench.rs               | 238 +
 library/test/src/cli.rs                 | 431 +
 library/test/src/console.rs             | 293 +
 library/test/src/event.rs               |  36 +
 library/test/src/formatters/json.rs     | 246 +
 library/test/src/formatters/mod.rs      |  40 +
 library/test/src/formatters/pretty.rs   | 281 +
 library/test/src/formatters/terse.rs    | 258 +
 library/test/src/helpers/concurrency.rs | 117 +
 library/test/src/helpers/exit_code.rs   |  20 +
 library/test/src/helpers/isatty.rs      |  32 +
 library/test/src/helpers/metrics.rs     |  50 +
 library/test/src/helpers/mod.rs         |   8 +
 library/test/src/helpers/sink.rs        |  24 +
 library/test/src/lib.rs                 | 644 +
 library/test/src/options.rs             |  87 +
 library/test/src/stats.rs               | 319 +
 library/test/src/stats/tests.rs         | 591 +
 library/test/src/test_result.rs         | 115 +
 library/test/src/tests.rs               | 688 +
 library/test/src/time.rs                | 193 +
 library/test/src/types.rs               | 145 +
 22 files changed, 4856 insertions(+), 0 deletions(-)
diff --git a/library/test/src/bench.rs b/library/test/src/bench.rs
new file mode 100644
index 00000000000..e92e5b9829e
--- /dev/null
+++ b/library/test/src/bench.rs
@@ -0,0 +1,238 @@
+//! Benchmarking module.
+pub use std::hint::black_box;
+
+use super::{
+    event::CompletedTest, helpers::sink::Sink, options::BenchMode, test_result::TestResult,
+    types::TestDesc, Sender,
+};
+
+use crate::stats;
+use std::cmp;
+use std::io;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::{Arc, Mutex};
+use std::time::{Duration, Instant};
+
+/// Manager of the benchmarking runs.
+///
+/// This is fed into functions marked with `#[bench]` to allow for
+/// set-up & tear-down before running a piece of code repeatedly via a
+/// call to `iter`.
+#[derive(Clone)]
+pub struct Bencher {
+    mode: BenchMode,
+    summary: Option<stats::Summary>,
+    pub bytes: u64,
+}
+
+impl Bencher {
+    /// Callback for benchmark functions to run in their body.
+    pub fn iter<T, F>(&mut self, mut inner: F)
+    where
+        F: FnMut() -> T,
+    {
+        if self.mode == BenchMode::Single {
+            ns_iter_inner(&mut inner, 1);
+            return;
+        }
+
+        self.summary = Some(iter(&mut inner));
+    }
+
+    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
+    where
+        F: FnMut(&mut Bencher),
+    {
+        f(self);
+        self.summary
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct BenchSamples {
+    pub ns_iter_summ: stats::Summary,
+    pub mb_s: usize,
+}
+
+pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
+    use std::fmt::Write;
+    let mut output = String::new();
+
+    let median = bs.ns_iter_summ.median as usize;
+    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
+
+    output
+        .write_fmt(format_args!(
+            "{:>11} ns/iter (+/- {})",
+            fmt_thousands_sep(median, ','),
+            fmt_thousands_sep(deviation, ',')
+        ))
+        .unwrap();
+    if bs.mb_s != 0 {
+        output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
+    }
+    output
+}
+
+// Format a number with thousands separators
+fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
+    use std::fmt::Write;
+    let mut output = String::new();
+    let mut trailing = false;
+    for &pow in &[9, 6, 3, 0] {
+        let base = 10_usize.pow(pow);
+        if pow == 0 || trailing || n / base != 0 {
+            if !trailing {
+                output.write_fmt(format_args!("{}", n / base)).unwrap();
+            } else {
+                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
+            }
+            if pow != 0 {
+                output.push(sep);
+            }
+            trailing = true;
+        }
+        n %= base;
+    }
+
+    output
+}
+
+fn ns_from_dur(dur: Duration) -> u64 {
+    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
+}
+
+fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
+where
+    F: FnMut() -> T,
+{
+    let start = Instant::now();
+    for _ in 0..k {
+        black_box(inner());
+    }
+    ns_from_dur(start.elapsed())
+}
+
+pub fn iter<T, F>(inner: &mut F) -> stats::Summary
+where
+    F: FnMut() -> T,
+{
+    // Initial bench run to get ballpark figure.
+    let ns_single = ns_iter_inner(inner, 1);
+
+    // Try to estimate the iteration count needed for 1ms, falling back
+    // to 1 million iterations if the first run took < 1ns.
+    let ns_target_total = 1_000_000; // 1ms
+    let mut n = ns_target_total / cmp::max(1, ns_single);
+
+    // If the first run took more than 1ms, we don't want to just
+    // be left doing 0 iterations on every loop. The unfortunate
+    // side effect of not being able to do as many runs is
+    // automatically handled by the statistical analysis below
+    // (i.e., larger error bars).
+    n = cmp::max(1, n);
+
+    let mut total_run = Duration::new(0, 0);
+    let samples: &mut [f64] = &mut [0.0_f64; 50];
+    loop {
+        let loop_start = Instant::now();
+
+        for p in &mut *samples {
+            *p = ns_iter_inner(inner, n) as f64 / n as f64;
+        }
+
+        stats::winsorize(samples, 5.0);
+        let summ = stats::Summary::new(samples);
+
+        for p in &mut *samples {
+            let ns = ns_iter_inner(inner, 5 * n);
+            *p = ns as f64 / (5 * n) as f64;
+        }
+
+        stats::winsorize(samples, 5.0);
+        let summ5 = stats::Summary::new(samples);
+
+        let loop_run = loop_start.elapsed();
+
+        // If we've run for 100ms and seem to have converged to a
+        // stable median.
+        if loop_run > Duration::from_millis(100)
+            && summ.median_abs_dev_pct < 1.0
+            && summ.median - summ5.median < summ5.median_abs_dev
+        {
+            return summ5;
+        }
+
+        total_run = total_run + loop_run;
+        // Longest we ever run for is 3s.
+        if total_run > Duration::from_secs(3) {
+            return summ5;
+        }
+
+        // If we overflow here just return the results so far. We check a
+        // multiplier of 10 because we're about to multiply by 2 and the
+        // next iteration of the loop will also multiply by 5 (to calculate
+        // the summ5 result)
+        n = match n.checked_mul(10) {
+            Some(_) => n * 2,
+            None => {
+                return summ5;
+            }
+        };
+    }
+}
+
+pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<CompletedTest>, nocapture: bool, f: F)
+where
+    F: FnMut(&mut Bencher),
+{
+    let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 };
+
+    let data = Arc::new(Mutex::new(Vec::new()));
+    let oldio = if !nocapture {
+        Some((
+            io::set_print(Some(Sink::new_boxed(&data))),
+            io::set_panic(Some(Sink::new_boxed(&data))),
+        ))
+    } else {
+        None
+    };
+
+    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
+
+    if let Some((printio, panicio)) = oldio {
+        io::set_print(printio);
+        io::set_panic(panicio);
+    }
+
+    let test_result = match result {
+        Ok(Some(ns_iter_summ)) => {
+            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
+            let mb_s = bs.bytes * 1000 / ns_iter;
+
+            let bs = BenchSamples { ns_iter_summ, mb_s: mb_s as usize };
+            TestResult::TrBench(bs)
+        }
+        Ok(None) => {
+            // iter not called, so no data.
+            // FIXME: error in this case?
+            let samples: &mut [f64] = &mut [0.0_f64; 1];
+            let bs = BenchSamples { ns_iter_summ: stats::Summary::new(samples), mb_s: 0 };
+            TestResult::TrBench(bs)
+        }
+        Err(_) => TestResult::TrFailed,
+    };
+
+    let stdout = data.lock().unwrap().to_vec();
+    let message = CompletedTest::new(desc, test_result, None, stdout);
+    monitor_ch.send(message).unwrap();
+}
+
+pub fn run_once<F>(f: F)
+where
+    F: FnMut(&mut Bencher),
+{
+    let mut bs = Bencher { mode: BenchMode::Single, summary: None, bytes: 0 };
+    bs.bench(f);
+}
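
For reference, a minimal sketch of how a benchmark function drives the `Bencher` above. This is illustrative only: it assumes a nightly toolchain with `#![feature(test)]`, and `checksum` is a hypothetical function under test, not part of this diff.

#![feature(test)] // nightly-only; the test crate is unstable
extern crate test;

use test::Bencher;

// Hypothetical function under test.
fn checksum(data: &[u8]) -> u64 {
    data.iter().map(|&b| b as u64).sum()
}

#[bench]
fn bench_checksum(b: &mut Bencher) {
    let data = vec![0xAB_u8; 1024];
    // Setting `bytes` makes `benchmark` report throughput:
    // mb_s = bytes * 1000 / median ns/iter.
    b.bytes = data.len() as u64;
    b.iter(|| checksum(&data));
}

With `bytes` set, `fmt_bench_samples` renders something like `1,024 ns/iter (+/- 210) = 1000 MB/s`; the thousands separator comes from `fmt_thousands_sep`.
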
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
new file mode 100644
index 00000000000..97a659f22d7
--- /dev/null
+++ b/library/test/src/cli.rs
@@ -0,0 +1,431 @@
+//! Module converting command-line arguments into test configuration.
+
+use std::env;
+use std::path::PathBuf;
+
+use super::helpers::isatty;
+use super::options::{ColorConfig, Options, OutputFormat, RunIgnored};
+use super::time::TestTimeOptions;
+
+#[derive(Debug)]
+pub struct TestOpts {
+    pub list: bool,
+    pub filter: Option<String>,
+    pub filter_exact: bool,
+    pub force_run_in_process: bool,
+    pub exclude_should_panic: bool,
+    pub run_ignored: RunIgnored,
+    pub run_tests: bool,
+    pub bench_benchmarks: bool,
+    pub logfile: Option<PathBuf>,
+    pub nocapture: bool,
+    pub color: ColorConfig,
+    pub format: OutputFormat,
+    pub test_threads: Option<usize>,
+    pub skip: Vec<String>,
+    pub time_options: Option<TestTimeOptions>,
+    pub options: Options,
+}
+
+impl TestOpts {
+    pub fn use_color(&self) -> bool {
+        match self.color {
+            ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
+            ColorConfig::AlwaysColor => true,
+            ColorConfig::NeverColor => false,
+        }
+    }
+}
+
+/// Result of parsing the options.
+pub type OptRes = Result<TestOpts, String>;
+/// Result of parsing one part of the options.
+type OptPartRes<T> = Result<T, String>;
+
+fn optgroups() -> getopts::Options {
+    let mut opts = getopts::Options::new();
+    opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
+        .optflag("", "ignored", "Run only ignored tests")
+        .optflag("", "force-run-in-process", "Forces tests to run in-process when panic=abort")
+        .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
+        .optflag("", "test", "Run tests and not benchmarks")
+        .optflag("", "bench", "Run benchmarks instead of tests")
+        .optflag("", "list", "List all tests and benchmarks")
+        .optflag("h", "help", "Display this message (longer with --help)")
+        .optopt(
+            "",
+            "logfile",
+            "Write logs to the specified file instead \
+             of stdout",
+            "PATH",
+        )
+        .optflag(
+            "",
+            "nocapture",
+            "don't capture stdout/stderr of each \
+             task, allow printing directly",
+        )
+        .optopt(
+            "",
+            "test-threads",
+            "Number of threads used for running tests \
+             in parallel",
+            "n_threads",
+        )
+        .optmulti(
+            "",
+            "skip",
+            "Skip tests whose names contain FILTER (this flag can \
+             be used multiple times)",
+            "FILTER",
+        )
+        .optflag(
+            "q",
+            "quiet",
+            "Display one character per test instead of one line. \
+             Alias to --format=terse",
+        )
+        .optflag("", "exact", "Exactly match filters rather than by substring")
+        .optopt(
+            "",
+            "color",
+            "Configure coloring of output:
+            auto   = colorize if stdout is a tty and tests are run serially (default);
+            always = always colorize output;
+            never  = never colorize output;",
+            "auto|always|never",
+        )
+        .optopt(
+            "",
+            "format",
+            "Configure formatting of output:
+            pretty = Print verbose output;
+            terse  = Display one character per test;
+            json   = Output a json document",
+            "pretty|terse|json",
+        )
+        .optflag("", "show-output", "Show captured stdout of successful tests")
+        .optopt(
+            "Z",
+            "",
+            "Enable nightly-only flags:
+            unstable-options = Allow use of experimental features",
+            "unstable-options",
+        )
+        .optflagopt(
+            "",
+            "report-time",
+            "Show execution time of each test. Available values:
+            plain   = do not colorize the execution time (default);
+            colored = colorize output according to the `color` parameter value;
+
+            Threshold values for colorized output can be configured via
+            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
+            `RUST_TEST_TIME_DOCTEST` environment variables.
+
+            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
+            Durations must be specified in milliseconds, e.g. `500,2000` means that the warn time
+            is 0.5 seconds, and the critical time is 2 seconds.
+
+            Not available for --format=terse",
+            "plain|colored",
+        )
+        .optflag(
+            "",
+            "ensure-time",
+            "Treat excess of the test execution time limit as error.
+
+            Threshold values for this option can be configured via
+            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
+            `RUST_TEST_TIME_DOCTEST` environment variables.
+
+            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
+
+            `CRITICAL_TIME` here means the limit that should not be exceeded by test.
+            ",
+        );
+    opts
+}
+
+fn usage(binary: &str, options: &getopts::Options) {
+    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
+    println!(
+        r#"{usage}
+
+The FILTER string is tested against the name of all tests, and only those
+tests whose names contain the filter are run.
+
+By default, all tests are run in parallel. This can be altered with the
+--test-threads flag or the RUST_TEST_THREADS environment variable when running
+tests (set it to 1).
+
+All tests have their standard output and standard error captured by default.
+This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
+environment variable to a value other than "0". Logging is not captured by default.
+
+Test Attributes:
+
+    `#[test]`        - Indicates a function is a test to be run. This function
+                       takes no arguments.
+    `#[bench]`       - Indicates a function is a benchmark to be run. This
+                       function takes one argument (test::Bencher).
+    `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
+                        the code causes a panic (an assertion failure or panic!).
+                        A message may be provided, which the failure string must
+                        contain: #[should_panic(expected = "foo")].
+    `#[ignore]`       - When applied to a function which is already attributed as a
+                        test, then the test runner will ignore these tests during
+                        normal test runs. Running with --ignored or --include-ignored will run
+                        these tests."#,
+        usage = options.usage(&message)
+    );
+}
+
+/// Parses command line arguments into test options.
+/// Returns `None` if help was requested (since we only show help message and don't run tests),
+/// returns `Some(Err(..))` if provided arguments are incorrect,
+/// otherwise creates a `TestOpts` object and returns it.
+pub fn parse_opts(args: &[String]) -> Option<OptRes> {
+    // Parse matches.
+    let opts = optgroups();
+    // Grab the binary name before it is stripped from the argument list,
+    // so that `--help` output shows the right usage string.
+    let binary = args.get(0).map(|s| s.as_str()).unwrap_or("...");
+    let args = args.get(1..).unwrap_or(args);
+    let matches = match opts.parse(args) {
+        Ok(m) => m,
+        Err(f) => return Some(Err(f.to_string())),
+    };
+
+    // Check if help was requested.
+    if matches.opt_present("h") {
+        // Show help and do nothing more.
+        usage(binary, &opts);
+        return None;
+    }
+
+    // Actually parse the opts.
+    let opts_result = parse_opts_impl(matches);
+
+    Some(opts_result)
+}
+
+// Gets the option value and checks if unstable features are enabled.
+macro_rules! unstable_optflag {
+    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
+        let opt = $matches.opt_present($option_name);
+        if !$allow_unstable && opt {
+            return Err(format!(
+                "The \"{}\" flag is only accepted on the nightly compiler with -Z unstable-options",
+                $option_name
+            ));
+        }
+
+        opt
+    }};
+}
+
+// Implementation of `parse_opts` that doesn't care about help message
+// and returns a `Result`.
+fn parse_opts_impl(matches: getopts::Matches) -> OptRes {
+    let allow_unstable = get_allow_unstable(&matches)?;
+
+    // Unstable flags
+    let force_run_in_process = unstable_optflag!(matches, allow_unstable, "force-run-in-process");
+    let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");
+    let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");
+    let time_options = get_time_options(&matches, allow_unstable)?;
+
+    let quiet = matches.opt_present("quiet");
+    let exact = matches.opt_present("exact");
+    let list = matches.opt_present("list");
+    let skip = matches.opt_strs("skip");
+
+    let bench_benchmarks = matches.opt_present("bench");
+    let run_tests = !bench_benchmarks || matches.opt_present("test");
+
+    let logfile = get_log_file(&matches)?;
+    let run_ignored = get_run_ignored(&matches, include_ignored)?;
+    let filter = get_filter(&matches)?;
+    let nocapture = get_nocapture(&matches)?;
+    let test_threads = get_test_threads(&matches)?;
+    let color = get_color_config(&matches)?;
+    let format = get_format(&matches, quiet, allow_unstable)?;
+
+    let options = Options::new().display_output(matches.opt_present("show-output"));
+
+    let test_opts = TestOpts {
+        list,
+        filter,
+        filter_exact: exact,
+        force_run_in_process,
+        exclude_should_panic,
+        run_ignored,
+        run_tests,
+        bench_benchmarks,
+        logfile,
+        nocapture,
+        color,
+        format,
+        test_threads,
+        skip,
+        time_options,
+        options,
+    };
+
+    Ok(test_opts)
+}
+
+// FIXME: Copied from librustc_ast until linkage errors are resolved. Issue #47566
+fn is_nightly() -> bool {
+    // Whether this is a feature-staged build, i.e., on the beta or stable channel
+    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
+    // Whether we should enable unstable features for bootstrapping
+    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
+
+    bootstrap || !disable_unstable_features
+}
+
+// Gets the CLI options associated with `report-time` feature.
+fn get_time_options(
+    matches: &getopts::Matches,
+    allow_unstable: bool,
+) -> OptPartRes<Option<TestTimeOptions>> {
+    let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
+    let colored_opt_str = matches.opt_str("report-time");
+    let report_time_colored = report_time && colored_opt_str == Some("colored".into());
+    let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");
+
+    // If the `ensure-time` option is provided, time output is enforced,
+    // so the user won't be confused if any of the tests silently fail.
+    let options = if report_time || ensure_test_time {
+        Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
+    } else {
+        None
+    };
+
+    Ok(options)
+}
+
+fn get_test_threads(matches: &getopts::Matches) -> OptPartRes<Option<usize>> {
+    let test_threads = match matches.opt_str("test-threads") {
+        Some(n_str) => match n_str.parse::<usize>() {
+            Ok(0) => return Err("argument for --test-threads must not be 0".to_string()),
+            Ok(n) => Some(n),
+            Err(e) => {
+                return Err(format!(
+                    "argument for --test-threads must be a number > 0 \
+                     (error: {})",
+                    e
+                ));
+            }
+        },
+        None => None,
+    };
+
+    Ok(test_threads)
+}
+
+fn get_format(
+    matches: &getopts::Matches,
+    quiet: bool,
+    allow_unstable: bool,
+) -> OptPartRes<OutputFormat> {
+    let format = match matches.opt_str("format").as_deref() {
+        None if quiet => OutputFormat::Terse,
+        Some("pretty") | None => OutputFormat::Pretty,
+        Some("terse") => OutputFormat::Terse,
+        Some("json") => {
+            if !allow_unstable {
+                return Err("The \"json\" format is only accepted on the nightly compiler".into());
+            }
+            OutputFormat::Json
+        }
+
+        Some(v) => {
+            return Err(format!(
+                "argument for --format must be pretty, terse, or json (was \
+                 {})",
+                v
+            ));
+        }
+    };
+
+    Ok(format)
+}
+
+fn get_color_config(matches: &getopts::Matches) -> OptPartRes<ColorConfig> {
+    let color = match matches.opt_str("color").as_deref() {
+        Some("auto") | None => ColorConfig::AutoColor,
+        Some("always") => ColorConfig::AlwaysColor,
+        Some("never") => ColorConfig::NeverColor,
+
+        Some(v) => {
+            return Err(format!(
+                "argument for --color must be auto, always, or never (was \
+                 {})",
+                v
+            ));
+        }
+    };
+
+    Ok(color)
+}
+
+fn get_nocapture(matches: &getopts::Matches) -> OptPartRes<bool> {
+    let mut nocapture = matches.opt_present("nocapture");
+    if !nocapture {
+        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
+            Ok(val) => &val != "0",
+            Err(_) => false,
+        };
+    }
+
+    Ok(nocapture)
+}
+
+fn get_run_ignored(matches: &getopts::Matches, include_ignored: bool) -> OptPartRes<RunIgnored> {
+    let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
+        (true, true) => {
+            return Err("the options --include-ignored and --ignored are mutually exclusive".into());
+        }
+        (true, false) => RunIgnored::Yes,
+        (false, true) => RunIgnored::Only,
+        (false, false) => RunIgnored::No,
+    };
+
+    Ok(run_ignored)
+}
+
+fn get_filter(matches: &getopts::Matches) -> OptPartRes<Option<String>> {
+    let filter = if !matches.free.is_empty() { Some(matches.free[0].clone()) } else { None };
+
+    Ok(filter)
+}
+
+fn get_allow_unstable(matches: &getopts::Matches) -> OptPartRes<bool> {
+    let mut allow_unstable = false;
+
+    if let Some(opt) = matches.opt_str("Z") {
+        if !is_nightly() {
+            return Err("the option `Z` is only accepted on the nightly compiler".into());
+        }
+
+        match &*opt {
+            "unstable-options" => {
+                allow_unstable = true;
+            }
+            _ => {
+                return Err("Unrecognized option to `Z`".into());
+            }
+        }
+    };
+
+    Ok(allow_unstable)
+}
+
+fn get_log_file(matches: &getopts::Matches) -> OptPartRes<Option<PathBuf>> {
+    let logfile = matches.opt_str("logfile").map(|s| PathBuf::from(&s));
+
+    Ok(logfile)
+}
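
A sketch of driving `parse_opts` above from a custom harness. Illustrative only: it assumes this module's items are in scope, and the argument strings are invented.

fn main() {
    // The first element is the binary name; parse_opts strips it.
    let args: Vec<String> =
        ["mybin", "--test-threads", "4", "--color", "never", "substring_filter"]
            .iter()
            .map(|s| s.to_string())
            .collect();

    match parse_opts(&args) {
        None => println!("--help was requested; usage has already been printed"),
        Some(Err(msg)) => eprintln!("invalid arguments: {}", msg),
        Some(Ok(opts)) => {
            // Here opts.test_threads == Some(4), opts.color is
            // ColorConfig::NeverColor, and the free argument became
            // opts.filter == Some("substring_filter".to_string()).
            println!("threads: {:?}, filter: {:?}", opts.test_threads, opts.filter);
        }
    }
}
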
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
new file mode 100644
index 00000000000..ff741e3bd53
--- /dev/null
+++ b/library/test/src/console.rs
@@ -0,0 +1,293 @@
+//! Module providing the interface for running tests in the console.
+
+use std::fs::File;
+use std::io;
+use std::io::prelude::Write;
+
+use super::{
+    bench::fmt_bench_samples,
+    cli::TestOpts,
+    event::{CompletedTest, TestEvent},
+    filter_tests,
+    formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter},
+    helpers::{concurrency::get_concurrency, metrics::MetricMap},
+    options::{Options, OutputFormat},
+    run_tests,
+    test_result::TestResult,
+    time::TestExecTime,
+    types::{NamePadding, TestDesc, TestDescAndFn},
+};
+
+/// Generic wrapper over stdout.
+pub enum OutputLocation<T> {
+    Pretty(Box<term::StdoutTerminal>),
+    Raw(T),
+}
+
+impl<T: Write> Write for OutputLocation<T> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        match *self {
+            OutputLocation::Pretty(ref mut term) => term.write(buf),
+            OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
+        }
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        match *self {
+            OutputLocation::Pretty(ref mut term) => term.flush(),
+            OutputLocation::Raw(ref mut stdout) => stdout.flush(),
+        }
+    }
+}
+
+pub struct ConsoleTestState {
+    pub log_out: Option<File>,
+    pub total: usize,
+    pub passed: usize,
+    pub failed: usize,
+    pub ignored: usize,
+    pub allowed_fail: usize,
+    pub filtered_out: usize,
+    pub measured: usize,
+    pub metrics: MetricMap,
+    pub failures: Vec<(TestDesc, Vec<u8>)>,
+    pub not_failures: Vec<(TestDesc, Vec<u8>)>,
+    pub time_failures: Vec<(TestDesc, Vec<u8>)>,
+    pub options: Options,
+}
+
+impl ConsoleTestState {
+    pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
+        let log_out = match opts.logfile {
+            Some(ref path) => Some(File::create(path)?),
+            None => None,
+        };
+
+        Ok(ConsoleTestState {
+            log_out,
+            total: 0,
+            passed: 0,
+            failed: 0,
+            ignored: 0,
+            allowed_fail: 0,
+            filtered_out: 0,
+            measured: 0,
+            metrics: MetricMap::new(),
+            failures: Vec::new(),
+            not_failures: Vec::new(),
+            time_failures: Vec::new(),
+            options: opts.options,
+        })
+    }
+
+    pub fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
+    where
+        S: AsRef<str>,
+        F: FnOnce() -> S,
+    {
+        match self.log_out {
+            None => Ok(()),
+            Some(ref mut o) => {
+                let msg = msg();
+                let msg = msg.as_ref();
+                o.write_all(msg.as_bytes())
+            }
+        }
+    }
+
+    pub fn write_log_result(
+        &mut self,
+        test: &TestDesc,
+        result: &TestResult,
+        exec_time: Option<&TestExecTime>,
+    ) -> io::Result<()> {
+        self.write_log(|| {
+            format!(
+                "{} {}",
+                match *result {
+                    TestResult::TrOk => "ok".to_owned(),
+                    TestResult::TrFailed => "failed".to_owned(),
+                    TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg),
+                    TestResult::TrIgnored => "ignored".to_owned(),
+                    TestResult::TrAllowedFail => "failed (allowed)".to_owned(),
+                    TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
+                    TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(),
+                },
+                test.name,
+            )
+        })?;
+        if let Some(exec_time) = exec_time {
+            self.write_log(|| format!(" <{}>", exec_time))?;
+        }
+        self.write_log(|| "\n")
+    }
+
+    fn current_test_count(&self) -> usize {
+        self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
+    }
+}
+
+// Lists the tests to the console, and optionally to a logfile. Filters are honored.
+pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
+    let mut output = match term::stdout() {
+        None => OutputLocation::Raw(io::stdout()),
+        Some(t) => OutputLocation::Pretty(t),
+    };
+
+    let quiet = opts.format == OutputFormat::Terse;
+    let mut st = ConsoleTestState::new(opts)?;
+
+    let mut ntest = 0;
+    let mut nbench = 0;
+
+    for test in filter_tests(&opts, tests) {
+        use crate::TestFn::*;
+
+        let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
+
+        let fntype = match testfn {
+            StaticTestFn(..) | DynTestFn(..) => {
+                ntest += 1;
+                "test"
+            }
+            StaticBenchFn(..) | DynBenchFn(..) => {
+                nbench += 1;
+                "benchmark"
+            }
+        };
+
+        writeln!(output, "{}: {}", name, fntype)?;
+        st.write_log(|| format!("{} {}\n", fntype, name))?;
+    }
+
+    fn plural(count: u32, s: &str) -> String {
+        match count {
+            1 => format!("{} {}", 1, s),
+            n => format!("{} {}s", n, s),
+        }
+    }
+
+    if !quiet {
+        if ntest != 0 || nbench != 0 {
+            writeln!(output)?;
+        }
+
+        writeln!(output, "{}, {}", plural(ntest, "test"), plural(nbench, "benchmark"))?;
+    }
+
+    Ok(())
+}
+
+// Updates `ConsoleTestState` depending on result of the test execution.
+fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) {
+    let test = completed_test.desc;
+    let stdout = completed_test.stdout;
+    match completed_test.result {
+        TestResult::TrOk => {
+            st.passed += 1;
+            st.not_failures.push((test, stdout));
+        }
+        TestResult::TrIgnored => st.ignored += 1,
+        TestResult::TrAllowedFail => st.allowed_fail += 1,
+        TestResult::TrBench(bs) => {
+            st.metrics.insert_metric(
+                test.name.as_slice(),
+                bs.ns_iter_summ.median,
+                bs.ns_iter_summ.max - bs.ns_iter_summ.min,
+            );
+            st.measured += 1
+        }
+        TestResult::TrFailed => {
+            st.failed += 1;
+            st.failures.push((test, stdout));
+        }
+        TestResult::TrFailedMsg(msg) => {
+            st.failed += 1;
+            let mut stdout = stdout;
+            stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
+            st.failures.push((test, stdout));
+        }
+        TestResult::TrTimedFail => {
+            st.failed += 1;
+            st.time_failures.push((test, stdout));
+        }
+    }
+}
+
+// Handler for events that occur during test execution.
+// It is provided as a callback to the `run_tests` function.
+fn on_test_event(
+    event: &TestEvent,
+    st: &mut ConsoleTestState,
+    out: &mut dyn OutputFormatter,
+) -> io::Result<()> {
+    match (*event).clone() {
+        TestEvent::TeFiltered(ref filtered_tests) => {
+            st.total = filtered_tests.len();
+            out.write_run_start(filtered_tests.len())?;
+        }
+        TestEvent::TeFilteredOut(filtered_out) => {
+            st.filtered_out = filtered_out;
+        }
+        TestEvent::TeWait(ref test) => out.write_test_start(test)?,
+        TestEvent::TeTimeout(ref test) => out.write_timeout(test)?,
+        TestEvent::TeResult(completed_test) => {
+            let test = &completed_test.desc;
+            let result = &completed_test.result;
+            let exec_time = &completed_test.exec_time;
+            let stdout = &completed_test.stdout;
+
+            st.write_log_result(test, result, exec_time.as_ref())?;
+            out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?;
+            handle_test_result(st, completed_test);
+        }
+    }
+
+    Ok(())
+}
+
+/// A simple console test runner.
+/// Runs the provided tests, reporting progress and results to stdout.
+pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
+    let output = match term::stdout() {
+        None => OutputLocation::Raw(io::stdout()),
+        Some(t) => OutputLocation::Pretty(t),
+    };
+
+    let max_name_len = tests
+        .iter()
+        .max_by_key(|t| len_if_padded(*t))
+        .map(|t| t.desc.name.as_slice().len())
+        .unwrap_or(0);
+
+    let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
+
+    let mut out: Box<dyn OutputFormatter> = match opts.format {
+        OutputFormat::Pretty => Box::new(PrettyFormatter::new(
+            output,
+            opts.use_color(),
+            max_name_len,
+            is_multithreaded,
+            opts.time_options,
+        )),
+        OutputFormat::Terse => {
+            Box::new(TerseFormatter::new(output, opts.use_color(), max_name_len, is_multithreaded))
+        }
+        OutputFormat::Json => Box::new(JsonFormatter::new(output)),
+    };
+    let mut st = ConsoleTestState::new(opts)?;
+
+    run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
+
+    assert!(st.current_test_count() == st.total);
+
+    out.write_run_finish(&st)
+}
+
+// Calculates the padding for the given test description.
+fn len_if_padded(t: &TestDescAndFn) -> usize {
+    match t.testfn.padding() {
+        NamePadding::PadNone => 0,
+        NamePadding::PadOnRight => t.desc.name.as_slice().len(),
+    }
+}
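
One design point in `ConsoleTestState::write_log` above is worth calling out: the message is passed as a closure, so formatting work is only done when a logfile is actually configured. A self-contained sketch of that pattern (`write_log_to` is a stand-in, generalized over any writer):

use std::io::{self, Write};

// Same shape as ConsoleTestState::write_log, but over any writer.
fn write_log_to<W, F, S>(log_out: &mut Option<W>, msg: F) -> io::Result<()>
where
    W: Write,
    S: AsRef<str>,
    F: FnOnce() -> S,
{
    match log_out {
        None => Ok(()), // closure never invoked: no formatting cost
        Some(out) => out.write_all(msg().as_ref().as_bytes()),
    }
}

fn main() -> io::Result<()> {
    let mut log: Option<Vec<u8>> = Some(Vec::new());
    write_log_to(&mut log, || format!("ok {}\n", "tests::parses"))?;
    assert_eq!(log.as_deref(), Some(&b"ok tests::parses\n"[..]));

    let mut no_log: Option<Vec<u8>> = None;
    write_log_to(&mut no_log, || String::from("never built"))?;
    Ok(())
}
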
diff --git a/library/test/src/event.rs b/library/test/src/event.rs
new file mode 100644
index 00000000000..297bb72aecb
--- /dev/null
+++ b/library/test/src/event.rs
@@ -0,0 +1,36 @@
+//! Module containing the different events that can occur
+//! during the test execution process.
+
+use super::test_result::TestResult;
+use super::time::TestExecTime;
+use super::types::TestDesc;
+
+#[derive(Debug, Clone)]
+pub struct CompletedTest {
+    pub desc: TestDesc,
+    pub result: TestResult,
+    pub exec_time: Option<TestExecTime>,
+    pub stdout: Vec<u8>,
+}
+
+impl CompletedTest {
+    pub fn new(
+        desc: TestDesc,
+        result: TestResult,
+        exec_time: Option<TestExecTime>,
+        stdout: Vec<u8>,
+    ) -> Self {
+        Self { desc, result, exec_time, stdout }
+    }
+}
+
+unsafe impl Send for CompletedTest {}
+
+#[derive(Debug, Clone)]
+pub enum TestEvent {
+    TeFiltered(Vec<TestDesc>),
+    TeWait(TestDesc),
+    TeResult(CompletedTest),
+    TeTimeout(TestDesc),
+    TeFilteredOut(usize),
+}
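
A `CompletedTest` travels from a worker thread back to the test runner over a channel (the `Sender` imported in bench.rs). A minimal, self-contained sketch of that handoff, with `Completed` as a simplified stand-in for `CompletedTest`:

use std::sync::mpsc;
use std::thread;

// Simplified stand-in for CompletedTest.
#[derive(Debug)]
struct Completed {
    name: &'static str,
    ok: bool,
}

fn main() {
    let (tx, rx) = mpsc::channel();
    for &name in &["tests::a", "tests::b"] {
        let tx = tx.clone();
        // Each worker sends exactly one completion message.
        thread::spawn(move || tx.send(Completed { name, ok: true }).unwrap());
    }
    drop(tx); // close our handle so the receive loop terminates
    for msg in rx {
        println!("{} -> ok: {}", msg.name, msg.ok);
    }
}
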
diff --git a/library/test/src/formatters/json.rs b/library/test/src/formatters/json.rs
new file mode 100644
index 00000000000..9ebc991d638
--- /dev/null
+++ b/library/test/src/formatters/json.rs
@@ -0,0 +1,246 @@
+use std::{borrow::Cow, io, io::prelude::Write};
+
+use super::OutputFormatter;
+use crate::{
+    console::{ConsoleTestState, OutputLocation},
+    test_result::TestResult,
+    time,
+    types::TestDesc,
+};
+
+pub(crate) struct JsonFormatter<T> {
+    out: OutputLocation<T>,
+}
+
+impl<T: Write> JsonFormatter<T> {
+    pub fn new(out: OutputLocation<T>) -> Self {
+        Self { out }
+    }
+
+    fn writeln_message(&mut self, s: &str) -> io::Result<()> {
+        assert!(!s.contains('\n'));
+
+        self.out.write_all(s.as_ref())?;
+        self.out.write_all(b"\n")
+    }
+
+    fn write_message(&mut self, s: &str) -> io::Result<()> {
+        assert!(!s.contains('\n'));
+
+        self.out.write_all(s.as_ref())
+    }
+
+    fn write_event(
+        &mut self,
+        ty: &str,
+        name: &str,
+        evt: &str,
+        exec_time: Option<&time::TestExecTime>,
+        stdout: Option<Cow<'_, str>>,
+        extra: Option<&str>,
+    ) -> io::Result<()> {
+        self.write_message(&*format!(
+            r#"{{ "type": "{}", "name": "{}", "event": "{}""#,
+            ty, name, evt
+        ))?;
+        if let Some(exec_time) = exec_time {
+            self.write_message(&*format!(r#", "exec_time": "{}""#, exec_time))?;
+        }
+        if let Some(stdout) = stdout {
+            self.write_message(&*format!(r#", "stdout": "{}""#, EscapedString(stdout)))?;
+        }
+        if let Some(extra) = extra {
+            self.write_message(&*format!(r#", {}"#, extra))?;
+        }
+        self.writeln_message(" }")
+    }
+}
+
+impl<T: Write> OutputFormatter for JsonFormatter<T> {
+    fn write_run_start(&mut self, test_count: usize) -> io::Result<()> {
+        self.writeln_message(&*format!(
+            r#"{{ "type": "suite", "event": "started", "test_count": {} }}"#,
+            test_count
+        ))
+    }
+
+    fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
+        self.writeln_message(&*format!(
+            r#"{{ "type": "test", "event": "started", "name": "{}" }}"#,
+            desc.name
+        ))
+    }
+
+    fn write_result(
+        &mut self,
+        desc: &TestDesc,
+        result: &TestResult,
+        exec_time: Option<&time::TestExecTime>,
+        stdout: &[u8],
+        state: &ConsoleTestState,
+    ) -> io::Result<()> {
+        let display_stdout = state.options.display_output || *result != TestResult::TrOk;
+        let stdout = if display_stdout && !stdout.is_empty() {
+            Some(String::from_utf8_lossy(stdout))
+        } else {
+            None
+        };
+        match *result {
+            TestResult::TrOk => {
+                self.write_event("test", desc.name.as_slice(), "ok", exec_time, stdout, None)
+            }
+
+            TestResult::TrFailed => {
+                self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None)
+            }
+
+            TestResult::TrTimedFail => self.write_event(
+                "test",
+                desc.name.as_slice(),
+                "failed",
+                exec_time,
+                stdout,
+                Some(r#""reason": "time limit exceeded""#),
+            ),
+
+            TestResult::TrFailedMsg(ref m) => self.write_event(
+                "test",
+                desc.name.as_slice(),
+                "failed",
+                exec_time,
+                stdout,
+                Some(&*format!(r#""message": "{}""#, EscapedString(m))),
+            ),
+
+            TestResult::TrIgnored => {
+                self.write_event("test", desc.name.as_slice(), "ignored", exec_time, stdout, None)
+            }
+
+            TestResult::TrAllowedFail => self.write_event(
+                "test",
+                desc.name.as_slice(),
+                "allowed_failure",
+                exec_time,
+                stdout,
+                None,
+            ),
+
+            TestResult::TrBench(ref bs) => {
+                let median = bs.ns_iter_summ.median as usize;
+                let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
+
+                let mbps = if bs.mb_s == 0 {
+                    String::new()
+                } else {
+                    format!(r#", "mib_per_second": {}"#, bs.mb_s)
+                };
+
+                let line = format!(
+                    "{{ \"type\": \"bench\", \
+                     \"name\": \"{}\", \
+                     \"median\": {}, \
+                     \"deviation\": {}{} }}",
+                    desc.name, median, deviation, mbps
+                );
+
+                self.writeln_message(&*line)
+            }
+        }
+    }
+
+    fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
+        self.writeln_message(&*format!(
+            r#"{{ "type": "test", "event": "timeout", "name": "{}" }}"#,
+            desc.name
+        ))
+    }
+
+    fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+        self.writeln_message(&*format!(
+            "{{ \"type\": \"suite\", \
+             \"event\": \"{}\", \
+             \"passed\": {}, \
+             \"failed\": {}, \
+             \"allowed_fail\": {}, \
+             \"ignored\": {}, \
+             \"measured\": {}, \
+             \"filtered_out\": {} }}",
+            if state.failed == 0 { "ok" } else { "failed" },
+            state.passed,
+            state.failed + state.allowed_fail,
+            state.allowed_fail,
+            state.ignored,
+            state.measured,
+            state.filtered_out
+        ))?;
+
+        Ok(state.failed == 0)
+    }
+}
+
+/// A formatting utility used to print strings with characters in need of escaping.
+/// Base code taken from `libserialize::json::escape_str`.
+struct EscapedString<S: AsRef<str>>(S);
+
+impl<S: AsRef<str>> ::std::fmt::Display for EscapedString<S> {
+    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        let mut start = 0;
+
+        for (i, byte) in self.0.as_ref().bytes().enumerate() {
+            let escaped = match byte {
+                b'"' => "\\\"",
+                b'\\' => "\\\\",
+                b'\x00' => "\\u0000",
+                b'\x01' => "\\u0001",
+                b'\x02' => "\\u0002",
+                b'\x03' => "\\u0003",
+                b'\x04' => "\\u0004",
+                b'\x05' => "\\u0005",
+                b'\x06' => "\\u0006",
+                b'\x07' => "\\u0007",
+                b'\x08' => "\\b",
+                b'\t' => "\\t",
+                b'\n' => "\\n",
+                b'\x0b' => "\\u000b",
+                b'\x0c' => "\\f",
+                b'\r' => "\\r",
+                b'\x0e' => "\\u000e",
+                b'\x0f' => "\\u000f",
+                b'\x10' => "\\u0010",
+                b'\x11' => "\\u0011",
+                b'\x12' => "\\u0012",
+                b'\x13' => "\\u0013",
+                b'\x14' => "\\u0014",
+                b'\x15' => "\\u0015",
+                b'\x16' => "\\u0016",
+                b'\x17' => "\\u0017",
+                b'\x18' => "\\u0018",
+                b'\x19' => "\\u0019",
+                b'\x1a' => "\\u001a",
+                b'\x1b' => "\\u001b",
+                b'\x1c' => "\\u001c",
+                b'\x1d' => "\\u001d",
+                b'\x1e' => "\\u001e",
+                b'\x1f' => "\\u001f",
+                b'\x7f' => "\\u007f",
+                _ => {
+                    continue;
+                }
+            };
+
+            if start < i {
+                f.write_str(&self.0.as_ref()[start..i])?;
+            }
+
+            f.write_str(escaped)?;
+
+            start = i + 1;
+        }
+
+        if start != self.0.as_ref().len() {
+            f.write_str(&self.0.as_ref()[start..])?;
+        }
+
+        Ok(())
+    }
+}
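
Taken together, the format strings above produce a line-oriented JSON stream, one event per line. An illustrative run of two tests, one failing, would look roughly like this (test names and stdout content are invented):

{ "type": "suite", "event": "started", "test_count": 2 }
{ "type": "test", "event": "started", "name": "tests::parses" }
{ "type": "test", "name": "tests::parses", "event": "ok" }
{ "type": "test", "event": "started", "name": "tests::overflows" }
{ "type": "test", "name": "tests::overflows", "event": "failed", "stdout": "thread panicked at 'assertion failed'\n" }
{ "type": "suite", "event": "failed", "passed": 1, "failed": 1, "allowed_fail": 0, "ignored": 0, "measured": 0, "filtered_out": 0 }

Note the `\n` in the `stdout` field: newlines are escaped by `EscapedString` so that each event stays on a single line, which `writeln_message` asserts.
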
diff --git a/library/test/src/formatters/mod.rs b/library/test/src/formatters/mod.rs
new file mode 100644
index 00000000000..1fb840520a6
--- /dev/null
+++ b/library/test/src/formatters/mod.rs
@@ -0,0 +1,40 @@
+use std::{io, io::prelude::Write};
+
+use crate::{
+    console::ConsoleTestState,
+    test_result::TestResult,
+    time,
+    types::{TestDesc, TestName},
+};
+
+mod json;
+mod pretty;
+mod terse;
+
+pub(crate) use self::json::JsonFormatter;
+pub(crate) use self::pretty::PrettyFormatter;
+pub(crate) use self::terse::TerseFormatter;
+
+pub(crate) trait OutputFormatter {
+    fn write_run_start(&mut self, test_count: usize) -> io::Result<()>;
+    fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()>;
+    fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()>;
+    fn write_result(
+        &mut self,
+        desc: &TestDesc,
+        result: &TestResult,
+        exec_time: Option<&time::TestExecTime>,
+        stdout: &[u8],
+        state: &ConsoleTestState,
+    ) -> io::Result<()>;
+    fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool>;
+}
+
+pub(crate) fn write_stderr_delimiter(test_output: &mut Vec<u8>, test_name: &TestName) {
+    match test_output.last() {
+        Some(b'\n') => (),
+        Some(_) => test_output.push(b'\n'),
+        None => (),
+    }
+    writeln!(test_output, "---- {} stderr ----", test_name).unwrap();
+}
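
The delimiter helper above appends a separating newline only when the captured output is non-empty and does not already end in one. A standalone sketch of the same rule (`delimit` and the test name stand in for `write_stderr_delimiter` and `TestName`):

use std::io::Write;

fn delimit(test_output: &mut Vec<u8>, name: &str) {
    match test_output.last() {
        Some(b'\n') | None => (), // already terminated, or nothing captured yet
        Some(_) => test_output.push(b'\n'),
    }
    writeln!(test_output, "---- {} stderr ----", name).unwrap();
}

fn main() {
    let mut out = b"captured stdout without trailing newline".to_vec();
    delimit(&mut out, "tests::parses");
    assert!(out.ends_with(b"\n---- tests::parses stderr ----\n"));
}
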
diff --git a/library/test/src/formatters/pretty.rs b/library/test/src/formatters/pretty.rs
new file mode 100644
index 00000000000..4a93e084df1
--- /dev/null
+++ b/library/test/src/formatters/pretty.rs
@@ -0,0 +1,281 @@
+use std::{io, io::prelude::Write};
+
+use super::OutputFormatter;
+use crate::{
+    bench::fmt_bench_samples,
+    console::{ConsoleTestState, OutputLocation},
+    test_result::TestResult,
+    time,
+    types::TestDesc,
+};
+
+pub(crate) struct PrettyFormatter<T> {
+    out: OutputLocation<T>,
+    use_color: bool,
+    time_options: Option<time::TestTimeOptions>,
+
+    /// Number of columns to fill when aligning names
+    max_name_len: usize,
+
+    is_multithreaded: bool,
+}
+
+impl<T: Write> PrettyFormatter<T> {
+    pub fn new(
+        out: OutputLocation<T>,
+        use_color: bool,
+        max_name_len: usize,
+        is_multithreaded: bool,
+        time_options: Option<time::TestTimeOptions>,
+    ) -> Self {
+        PrettyFormatter { out, use_color, max_name_len, is_multithreaded, time_options }
+    }
+
+    #[cfg(test)]
+    pub fn output_location(&self) -> &OutputLocation<T> {
+        &self.out
+    }
+
+    pub fn write_ok(&mut self) -> io::Result<()> {
+        self.write_short_result("ok", term::color::GREEN)
+    }
+
+    pub fn write_failed(&mut self) -> io::Result<()> {
+        self.write_short_result("FAILED", term::color::RED)
+    }
+
+    pub fn write_ignored(&mut self) -> io::Result<()> {
+        self.write_short_result("ignored", term::color::YELLOW)
+    }
+
+    pub fn write_allowed_fail(&mut self) -> io::Result<()> {
+        self.write_short_result("FAILED (allowed)", term::color::YELLOW)
+    }
+
+    pub fn write_time_failed(&mut self) -> io::Result<()> {
+        self.write_short_result("FAILED (time limit exceeded)", term::color::RED)
+    }
+
+    pub fn write_bench(&mut self) -> io::Result<()> {
+        self.write_pretty("bench", term::color::CYAN)
+    }
+
+    pub fn write_short_result(
+        &mut self,
+        result: &str,
+        color: term::color::Color,
+    ) -> io::Result<()> {
+        self.write_pretty(result, color)
+    }
+
+    pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
+        match self.out {
+            OutputLocation::Pretty(ref mut term) => {
+                if self.use_color {
+                    term.fg(color)?;
+                }
+                term.write_all(word.as_bytes())?;
+                if self.use_color {
+                    term.reset()?;
+                }
+                term.flush()
+            }
+            OutputLocation::Raw(ref mut stdout) => {
+                stdout.write_all(word.as_bytes())?;
+                stdout.flush()
+            }
+        }
+    }
+
+    pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
+        let s = s.as_ref();
+        self.out.write_all(s.as_bytes())?;
+        self.out.flush()
+    }
+
+    fn write_time(
+        &mut self,
+        desc: &TestDesc,
+        exec_time: Option<&time::TestExecTime>,
+    ) -> io::Result<()> {
+        if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
+            let time_str = format!(" <{}>", time);
+
+            let color = if opts.colored {
+                if opts.is_critical(desc, time) {
+                    Some(term::color::RED)
+                } else if opts.is_warn(desc, time) {
+                    Some(term::color::YELLOW)
+                } else {
+                    None
+                }
+            } else {
+                None
+            };
+
+            match color {
+                Some(color) => self.write_pretty(&time_str, color)?,
+                None => self.write_plain(&time_str)?,
+            }
+        }
+
+        Ok(())
+    }
+
+    fn write_results(
+        &mut self,
+        inputs: &Vec<(TestDesc, Vec<u8>)>,
+        results_type: &str,
+    ) -> io::Result<()> {
+        let results_out_str = format!("\n{}:\n", results_type);
+
+        self.write_plain(&results_out_str)?;
+
+        let mut results = Vec::new();
+        let mut stdouts = String::new();
+        for &(ref f, ref stdout) in inputs {
+            results.push(f.name.to_string());
+            if !stdout.is_empty() {
+                stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
+                let output = String::from_utf8_lossy(stdout);
+                stdouts.push_str(&output);
+                stdouts.push_str("\n");
+            }
+        }
+        if !stdouts.is_empty() {
+            self.write_plain("\n")?;
+            self.write_plain(&stdouts)?;
+        }
+
+        self.write_plain(&results_out_str)?;
+        results.sort();
+        for name in &results {
+            self.write_plain(&format!("    {}\n", name))?;
+        }
+        Ok(())
+    }
+
+    pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_results(&state.not_failures, "successes")
+    }
+
+    pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_results(&state.failures, "failures")
+    }
+
+    pub fn write_time_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_results(&state.time_failures, "failures (time limit exceeded)")
+    }
+
+    fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
+        let name = desc.padded_name(self.max_name_len, desc.name.padding());
+        self.write_plain(&format!("test {} ... ", name))?;
+
+        Ok(())
+    }
+}
+
+impl<T: Write> OutputFormatter for PrettyFormatter<T> {
+    fn write_run_start(&mut self, test_count: usize) -> io::Result<()> {
+        let noun = if test_count != 1 { "tests" } else { "test" };
+        self.write_plain(&format!("\nrunning {} {}\n", test_count, noun))
+    }
+
+    fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
+        // When running tests concurrently, we should not print
+        // the test's name as the result will be mis-aligned.
+        // When running the tests serially, we print the name here so
+        // that the user can see which test hangs.
+        if !self.is_multithreaded {
+            self.write_test_name(desc)?;
+        }
+
+        Ok(())
+    }
+
+    fn write_result(
+        &mut self,
+        desc: &TestDesc,
+        result: &TestResult,
+        exec_time: Option<&time::TestExecTime>,
+        _: &[u8],
+        _: &ConsoleTestState,
+    ) -> io::Result<()> {
+        if self.is_multithreaded {
+            self.write_test_name(desc)?;
+        }
+
+        match *result {
+            TestResult::TrOk => self.write_ok()?,
+            TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?,
+            TestResult::TrIgnored => self.write_ignored()?,
+            TestResult::TrAllowedFail => self.write_allowed_fail()?,
+            TestResult::TrBench(ref bs) => {
+                self.write_bench()?;
+                self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?;
+            }
+            TestResult::TrTimedFail => self.write_time_failed()?,
+        }
+
+        self.write_time(desc, exec_time)?;
+        self.write_plain("\n")
+    }
+
+    fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
+        if self.is_multithreaded {
+            self.write_test_name(desc)?;
+        }
+
+        self.write_plain(&format!(
+            "test {} has been running for over {} seconds\n",
+            desc.name,
+            time::TEST_WARN_TIMEOUT_S
+        ))
+    }
+
+    fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+        if state.options.display_output {
+            self.write_successes(state)?;
+        }
+        let success = state.failed == 0;
+        if !success {
+            if !state.failures.is_empty() {
+                self.write_failures(state)?;
+            }
+
+            if !state.time_failures.is_empty() {
+                self.write_time_failures(state)?;
+            }
+        }
+
+        self.write_plain("\ntest result: ")?;
+
+        if success {
+            // There's no parallelism at this point so it's safe to use color
+            self.write_pretty("ok", term::color::GREEN)?;
+        } else {
+            self.write_pretty("FAILED", term::color::RED)?;
+        }
+
+        let s = if state.allowed_fail > 0 {
+            format!(
+                ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n",
+                state.passed,
+                state.failed + state.allowed_fail,
+                state.allowed_fail,
+                state.ignored,
+                state.measured,
+                state.filtered_out
+            )
+        } else {
+            format!(
+                ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
+                state.passed, state.failed, state.ignored, state.measured, state.filtered_out
+            )
+        };
+
+        self.write_plain(&s)?;
+
+        Ok(success)
+    }
+}
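
For reference, the summary that `write_run_finish` above emits looks like the following (counts invented; first the success case, then the failure case, both with `allowed_fail == 0`):

test result: ok. 5 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out

test result: FAILED. 3 passed; 2 failed; 0 ignored; 0 measured; 4 filtered out
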
diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs
new file mode 100644
index 00000000000..5a264d20057
--- /dev/null
+++ b/library/test/src/formatters/terse.rs
@@ -0,0 +1,258 @@
+use std::{io, io::prelude::Write};
+
+use super::OutputFormatter;
+use crate::{
+    bench::fmt_bench_samples,
+    console::{ConsoleTestState, OutputLocation},
+    test_result::TestResult,
+    time,
+    types::NamePadding,
+    types::TestDesc,
+};
+
+// insert a '\n' after 100 tests in quiet mode
+const QUIET_MODE_MAX_COLUMN: usize = 100;
+
+pub(crate) struct TerseFormatter<T> {
+    out: OutputLocation<T>,
+    use_color: bool,
+    is_multithreaded: bool,
+    /// Number of columns to fill when aligning names
+    max_name_len: usize,
+
+    test_count: usize,
+    total_test_count: usize,
+}
+
+impl<T: Write> TerseFormatter<T> {
+    pub fn new(
+        out: OutputLocation<T>,
+        use_color: bool,
+        max_name_len: usize,
+        is_multithreaded: bool,
+    ) -> Self {
+        TerseFormatter {
+            out,
+            use_color,
+            max_name_len,
+            is_multithreaded,
+            test_count: 0,
+            total_test_count: 0, // initialized later, when write_run_start is called
+        }
+    }
+
+    pub fn write_ok(&mut self) -> io::Result<()> {
+        self.write_short_result(".", term::color::GREEN)
+    }
+
+    pub fn write_failed(&mut self) -> io::Result<()> {
+        self.write_short_result("F", term::color::RED)
+    }
+
+    pub fn write_ignored(&mut self) -> io::Result<()> {
+        self.write_short_result("i", term::color::YELLOW)
+    }
+
+    pub fn write_allowed_fail(&mut self) -> io::Result<()> {
+        self.write_short_result("a", term::color::YELLOW)
+    }
+
+    pub fn write_bench(&mut self) -> io::Result<()> {
+        self.write_pretty("bench", term::color::CYAN)
+    }
+
+    pub fn write_short_result(
+        &mut self,
+        result: &str,
+        color: term::color::Color,
+    ) -> io::Result<()> {
+        self.write_pretty(result, color)?;
+        if self.test_count % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 {
+            // we insert a new line every 100 dots in order to flush the
+            // screen when dealing with line-buffered output (e.g., piping to
+            // `stamp` in the rust CI).
+            let out = format!(" {}/{}\n", self.test_count + 1, self.total_test_count);
+            self.write_plain(&out)?;
+        }
+
+        self.test_count += 1;
+        Ok(())
+    }
+
+    pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
+        match self.out {
+            OutputLocation::Pretty(ref mut term) => {
+                if self.use_color {
+                    term.fg(color)?;
+                }
+                term.write_all(word.as_bytes())?;
+                if self.use_color {
+                    term.reset()?;
+                }
+                term.flush()
+            }
+            OutputLocation::Raw(ref mut stdout) => {
+                stdout.write_all(word.as_bytes())?;
+                stdout.flush()
+            }
+        }
+    }
+
+    pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
+        let s = s.as_ref();
+        self.out.write_all(s.as_bytes())?;
+        self.out.flush()
+    }
+
+    pub fn write_outputs(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_plain("\nsuccesses:\n")?;
+        let mut successes = Vec::new();
+        let mut stdouts = String::new();
+        for &(ref f, ref stdout) in &state.not_failures {
+            successes.push(f.name.to_string());
+            if !stdout.is_empty() {
+                stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
+                let output = String::from_utf8_lossy(stdout);
+                stdouts.push_str(&output);
+                stdouts.push_str("\n");
+            }
+        }
+        if !stdouts.is_empty() {
+            self.write_plain("\n")?;
+            self.write_plain(&stdouts)?;
+        }
+
+        self.write_plain("\nsuccesses:\n")?;
+        successes.sort();
+        for name in &successes {
+            self.write_plain(&format!("    {}\n", name))?;
+        }
+        Ok(())
+    }
+
+    pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_plain("\nfailures:\n")?;
+        let mut failures = Vec::new();
+        let mut fail_out = String::new();
+        for &(ref f, ref stdout) in &state.failures {
+            failures.push(f.name.to_string());
+            if !stdout.is_empty() {
+                fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
+                let output = String::from_utf8_lossy(stdout);
+                fail_out.push_str(&output);
+                fail_out.push_str("\n");
+            }
+        }
+        if !fail_out.is_empty() {
+            self.write_plain("\n")?;
+            self.write_plain(&fail_out)?;
+        }
+
+        self.write_plain("\nfailures:\n")?;
+        failures.sort();
+        for name in &failures {
+            self.write_plain(&format!("    {}\n", name))?;
+        }
+        Ok(())
+    }
+
+    fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
+        let name = desc.padded_name(self.max_name_len, desc.name.padding());
+        self.write_plain(&format!("test {} ... ", name))?;
+
+        Ok(())
+    }
+}
+
+impl<T: Write> OutputFormatter for TerseFormatter<T> {
+    fn write_run_start(&mut self, test_count: usize) -> io::Result<()> {
+        self.total_test_count = test_count;
+        let noun = if test_count != 1 { "tests" } else { "test" };
+        self.write_plain(&format!("\nrunning {} {}\n", test_count, noun))
+    }
+
+    fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
+        // A remnant from old libtest code that used the padding value
+        // to indicate benchmarks.
+        // When running benchmarks, terse mode should still print their
+        // names, just as the pretty formatter does.
+        if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight {
+            self.write_test_name(desc)?;
+        }
+
+        Ok(())
+    }
+
+    fn write_result(
+        &mut self,
+        desc: &TestDesc,
+        result: &TestResult,
+        _: Option<&time::TestExecTime>,
+        _: &[u8],
+        _: &ConsoleTestState,
+    ) -> io::Result<()> {
+        match *result {
+            TestResult::TrOk => self.write_ok(),
+            TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => {
+                self.write_failed()
+            }
+            TestResult::TrIgnored => self.write_ignored(),
+            TestResult::TrAllowedFail => self.write_allowed_fail(),
+            TestResult::TrBench(ref bs) => {
+                if self.is_multithreaded {
+                    self.write_test_name(desc)?;
+                }
+                self.write_bench()?;
+                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
+            }
+        }
+    }
+
+    fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
+        self.write_plain(&format!(
+            "test {} has been running for over {} seconds\n",
+            desc.name,
+            time::TEST_WARN_TIMEOUT_S
+        ))
+    }
+
+    fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+        if state.options.display_output {
+            self.write_outputs(state)?;
+        }
+        let success = state.failed == 0;
+        if !success {
+            self.write_failures(state)?;
+        }
+
+        self.write_plain("\ntest result: ")?;
+
+        if success {
+            // There's no parallelism at this point so it's safe to use color
+            self.write_pretty("ok", term::color::GREEN)?;
+        } else {
+            self.write_pretty("FAILED", term::color::RED)?;
+        }
+
+        let s = if state.allowed_fail > 0 {
+            format!(
+                ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n",
+                state.passed,
+                state.failed + state.allowed_fail,
+                state.allowed_fail,
+                state.ignored,
+                state.measured,
+                state.filtered_out
+            )
+        } else {
+            format!(
+                ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
+                state.passed, state.failed, state.ignored, state.measured, state.filtered_out
+            )
+        };
+
+        self.write_plain(&s)?;
+
+        Ok(success)
+    }
+}
diff --git a/library/test/src/helpers/concurrency.rs b/library/test/src/helpers/concurrency.rs
new file mode 100644
index 00000000000..2fe87247e3a
--- /dev/null
+++ b/library/test/src/helpers/concurrency.rs
@@ -0,0 +1,117 @@
+//! Helper module for determining the number of threads to use
+//! during test execution.
+use std::env;
+
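+/// Returns the number of tests to run concurrently: the value of the
+/// `RUST_TEST_THREADS` environment variable if it is set, or the number of
+/// logical CPUs otherwise.
+///
+/// A minimal sketch of the environment-variable path (illustrative; mutates
+/// the process environment):
+///
+/// ```ignore (illustrative)
+/// std::env::set_var("RUST_TEST_THREADS", "4");
+/// assert_eq!(get_concurrency(), 4);
+/// ```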
+#[allow(deprecated)]
+pub fn get_concurrency() -> usize {
+    match env::var("RUST_TEST_THREADS") {
+        Ok(s) => {
+            let opt_n: Option<usize> = s.parse().ok();
+            match opt_n {
+                Some(n) if n > 0 => n,
+                _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s),
+            }
+        }
+        Err(..) => num_cpus(),
+    }
+}
+
+cfg_if::cfg_if! {
+    if #[cfg(windows)] {
+        #[allow(nonstandard_style)]
+        fn num_cpus() -> usize {
+            #[repr(C)]
+            struct SYSTEM_INFO {
+                wProcessorArchitecture: u16,
+                wReserved: u16,
+                dwPageSize: u32,
+                lpMinimumApplicationAddress: *mut u8,
+                lpMaximumApplicationAddress: *mut u8,
+                dwActiveProcessorMask: *mut u8,
+                dwNumberOfProcessors: u32,
+                dwProcessorType: u32,
+                dwAllocationGranularity: u32,
+                wProcessorLevel: u16,
+                wProcessorRevision: u16,
+            }
+            extern "system" {
+                fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
+            }
+            unsafe {
+                let mut sysinfo = std::mem::zeroed();
+                GetSystemInfo(&mut sysinfo);
+                sysinfo.dwNumberOfProcessors as usize
+            }
+        }
+    } else if #[cfg(any(
+        target_os = "android",
+        target_os = "cloudabi",
+        target_os = "emscripten",
+        target_os = "fuchsia",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "solaris",
+        target_os = "illumos",
+    ))] {
+        fn num_cpus() -> usize {
+            unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
+        }
+    } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))] {
+        fn num_cpus() -> usize {
+            use std::ptr;
+
+            let mut cpus: libc::c_uint = 0;
+            let mut cpus_size = std::mem::size_of_val(&cpus);
+
+            unsafe {
+                cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
+            }
+            if cpus < 1 {
+                let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
+                unsafe {
+                    libc::sysctl(
+                        mib.as_mut_ptr(),
+                        2,
+                        &mut cpus as *mut _ as *mut _,
+                        &mut cpus_size as *mut _ as *mut _,
+                        ptr::null_mut(),
+                        0,
+                    );
+                }
+                if cpus < 1 {
+                    cpus = 1;
+                }
+            }
+            cpus as usize
+        }
+    } else if #[cfg(target_os = "openbsd")] {
+        fn num_cpus() -> usize {
+            use std::ptr;
+
+            let mut cpus: libc::c_uint = 0;
+            let mut cpus_size = std::mem::size_of_val(&cpus);
+            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
+
+            unsafe {
+                libc::sysctl(
+                    mib.as_mut_ptr(),
+                    2,
+                    &mut cpus as *mut _ as *mut _,
+                    &mut cpus_size as *mut _ as *mut _,
+                    ptr::null_mut(),
+                    0,
+                );
+            }
+            if cpus < 1 {
+                cpus = 1;
+            }
+            cpus as usize
+        }
+    } else {
+        // FIXME: implement on vxWorks, Redox, HermitCore, Haiku, l4re
+        fn num_cpus() -> usize {
+            1
+        }
+    }
+}
diff --git a/library/test/src/helpers/exit_code.rs b/library/test/src/helpers/exit_code.rs
new file mode 100644
index 00000000000..31e234d9818
--- /dev/null
+++ b/library/test/src/helpers/exit_code.rs
@@ -0,0 +1,20 @@
+//! Helper module to detect subprocess exit code.
+
+use std::process::ExitStatus;
+
+#[cfg(not(unix))]
+pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
+    status.code().ok_or("received no exit code from child process".into())
+}
+
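+/// A minimal sketch of the Unix behavior (illustrative; assumes a `true`
+/// binary on `PATH` that exits with status 0):
+///
+/// ```ignore (illustrative)
+/// let status = std::process::Command::new("true").status().unwrap();
+/// assert_eq!(get_exit_code(status), Ok(0));
+/// ```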
+#[cfg(unix)]
+pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
+    use std::os::unix::process::ExitStatusExt;
+    match status.code() {
+        Some(code) => Ok(code),
+        None => match status.signal() {
+            Some(signal) => Err(format!("child process exited with signal {}", signal)),
+            None => Err("child process exited with unknown signal".into()),
+        },
+    }
+}
diff --git a/library/test/src/helpers/isatty.rs b/library/test/src/helpers/isatty.rs
new file mode 100644
index 00000000000..874ecc37645
--- /dev/null
+++ b/library/test/src/helpers/isatty.rs
@@ -0,0 +1,32 @@
+//! Helper module which provides a function to test
+//! if stdout is a tty.
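+//!
+//! A sketch of the intended use, assuming a `color_config: ColorConfig`
+//! (from the options module) is in scope (illustrative):
+//!
+//! ```ignore (illustrative)
+//! let use_color = match color_config {
+//!     ColorConfig::AlwaysColor => true,
+//!     ColorConfig::NeverColor => false,
+//!     ColorConfig::AutoColor => stdout_isatty(),
+//! };
+//! ```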
+
+cfg_if::cfg_if! {
+    if #[cfg(unix)] {
+        pub fn stdout_isatty() -> bool {
+            unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
+        }
+    } else if #[cfg(windows)] {
+        pub fn stdout_isatty() -> bool {
+            type DWORD = u32;
+            type BOOL = i32;
+            type HANDLE = *mut u8;
+            type LPDWORD = *mut u32;
+            const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
+            extern "system" {
+                fn GetStdHandle(which: DWORD) -> HANDLE;
+                fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
+            }
+            unsafe {
+                let handle = GetStdHandle(STD_OUTPUT_HANDLE);
+                let mut out = 0;
+                GetConsoleMode(handle, &mut out) != 0
+            }
+        }
+    } else {
+        // FIXME: Implement isatty on SGX
+        pub fn stdout_isatty() -> bool {
+            false
+        }
+    }
+}
diff --git a/library/test/src/helpers/metrics.rs b/library/test/src/helpers/metrics.rs
new file mode 100644
index 00000000000..f77a23e6875
--- /dev/null
+++ b/library/test/src/helpers/metrics.rs
@@ -0,0 +1,50 @@
+//! Benchmark metrics.
+use std::collections::BTreeMap;
+
+#[derive(Clone, PartialEq, Debug, Copy)]
+pub struct Metric {
+    value: f64,
+    noise: f64,
+}
+
+impl Metric {
+    pub fn new(value: f64, noise: f64) -> Metric {
+        Metric { value, noise }
+    }
+}
+
+#[derive(Clone, PartialEq)]
+pub struct MetricMap(BTreeMap<String, Metric>);
+
+impl MetricMap {
+    pub fn new() -> MetricMap {
+        MetricMap(BTreeMap::new())
+    }
+
+    /// Insert a named `value` (+/- `noise`) metric into the map. The value
+    /// must be non-negative. The `noise` indicates the uncertainty of the
+    /// metric, which doubles as the "noise range" of acceptable
+    /// pairwise-regressions on this named value, when comparing from one
+    /// metric to the next using `compare_to_old`.
+    ///
+    /// If `noise` is positive, then it means this metric is of a value
+    /// you want to see grow smaller, so a change larger than `noise` in the
+    /// positive direction represents a regression.
+    ///
+    /// If `noise` is negative, then it means this metric is of a value
+    /// you want to see grow larger, so a change larger than `noise` in the
+    /// negative direction represents a regression.
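+    ///
+    /// A minimal sketch (illustrative; hypothetical metric names):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut map = MetricMap::new();
+    /// // Time-like metric: smaller is better, so the noise is positive.
+    /// map.insert_metric("ns_per_iter", 1250.0, 50.0);
+    /// // Throughput-like metric: larger is better, so the noise is negative.
+    /// map.insert_metric("mb_per_s", 310.0, -10.0);
+    /// assert_eq!(map.fmt_metrics(), "mb_per_s: 310 (+/- -10), ns_per_iter: 1250 (+/- 50)");
+    /// ```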
+    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
+        let m = Metric { value, noise };
+        self.0.insert(name.to_owned(), m);
+    }
+
+    pub fn fmt_metrics(&self) -> String {
+        let v = self
+            .0
+            .iter()
+            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
+            .collect::<Vec<_>>();
+        v.join(", ")
+    }
+}
diff --git a/library/test/src/helpers/mod.rs b/library/test/src/helpers/mod.rs
new file mode 100644
index 00000000000..eb416b10150
--- /dev/null
+++ b/library/test/src/helpers/mod.rs
@@ -0,0 +1,8 @@
+//! Module with common helpers not directly related to tests
+//! but used in `libtest`.
+
+pub mod concurrency;
+pub mod exit_code;
+pub mod isatty;
+pub mod metrics;
+pub mod sink;
diff --git a/library/test/src/helpers/sink.rs b/library/test/src/helpers/sink.rs
new file mode 100644
index 00000000000..aa7fe248773
--- /dev/null
+++ b/library/test/src/helpers/sink.rs
@@ -0,0 +1,24 @@
+//! Module providing a helper structure to capture output in subprocesses.
+
+use std::{
+    io,
+    io::prelude::Write,
+    sync::{Arc, Mutex},
+};
+
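+/// A `Write` implementation that appends everything written to it to a
+/// shared, mutex-guarded byte buffer.
+///
+/// A minimal sketch of its use (illustrative):
+///
+/// ```ignore (illustrative)
+/// use std::io::Write;
+/// use std::sync::{Arc, Mutex};
+///
+/// let data = Arc::new(Mutex::new(Vec::new()));
+/// let mut sink = Sink::new_boxed(&data);
+/// write!(sink, "captured").unwrap();
+/// assert_eq!(&data.lock().unwrap()[..], &b"captured"[..]);
+/// ```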
+pub struct Sink(Arc<Mutex<Vec<u8>>>);
+
+impl Sink {
+    pub fn new_boxed(data: &Arc<Mutex<Vec<u8>>>) -> Box<Self> {
+        Box::new(Self(data.clone()))
+    }
+}
+
+impl Write for Sink {
+    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+        Write::write(&mut *self.0.lock().unwrap(), data)
+    }
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
new file mode 100644
index 00000000000..933b647071f
--- /dev/null
+++ b/library/test/src/lib.rs
@@ -0,0 +1,644 @@
+//! Support code for rustc's built-in unit-test and micro-benchmarking
+//! framework.
+//!
+//! Almost all user code will only be interested in `Bencher` and
+//! `black_box`. All other interactions (such as writing tests and
+//! benchmarks themselves) should be done via the `#[test]` and
+//! `#[bench]` attributes.
+//!
+//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
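+//!
+//! A sketch of typical usage (illustrative; requires a nightly compiler with
+//! the unstable `test` feature):
+//!
+//! ```ignore (illustrative)
+//! #![feature(test)]
+//! extern crate test;
+//!
+//! use test::{black_box, Bencher};
+//!
+//! #[bench]
+//! fn bench_sum(b: &mut Bencher) {
+//!     let xs: Vec<u64> = (0..1_000).collect();
+//!     b.iter(|| black_box(xs.iter().sum::<u64>()));
+//! }
+//! ```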
+
+// Currently, not much of this is meant for users. It is intended to
+// support the simplest interface possible for representing and
+// running tests while providing a base that other test frameworks may
+// build off of.
+
+// N.B., this is also specified in this crate's Cargo.toml, but librustc_ast contains logic specific to
+// this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
+// cargo) to detect this crate.
+
+#![crate_name = "test"]
+#![unstable(feature = "test", issue = "50297")]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
+#![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
+#![feature(rustc_private)]
+#![feature(nll)]
+#![feature(bool_to_option)]
+#![feature(set_stdio)]
+#![feature(panic_unwind)]
+#![feature(staged_api)]
+#![feature(termination_trait_lib)]
+#![feature(test)]
+
+// Public reexports
+pub use self::bench::{black_box, Bencher};
+pub use self::console::run_tests_console;
+pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
+pub use self::types::TestName::*;
+pub use self::types::*;
+pub use self::ColorConfig::*;
+pub use cli::TestOpts;
+
+// Module to be used by rustc to compile tests in libtest
+pub mod test {
+    pub use crate::{
+        assert_test_result,
+        bench::Bencher,
+        cli::{parse_opts, TestOpts},
+        filter_tests,
+        helpers::metrics::{Metric, MetricMap},
+        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
+        run_test, test_main, test_main_static,
+        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
+        time::{TestExecTime, TestTimeOptions},
+        types::{
+            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
+            TestDescAndFn, TestName, TestType,
+        },
+    };
+}
+
+use std::{
+    env, io,
+    io::prelude::Write,
+    panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
+    process::{self, Command, Termination},
+    sync::mpsc::{channel, Sender},
+    sync::{Arc, Mutex},
+    thread,
+    time::{Duration, Instant},
+};
+
+pub mod bench;
+mod cli;
+mod console;
+mod event;
+mod formatters;
+mod helpers;
+mod options;
+pub mod stats;
+mod test_result;
+mod time;
+mod types;
+
+#[cfg(test)]
+mod tests;
+
+use event::{CompletedTest, TestEvent};
+use helpers::concurrency::get_concurrency;
+use helpers::exit_code::get_exit_code;
+use helpers::sink::Sink;
+use options::{Concurrent, RunStrategy};
+use test_result::*;
+use time::TestExecTime;
+
+// Process exit code to be used to indicate test failures.
+const ERROR_EXIT_CODE: i32 = 101;
+
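+// Environment variable used by the panic=abort strategy: the primary test
+// process re-executes its own binary with this variable set to a single test
+// name, and the child (see `test_main_static_abort`) runs just that test.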
+const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
+
+// The default console test runner. It accepts the command line
+// arguments and a vector of test_descs.
+pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
+    let mut opts = match cli::parse_opts(args) {
+        Some(Ok(o)) => o,
+        Some(Err(msg)) => {
+            eprintln!("error: {}", msg);
+            process::exit(ERROR_EXIT_CODE);
+        }
+        None => return,
+    };
+    if let Some(options) = options {
+        opts.options = options;
+    }
+    if opts.list {
+        if let Err(e) = console::list_tests_console(&opts, tests) {
+            eprintln!("error: io error when listing tests: {:?}", e);
+            process::exit(ERROR_EXIT_CODE);
+        }
+    } else {
+        match console::run_tests_console(&opts, tests) {
+            Ok(true) => {}
+            Ok(false) => process::exit(ERROR_EXIT_CODE),
+            Err(e) => {
+                eprintln!("error: io error when running tests: {:?}", e);
+                process::exit(ERROR_EXIT_CODE);
+            }
+        }
+    }
+}
+
+/// A variant optimized for invocation with a static test vector.
+/// This will panic (intentionally) when fed any dynamic tests.
+///
+/// This is the entry point for the main function generated by `rustc --test`
+/// when panic=unwind.
+pub fn test_main_static(tests: &[&TestDescAndFn]) {
+    let args = env::args().collect::<Vec<_>>();
+    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
+    test_main(&args, owned_tests, None)
+}
+
+/// A variant optimized for invocation with a static test vector.
+/// This will panic (intentionally) when fed any dynamic tests.
+///
+/// Runs tests in panic=abort mode, which involves spawning subprocesses for
+/// tests.
+///
+/// This is the entry point for the main function generated by `rustc --test`
+/// when panic=abort.
+pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
+    // If we're being run in SpawnedSecondary mode, run the test here. run_test
+    // will then exit the process.
+    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
+        env::remove_var(SECONDARY_TEST_INVOKER_VAR);
+        let test = tests
+            .iter()
+            .filter(|test| test.desc.name.as_slice() == name)
+            .map(make_owned_test)
+            .next()
+            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
+        let TestDescAndFn { desc, testfn } = test;
+        let testfn = match testfn {
+            StaticTestFn(f) => f,
+            _ => panic!("only static tests are supported"),
+        };
+        run_test_in_spawned_subprocess(desc, Box::new(testfn));
+    }
+
+    let args = env::args().collect::<Vec<_>>();
+    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
+    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
+}
+
+/// Clones static values into a dynamic vector, so that `test_main()` can
+/// hand ownership of each test out to a parallel test runner.
+///
+/// This will panic when fed any dynamic tests, because they cannot be cloned.
+fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
+    match test.testfn {
+        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
+        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
+        _ => panic!("non-static tests passed to test::test_main_static"),
+    }
+}
+
+/// Invoked when unit tests terminate. Should panic if the unit
+/// test is considered a failure. By default, invokes `report()`
+/// and checks for a `0` result.
+pub fn assert_test_result<T: Termination>(result: T) {
+    let code = result.report();
+    assert_eq!(
+        code, 0,
+        "the test returned a termination value with a non-zero status code ({}) \
+         which indicates a failure",
+        code
+    );
+}
+
+pub fn run_tests<F>(
+    opts: &TestOpts,
+    tests: Vec<TestDescAndFn>,
+    mut notify_about_test_event: F,
+) -> io::Result<()>
+where
+    F: FnMut(TestEvent) -> io::Result<()>,
+{
+    use std::collections::{self, HashMap};
+    use std::hash::BuildHasherDefault;
+    use std::sync::mpsc::RecvTimeoutError;
+    // Use a deterministic hasher
+    type TestMap =
+        HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
+
+    let tests_len = tests.len();
+
+    let mut filtered_tests = filter_tests(opts, tests);
+    if !opts.bench_benchmarks {
+        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
+    }
+
+    let filtered_tests = {
+        let mut filtered_tests = filtered_tests;
+        for test in filtered_tests.iter_mut() {
+            test.desc.name = test.desc.name.with_padding(test.testfn.padding());
+        }
+
+        filtered_tests
+    };
+
+    let filtered_out = tests_len - filtered_tests.len();
+    let event = TestEvent::TeFilteredOut(filtered_out);
+    notify_about_test_event(event)?;
+
+    let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
+
+    let event = TestEvent::TeFiltered(filtered_descs);
+    notify_about_test_event(event)?;
+
+    let (filtered_tests, filtered_benchs): (Vec<_>, _) =
+        filtered_tests.into_iter().partition(|e| match e.testfn {
+            StaticTestFn(_) | DynTestFn(_) => true,
+            _ => false,
+        });
+
+    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
+
+    let mut remaining = filtered_tests;
+    remaining.reverse();
+    let mut pending = 0;
+
+    let (tx, rx) = channel::<CompletedTest>();
+    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
+        RunStrategy::SpawnPrimary
+    } else {
+        RunStrategy::InProcess
+    };
+
+    let mut running_tests: TestMap = HashMap::default();
+
+    fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
+        let now = Instant::now();
+        let timed_out = running_tests
+            .iter()
+            .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) } else { None })
+            .collect();
+        for test in &timed_out {
+            running_tests.remove(test);
+        }
+        timed_out
+    }
+
+    fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
+        running_tests.values().min().map(|next_timeout| {
+            let now = Instant::now();
+            if *next_timeout >= now { *next_timeout - now } else { Duration::new(0, 0) }
+        })
+    }
+
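+    // With a single test thread, run the remaining tests strictly in order.
+    // Otherwise, keep up to `concurrency` tests in flight and poll the
+    // channel with a timeout, so tests that exceed the warn timeout can be
+    // reported as `TeTimeout` events while they continue to run.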
+    if concurrency == 1 {
+        while !remaining.is_empty() {
+            let test = remaining.pop().unwrap();
+            let event = TestEvent::TeWait(test.desc.clone());
+            notify_about_test_event(event)?;
+            run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No);
+            let completed_test = rx.recv().unwrap();
+
+            let event = TestEvent::TeResult(completed_test);
+            notify_about_test_event(event)?;
+        }
+    } else {
+        while pending > 0 || !remaining.is_empty() {
+            while pending < concurrency && !remaining.is_empty() {
+                let test = remaining.pop().unwrap();
+                let timeout = time::get_default_test_timeout();
+                running_tests.insert(test.desc.clone(), timeout);
+
+                let event = TestEvent::TeWait(test.desc.clone());
+                notify_about_test_event(event)?; // here no pad
+                run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes);
+                pending += 1;
+            }
+
+            let mut res;
+            loop {
+                if let Some(timeout) = calc_timeout(&running_tests) {
+                    res = rx.recv_timeout(timeout);
+                    for test in get_timed_out_tests(&mut running_tests) {
+                        let event = TestEvent::TeTimeout(test);
+                        notify_about_test_event(event)?;
+                    }
+
+                    match res {
+                        Err(RecvTimeoutError::Timeout) => {
+                            // Result is not yet ready, continue waiting.
+                        }
+                        _ => {
+                            // We've got a result, stop the loop.
+                            break;
+                        }
+                    }
+                } else {
+                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
+                    break;
+                }
+            }
+
+            let completed_test = res.unwrap();
+            running_tests.remove(&completed_test.desc);
+
+            let event = TestEvent::TeResult(completed_test);
+            notify_about_test_event(event)?;
+            pending -= 1;
+        }
+    }
+
+    if opts.bench_benchmarks {
+        // All benchmarks run at the end, in serial.
+        for b in filtered_benchs {
+            let event = TestEvent::TeWait(b.desc.clone());
+            notify_about_test_event(event)?;
+            run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
+            let completed_test = rx.recv().unwrap();
+
+            let event = TestEvent::TeResult(completed_test);
+            notify_about_test_event(event)?;
+        }
+    }
+    Ok(())
+}
+
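+/// Applies the filters from `opts` to `tests`: the name filter (exact or
+/// substring match), the `--skip` patterns, the optional exclusion of
+/// `#[should_panic]` tests, and the ignored-test policy, then returns the
+/// remaining tests sorted by name.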
+pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
+    let mut filtered = tests;
+    let matches_filter = |test: &TestDescAndFn, filter: &str| {
+        let test_name = test.desc.name.as_slice();
+
+        match opts.filter_exact {
+            true => test_name == filter,
+            false => test_name.contains(filter),
+        }
+    };
+
+    // Remove tests that don't match the test filter
+    if let Some(ref filter) = opts.filter {
+        filtered.retain(|test| matches_filter(test, filter));
+    }
+
+    // Skip tests that match any of the skip filters
+    filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
+
+    // Excludes #[should_panic] tests
+    if opts.exclude_should_panic {
+        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
+    }
+
+    // maybe unignore tests
+    match opts.run_ignored {
+        RunIgnored::Yes => {
+            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
+        }
+        RunIgnored::Only => {
+            filtered.retain(|test| test.desc.ignore);
+            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
+        }
+        RunIgnored::No => {}
+    }
+
+    // Sort the tests alphabetically
+    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
+
+    filtered
+}
+
+pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
+    // convert benchmarks to tests, if we're not benchmarking them
+    tests
+        .into_iter()
+        .map(|x| {
+            let testfn = match x.testfn {
+                DynBenchFn(bench) => DynTestFn(Box::new(move || {
+                    bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
+                })),
+                StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
+                    bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+                })),
+                f => f,
+            };
+            TestDescAndFn { desc: x.desc, testfn }
+        })
+        .collect()
+}
+
+pub fn run_test(
+    opts: &TestOpts,
+    force_ignore: bool,
+    test: TestDescAndFn,
+    strategy: RunStrategy,
+    monitor_ch: Sender<CompletedTest>,
+    concurrency: Concurrent,
+) {
+    let TestDescAndFn { desc, testfn } = test;
+
+    // Emscripten can catch panics but other wasm targets cannot
+    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
+        && cfg!(target_arch = "wasm32")
+        && !cfg!(target_os = "emscripten");
+
+    if force_ignore || desc.ignore || ignore_because_no_process_support {
+        let message = CompletedTest::new(desc, TrIgnored, None, Vec::new());
+        monitor_ch.send(message).unwrap();
+        return;
+    }
+
+    struct TestRunOpts {
+        pub strategy: RunStrategy,
+        pub nocapture: bool,
+        pub concurrency: Concurrent,
+        pub time: Option<time::TestTimeOptions>,
+    }
+
+    fn run_test_inner(
+        desc: TestDesc,
+        monitor_ch: Sender<CompletedTest>,
+        testfn: Box<dyn FnOnce() + Send>,
+        opts: TestRunOpts,
+    ) {
+        let concurrency = opts.concurrency;
+        let name = desc.name.clone();
+
+        let runtest = move || match opts.strategy {
+            RunStrategy::InProcess => run_test_in_process(
+                desc,
+                opts.nocapture,
+                opts.time.is_some(),
+                testfn,
+                monitor_ch,
+                opts.time,
+            ),
+            RunStrategy::SpawnPrimary => spawn_test_subprocess(
+                desc,
+                opts.nocapture,
+                opts.time.is_some(),
+                monitor_ch,
+                opts.time,
+            ),
+        };
+
+        // If the platform is single-threaded we're just going to run
+        // the test synchronously, regardless of the concurrency
+        // level.
+        let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
+        if concurrency == Concurrent::Yes && supports_threads {
+            let cfg = thread::Builder::new().name(name.as_slice().to_owned());
+            cfg.spawn(runtest).unwrap();
+        } else {
+            runtest();
+        }
+    }
+
+    let test_run_opts =
+        TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
+
+    match testfn {
+        DynBenchFn(bencher) => {
+            // Benchmarks aren't expected to panic, so we run them all in-process.
+            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+                bencher.run(harness)
+            });
+        }
+        StaticBenchFn(benchfn) => {
+            // Benchmarks aren't expected to panic, so we run them all in-process.
+            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn);
+        }
+        DynTestFn(f) => {
+            match strategy {
+                RunStrategy::InProcess => (),
+                _ => panic!("Cannot run dynamic test fn out-of-process"),
+            };
+            run_test_inner(
+                desc,
+                monitor_ch,
+                Box::new(move || __rust_begin_short_backtrace(f)),
+                test_run_opts,
+            );
+        }
+        StaticTestFn(f) => run_test_inner(
+            desc,
+            monitor_ch,
+            Box::new(move || __rust_begin_short_backtrace(f)),
+            test_run_opts,
+        ),
+    }
+}
+
+/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
+#[inline(never)]
+fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
+    f()
+}
+
+fn run_test_in_process(
+    desc: TestDesc,
+    nocapture: bool,
+    report_time: bool,
+    testfn: Box<dyn FnOnce() + Send>,
+    monitor_ch: Sender<CompletedTest>,
+    time_opts: Option<time::TestTimeOptions>,
+) {
+    // Buffer for capturing standard I/O
+    let data = Arc::new(Mutex::new(Vec::new()));
+
+    let oldio = if !nocapture {
+        Some((
+            io::set_print(Some(Sink::new_boxed(&data))),
+            io::set_panic(Some(Sink::new_boxed(&data))),
+        ))
+    } else {
+        None
+    };
+
+    let start = report_time.then(Instant::now);
+    let result = catch_unwind(AssertUnwindSafe(testfn));
+    let exec_time = start.map(|start| {
+        let duration = start.elapsed();
+        TestExecTime(duration)
+    });
+
+    if let Some((printio, panicio)) = oldio {
+        io::set_print(printio);
+        io::set_panic(panicio);
+    }
+
+    let test_result = match result {
+        Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
+        Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
+    };
+    let stdout = data.lock().unwrap().to_vec();
+    let message = CompletedTest::new(desc, test_result, exec_time, stdout);
+    monitor_ch.send(message).unwrap();
+}
+
+fn spawn_test_subprocess(
+    desc: TestDesc,
+    nocapture: bool,
+    report_time: bool,
+    monitor_ch: Sender<CompletedTest>,
+    time_opts: Option<time::TestTimeOptions>,
+) {
+    let (result, test_output, exec_time) = (|| {
+        let args = env::args().collect::<Vec<_>>();
+        let current_exe = &args[0];
+
+        let mut command = Command::new(current_exe);
+        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
+        if nocapture {
+            command.stdout(process::Stdio::inherit());
+            command.stderr(process::Stdio::inherit());
+        }
+
+        let start = report_time.then(Instant::now);
+        let output = match command.output() {
+            Ok(out) => out,
+            Err(e) => {
+                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
+                return (TrFailed, err.into_bytes(), None);
+            }
+        };
+        let exec_time = start.map(|start| {
+            let duration = start.elapsed();
+            TestExecTime(duration)
+        });
+
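+        // Merge the child's stderr into its stdout, separated by a
+        // delimiter, so both streams end up in the captured test output.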
+        let std::process::Output { stdout, stderr, status } = output;
+        let mut test_output = stdout;
+        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
+        test_output.extend_from_slice(&stderr);
+
+        let result = match (|| -> Result<TestResult, String> {
+            let exit_code = get_exit_code(status)?;
+            Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time))
+        })() {
+            Ok(r) => r,
+            Err(e) => {
+                write!(&mut test_output, "Unexpected error: {}", e).unwrap();
+                TrFailed
+            }
+        };
+
+        (result, test_output, exec_time)
+    })();
+
+    let message = CompletedTest::new(desc, result, exec_time, test_output);
+    monitor_ch.send(message).unwrap();
+}
+
+fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! {
+    let builtin_panic_hook = panic::take_hook();
+    let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
+        let test_result = match panic_info {
+            Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
+            None => calc_result(&desc, Ok(()), &None, &None),
+        };
+
+        // We don't support serializing TrFailedMsg, so just
+        // print the message out to stderr.
+        if let TrFailedMsg(msg) = &test_result {
+            eprintln!("{}", msg);
+        }
+
+        if let Some(info) = panic_info {
+            builtin_panic_hook(info);
+        }
+
+        if let TrOk = test_result {
+            process::exit(test_result::TR_OK);
+        } else {
+            process::exit(test_result::TR_FAILED);
+        }
+    });
+    let record_result2 = record_result.clone();
+    panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
+    testfn();
+    record_result(None);
+    unreachable!("panic=abort callback should have exited the process")
+}
diff --git a/library/test/src/options.rs b/library/test/src/options.rs
new file mode 100644
index 00000000000..8e7bd8de924
--- /dev/null
+++ b/library/test/src/options.rs
@@ -0,0 +1,87 @@
+//! Enums denoting options for test execution.
+
+/// Whether to execute tests concurrently or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Concurrent {
+    Yes,
+    No,
+}
+
+/// Number of times to run a benchmarked function
+#[derive(Clone, PartialEq, Eq)]
+pub enum BenchMode {
+    Auto,
+    Single,
+}
+
+/// Whether a test is expected to panic or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum ShouldPanic {
+    No,
+    Yes,
+    YesWithMessage(&'static str),
+}
+
+/// Whether console output should be colored or not
+#[derive(Copy, Clone, Debug)]
+pub enum ColorConfig {
+    AutoColor,
+    AlwaysColor,
+    NeverColor,
+}
+
+/// Format of the test results output
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum OutputFormat {
+    /// Verbose output
+    Pretty,
+    /// Quiet output
+    Terse,
+    /// JSON output
+    Json,
+}
+
+/// Whether ignored tests should be run or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum RunIgnored {
+    Yes,
+    No,
+    /// Run only ignored tests
+    Only,
+}
+
+#[derive(Clone, Copy)]
+pub enum RunStrategy {
+    /// Runs the test in the current process, and sends the result back over the
+    /// supplied channel.
+    InProcess,
+
+    /// Spawns a subprocess to run the test, and sends the result back over the
+    /// supplied channel. Requires `argv[0]` to exist and point to the binary
+    /// that's currently running.
+    SpawnPrimary,
+}
+
+/// Options for the test run defined by the caller (instead of CLI arguments).
+/// If other caller-supplied options are needed later, add them to this struct.
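+///
+/// A minimal sketch of the builder-style usage (illustrative):
+///
+/// ```ignore (illustrative)
+/// let opts = Options::new().display_output(true).panic_abort(false);
+/// assert!(opts.display_output);
+/// ```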
+#[derive(Copy, Clone, Debug)]
+pub struct Options {
+    pub display_output: bool,
+    pub panic_abort: bool,
+}
+
+impl Options {
+    pub fn new() -> Options {
+        Options { display_output: false, panic_abort: false }
+    }
+
+    pub fn display_output(mut self, display_output: bool) -> Options {
+        self.display_output = display_output;
+        self
+    }
+
+    pub fn panic_abort(mut self, panic_abort: bool) -> Options {
+        self.panic_abort = panic_abort;
+        self
+    }
+}
diff --git a/library/test/src/stats.rs b/library/test/src/stats.rs
new file mode 100644
index 00000000000..077005371c0
--- /dev/null
+++ b/library/test/src/stats.rs
@@ -0,0 +1,319 @@
+#![allow(missing_docs)]
+#![allow(deprecated)] // Float
+
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::mem;
+
+#[cfg(test)]
+mod tests;
+
+fn local_cmp(x: f64, y: f64) -> Ordering {
+    // arbitrarily decide that NaNs are larger than everything.
+    if y.is_nan() {
+        Less
+    } else if x.is_nan() {
+        Greater
+    } else if x < y {
+        Less
+    } else if x == y {
+        Equal
+    } else {
+        Greater
+    }
+}
+
+fn local_sort(v: &mut [f64]) {
+    v.sort_by(|x: &f64, y: &f64| local_cmp(*x, *y));
+}
+
+/// Trait that provides simple descriptive statistics on a univariate set of numeric samples.
+pub trait Stats {
+    /// Sum of the samples.
+    ///
+    /// Note: this method sacrifices performance at the altar of accuracy.
+    /// It depends on IEEE-754 arithmetic guarantees. See the proof of correctness at:
+    /// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric
+    /// Predicates"][paper]
+    ///
+    /// [paper]: http://www.cs.cmu.edu/~quake-papers/robust-arithmetic.ps
+    fn sum(&self) -> f64;
+
+    /// Minimum value of the samples.
+    fn min(&self) -> f64;
+
+    /// Maximum value of the samples.
+    fn max(&self) -> f64;
+
+    /// Arithmetic mean (average) of the samples: sum divided by sample-count.
+    ///
+    /// See: <https://en.wikipedia.org/wiki/Arithmetic_mean>
+    fn mean(&self) -> f64;
+
+    /// Median of the samples: value separating the lower half of the samples from the higher half.
+    /// Equal to `self.percentile(50.0)`.
+    ///
+    /// See: <https://en.wikipedia.org/wiki/Median>
+    fn median(&self) -> f64;
+
+    /// Variance of the samples: bias-corrected mean of the squares of the differences of each
+    /// sample from the sample mean. Note that this calculates the _sample variance_ rather than the
+    /// population variance, which is assumed to be unknown. It therefore corrects the `(n-1)/n`
+    /// bias that would appear if we calculated a population variance, by dividing by `(n-1)` rather
+    /// than `n`.
+    ///
+    /// See: <https://en.wikipedia.org/wiki/Variance>
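+    ///
+    /// For example, the sample variance of `[1.0, 2.0, 3.0]` is
+    /// `((1-2)^2 + (2-2)^2 + (3-2)^2) / (3 - 1) = 1.0`.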
+    fn var(&self) -> f64;
+
+    /// Standard deviation: the square root of the sample variance.
+    ///
+    /// Note: this is not a robust statistic for non-normal distributions. Prefer the
+    /// `median_abs_dev` for unknown distributions.
+    ///
+    /// See: <https://en.wikipedia.org/wiki/Standard_deviation>
+    fn std_dev(&self) -> f64;
+
+    /// Standard deviation as a percent of the mean value. See `std_dev` and `mean`.
+    ///
+    /// Note: this is not a robust statistic for non-normal distributions. Prefer the
+    /// `median_abs_dev_pct` for unknown distributions.
+    fn std_dev_pct(&self) -> f64;
+
+    /// Scaled median of the absolute deviations of each sample from the sample median. This is a
+    /// robust (distribution-agnostic) estimator of sample variability. Use this in preference to
+    /// `std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled
+    /// by the constant `1.4826` to allow its use as a consistent estimator for the standard
+    /// deviation.
+    ///
+    /// See: <http://en.wikipedia.org/wiki/Median_absolute_deviation>
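+    ///
+    /// For example, for `[1.0, 2.0, 3.0, 4.0, 100.0]` the median is `3.0`,
+    /// the absolute deviations are `[2.0, 1.0, 0.0, 1.0, 97.0]` with median
+    /// `1.0`, and so this method returns `1.4826`.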
+    fn median_abs_dev(&self) -> f64;
+
+    /// Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`.
+    fn median_abs_dev_pct(&self) -> f64;
+
+    /// Percentile: the value below which `pct` percent of the values in `self` fall. For example,
+    /// percentile(95.0) will return the value `v` such that 95% of the samples `s` in `self`
+    /// satisfy `s <= v`.
+    ///
+    /// Calculated by linear interpolation between closest ranks.
+    ///
+    /// See: <http://en.wikipedia.org/wiki/Percentile>
+    fn percentile(&self, pct: f64) -> f64;
+
+    /// Quartiles of the sample: three values that divide the sample into four equal groups, each
+    /// with 1/4 of the data. The middle value is the median. See `median` and `percentile`. This
+    /// function may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but
+    /// is otherwise equivalent.
+    ///
+    /// See also: <https://en.wikipedia.org/wiki/Quartile>
+    fn quartiles(&self) -> (f64, f64, f64);
+
+    /// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th
+    /// percentile (3rd quartile). See `quartiles`.
+    ///
+    /// See also: <https://en.wikipedia.org/wiki/Interquartile_range>
+    fn iqr(&self) -> f64;
+}
+
+/// Extracted collection of all the summary statistics of a sample set.
+#[derive(Debug, Clone, PartialEq, Copy)]
+#[allow(missing_docs)]
+pub struct Summary {
+    pub sum: f64,
+    pub min: f64,
+    pub max: f64,
+    pub mean: f64,
+    pub median: f64,
+    pub var: f64,
+    pub std_dev: f64,
+    pub std_dev_pct: f64,
+    pub median_abs_dev: f64,
+    pub median_abs_dev_pct: f64,
+    pub quartiles: (f64, f64, f64),
+    pub iqr: f64,
+}
+
+impl Summary {
+    /// Construct a new summary of a sample set.
+    pub fn new(samples: &[f64]) -> Summary {
+        Summary {
+            sum: samples.sum(),
+            min: samples.min(),
+            max: samples.max(),
+            mean: samples.mean(),
+            median: samples.median(),
+            var: samples.var(),
+            std_dev: samples.std_dev(),
+            std_dev_pct: samples.std_dev_pct(),
+            median_abs_dev: samples.median_abs_dev(),
+            median_abs_dev_pct: samples.median_abs_dev_pct(),
+            quartiles: samples.quartiles(),
+            iqr: samples.iqr(),
+        }
+    }
+}
+
+impl Stats for [f64] {
+    // FIXME #11059 handle NaN, inf and overflow
+    fn sum(&self) -> f64 {
+        let mut partials = vec![];
+
+        for &x in self {
+            let mut x = x;
+            let mut j = 0;
+            // This inner loop applies `hi`/`lo` summation to each
+            // partial so that the list of partial sums remains exact.
+            for i in 0..partials.len() {
+                let mut y: f64 = partials[i];
+                if x.abs() < y.abs() {
+                    mem::swap(&mut x, &mut y);
+                }
+                // Rounded `x+y` is stored in `hi` with round-off stored in
+                // `lo`. Together `hi+lo` are exactly equal to `x+y`.
+                let hi = x + y;
+                let lo = y - (hi - x);
+                if lo != 0.0 {
+                    partials[j] = lo;
+                    j += 1;
+                }
+                x = hi;
+            }
+            if j >= partials.len() {
+                partials.push(x);
+            } else {
+                partials[j] = x;
+                partials.truncate(j + 1);
+            }
+        }
+        let zero: f64 = 0.0;
+        partials.iter().fold(zero, |p, q| p + *q)
+    }
+
+    fn min(&self) -> f64 {
+        assert!(!self.is_empty());
+        self.iter().fold(self[0], |p, q| p.min(*q))
+    }
+
+    fn max(&self) -> f64 {
+        assert!(!self.is_empty());
+        self.iter().fold(self[0], |p, q| p.max(*q))
+    }
+
+    fn mean(&self) -> f64 {
+        assert!(!self.is_empty());
+        self.sum() / (self.len() as f64)
+    }
+
+    fn median(&self) -> f64 {
+        self.percentile(50_f64)
+    }
+
+    fn var(&self) -> f64 {
+        if self.len() < 2 {
+            0.0
+        } else {
+            let mean = self.mean();
+            let mut v: f64 = 0.0;
+            for s in self {
+                let x = *s - mean;
+                v = v + x * x;
+            }
+            // N.B., this is _supposed to be_ len-1, not len. If you
+            // change it back to len, you will be calculating a
+            // population variance, not a sample variance.
+            let denom = (self.len() - 1) as f64;
+            v / denom
+        }
+    }
+
+    fn std_dev(&self) -> f64 {
+        self.var().sqrt()
+    }
+
+    fn std_dev_pct(&self) -> f64 {
+        let hundred = 100_f64;
+        (self.std_dev() / self.mean()) * hundred
+    }
+
+    fn median_abs_dev(&self) -> f64 {
+        let med = self.median();
+        let abs_devs: Vec<f64> = self.iter().map(|&v| (med - v).abs()).collect();
+        // This constant is derived by smarter statistics brains than me, but it is
+        // consistent with how R and other packages treat the MAD.
+        let number = 1.4826;
+        abs_devs.median() * number
+    }
+
+    fn median_abs_dev_pct(&self) -> f64 {
+        let hundred = 100_f64;
+        (self.median_abs_dev() / self.median()) * hundred
+    }
+
+    fn percentile(&self, pct: f64) -> f64 {
+        let mut tmp = self.to_vec();
+        local_sort(&mut tmp);
+        percentile_of_sorted(&tmp, pct)
+    }
+
+    fn quartiles(&self) -> (f64, f64, f64) {
+        let mut tmp = self.to_vec();
+        local_sort(&mut tmp);
+        let first = 25_f64;
+        let a = percentile_of_sorted(&tmp, first);
+        let second = 50_f64;
+        let b = percentile_of_sorted(&tmp, second);
+        let third = 75_f64;
+        let c = percentile_of_sorted(&tmp, third);
+        (a, b, c)
+    }
+
+    fn iqr(&self) -> f64 {
+        let (a, _, c) = self.quartiles();
+        c - a
+    }
+}
+
+// Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using
+// linear interpolation. If the samples are not sorted, this returns a nonsensical value.
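+//
+// For example, for `[10.0, 20.0, 30.0]` and `pct = 25.0`: rank = (25 / 100) *
+// (3 - 1) = 0.5, so the result is `10.0 + (20.0 - 10.0) * 0.5 = 15.0`.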
+fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
+    assert!(!sorted_samples.is_empty());
+    if sorted_samples.len() == 1 {
+        return sorted_samples[0];
+    }
+    let zero: f64 = 0.0;
+    assert!(zero <= pct);
+    let hundred = 100_f64;
+    assert!(pct <= hundred);
+    if pct == hundred {
+        return sorted_samples[sorted_samples.len() - 1];
+    }
+    let length = (sorted_samples.len() - 1) as f64;
+    let rank = (pct / hundred) * length;
+    let lrank = rank.floor();
+    let d = rank - lrank;
+    let n = lrank as usize;
+    let lo = sorted_samples[n];
+    let hi = sorted_samples[n + 1];
+    lo + (hi - lo) * d
+}
+
+/// Winsorize a set of samples, replacing values above the `100-pct` percentile
+/// and below the `pct` percentile with those percentiles themselves. This is a
+/// way of minimizing the effect of outliers, at the cost of biasing the sample.
+/// It differs from trimming in that it does not change the number of samples,
+/// just changes the values of those that are outliers.
+///
+/// See: <http://en.wikipedia.org/wiki/Winsorising>
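+///
+/// A minimal sketch (illustrative):
+///
+/// ```ignore (illustrative)
+/// let mut xs = [1.0, 2.0, 3.0, 4.0, 100.0];
+/// winsorize(&mut xs, 25.0);
+/// // 25th percentile = 2.0, 75th percentile = 4.0
+/// assert_eq!(xs, [2.0, 2.0, 3.0, 4.0, 4.0]);
+/// ```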
+pub fn winsorize(samples: &mut [f64], pct: f64) {
+    let mut tmp = samples.to_vec();
+    local_sort(&mut tmp);
+    let lo = percentile_of_sorted(&tmp, pct);
+    let hundred = 100_f64;
+    let hi = percentile_of_sorted(&tmp, hundred - pct);
+    for samp in samples {
+        if *samp > hi {
+            *samp = hi
+        } else if *samp < lo {
+            *samp = lo
+        }
+    }
+}
diff --git a/library/test/src/stats/tests.rs b/library/test/src/stats/tests.rs
new file mode 100644
index 00000000000..3a6e8401bf1
--- /dev/null
+++ b/library/test/src/stats/tests.rs
@@ -0,0 +1,591 @@
+use super::*;
+
+extern crate test;
+use self::test::test::Bencher;
+use std::io;
+use std::io::prelude::*;
+
+// Test vectors generated from R, using the script src/etc/stat-test-vectors.r.
+
+macro_rules! assert_approx_eq {
+    ($a: expr, $b: expr) => {{
+        let (a, b) = (&$a, &$b);
+        assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b);
+    }};
+}
+
+fn check(samples: &[f64], summ: &Summary) {
+    let summ2 = Summary::new(samples);
+
+    let mut w = io::sink();
+    let w = &mut w;
+    (write!(w, "\n")).unwrap();
+
+    assert_eq!(summ.sum, summ2.sum);
+    assert_eq!(summ.min, summ2.min);
+    assert_eq!(summ.max, summ2.max);
+    assert_eq!(summ.mean, summ2.mean);
+    assert_eq!(summ.median, summ2.median);
+
+    // We needed a few more digits to get exact equality on these
+    // but they're within float epsilon, which is 1.0e-6.
+    assert_approx_eq!(summ.var, summ2.var);
+    assert_approx_eq!(summ.std_dev, summ2.std_dev);
+    assert_approx_eq!(summ.std_dev_pct, summ2.std_dev_pct);
+    assert_approx_eq!(summ.median_abs_dev, summ2.median_abs_dev);
+    assert_approx_eq!(summ.median_abs_dev_pct, summ2.median_abs_dev_pct);
+
+    assert_eq!(summ.quartiles, summ2.quartiles);
+    assert_eq!(summ.iqr, summ2.iqr);
+}
+
+#[test]
+fn test_min_max_nan() {
+    let xs = &[1.0, 2.0, f64::NAN, 3.0, 4.0];
+    let summary = Summary::new(xs);
+    assert_eq!(summary.min, 1.0);
+    assert_eq!(summary.max, 4.0);
+}
+
+#[test]
+fn test_norm2() {
+    let val = &[958.0000000000, 924.0000000000];
+    let summ = &Summary {
+        sum: 1882.0000000000,
+        min: 924.0000000000,
+        max: 958.0000000000,
+        mean: 941.0000000000,
+        median: 941.0000000000,
+        var: 578.0000000000,
+        std_dev: 24.0416305603,
+        std_dev_pct: 2.5549022912,
+        median_abs_dev: 25.2042000000,
+        median_abs_dev_pct: 2.6784484591,
+        quartiles: (932.5000000000, 941.0000000000, 949.5000000000),
+        iqr: 17.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_norm10narrow() {
+    let val = &[
+        966.0000000000,
+        985.0000000000,
+        1110.0000000000,
+        848.0000000000,
+        821.0000000000,
+        975.0000000000,
+        962.0000000000,
+        1157.0000000000,
+        1217.0000000000,
+        955.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 9996.0000000000,
+        min: 821.0000000000,
+        max: 1217.0000000000,
+        mean: 999.6000000000,
+        median: 970.5000000000,
+        var: 16050.7111111111,
+        std_dev: 126.6914010938,
+        std_dev_pct: 12.6742097933,
+        median_abs_dev: 102.2994000000,
+        median_abs_dev_pct: 10.5408964451,
+        quartiles: (956.7500000000, 970.5000000000, 1078.7500000000),
+        iqr: 122.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_norm10medium() {
+    let val = &[
+        954.0000000000,
+        1064.0000000000,
+        855.0000000000,
+        1000.0000000000,
+        743.0000000000,
+        1084.0000000000,
+        704.0000000000,
+        1023.0000000000,
+        357.0000000000,
+        869.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 8653.0000000000,
+        min: 357.0000000000,
+        max: 1084.0000000000,
+        mean: 865.3000000000,
+        median: 911.5000000000,
+        var: 48628.4555555556,
+        std_dev: 220.5186059170,
+        std_dev_pct: 25.4846418487,
+        median_abs_dev: 195.7032000000,
+        median_abs_dev_pct: 21.4704552935,
+        quartiles: (771.0000000000, 911.5000000000, 1017.2500000000),
+        iqr: 246.2500000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_norm10wide() {
+    let val = &[
+        505.0000000000,
+        497.0000000000,
+        1591.0000000000,
+        887.0000000000,
+        1026.0000000000,
+        136.0000000000,
+        1580.0000000000,
+        940.0000000000,
+        754.0000000000,
+        1433.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 9349.0000000000,
+        min: 136.0000000000,
+        max: 1591.0000000000,
+        mean: 934.9000000000,
+        median: 913.5000000000,
+        var: 239208.9888888889,
+        std_dev: 489.0899599142,
+        std_dev_pct: 52.3146817750,
+        median_abs_dev: 611.5725000000,
+        median_abs_dev_pct: 66.9482758621,
+        quartiles: (567.2500000000, 913.5000000000, 1331.2500000000),
+        iqr: 764.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_norm25verynarrow() {
+    let val = &[
+        991.0000000000,
+        1018.0000000000,
+        998.0000000000,
+        1013.0000000000,
+        974.0000000000,
+        1007.0000000000,
+        1014.0000000000,
+        999.0000000000,
+        1011.0000000000,
+        978.0000000000,
+        985.0000000000,
+        999.0000000000,
+        983.0000000000,
+        982.0000000000,
+        1015.0000000000,
+        1002.0000000000,
+        977.0000000000,
+        948.0000000000,
+        1040.0000000000,
+        974.0000000000,
+        996.0000000000,
+        989.0000000000,
+        1015.0000000000,
+        994.0000000000,
+        1024.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 24926.0000000000,
+        min: 948.0000000000,
+        max: 1040.0000000000,
+        mean: 997.0400000000,
+        median: 998.0000000000,
+        var: 393.2066666667,
+        std_dev: 19.8294393937,
+        std_dev_pct: 1.9888308788,
+        median_abs_dev: 22.2390000000,
+        median_abs_dev_pct: 2.2283567134,
+        quartiles: (983.0000000000, 998.0000000000, 1013.0000000000),
+        iqr: 30.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_exp10a() {
+    let val = &[
+        23.0000000000,
+        11.0000000000,
+        2.0000000000,
+        57.0000000000,
+        4.0000000000,
+        12.0000000000,
+        5.0000000000,
+        29.0000000000,
+        3.0000000000,
+        21.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 167.0000000000,
+        min: 2.0000000000,
+        max: 57.0000000000,
+        mean: 16.7000000000,
+        median: 11.5000000000,
+        var: 287.7888888889,
+        std_dev: 16.9643416875,
+        std_dev_pct: 101.5828843560,
+        median_abs_dev: 13.3434000000,
+        median_abs_dev_pct: 116.0295652174,
+        quartiles: (4.2500000000, 11.5000000000, 22.5000000000),
+        iqr: 18.2500000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_exp10b() {
+    let val = &[
+        24.0000000000,
+        17.0000000000,
+        6.0000000000,
+        38.0000000000,
+        25.0000000000,
+        7.0000000000,
+        51.0000000000,
+        2.0000000000,
+        61.0000000000,
+        32.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 263.0000000000,
+        min: 2.0000000000,
+        max: 61.0000000000,
+        mean: 26.3000000000,
+        median: 24.5000000000,
+        var: 383.5666666667,
+        std_dev: 19.5848580967,
+        std_dev_pct: 74.4671410520,
+        median_abs_dev: 22.9803000000,
+        median_abs_dev_pct: 93.7971428571,
+        quartiles: (9.5000000000, 24.5000000000, 36.5000000000),
+        iqr: 27.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_exp10c() {
+    let val = &[
+        71.0000000000,
+        2.0000000000,
+        32.0000000000,
+        1.0000000000,
+        6.0000000000,
+        28.0000000000,
+        13.0000000000,
+        37.0000000000,
+        16.0000000000,
+        36.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 242.0000000000,
+        min: 1.0000000000,
+        max: 71.0000000000,
+        mean: 24.2000000000,
+        median: 22.0000000000,
+        var: 458.1777777778,
+        std_dev: 21.4050876611,
+        std_dev_pct: 88.4507754589,
+        median_abs_dev: 21.4977000000,
+        median_abs_dev_pct: 97.7168181818,
+        quartiles: (7.7500000000, 22.0000000000, 35.0000000000),
+        iqr: 27.2500000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_exp25() {
+    let val = &[
+        3.0000000000,
+        24.0000000000,
+        1.0000000000,
+        19.0000000000,
+        7.0000000000,
+        5.0000000000,
+        30.0000000000,
+        39.0000000000,
+        31.0000000000,
+        13.0000000000,
+        25.0000000000,
+        48.0000000000,
+        1.0000000000,
+        6.0000000000,
+        42.0000000000,
+        63.0000000000,
+        2.0000000000,
+        12.0000000000,
+        108.0000000000,
+        26.0000000000,
+        1.0000000000,
+        7.0000000000,
+        44.0000000000,
+        25.0000000000,
+        11.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 593.0000000000,
+        min: 1.0000000000,
+        max: 108.0000000000,
+        mean: 23.7200000000,
+        median: 19.0000000000,
+        var: 601.0433333333,
+        std_dev: 24.5161851301,
+        std_dev_pct: 103.3565983562,
+        median_abs_dev: 19.2738000000,
+        median_abs_dev_pct: 101.4410526316,
+        quartiles: (6.0000000000, 19.0000000000, 31.0000000000),
+        iqr: 25.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_binom25() {
+    let val = &[
+        18.0000000000,
+        17.0000000000,
+        27.0000000000,
+        15.0000000000,
+        21.0000000000,
+        25.0000000000,
+        17.0000000000,
+        24.0000000000,
+        25.0000000000,
+        24.0000000000,
+        26.0000000000,
+        26.0000000000,
+        23.0000000000,
+        15.0000000000,
+        23.0000000000,
+        17.0000000000,
+        18.0000000000,
+        18.0000000000,
+        21.0000000000,
+        16.0000000000,
+        15.0000000000,
+        31.0000000000,
+        20.0000000000,
+        17.0000000000,
+        15.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 514.0000000000,
+        min: 15.0000000000,
+        max: 31.0000000000,
+        mean: 20.5600000000,
+        median: 20.0000000000,
+        var: 20.8400000000,
+        std_dev: 4.5650848842,
+        std_dev_pct: 22.2037202539,
+        median_abs_dev: 5.9304000000,
+        median_abs_dev_pct: 29.6520000000,
+        quartiles: (17.0000000000, 20.0000000000, 24.0000000000),
+        iqr: 7.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_pois25lambda30() {
+    let val = &[
+        27.0000000000,
+        33.0000000000,
+        34.0000000000,
+        34.0000000000,
+        24.0000000000,
+        39.0000000000,
+        28.0000000000,
+        27.0000000000,
+        31.0000000000,
+        28.0000000000,
+        38.0000000000,
+        21.0000000000,
+        33.0000000000,
+        36.0000000000,
+        29.0000000000,
+        37.0000000000,
+        32.0000000000,
+        34.0000000000,
+        31.0000000000,
+        39.0000000000,
+        25.0000000000,
+        31.0000000000,
+        32.0000000000,
+        40.0000000000,
+        24.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 787.0000000000,
+        min: 21.0000000000,
+        max: 40.0000000000,
+        mean: 31.4800000000,
+        median: 32.0000000000,
+        var: 26.5933333333,
+        std_dev: 5.1568724372,
+        std_dev_pct: 16.3814245145,
+        median_abs_dev: 5.9304000000,
+        median_abs_dev_pct: 18.5325000000,
+        quartiles: (28.0000000000, 32.0000000000, 34.0000000000),
+        iqr: 6.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_pois25lambda40() {
+    let val = &[
+        42.0000000000,
+        50.0000000000,
+        42.0000000000,
+        46.0000000000,
+        34.0000000000,
+        45.0000000000,
+        34.0000000000,
+        49.0000000000,
+        39.0000000000,
+        28.0000000000,
+        40.0000000000,
+        35.0000000000,
+        37.0000000000,
+        39.0000000000,
+        46.0000000000,
+        44.0000000000,
+        32.0000000000,
+        45.0000000000,
+        42.0000000000,
+        37.0000000000,
+        48.0000000000,
+        42.0000000000,
+        33.0000000000,
+        42.0000000000,
+        48.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 1019.0000000000,
+        min: 28.0000000000,
+        max: 50.0000000000,
+        mean: 40.7600000000,
+        median: 42.0000000000,
+        var: 34.4400000000,
+        std_dev: 5.8685603004,
+        std_dev_pct: 14.3978417577,
+        median_abs_dev: 5.9304000000,
+        median_abs_dev_pct: 14.1200000000,
+        quartiles: (37.0000000000, 42.0000000000, 45.0000000000),
+        iqr: 8.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_pois25lambda50() {
+    let val = &[
+        45.0000000000,
+        43.0000000000,
+        44.0000000000,
+        61.0000000000,
+        51.0000000000,
+        53.0000000000,
+        59.0000000000,
+        52.0000000000,
+        49.0000000000,
+        51.0000000000,
+        51.0000000000,
+        50.0000000000,
+        49.0000000000,
+        56.0000000000,
+        42.0000000000,
+        52.0000000000,
+        51.0000000000,
+        43.0000000000,
+        48.0000000000,
+        48.0000000000,
+        50.0000000000,
+        42.0000000000,
+        43.0000000000,
+        42.0000000000,
+        60.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 1235.0000000000,
+        min: 42.0000000000,
+        max: 61.0000000000,
+        mean: 49.4000000000,
+        median: 50.0000000000,
+        var: 31.6666666667,
+        std_dev: 5.6273143387,
+        std_dev_pct: 11.3913245723,
+        median_abs_dev: 4.4478000000,
+        median_abs_dev_pct: 8.8956000000,
+        quartiles: (44.0000000000, 50.0000000000, 52.0000000000),
+        iqr: 8.0000000000,
+    };
+    check(val, summ);
+}
+#[test]
+fn test_unif25() {
+    let val = &[
+        99.0000000000,
+        55.0000000000,
+        92.0000000000,
+        79.0000000000,
+        14.0000000000,
+        2.0000000000,
+        33.0000000000,
+        49.0000000000,
+        3.0000000000,
+        32.0000000000,
+        84.0000000000,
+        59.0000000000,
+        22.0000000000,
+        86.0000000000,
+        76.0000000000,
+        31.0000000000,
+        29.0000000000,
+        11.0000000000,
+        41.0000000000,
+        53.0000000000,
+        45.0000000000,
+        44.0000000000,
+        98.0000000000,
+        98.0000000000,
+        7.0000000000,
+    ];
+    let summ = &Summary {
+        sum: 1242.0000000000,
+        min: 2.0000000000,
+        max: 99.0000000000,
+        mean: 49.6800000000,
+        median: 45.0000000000,
+        var: 1015.6433333333,
+        std_dev: 31.8691595957,
+        std_dev_pct: 64.1488719719,
+        median_abs_dev: 45.9606000000,
+        median_abs_dev_pct: 102.1346666667,
+        quartiles: (29.0000000000, 45.0000000000, 79.0000000000),
+        iqr: 50.0000000000,
+    };
+    check(val, summ);
+}
+
+#[test]
+fn test_sum_f64s() {
+    assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999);
+}
+#[test]
+fn test_sum_f64_between_ints_that_sum_to_0() {
+    assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2);
+}
+
+#[bench]
+pub fn sum_three_items(b: &mut Bencher) {
+    b.iter(|| {
+        [1e20f64, 1.5f64, -1e20f64].sum();
+    })
+}
+#[bench]
+pub fn sum_many_f64(b: &mut Bencher) {
+    let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
+    let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();
+
+    b.iter(|| {
+        v.sum();
+    })
+}
+
+#[bench]
+pub fn no_iter(_: &mut Bencher) {}
diff --git a/library/test/src/test_result.rs b/library/test/src/test_result.rs
new file mode 100644
index 00000000000..465f3f8f994
--- /dev/null
+++ b/library/test/src/test_result.rs
@@ -0,0 +1,115 @@
+use std::any::Any;
+
+use super::bench::BenchSamples;
+use super::options::ShouldPanic;
+use super::time;
+use super::types::TestDesc;
+
+pub use self::TestResult::*;
+
+// Return codes for the secondary process.
+// Start somewhere other than 0 so we know the return code means what we think
+// it means.
+pub const TR_OK: i32 = 50;
+pub const TR_FAILED: i32 = 51;
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum TestResult {
+    TrOk,
+    TrFailed,
+    TrFailedMsg(String),
+    TrIgnored,
+    TrAllowedFail,
+    TrBench(BenchSamples),
+    TrTimedFail,
+}
+
+unsafe impl Send for TestResult {}
+
+/// Creates a `TestResult` depending on the raw result of test execution
+/// and associated data.
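+///
+/// A minimal sketch of the mapping (`desc` is a hypothetical `TestDesc`
+/// built elsewhere, with `should_panic == ShouldPanic::No`):
+///
+/// ```ignore (relies-on-crate-internal-types)
+/// // A test that did not panic and was not expected to panic passes.
+/// let result = calc_result(&desc, Ok(()), &None, &None);
+/// assert_eq!(result, TestResult::TrOk);
+/// ```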
+pub fn calc_result<'a>(
+    desc: &TestDesc,
+    task_result: Result<(), &'a (dyn Any + 'static + Send)>,
+    time_opts: &Option<time::TestTimeOptions>,
+    exec_time: &Option<time::TestExecTime>,
+) -> TestResult {
+    let result = match (&desc.should_panic, task_result) {
+        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
+        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
+            let maybe_panic_str = err
+                .downcast_ref::<String>()
+                .map(|e| &**e)
+                .or_else(|| err.downcast_ref::<&'static str>().copied());
+
+            if maybe_panic_str.map(|e| e.contains(msg)).unwrap_or(false) {
+                TestResult::TrOk
+            } else if desc.allow_fail {
+                TestResult::TrAllowedFail
+            } else if let Some(panic_str) = maybe_panic_str {
+                TestResult::TrFailedMsg(format!(
+                    r#"panic did not contain expected string
+      panic message: `{:?}`,
+ expected substring: `{:?}`"#,
+                    panic_str, msg
+                ))
+            } else {
+                TestResult::TrFailedMsg(format!(
+                    r#"expected panic with string value,
+ found non-string value: `{:?}`
+     expected substring: `{:?}`"#,
+                    (**err).type_id(),
+                    msg
+                ))
+            }
+        }
+        (&ShouldPanic::Yes, Ok(())) => {
+            TestResult::TrFailedMsg("test did not panic as expected".to_string())
+        }
+        _ if desc.allow_fail => TestResult::TrAllowedFail,
+        _ => TestResult::TrFailed,
+    };
+
+    // If the test already failed (or was allowed to fail), do not change the result.
+    if result != TestResult::TrOk {
+        return result;
+    }
+
+    // Check whether the test failed due to a timeout.
+    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
+        if opts.error_on_excess && opts.is_critical(desc, time) {
+            return TestResult::TrTimedFail;
+        }
+    }
+
+    result
+}
+
+/// Creates a `TestResult` depending on the exit code of the test subprocess.
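+///
+/// A hedged sketch of the exit-code mapping (`desc` is a hypothetical
+/// `TestDesc` with `allow_fail == false`):
+///
+/// ```ignore (relies-on-crate-internal-types)
+/// assert_eq!(get_result_from_exit_code(&desc, TR_OK, &None, &None), TestResult::TrOk);
+/// assert_eq!(get_result_from_exit_code(&desc, TR_FAILED, &None, &None), TestResult::TrFailed);
+/// ```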
+pub fn get_result_from_exit_code(
+    desc: &TestDesc,
+    code: i32,
+    time_opts: &Option<time::TestTimeOptions>,
+    exec_time: &Option<time::TestExecTime>,
+) -> TestResult {
+    let result = match (desc.allow_fail, code) {
+        (_, TR_OK) => TestResult::TrOk,
+        (true, TR_FAILED) => TestResult::TrAllowedFail,
+        (false, TR_FAILED) => TestResult::TrFailed,
+        (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)),
+    };
+
+    // If the test already failed (or was allowed to fail), do not change the result.
+    if result != TestResult::TrOk {
+        return result;
+    }
+
+    // Check whether the test failed due to a timeout.
+    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
+        if opts.error_on_excess && opts.is_critical(desc, time) {
+            return TestResult::TrTimedFail;
+        }
+    }
+
+    result
+}
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
new file mode 100644
index 00000000000..85a0705f69c
--- /dev/null
+++ b/library/test/src/tests.rs
@@ -0,0 +1,688 @@
+use super::*;
+
+use crate::{
+    bench::Bencher,
+    console::OutputLocation,
+    formatters::PrettyFormatter,
+    options::OutputFormat,
+    test::{
+        filter_tests,
+        parse_opts,
+        run_test,
+        DynTestFn,
+        DynTestName,
+        MetricMap,
+        RunIgnored,
+        RunStrategy,
+        ShouldPanic,
+        StaticTestName,
+        TestDesc,
+        TestDescAndFn,
+        TestOpts,
+        TrIgnored,
+        TrOk,
+        // FIXME (introduced by #65251)
+        // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
+        // TestType, TrFailedMsg, TrIgnored, TrOk,
+    },
+    time::{TestTimeOptions, TimeThreshold},
+};
+use std::any::TypeId;
+use std::sync::mpsc::channel;
+use std::time::Duration;
+
+impl TestOpts {
+    fn new() -> TestOpts {
+        TestOpts {
+            list: false,
+            filter: None,
+            filter_exact: false,
+            force_run_in_process: false,
+            exclude_should_panic: false,
+            run_ignored: RunIgnored::No,
+            run_tests: false,
+            bench_benchmarks: false,
+            logfile: None,
+            nocapture: false,
+            color: AutoColor,
+            format: OutputFormat::Pretty,
+            test_threads: None,
+            skip: vec![],
+            time_options: None,
+            options: Options::new(),
+        }
+    }
+}
+
+fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
+    vec![
+        TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("1"),
+                ignore: true,
+                should_panic: ShouldPanic::No,
+                allow_fail: false,
+                test_type: TestType::Unknown,
+            },
+            testfn: DynTestFn(Box::new(move || {})),
+        },
+        TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("2"),
+                ignore: false,
+                should_panic: ShouldPanic::No,
+                allow_fail: false,
+                test_type: TestType::Unknown,
+            },
+            testfn: DynTestFn(Box::new(move || {})),
+        },
+    ]
+}
+
+#[test]
+pub fn do_not_run_ignored_tests() {
+    fn f() {
+        panic!();
+    }
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: true,
+            should_panic: ShouldPanic::No,
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_ne!(result, TrOk);
+}
+
+#[test]
+pub fn ignored_tests_result_in_ignored() {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: true,
+            should_panic: ShouldPanic::No,
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrIgnored);
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic() {
+    fn f() {
+        panic!();
+    }
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::Yes,
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrOk);
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_good_message() {
+    fn f() {
+        panic!("an error message");
+    }
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::YesWithMessage("error message"),
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrOk);
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_bad_message() {
+    use crate::tests::TrFailedMsg;
+    fn f() {
+        panic!("an error message");
+    }
+    let expected = "foobar";
+    let failed_msg = r#"panic did not contain expected string
+      panic message: `"an error message"`,
+ expected substring: `"foobar"`"#;
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::YesWithMessage(expected),
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_non_string_message_type() {
+    use crate::tests::TrFailedMsg;
+    fn f() {
+        panic!(1i32);
+    }
+    let expected = "foobar";
+    let failed_msg = format!(
+        r#"expected panic with string value,
+ found non-string value: `{:?}`
+     expected substring: `"foobar"`"#,
+        TypeId::of::<i32>()
+    );
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::YesWithMessage(expected),
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrFailedMsg(failed_msg));
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_but_succeeds() {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::Yes,
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrFailedMsg("test did not panic as expected".to_string()));
+}
+
+fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::No,
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let time_options = if report_time { Some(TestTimeOptions::default()) } else { None };
+
+    let test_opts = TestOpts { time_options, ..TestOpts::new() };
+    let (tx, rx) = channel();
+    run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    rx.recv().unwrap().exec_time
+}
+
+#[test]
+fn test_should_not_report_time() {
+    let exec_time = report_time_test_template(false);
+    assert!(exec_time.is_none());
+}
+
+#[test]
+fn test_should_report_time() {
+    let exec_time = report_time_test_template(true);
+    assert!(exec_time.is_some());
+}
+
+fn time_test_failure_template(test_type: TestType) -> TestResult {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::No,
+            allow_fail: false,
+            test_type,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    // `Default` will initialize all the thresholds to 0 milliseconds.
+    let mut time_options = TestTimeOptions::default();
+    time_options.error_on_excess = true;
+
+    let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
+    let (tx, rx) = channel();
+    run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    rx.recv().unwrap().result
+}
+
+#[test]
+fn test_error_on_exceed() {
+    let types = [TestType::UnitTest, TestType::IntegrationTest, TestType::DocTest];
+
+    for test_type in types.iter() {
+        let result = time_test_failure_template(*test_type);
+
+        assert_eq!(result, TestResult::TrTimedFail);
+    }
+
+    // Check that for unknown tests thresholds aren't applied.
+    let result = time_test_failure_template(TestType::Unknown);
+    assert_eq!(result, TestResult::TrOk);
+}
+
+fn typed_test_desc(test_type: TestType) -> TestDesc {
+    TestDesc {
+        name: StaticTestName("whatever"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+        test_type,
+    }
+}
+
+fn test_exec_time(millis: u64) -> TestExecTime {
+    TestExecTime(Duration::from_millis(millis))
+}
+
+#[test]
+fn test_time_options_threshold() {
+    let unit = TimeThreshold::new(Duration::from_millis(50), Duration::from_millis(100));
+    let integration = TimeThreshold::new(Duration::from_millis(500), Duration::from_millis(1000));
+    let doc = TimeThreshold::new(Duration::from_millis(5000), Duration::from_millis(10000));
+
+    let options = TestTimeOptions {
+        error_on_excess: false,
+        colored: false,
+        unit_threshold: unit.clone(),
+        integration_threshold: integration.clone(),
+        doctest_threshold: doc.clone(),
+    };
+
+    let test_vector = [
+        (TestType::UnitTest, unit.warn.as_millis() - 1, false, false),
+        (TestType::UnitTest, unit.warn.as_millis(), true, false),
+        (TestType::UnitTest, unit.critical.as_millis(), true, true),
+        (TestType::IntegrationTest, integration.warn.as_millis() - 1, false, false),
+        (TestType::IntegrationTest, integration.warn.as_millis(), true, false),
+        (TestType::IntegrationTest, integration.critical.as_millis(), true, true),
+        (TestType::DocTest, doc.warn.as_millis() - 1, false, false),
+        (TestType::DocTest, doc.warn.as_millis(), true, false),
+        (TestType::DocTest, doc.critical.as_millis(), true, true),
+    ];
+
+    for (test_type, time, expected_warn, expected_critical) in test_vector.iter() {
+        let test_desc = typed_test_desc(*test_type);
+        let exec_time = test_exec_time(*time as u64);
+
+        assert_eq!(options.is_warn(&test_desc, &exec_time), *expected_warn);
+        assert_eq!(options.is_critical(&test_desc, &exec_time), *expected_critical);
+    }
+}
+
+#[test]
+fn parse_ignored_flag() {
+    let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
+    let opts = parse_opts(&args).unwrap().unwrap();
+    assert_eq!(opts.run_ignored, RunIgnored::Only);
+}
+
+#[test]
+fn parse_show_output_flag() {
+    let args = vec!["progname".to_string(), "filter".to_string(), "--show-output".to_string()];
+    let opts = parse_opts(&args).unwrap().unwrap();
+    assert!(opts.options.display_output);
+}
+
+#[test]
+fn parse_include_ignored_flag() {
+    let args = vec![
+        "progname".to_string(),
+        "filter".to_string(),
+        "-Zunstable-options".to_string(),
+        "--include-ignored".to_string(),
+    ];
+    let opts = parse_opts(&args).unwrap().unwrap();
+    assert_eq!(opts.run_ignored, RunIgnored::Yes);
+}
+
+#[test]
+pub fn filter_for_ignored_option() {
+    // When we run ignored tests, the test filter should filter out all the
+    // unignored tests and flip the ignore flag on the rest to false.
+
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+    opts.run_ignored = RunIgnored::Only;
+
+    let tests = one_ignored_one_unignored_test();
+    let filtered = filter_tests(&opts, tests);
+
+    assert_eq!(filtered.len(), 1);
+    assert_eq!(filtered[0].desc.name.to_string(), "1");
+    assert!(!filtered[0].desc.ignore);
+}
+
+#[test]
+pub fn run_include_ignored_option() {
+    // When we "--include-ignored" tests, the ignore flag should be set to false on
+    // all tests and no test filtered out
+
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+    opts.run_ignored = RunIgnored::Yes;
+
+    let tests = one_ignored_one_unignored_test();
+    let filtered = filter_tests(&opts, tests);
+
+    assert_eq!(filtered.len(), 2);
+    assert!(!filtered[0].desc.ignore);
+    assert!(!filtered[1].desc.ignore);
+}
+
+#[test]
+pub fn exclude_should_panic_option() {
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+    opts.exclude_should_panic = true;
+
+    let mut tests = one_ignored_one_unignored_test();
+    tests.push(TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("3"),
+            ignore: false,
+            should_panic: ShouldPanic::Yes,
+            allow_fail: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(move || {})),
+    });
+
+    let filtered = filter_tests(&opts, tests);
+
+    assert_eq!(filtered.len(), 2);
+    assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
+}
+
+#[test]
+pub fn exact_filter_match() {
+    fn tests() -> Vec<TestDescAndFn> {
+        vec!["base", "base::test", "base::test1", "base::test2"]
+            .into_iter()
+            .map(|name| TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName(name),
+                    ignore: false,
+                    should_panic: ShouldPanic::No,
+                    allow_fail: false,
+                    test_type: TestType::Unknown,
+                },
+                testfn: DynTestFn(Box::new(move || {})),
+            })
+            .collect()
+    }
+
+    let substr =
+        filter_tests(&TestOpts { filter: Some("base".into()), ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 4);
+
+    let substr = filter_tests(&TestOpts { filter: Some("bas".into()), ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 4);
+
+    let substr =
+        filter_tests(&TestOpts { filter: Some("::test".into()), ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 3);
+
+    let substr =
+        filter_tests(&TestOpts { filter: Some("base::test".into()), ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 3);
+
+    let exact = filter_tests(
+        &TestOpts { filter: Some("base".into()), filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 1);
+
+    let exact = filter_tests(
+        &TestOpts { filter: Some("bas".into()), filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 0);
+
+    let exact = filter_tests(
+        &TestOpts { filter: Some("::test".into()), filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 0);
+
+    let exact = filter_tests(
+        &TestOpts { filter: Some("base::test".into()), filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 1);
+}
+
+#[test]
+pub fn sort_tests() {
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+
+    let names = vec![
+        "sha1::test".to_string(),
+        "isize::test_to_str".to_string(),
+        "isize::test_pow".to_string(),
+        "test::do_not_run_ignored_tests".to_string(),
+        "test::ignored_tests_result_in_ignored".to_string(),
+        "test::first_free_arg_should_be_a_filter".to_string(),
+        "test::parse_ignored_flag".to_string(),
+        "test::parse_include_ignored_flag".to_string(),
+        "test::filter_for_ignored_option".to_string(),
+        "test::run_include_ignored_option".to_string(),
+        "test::sort_tests".to_string(),
+    ];
+    let tests = {
+        fn testfn() {}
+        let mut tests = Vec::new();
+        for name in &names {
+            let test = TestDescAndFn {
+                desc: TestDesc {
+                    name: DynTestName((*name).clone()),
+                    ignore: false,
+                    should_panic: ShouldPanic::No,
+                    allow_fail: false,
+                    test_type: TestType::Unknown,
+                },
+                testfn: DynTestFn(Box::new(testfn)),
+            };
+            tests.push(test);
+        }
+        tests
+    };
+    let filtered = filter_tests(&opts, tests);
+
+    let expected = vec![
+        "isize::test_pow".to_string(),
+        "isize::test_to_str".to_string(),
+        "sha1::test".to_string(),
+        "test::do_not_run_ignored_tests".to_string(),
+        "test::filter_for_ignored_option".to_string(),
+        "test::first_free_arg_should_be_a_filter".to_string(),
+        "test::ignored_tests_result_in_ignored".to_string(),
+        "test::parse_ignored_flag".to_string(),
+        "test::parse_include_ignored_flag".to_string(),
+        "test::run_include_ignored_option".to_string(),
+        "test::sort_tests".to_string(),
+    ];
+
+    for (a, b) in expected.iter().zip(filtered) {
+        assert_eq!(*a, b.desc.name.to_string());
+    }
+}
+
+#[test]
+pub fn test_metricmap_compare() {
+    let mut m1 = MetricMap::new();
+    let mut m2 = MetricMap::new();
+    m1.insert_metric("in-both-noise", 1000.0, 200.0);
+    m2.insert_metric("in-both-noise", 1100.0, 200.0);
+
+    m1.insert_metric("in-first-noise", 1000.0, 2.0);
+    m2.insert_metric("in-second-noise", 1000.0, 2.0);
+
+    m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
+    m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
+
+    m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
+    m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
+
+    m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
+    m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
+
+    m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
+    m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
+}
+
+#[test]
+pub fn test_bench_once_no_iter() {
+    fn f(_: &mut Bencher) {}
+    bench::run_once(f);
+}
+
+#[test]
+pub fn test_bench_once_iter() {
+    fn f(b: &mut Bencher) {
+        b.iter(|| {})
+    }
+    bench::run_once(f);
+}
+
+#[test]
+pub fn test_bench_no_iter() {
+    fn f(_: &mut Bencher) {}
+
+    let (tx, rx) = channel();
+
+    let desc = TestDesc {
+        name: StaticTestName("f"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+        test_type: TestType::Unknown,
+    };
+
+    crate::bench::benchmark(desc, tx, true, f);
+    rx.recv().unwrap();
+}
+
+#[test]
+pub fn test_bench_iter() {
+    fn f(b: &mut Bencher) {
+        b.iter(|| {})
+    }
+
+    let (tx, rx) = channel();
+
+    let desc = TestDesc {
+        name: StaticTestName("f"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+        test_type: TestType::Unknown,
+    };
+
+    crate::bench::benchmark(desc, tx, true, f);
+    rx.recv().unwrap();
+}
+
+#[test]
+fn should_sort_failures_before_printing_them() {
+    let test_a = TestDesc {
+        name: StaticTestName("a"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+        test_type: TestType::Unknown,
+    };
+
+    let test_b = TestDesc {
+        name: StaticTestName("b"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+        test_type: TestType::Unknown,
+    };
+
+    let mut out = PrettyFormatter::new(OutputLocation::Raw(Vec::new()), false, 10, false, None);
+
+    let st = console::ConsoleTestState {
+        log_out: None,
+        total: 0,
+        passed: 0,
+        failed: 0,
+        ignored: 0,
+        allowed_fail: 0,
+        filtered_out: 0,
+        measured: 0,
+        metrics: MetricMap::new(),
+        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
+        options: Options::new(),
+        not_failures: Vec::new(),
+        time_failures: Vec::new(),
+    };
+
+    out.write_failures(&st).unwrap();
+    let s = match out.output_location() {
+        &OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]),
+        &OutputLocation::Pretty(_) => unreachable!(),
+    };
+
+    let apos = s.find("a").unwrap();
+    let bpos = s.find("b").unwrap();
+    assert!(apos < bpos);
+}
diff --git a/library/test/src/time.rs b/library/test/src/time.rs
new file mode 100644
index 00000000000..96c090f9b01
--- /dev/null
+++ b/library/test/src/time.rs
@@ -0,0 +1,193 @@
+//! Module `time` contains everything related to measuring the execution time
+//! of unit tests.
+//!
+//! This module has two main purposes:
+//! - Checking whether a test has timed out.
+//! - Providing helpers for the `report-time` and `measure-time` options.
+
+use std::env;
+use std::fmt;
+use std::str::FromStr;
+use std::time::{Duration, Instant};
+
+use super::types::{TestDesc, TestType};
+
+pub const TEST_WARN_TIMEOUT_S: u64 = 60;
+
+/// This small module contains constants used by the `report-time` option.
+/// These constant values are used when the corresponding environment variables
+/// are not set.
+///
+/// To override the values for unit tests, set `RUST_TEST_TIME_UNIT`;
+/// for integration tests, set `RUST_TEST_TIME_INTEGRATION`;
+/// for doctests, set `RUST_TEST_TIME_DOCTEST`.
+///
+/// The expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 is the warn
+/// time and 200 is the critical time, both in milliseconds.
+pub mod time_constants {
+    use super::TEST_WARN_TIMEOUT_S;
+    use std::time::Duration;
+
+    /// Environment variable for overriding the default threshold for unit tests.
+    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
+
+    // Unit tests are supposed to be really quick.
+    pub const UNIT_WARN: Duration = Duration::from_millis(50);
+    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
+
+    /// Environment variable for overriding the default threshold for integration tests.
+    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
+
+    // Integration tests may have a lot of work, so they can take longer to execute.
+    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
+    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
+
+    /// Environment variable for overriding the default threshold for doctests.
+    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
+
+    // Doctests are similar to integration tests, because they can include a lot of
+    // initialization code.
+    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
+    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
+
+    // Do not assume anything about unknown tests; base their limits on the
+    // `TEST_WARN_TIMEOUT_S` constant.
+    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
+    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
+}
+
+/// Returns an `Instant` denoting when the test should be considered
+/// timed out.
+pub fn get_default_test_timeout() -> Instant {
+    Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
+}
+
+/// The measured execution time of a unit test.
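+///
+/// Its `Display` impl prints seconds with millisecond precision; a small
+/// sketch:
+///
+/// ```ignore (illustrative-sketch)
+/// use std::time::Duration;
+///
+/// assert_eq!(TestExecTime(Duration::from_millis(1500)).to_string(), "1.500s");
+/// ```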
+#[derive(Debug, Clone, PartialEq)]
+pub struct TestExecTime(pub Duration);
+
+impl fmt::Display for TestExecTime {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:.3}s", self.0.as_secs_f64())
+    }
+}
+
+/// Structure denoting time limits for test execution.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TimeThreshold {
+    pub warn: Duration,
+    pub critical: Duration,
+}
+
+impl TimeThreshold {
+    /// Creates a new `TimeThreshold` instance with provided durations.
+    pub fn new(warn: Duration, critical: Duration) -> Self {
+        Self { warn, critical }
+    }
+
+    /// Attempts to create a `TimeThreshold` instance with values obtained
+    /// from the environment variable, and returns `None` if the variable
+    /// is not set.
+    /// Environment variable format is expected to match `\d+,\d+`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if variable with provided name is set but contains inappropriate
+    /// value.
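+    ///
+    /// A sketch of the expected parsing (not a doctest, since it mutates the
+    /// process environment):
+    ///
+    /// ```ignore (depends-on-process-environment)
+    /// use std::time::Duration;
+    ///
+    /// std::env::set_var("RUST_TEST_TIME_UNIT", "100,200");
+    /// let threshold = TimeThreshold::from_env_var("RUST_TEST_TIME_UNIT").unwrap();
+    /// assert_eq!(threshold, TimeThreshold::new(Duration::from_millis(100), Duration::from_millis(200)));
+    /// ```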
+    pub fn from_env_var(env_var_name: &str) -> Option<Self> {
+        let durations_str = env::var(env_var_name).ok()?;
+
+        // Split the string into two substrings at the comma and try to parse the numbers.
+        let mut durations = durations_str.splitn(2, ',').map(|v| {
+            u64::from_str(v).unwrap_or_else(|_| {
+                panic!(
+                    "Duration value in variable {} is expected to be a number, but got {}",
+                    env_var_name, v
+                )
+            })
+        });
+
+        // Callback to be called if the environment variable has unexpected structure.
+        let panic_on_incorrect_value = || {
+            panic!(
+                "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
+                env_var_name, durations_str
+            );
+        };
+
+        let (warn, critical) = (
+            durations.next().unwrap_or_else(panic_on_incorrect_value),
+            durations.next().unwrap_or_else(panic_on_incorrect_value),
+        );
+
+        if warn > critical {
+            panic!("Test execution warn time should be less than or equal to the critical time");
+        }
+
+        Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
+    }
+}
+
+/// Structure with parameters for assessing test execution time.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TestTimeOptions {
+    /// Denotes whether exceeding the critical execution time limit should be
+    /// considered a test failure.
+    pub error_on_excess: bool,
+    pub colored: bool,
+    pub unit_threshold: TimeThreshold,
+    pub integration_threshold: TimeThreshold,
+    pub doctest_threshold: TimeThreshold,
+}
+
+impl TestTimeOptions {
+    pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
+        let unit_threshold = TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
+            .unwrap_or_else(Self::default_unit);
+
+        let integration_threshold =
+            TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
+                .unwrap_or_else(Self::default_integration);
+
+        let doctest_threshold = TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
+            .unwrap_or_else(Self::default_doctest);
+
+        Self { error_on_excess, colored, unit_threshold, integration_threshold, doctest_threshold }
+    }
+
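+    /// Returns `true` if the test's execution time is at or above its warn threshold.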
+    pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.warn_time(test)
+    }
+
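+    /// Returns `true` if the test's execution time is at or above its critical threshold.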
+    pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.critical_time(test)
+    }
+
+    fn warn_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.warn,
+            TestType::IntegrationTest => self.integration_threshold.warn,
+            TestType::DocTest => self.doctest_threshold.warn,
+            TestType::Unknown => time_constants::UNKNOWN_WARN,
+        }
+    }
+
+    fn critical_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.critical,
+            TestType::IntegrationTest => self.integration_threshold.critical,
+            TestType::DocTest => self.doctest_threshold.critical,
+            TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
+        }
+    }
+
+    fn default_unit() -> TimeThreshold {
+        TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
+    }
+
+    fn default_integration() -> TimeThreshold {
+        TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
+    }
+
+    fn default_doctest() -> TimeThreshold {
+        TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
+    }
+}
diff --git a/library/test/src/types.rs b/library/test/src/types.rs
new file mode 100644
index 00000000000..5b75d2f367f
--- /dev/null
+++ b/library/test/src/types.rs
@@ -0,0 +1,145 @@
+//! Common types used by `libtest`.
+
+use std::borrow::Cow;
+use std::fmt;
+
+use super::bench::Bencher;
+use super::options;
+
+pub use NamePadding::*;
+pub use TestFn::*;
+pub use TestName::*;
+
+/// Type of the test according to the [Cargo guide](https://doc.rust-lang.org/cargo/guide/tests.html)
+/// conventions.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum TestType {
+    /// Unit tests are expected to be in the `src` folder of the crate.
+    UnitTest,
+    /// Integration-style tests are expected to be in the `tests` folder of the crate.
+    IntegrationTest,
+    /// Doctests are created manually by `librustdoc`, so they are a different type of test.
+    DocTest,
+    /// Tests for the sources that don't follow the project layout convention
+    /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
+    Unknown,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum NamePadding {
+    PadNone,
+    PadOnRight,
+}
+
+// The name of a test. By convention this follows the rules for Rust
+// paths; i.e., it should be a series of identifiers separated by double
+// colons. This way, if some test runner wants to arrange the tests
+// hierarchically, it may.
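+//
+// For example, `StaticTestName("collections::vec::test_push")` could name a
+// test nested under a hypothetical `collections::vec` module.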
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub enum TestName {
+    StaticTestName(&'static str),
+    DynTestName(String),
+    AlignedTestName(Cow<'static, str>, NamePadding),
+}
+
+impl TestName {
+    pub fn as_slice(&self) -> &str {
+        match *self {
+            StaticTestName(s) => s,
+            DynTestName(ref s) => s,
+            AlignedTestName(ref s, _) => &*s,
+        }
+    }
+
+    pub fn padding(&self) -> NamePadding {
+        match self {
+            &AlignedTestName(_, p) => p,
+            _ => PadNone,
+        }
+    }
+
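+    /// Returns a copy of this name converted into an `AlignedTestName` with
+    /// the given padding.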
+    pub fn with_padding(&self, padding: NamePadding) -> TestName {
+        let name = match *self {
+            TestName::StaticTestName(name) => Cow::Borrowed(name),
+            TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
+            TestName::AlignedTestName(ref name, _) => name.clone(),
+        };
+
+        TestName::AlignedTestName(name, padding)
+    }
+}
+impl fmt::Display for TestName {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.as_slice(), f)
+    }
+}
+
+/// Represents a benchmark function.
+pub trait TDynBenchFn: Send {
+    fn run(&self, harness: &mut Bencher);
+}
+
+// A function that runs a test. If the function returns successfully,
+// the test succeeds; if the function panics, the test fails. We may
+// need to come up with a more clever definition of a test in order
+// to support isolating tests into threads.
+pub enum TestFn {
+    StaticTestFn(fn()),
+    StaticBenchFn(fn(&mut Bencher)),
+    DynTestFn(Box<dyn FnOnce() + Send>),
+    DynBenchFn(Box<dyn TDynBenchFn + 'static>),
+}
+
+impl TestFn {
+    pub fn padding(&self) -> NamePadding {
+        match *self {
+            StaticTestFn(..) => PadNone,
+            StaticBenchFn(..) => PadOnRight,
+            DynTestFn(..) => PadNone,
+            DynBenchFn(..) => PadOnRight,
+        }
+    }
+}
+
+impl fmt::Debug for TestFn {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(match *self {
+            StaticTestFn(..) => "StaticTestFn(..)",
+            StaticBenchFn(..) => "StaticBenchFn(..)",
+            DynTestFn(..) => "DynTestFn(..)",
+            DynBenchFn(..) => "DynBenchFn(..)",
+        })
+    }
+}
+
+// The definition of a single test. A test runner will run a list of
+// these.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct TestDesc {
+    pub name: TestName,
+    pub ignore: bool,
+    pub should_panic: options::ShouldPanic,
+    pub allow_fail: bool,
+    pub test_type: TestType,
+}
+
+impl TestDesc {
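+    /// Returns the test name, right-padded with spaces to `column_count`
+    /// characters when `align` is `PadOnRight`, and unchanged for `PadNone`
+    /// (names longer than `column_count` are left as-is). For example, a test
+    /// named `"ab"` padded with `padded_name(8, PadOnRight)` yields
+    /// `"ab      "`.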
+    pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
+        let mut name = String::from(self.name.as_slice());
+        let fill = column_count.saturating_sub(name.len());
+        let pad = " ".repeat(fill);
+        match align {
+            PadNone => name,
+            PadOnRight => {
+                name.push_str(&pad);
+                name
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct TestDescAndFn {
+    pub desc: TestDesc,
+    pub testfn: TestFn,
+}