summary refs log tree commit diff
path: root/src/libtest
diff options
context:
space:
mode:
author    Vadim Petrochenkov <vadim.petrochenkov@gmail.com>  2019-08-01 02:28:43 +0300
committer Vadim Petrochenkov <vadim.petrochenkov@gmail.com>  2019-08-02 01:59:01 +0300
commit    751af273209a0466d41ea8af5ff0c318a7f221ec (patch)
tree      1722b8aefeaa9dc7d6ae81413d765e09b55f36c7 /src/libtest
parent    77eacaeabd834184761eb690650ccf6b078bdf9e (diff)
download  rust-751af273209a0466d41ea8af5ff0c318a7f221ec.tar.gz
          rust-751af273209a0466d41ea8af5ff0c318a7f221ec.zip
libtest: Unconfigure tests during normal build
Diffstat (limited to 'src/libtest')
-rw-r--r--  src/libtest/lib.rs          74
-rw-r--r--  src/libtest/stats.rs        34
-rw-r--r--  src/libtest/stats/tests.rs  27
-rw-r--r--  src/libtest/tests.rs        49
4 files changed, 89 insertions, 95 deletions
diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs
index a75975ba754..b36c5be4c07 100644
--- a/src/libtest/lib.rs
+++ b/src/libtest/lib.rs
@@ -70,6 +70,9 @@ use std::sync::{Arc, Mutex};
 use std::thread;
 use std::time::{Duration, Instant};
 
+#[cfg(test)]
+mod tests;
+
 const TEST_WARN_TIMEOUT_S: u64 = 60;
 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
 
@@ -495,18 +498,18 @@ environment variable to a value other than "0". Logging is not captured by defau
 
 Test Attributes:
 
-    #[test]        - Indicates a function is a test to be run. This function
-                     takes no arguments.
-    #[bench]       - Indicates a function is a benchmark to be run. This
-                     function takes one argument (test::Bencher).
-    #[should_panic] - This function (also labeled with #[test]) will only pass if
-                     the code causes a panic (an assertion failure or panic!)
-                     A message may be provided, which the failure string must
-                     contain: #[should_panic(expected = "foo")].
-    #[ignore]      - When applied to a function which is already attributed as a
-                     test, then the test runner will ignore these tests during
-                     normal test runs. Running with --ignored or --include-ignored will run
-                     these tests."#,
+    `#[test]`        - Indicates a function is a test to be run. This function
+                       takes no arguments.
+    `#[bench]`       - Indicates a function is a benchmark to be run. This
+                       function takes one argument (test::Bencher).
+    `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
+                        the code causes a panic (an assertion failure or panic!)
+                        A message may be provided, which the failure string must
+                        contain: #[should_panic(expected = "foo")].
+    `#[ignore]`       - When applied to a function which is already attributed as a
+                        test, then the test runner will ignore these tests during
+                        normal test runs. Running with --ignored or --include-ignored will run
+                        these tests."#,
         usage = options.usage(&message)
     );
 }
@@ -974,50 +977,6 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
     return out.write_run_finish(&st);
 }
 
-#[test]
-fn should_sort_failures_before_printing_them() {
-    let test_a = TestDesc {
-        name: StaticTestName("a"),
-        ignore: false,
-        should_panic: ShouldPanic::No,
-        allow_fail: false,
-    };
-
-    let test_b = TestDesc {
-        name: StaticTestName("b"),
-        ignore: false,
-        should_panic: ShouldPanic::No,
-        allow_fail: false,
-    };
-
-    let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
-
-    let st = ConsoleTestState {
-        log_out: None,
-        total: 0,
-        passed: 0,
-        failed: 0,
-        ignored: 0,
-        allowed_fail: 0,
-        filtered_out: 0,
-        measured: 0,
-        metrics: MetricMap::new(),
-        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
-        options: Options::new(),
-        not_failures: Vec::new(),
-    };
-
-    out.write_failures(&st).unwrap();
-    let s = match out.output_location() {
-        &Raw(ref m) => String::from_utf8_lossy(&m[..]),
-        &Pretty(_) => unreachable!(),
-    };
-
-    let apos = s.find("a").unwrap();
-    let bpos = s.find("b").unwrap();
-    assert!(apos < bpos);
-}
-
 fn use_color(opts: &TestOpts) -> bool {
     match opts.color {
         AutoColor => !opts.nocapture && stdout_isatty(),
@@ -1775,6 +1734,3 @@ pub mod bench {
         bs.bench(f);
     }
 }
-
-#[cfg(test)]
-mod tests;
diff --git a/src/libtest/stats.rs b/src/libtest/stats.rs
index 32c30061983..6577ec8ad23 100644
--- a/src/libtest/stats.rs
+++ b/src/libtest/stats.rs
@@ -4,6 +4,9 @@
 use std::cmp::Ordering::{self, Equal, Greater, Less};
 use std::mem;
 
+#[cfg(test)]
+mod tests;
+
 fn local_cmp(x: f64, y: f64) -> Ordering {
     // arbitrarily decide that NaNs are larger than everything.
     if y.is_nan() {
@@ -314,34 +317,3 @@ pub fn winsorize(samples: &mut [f64], pct: f64) {
         }
     }
 }
-
-// Test vectors generated from R, using the script src/etc/stat-test-vectors.r.
-
-#[cfg(test)]
-mod tests;
-
-#[cfg(test)]
-mod bench {
-    extern crate test;
-    use self::test::Bencher;
-    use crate::stats::Stats;
-
-    #[bench]
-    pub fn sum_three_items(b: &mut Bencher) {
-        b.iter(|| {
-            [1e20f64, 1.5f64, -1e20f64].sum();
-        })
-    }
-    #[bench]
-    pub fn sum_many_f64(b: &mut Bencher) {
-        let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
-        let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();
-
-        b.iter(|| {
-            v.sum();
-        })
-    }
-
-    #[bench]
-    pub fn no_iter(_: &mut Bencher) {}
-}
diff --git a/src/libtest/stats/tests.rs b/src/libtest/stats/tests.rs
index 59f93645360..7d1d635186f 100644
--- a/src/libtest/stats/tests.rs
+++ b/src/libtest/stats/tests.rs
@@ -1,8 +1,12 @@
-use crate::stats::Stats;
-use crate::stats::Summary;
+use super::*;
+
+extern crate test;
 use std::f64;
 use std::io::prelude::*;
 use std::io;
+use self::test::Bencher;
+
+// Test vectors generated from R, using the script src/etc/stat-test-vectors.r.
 
 macro_rules! assert_approx_eq {
     ($a: expr, $b: expr) => {{
@@ -572,3 +576,22 @@ fn test_sum_f64s() {
 fn test_sum_f64_between_ints_that_sum_to_0() {
     assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2);
 }
+
+#[bench]
+pub fn sum_three_items(b: &mut Bencher) {
+    b.iter(|| {
+        [1e20f64, 1.5f64, -1e20f64].sum();
+    })
+}
+#[bench]
+pub fn sum_many_f64(b: &mut Bencher) {
+    let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
+    let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();
+
+    b.iter(|| {
+        v.sum();
+    })
+}
+
+#[bench]
+pub fn no_iter(_: &mut Bencher) {}
diff --git a/src/libtest/tests.rs b/src/libtest/tests.rs
index d8734d8caa0..05b38f17e2b 100644
--- a/src/libtest/tests.rs
+++ b/src/libtest/tests.rs
@@ -1,11 +1,10 @@
-use crate::bench;
+use super::*;
+
 use crate::test::{
     filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
     ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
     TrIgnored, TrOk,
 };
-use crate::Bencher;
-use crate::Concurrent;
 use std::sync::mpsc::channel;
 
 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
@@ -451,3 +450,47 @@ pub fn test_bench_iter() {
     crate::bench::benchmark(desc, tx, true, f);
     rx.recv().unwrap();
 }
+
+#[test]
+fn should_sort_failures_before_printing_them() {
+    let test_a = TestDesc {
+        name: StaticTestName("a"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+    };
+
+    let test_b = TestDesc {
+        name: StaticTestName("b"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+    };
+
+    let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
+
+    let st = ConsoleTestState {
+        log_out: None,
+        total: 0,
+        passed: 0,
+        failed: 0,
+        ignored: 0,
+        allowed_fail: 0,
+        filtered_out: 0,
+        measured: 0,
+        metrics: MetricMap::new(),
+        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
+        options: Options::new(),
+        not_failures: Vec::new(),
+    };
+
+    out.write_failures(&st).unwrap();
+    let s = match out.output_location() {
+        &Raw(ref m) => String::from_utf8_lossy(&m[..]),
+        &Pretty(_) => unreachable!(),
+    };
+
+    let apos = s.find("a").unwrap();
+    let bpos = s.find("b").unwrap();
+    assert!(apos < bpos);
+}