diff options
123 files changed, 2834 insertions, 956 deletions
diff --git a/configure b/configure index a287291c280..0a07e41fb97 100755 --- a/configure +++ b/configure @@ -1890,6 +1890,12 @@ else step_msg "complete" fi +if [ "$CFG_SRC_DIR" = `pwd` ]; then + X_PY=x.py +else + X_PY=${CFG_SRC_DIR_RELATIVE}x.py +fi + if [ -z "$CFG_DISABLE_RUSTBUILD" ]; then msg "NOTE you have now configured rust to use a rewritten build system" msg " called rustbuild, and as a result this may have bugs that " @@ -1897,7 +1903,7 @@ if [ -z "$CFG_DISABLE_RUSTBUILD" ]; then msg " go back to the old build system with --disable-rustbuild and" msg " please feel free to report any bugs!" msg "" - msg "run \`python x.py --help\`" + msg "run \`python ${X_PY} --help\`" else warn "the makefile-based build system is deprecated in favor of rustbuild" msg "" diff --git a/mk/crates.mk b/mk/crates.mk index acb36b2f7da..79df941aeb3 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -52,7 +52,7 @@ TARGET_CRATES := libc std term \ getopts collections test rand \ compiler_builtins core alloc \ - rustc_unicode rustc_bitflags \ + std_unicode rustc_bitflags \ alloc_system alloc_jemalloc \ panic_abort panic_unwind unwind RUSTC_CRATES := rustc rustc_typeck rustc_mir rustc_borrowck rustc_resolve rustc_driver \ @@ -69,11 +69,11 @@ DEPS_compiler_builtins := core native:compiler-rt DEPS_alloc := core libc alloc_system DEPS_alloc_system := core libc DEPS_alloc_jemalloc := core libc native:jemalloc -DEPS_collections := core alloc rustc_unicode +DEPS_collections := core alloc std_unicode DEPS_libc := core DEPS_rand := core DEPS_rustc_bitflags := core -DEPS_rustc_unicode := core +DEPS_std_unicode := core DEPS_panic_abort := libc alloc DEPS_panic_unwind := libc alloc unwind DEPS_unwind := libc @@ -81,7 +81,7 @@ DEPS_unwind := libc RUSTFLAGS_compiler_builtins := -lstatic=compiler-rt RUSTFLAGS_panic_abort := -C panic=abort -DEPS_std := core libc rand alloc collections compiler_builtins rustc_unicode \ +DEPS_std := core libc rand alloc collections compiler_builtins std_unicode \ 
native:backtrace \ alloc_system panic_abort panic_unwind unwind DEPS_arena := std @@ -96,7 +96,7 @@ DEPS_serialize := std log DEPS_term := std DEPS_test := std getopts term native:rust_test_helpers -DEPS_syntax := std term serialize log arena libc rustc_bitflags rustc_unicode rustc_errors syntax_pos rustc_data_structures +DEPS_syntax := std term serialize log arena libc rustc_bitflags std_unicode rustc_errors syntax_pos rustc_data_structures DEPS_syntax_ext := syntax syntax_pos rustc_errors fmt_macros proc_macro DEPS_syntax_pos := serialize DEPS_proc_macro_tokens := syntax syntax_pos log @@ -158,7 +158,7 @@ ONLY_RLIB_libc := 1 ONLY_RLIB_alloc := 1 ONLY_RLIB_rand := 1 ONLY_RLIB_collections := 1 -ONLY_RLIB_rustc_unicode := 1 +ONLY_RLIB_std_unicode := 1 ONLY_RLIB_rustc_bitflags := 1 ONLY_RLIB_alloc_system := 1 ONLY_RLIB_alloc_jemalloc := 1 @@ -169,7 +169,7 @@ ONLY_RLIB_unwind := 1 TARGET_SPECIFIC_alloc_jemalloc := 1 # Documented-by-default crates -DOC_CRATES := std alloc collections core libc rustc_unicode +DOC_CRATES := std alloc collections core libc std_unicode ifeq ($(CFG_DISABLE_JEMALLOC),) RUSTFLAGS_rustc_back := --cfg 'feature="jemalloc"' diff --git a/mk/tests.mk b/mk/tests.mk index 35ee7697a7a..345fc1679b0 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -15,7 +15,7 @@ # The names of crates that must be tested -# libcore/librustc_unicode tests are in a separate crate +# libcore/libstd_unicode tests are in a separate crate DEPS_coretest := $(eval $(call RUST_CRATE,coretest)) diff --git a/src/Cargo.lock b/src/Cargo.lock index b3388563adc..4c6aeeddd38 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -72,7 +72,7 @@ version = "0.0.0" dependencies = [ "alloc 0.0.0", "core 0.0.0", - "rustc_unicode 0.0.0", + "std_unicode 0.0.0", ] [[package]] @@ -546,13 +546,6 @@ dependencies = [ ] [[package]] -name = "rustc_unicode" -version = "0.0.0" -dependencies = [ - "core 0.0.0", -] - -[[package]] name = "rustdoc" version = "0.0.0" dependencies = [ @@ -599,7 +592,7 @@ 
dependencies = [ "panic_abort 0.0.0", "panic_unwind 0.0.0", "rand 0.0.0", - "rustc_unicode 0.0.0", + "std_unicode 0.0.0", "unwind 0.0.0", ] @@ -612,6 +605,13 @@ dependencies = [ ] [[package]] +name = "std_unicode" +version = "0.0.0" +dependencies = [ + "core 0.0.0", +] + +[[package]] name = "syntax" version = "0.0.0" dependencies = [ diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index 879eca60cc7..2f674a311fe 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -125,6 +125,11 @@ fn main() { cmd.arg("-C").arg(format!("codegen-units={}", s)); } + // Emit save-analysis info. + if env::var("RUSTC_SAVE_ANALYSIS") == Ok("api".to_string()) { + cmd.arg("-Zsave-analysis-api"); + } + // Dealing with rpath here is a little special, so let's go into some // detail. First off, `-rpath` is a linker option on Unix platforms // which adds to the runtime dynamic loader path when looking for diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index c5675fd46cb..e0798860275 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -23,6 +23,7 @@ use std::process::Command; use build_helper::output; use {Build, Compiler, Mode}; +use dist; use util::{self, dylib_path, dylib_path_var}; const ADB_TEST_DIR: &'static str = "/data/tmp"; @@ -464,8 +465,7 @@ fn krate_emscripten(build: &Build, println!("running {}", test_file_name); let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured"); let mut cmd = Command::new(nodejs); - cmd.arg(&test_file_name) - .stderr(::std::process::Stdio::inherit()); + cmd.arg(&test_file_name); if build.config.quiet_tests { cmd.arg("--quiet"); } @@ -517,3 +517,32 @@ pub fn android_copy_libs(build: &Build, } } } + +/// Run "distcheck", a 'make check' from a tarball +pub fn distcheck(build: &Build) { + if build.config.build != "x86_64-unknown-linux-gnu" { + return + } + if !build.config.host.iter().any(|s| s == "x86_64-unknown-linux-gnu") { + return + } + if 
!build.config.target.iter().any(|s| s == "x86_64-unknown-linux-gnu") { + return + } + + let dir = build.out.join("tmp").join("distcheck"); + let _ = fs::remove_dir_all(&dir); + t!(fs::create_dir_all(&dir)); + + let mut cmd = Command::new("tar"); + cmd.arg("-xzf") + .arg(dist::rust_src_location(build)) + .arg("--strip-components=1") + .current_dir(&dir); + build.run(&mut cmd); + build.run(Command::new("./configure") + .current_dir(&dir)); + build.run(Command::new("make") + .arg("check") + .current_dir(&dir)); +} diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index d603455122e..1d3445a9eac 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -23,7 +23,7 @@ use std::io::Write; use std::path::{PathBuf, Path}; use std::process::Command; -use {Build, Compiler}; +use {Build, Compiler, Mode}; use util::{cp_r, libdir, is_dylib, cp_filtered, copy}; pub fn package_vers(build: &Build) -> &str { @@ -284,6 +284,55 @@ pub fn std(build: &Build, compiler: &Compiler, target: &str) { t!(fs::remove_dir_all(&image)); } +pub fn rust_src_location(build: &Build) -> PathBuf { + let plain_name = format!("rustc-{}-src", package_vers(build)); + distdir(build).join(&format!("{}.tar.gz", plain_name)) +} + +/// Creates a tarball of save-analysis metadata, if available. 
+pub fn analysis(build: &Build, compiler: &Compiler, target: &str) { + println!("Dist analysis"); + + if build.config.channel != "nightly" { + println!("Skipping dist-analysis - not on nightly channel"); + return; + } + if compiler.stage != 2 { + return + } + + let name = format!("rust-analysis-{}", package_vers(build)); + let image = tmpdir(build).join(format!("{}-{}-image", name, target)); + + let src = build.stage_out(compiler, Mode::Libstd).join(target).join("release").join("deps"); + + let image_src = src.join("save-analysis"); + let dst = image.join("lib/rustlib").join(target).join("analysis"); + t!(fs::create_dir_all(&dst)); + cp_r(&image_src, &dst); + + let mut cmd = Command::new("sh"); + cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh"))) + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=save-analysis-saved.") + .arg(format!("--image-dir={}", sanitize_sh(&image))) + .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build)))) + .arg(format!("--output-dir={}", sanitize_sh(&distdir(build)))) + .arg(format!("--package-name={}-{}", name, target)) + .arg(format!("--component-name=rust-analysis-{}", target)) + .arg("--legacy-manifest-dirs=rustlib,cargo"); + build.run(&mut cmd); + t!(fs::remove_dir_all(&image)); + + // Create plain source tarball + let mut cmd = Command::new("tar"); + cmd.arg("-czf").arg(sanitize_sh(&distdir(build).join(&format!("{}.tar.gz", name)))) + .arg("analysis") + .current_dir(&src); + build.run(&mut cmd); +} + /// Creates the `rust-src` installer component and the plain source tarball pub fn rust_src(build: &Build) { println!("Dist src"); @@ -374,7 +423,7 @@ pub fn rust_src(build: &Build) { // Create plain source tarball let mut cmd = Command::new("tar"); - cmd.arg("-czf").arg(sanitize_sh(&distdir(build).join(&format!("{}.tar.gz", plain_name)))) + cmd.arg("-czf").arg(sanitize_sh(&rust_src_location(build))) .arg(&plain_name) .current_dir(&dst); build.run(&mut cmd); diff 
--git a/src/bootstrap/job.rs b/src/bootstrap/job.rs index b4d7aff97da..c3859275e6f 100644 --- a/src/bootstrap/job.rs +++ b/src/bootstrap/job.rs @@ -51,6 +51,7 @@ type LPVOID = *mut u8; type JOBOBJECTINFOCLASS = i32; type SIZE_T = usize; type LARGE_INTEGER = i64; +type UINT = u32; type ULONG_PTR = usize; type ULONGLONG = u64; @@ -59,6 +60,8 @@ const DUPLICATE_SAME_ACCESS: DWORD = 0x2; const PROCESS_DUP_HANDLE: DWORD = 0x40; const JobObjectExtendedLimitInformation: JOBOBJECTINFOCLASS = 9; const JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE: DWORD = 0x2000; +const SEM_FAILCRITICALERRORS: UINT = 0x0001; +const SEM_NOGPFAULTERRORBOX: UINT = 0x0002; extern "system" { fn CreateJobObjectW(lpJobAttributes: *mut u8, lpName: *const u8) -> HANDLE; @@ -79,6 +82,7 @@ extern "system" { JobObjectInformationClass: JOBOBJECTINFOCLASS, lpJobObjectInformation: LPVOID, cbJobObjectInformationLength: DWORD) -> BOOL; + fn SetErrorMode(mode: UINT) -> UINT; } #[repr(C)] @@ -115,6 +119,13 @@ struct JOBOBJECT_BASIC_LIMIT_INFORMATION { } pub unsafe fn setup() { + // Tell Windows to not show any UI on errors (such as not finding a required dll + // during startup or terminating abnormally). This is important for running tests, + // since some of them use abnormal termination by design. + // This mode is inherited by all child processes. 
+ let mode = SetErrorMode(SEM_NOGPFAULTERRORBOX); // read inherited flags + SetErrorMode(mode | SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); + // Create a new job object for us to use let job = CreateJobObjectW(0 as *mut _, 0 as *const _); assert!(job != 0 as *mut _, "{}", io::Error::last_os_error()); diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index 912b5864c81..cd80c4298dc 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -507,6 +507,10 @@ impl Build { .env(format!("CFLAGS_{}", target), self.cflags(target).join(" ")); } + if self.config.channel == "nightly" && compiler.stage == 2 { + cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string()); + } + // Environment variables *required* needed throughout the build // // FIXME: should update code to not require this env var diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in index b165048b7b6..1fa70081938 100644 --- a/src/bootstrap/mk/Makefile.in +++ b/src/bootstrap/mk/Makefile.in @@ -1,4 +1,4 @@ -# Copyright 20126 The Rust Project Developers. See the COPYRIGHT +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution and at # http://rust-lang.org/COPYRIGHT. # @@ -55,6 +55,8 @@ check-cargotest: $(Q)$(BOOTSTRAP) test src/tools/cargotest $(BOOTSTRAP_ARGS) dist: $(Q)$(BOOTSTRAP) dist $(BOOTSTRAP_ARGS) +distcheck: + $(Q)$(BOOTSTRAP) test distcheck install: $(Q)$(BOOTSTRAP) dist --install $(BOOTSTRAP_ARGS) tidy: diff --git a/src/bootstrap/step.rs b/src/bootstrap/step.rs index f9eae41a330..884cc7da8ea 100644 --- a/src/bootstrap/step.rs +++ b/src/bootstrap/step.rs @@ -198,14 +198,6 @@ pub fn build_rules(build: &Build) -> Rules { }); } for (krate, path, default) in krates("rustc-main") { - // We hijacked the `src/rustc` path above for "build just the compiler" - // so let's not reinterpret it here as everything and redirect the - // `src/rustc` path to a nonexistent path. 
- let path = if path == "src/rustc" { - "path/to/nowhere" - } else { - path - }; rules.build(&krate.build_step, path) .dep(|s| s.name("libtest")) .dep(move |s| s.name("llvm").host(&build.config.build).stage(0)) @@ -403,6 +395,10 @@ pub fn build_rules(build: &Build) -> Rules { .default(true) .host(true) .run(move |s| check::docs(build, &s.compiler())); + rules.test("check-distcheck", "distcheck") + .dep(|s| s.name("dist-src")) + .run(move |_| check::distcheck(build)); + rules.build("test-helpers", "src/rt/rust_test_helpers.c") .run(move |s| native::test_helpers(build, s.target)); @@ -503,6 +499,10 @@ pub fn build_rules(build: &Build) -> Rules { .default(true) .dep(|s| s.name("default:doc")) .run(move |s| dist::docs(build, s.stage, s.target)); + rules.dist("dist-analysis", "src/libstd") + .dep(|s| s.name("dist-std")) + .default(true) + .run(move |s| dist::analysis(build, &s.compiler(), s.target)); rules.dist("install", "src") .dep(|s| s.name("default:dist")) .run(move |s| install::install(build, s.stage, s.target)); diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs index 38844fb6c9e..07f9c91d3c7 100644 --- a/src/build_helper/lib.rs +++ b/src/build_helper/lib.rs @@ -21,7 +21,8 @@ pub fn run(cmd: &mut Command) { pub fn run_silent(cmd: &mut Command) { let status = match cmd.status() { Ok(status) => status, - Err(e) => fail(&format!("failed to execute command: {}", e)), + Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", + cmd, e)), }; if !status.success() { fail(&format!("command did not execute successfully: {:?}\n\ @@ -63,7 +64,8 @@ pub fn cc2ar(cc: &Path, target: &str) -> Option<PathBuf> { pub fn output(cmd: &mut Command) -> String { let output = match cmd.stderr(Stdio::inherit()).output() { Ok(status) => status, - Err(e) => fail(&format!("failed to execute command: {}", e)), + Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", + cmd, e)), }; if !output.status.success() { panic!("command did not execute 
successfully: {:?}\n\ diff --git a/src/doc/rust.css b/src/doc/rust.css index 932594b9912..664bc0fdab0 100644 --- a/src/doc/rust.css +++ b/src/doc/rust.css @@ -44,7 +44,9 @@ font-family: 'Source Code Pro'; font-style: normal; font-weight: 400; - src: local('Source Code Pro'), url("SourceCodePro-Regular.woff") format('woff'); + /* Avoid using locally installed font because bad versions are in circulation: + * see https://github.com/rust-lang/rust/issues/24355 */ + src: url("SourceCodePro-Regular.woff") format('woff'); } *:not(body) { diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index acce4ce0358..f9dfdc0e075 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -74,6 +74,7 @@ #![feature(allocator)] #![feature(box_syntax)] +#![feature(cfg_target_has_atomic)] #![feature(coerce_unsized)] #![feature(const_fn)] #![feature(core_intrinsics)] @@ -122,6 +123,7 @@ mod boxed { } #[cfg(test)] mod boxed_test; +#[cfg(target_has_atomic = "ptr")] pub mod arc; pub mod rc; pub mod raw_vec; diff --git a/src/liballoc/oom.rs b/src/liballoc/oom.rs index d355d59185e..3640156fec2 100644 --- a/src/liballoc/oom.rs +++ b/src/liballoc/oom.rs @@ -8,12 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use core::sync::atomic::{AtomicPtr, Ordering}; -use core::mem; +#[cfg(target_has_atomic = "ptr")] +pub use self::imp::set_oom_handler; use core::intrinsics; -static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(default_oom_handler as *mut ()); - fn default_oom_handler() -> ! { // The default handler can't do much more since we can't assume the presence // of libc or any way of printing an error message. @@ -26,17 +24,38 @@ fn default_oom_handler() -> ! { #[unstable(feature = "oom", reason = "not a scrutinized interface", issue = "27700")] pub fn oom() -> ! { - let value = OOM_HANDLER.load(Ordering::SeqCst); - let handler: fn() -> ! 
= unsafe { mem::transmute(value) }; - handler(); + self::imp::oom() } -/// Set a custom handler for out-of-memory conditions -/// -/// To avoid recursive OOM failures, it is critical that the OOM handler does -/// not allocate any memory itself. -#[unstable(feature = "oom", reason = "not a scrutinized interface", - issue = "27700")] -pub fn set_oom_handler(handler: fn() -> !) { - OOM_HANDLER.store(handler as *mut (), Ordering::SeqCst); +#[cfg(target_has_atomic = "ptr")] +mod imp { + use core::mem; + use core::sync::atomic::{AtomicPtr, Ordering}; + + static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(super::default_oom_handler as *mut ()); + + #[inline(always)] + pub fn oom() -> ! { + let value = OOM_HANDLER.load(Ordering::SeqCst); + let handler: fn() -> ! = unsafe { mem::transmute(value) }; + handler(); + } + + /// Set a custom handler for out-of-memory conditions + /// + /// To avoid recursive OOM failures, it is critical that the OOM handler does + /// not allocate any memory itself. + #[unstable(feature = "oom", reason = "not a scrutinized interface", + issue = "27700")] + pub fn set_oom_handler(handler: fn() -> !) { + OOM_HANDLER.store(handler as *mut (), Ordering::SeqCst); + } +} + +#[cfg(not(target_has_atomic = "ptr"))] +mod imp { + #[inline(always)] + pub fn oom() -> ! 
{ + super::default_oom_handler() + } } diff --git a/src/libcollections/Cargo.toml b/src/libcollections/Cargo.toml index 3056977d224..ab882fde9c2 100644 --- a/src/libcollections/Cargo.toml +++ b/src/libcollections/Cargo.toml @@ -10,7 +10,7 @@ path = "lib.rs" [dependencies] alloc = { path = "../liballoc" } core = { path = "../libcore" } -rustc_unicode = { path = "../librustc_unicode" } +std_unicode = { path = "../libstd_unicode" } [[test]] name = "collectionstest" diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 08288b4de8b..68b067012d3 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -47,19 +47,19 @@ #![feature(placement_in)] #![feature(placement_new_protocol)] #![feature(shared)] +#![feature(slice_get_slice)] #![feature(slice_patterns)] #![feature(specialization)] #![feature(staged_api)] -#![feature(step_by)] #![feature(trusted_len)] #![feature(unicode)] #![feature(unique)] -#![feature(slice_get_slice)] +#![feature(untagged_unions)] #![cfg_attr(test, feature(rand, test))] #![no_std] -extern crate rustc_unicode; +extern crate std_unicode; extern crate alloc; #[cfg(test)] diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs index e615e780d2b..5fb8cd6e1e2 100644 --- a/src/libcollections/slice.rs +++ b/src/libcollections/slice.rs @@ -98,8 +98,7 @@ #![cfg_attr(test, allow(unused_imports, dead_code))] use alloc::boxed::Box; -use core::cmp::Ordering::{self, Greater, Less}; -use core::cmp; +use core::cmp::Ordering::{self, Greater}; use core::mem::size_of; use core::mem; use core::ptr; @@ -1042,8 +1041,8 @@ impl<T> [T] { /// This is equivalent to `self.sort_by(|a, b| a.cmp(b))`. /// - /// This sort is stable and `O(n log n)` worst-case but allocates - /// approximately `2 * n` where `n` is the length of `self`. + /// This sort is stable and `O(n log n)` worst-case, but allocates + /// temporary storage half the size of `self`. 
/// /// # Examples /// @@ -1064,8 +1063,8 @@ impl<T> [T] { /// Sorts the slice, in place, using `f` to extract a key by which to /// order the sort by. /// - /// This sort is stable and `O(n log n)` worst-case but allocates - /// approximately `2 * n`, where `n` is the length of `self`. + /// This sort is stable and `O(n log n)` worst-case, but allocates + /// temporary storage half the size of `self`. /// /// # Examples /// @@ -1086,8 +1085,8 @@ impl<T> [T] { /// Sorts the slice, in place, using `compare` to compare /// elements. /// - /// This sort is stable and `O(n log n)` worst-case but allocates - /// approximately `2 * n`, where `n` is the length of `self`. + /// This sort is stable and `O(n log n)` worst-case, but allocates + /// temporary storage half the size of `self`. /// /// # Examples /// @@ -1305,213 +1304,333 @@ impl<T: Clone> ToOwned for [T] { // Sorting //////////////////////////////////////////////////////////////////////////////// -fn insertion_sort<T, F>(v: &mut [T], mut compare: F) +/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. +/// +/// This is the integral subroutine of insertion sort. +fn insert_head<T, F>(v: &mut [T], compare: &mut F) where F: FnMut(&T, &T) -> Ordering { - let len = v.len() as isize; - let buf_v = v.as_mut_ptr(); - - // 1 <= i < len; - for i in 1..len { - // j satisfies: 0 <= j <= i; - let mut j = i; + if v.len() >= 2 && compare(&v[0], &v[1]) == Greater { unsafe { - // `i` is in bounds. - let read_ptr = buf_v.offset(i) as *const T; - - // find where to insert, we need to do strict <, - // rather than <=, to maintain stability. - - // 0 <= j - 1 < len, so .offset(j - 1) is in bounds. - while j > 0 && compare(&*read_ptr, &*buf_v.offset(j - 1)) == Less { - j -= 1; + // There are three ways to implement insertion here: + // + // 1. Swap adjacent elements until the first one gets to its final destination. + // However, this way we copy data around more than is necessary. 
If elements are big + // structures (costly to copy), this method will be slow. + // + // 2. Iterate until the right place for the first element is found. Then shift the + // elements succeeding it to make room for it and finally place it into the + // remaining hole. This is a good method. + // + // 3. Copy the first element into a temporary variable. Iterate until the right place + // for it is found. As we go along, copy every traversed element into the slot + // preceding it. Finally, copy data from the temporary variable into the remaining + // hole. This method is very good. Benchmarks demonstrated slightly better + // performance than with the 2nd method. + // + // All methods were benchmarked, and the 3rd showed best results. So we chose that one. + let mut tmp = NoDrop { value: ptr::read(&v[0]) }; + + // Intermediate state of the insertion process is always tracked by `hole`, which + // serves two purposes: + // 1. Protects integrity of `v` from panics in `compare`. + // 2. Fills the remaining hole in `v` in the end. + // + // Panic safety: + // + // If `compare` panics at any point during the process, `hole` will get dropped and + // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it + // initially held exactly once. + let mut hole = InsertionHole { + src: &mut tmp.value, + dest: &mut v[1], + }; + ptr::copy_nonoverlapping(&v[1], &mut v[0], 1); + + for i in 2..v.len() { + if compare(&tmp.value, &v[i]) != Greater { + break; + } + ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1); + hole.dest = &mut v[i]; } + // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. + } + } - // shift everything to the right, to make space to - // insert this value. + // Holds a value, but never drops it. + #[allow(unions_with_drop_fields)] + union NoDrop<T> { + value: T + } - // j + 1 could be `len` (for the last `i`), but in - // that case, `i == j` so we don't copy. The - // `.offset(j)` is always in bounds. 
+ // When dropped, copies from `src` into `dest`. + struct InsertionHole<T> { + src: *mut T, + dest: *mut T, + } - if i != j { - let tmp = ptr::read(read_ptr); - ptr::copy(&*buf_v.offset(j), buf_v.offset(j + 1), (i - j) as usize); - ptr::copy_nonoverlapping(&tmp, buf_v.offset(j), 1); - mem::forget(tmp); - } + impl<T> Drop for InsertionHole<T> { + fn drop(&mut self) { + unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); } } } } -fn merge_sort<T, F>(v: &mut [T], mut compare: F) +/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and +/// stores the result into `v[..]`. +/// +/// # Safety +/// +/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough +/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type. +unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, compare: &mut F) where F: FnMut(&T, &T) -> Ordering { - // warning: this wildly uses unsafe. - const BASE_INSERTION: usize = 32; - const LARGE_INSERTION: usize = 16; - - // FIXME #12092: smaller insertion runs seems to make sorting - // vectors of large elements a little faster on some platforms, - // but hasn't been tested/tuned extensively - let insertion = if size_of::<T>() <= 16 { - BASE_INSERTION + let len = v.len(); + let v = v.as_mut_ptr(); + let v_mid = v.offset(mid as isize); + let v_end = v.offset(len as isize); + + // The merge process first copies the shorter run into `buf`. Then it traces the newly copied + // run and the longer run forwards (or backwards), comparing their next unconsumed elements and + // copying the lesser (or greater) one into `v`. + // + // As soon as the shorter run is fully consumed, the process is done. If the longer run gets + // consumed first, then we must copy whatever is left of the shorter run into the remaining + // hole in `v`. + // + // Intermediate state of the process is always tracked by `hole`, which serves two purposes: + // 1. 
Protects integrity of `v` from panics in `compare`. + // 2. Fills the remaining hole in `v` if the longer run gets consumed first. + // + // Panic safety: + // + // If `compare` panics at any point during the process, `hole` will get dropped and fill the + // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every + // object it initially held exactly once. + let mut hole; + + if mid <= len - mid { + // The left run is shorter. + ptr::copy_nonoverlapping(v, buf, mid); + hole = MergeHole { + start: buf, + end: buf.offset(mid as isize), + dest: v, + }; + + // Initially, these pointers point to the beginnings of their arrays. + let left = &mut hole.start; + let mut right = v_mid; + let out = &mut hole.dest; + + while *left < hole.end && right < v_end { + // Consume the lesser side. + // If equal, prefer the left run to maintain stability. + let to_copy = if compare(&**left, &*right) == Greater { + get_and_increment(&mut right) + } else { + get_and_increment(left) + }; + ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1); + } } else { - LARGE_INSERTION - }; + // The right run is shorter. + ptr::copy_nonoverlapping(v_mid, buf, len - mid); + hole = MergeHole { + start: buf, + end: buf.offset((len - mid) as isize), + dest: v_mid, + }; + + // Initially, these pointers point past the ends of their arrays. + let left = &mut hole.dest; + let right = &mut hole.end; + let mut out = v_end; + + while v < *left && buf < *right { + // Consume the greater side. + // If equal, prefer the right run to maintain stability. + let to_copy = if compare(&*left.offset(-1), &*right.offset(-1)) == Greater { + decrement_and_get(left) + } else { + decrement_and_get(right) + }; + ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1); + } + } + // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of + // it will now be copied into the hole in `v`. 
- let len = v.len(); + unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T { + let old = *ptr; + *ptr = ptr.offset(1); + old + } - // short vectors get sorted in-place via insertion sort to avoid allocations - if len <= insertion { - insertion_sort(v, compare); - return; + unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T { + *ptr = ptr.offset(-1); + *ptr } - // allocate some memory to use as scratch memory, we keep the - // length 0 so we can keep shallow copies of the contents of `v` - // without risking the dtors running on an object twice if - // `compare` panics. - let mut working_space = Vec::with_capacity(2 * len); - // these both are buffers of length `len`. - let mut buf_dat = working_space.as_mut_ptr(); - let mut buf_tmp = unsafe { buf_dat.offset(len as isize) }; - - // length `len`. - let buf_v = v.as_ptr(); - - // step 1. sort short runs with insertion sort. This takes the - // values from `v` and sorts them into `buf_dat`, leaving that - // with sorted runs of length INSERTION. - - // We could hardcode the sorting comparisons here, and we could - // manipulate/step the pointers themselves, rather than repeatedly - // .offset-ing. - for start in (0..len).step_by(insertion) { - // start <= i < len; - for i in start..cmp::min(start + insertion, len) { - // j satisfies: start <= j <= i; - let mut j = i as isize; - unsafe { - // `i` is in bounds. - let read_ptr = buf_v.offset(i as isize); + // When dropped, copies the range `start..end` into `dest..`. + struct MergeHole<T> { + start: *mut T, + end: *mut T, + dest: *mut T, + } + + impl<T> Drop for MergeHole<T> { + fn drop(&mut self) { + // `T` is not a zero-sized type, so it's okay to divide by it's size. + let len = (self.end as usize - self.start as usize) / mem::size_of::<T>(); + unsafe { ptr::copy_nonoverlapping(self.start, self.dest, len); } + } + } +} - // find where to insert, we need to do strict <, - // rather than <=, to maintain stability. 
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail +/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt). +/// +/// The algorithm identifies strictly descending and non-descending subsequences, which are called +/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed +/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are +/// satisfied, for every `i` in `0 .. runs.len() - 2`: +/// +/// 1. `runs[i].len > runs[i + 1].len` +/// 2. `runs[i].len > runs[i + 1].len + runs[i + 2].len` +/// +/// The invariants ensure that the total running time is `O(n log n)` worst-case. +fn merge_sort<T, F>(v: &mut [T], mut compare: F) + where F: FnMut(&T, &T) -> Ordering +{ + // Sorting has no meaningful behavior on zero-sized types. + if size_of::<T>() == 0 { + return; + } - // start <= j - 1 < len, so .offset(j - 1) is in - // bounds. - while j > start as isize && compare(&*read_ptr, &*buf_dat.offset(j - 1)) == Less { - j -= 1; - } + // FIXME #12092: These numbers are platform-specific and need more extensive testing/tuning. + // + // If `v` has length up to `insertion_len`, simply switch to insertion sort because it is going + // to perform better than merge sort. For bigger types `T`, the threshold is smaller. + // + // Short runs are extended using insertion sort to span at least `min_run` elements, in order + // to improve performance. + let (max_insertion, min_run) = if size_of::<T>() <= 16 { + (64, 32) + } else { + (32, 16) + }; - // shift everything to the right, to make space to - // insert this value. + let len = v.len(); - // j + 1 could be `len` (for the last `i`), but in - // that case, `i == j` so we don't copy. The - // `.offset(j)` is always in bounds. 
- ptr::copy(&*buf_dat.offset(j), buf_dat.offset(j + 1), i - j as usize); - ptr::copy_nonoverlapping(read_ptr, buf_dat.offset(j), 1); + // Short arrays get sorted in-place via insertion sort to avoid allocations. + if len <= max_insertion { + if len >= 2 { + for i in (0..len-1).rev() { + insert_head(&mut v[i..], &mut compare); } } + return; } - // step 2. merge the sorted runs. - let mut width = insertion; - while width < len { - // merge the sorted runs of length `width` in `buf_dat` two at - // a time, placing the result in `buf_tmp`. - - // 0 <= start <= len. - for start in (0..len).step_by(2 * width) { - // manipulate pointers directly for speed (rather than - // using a `for` loop with `range` and `.offset` inside - // that loop). - unsafe { - // the end of the first run & start of the - // second. Offset of `len` is defined, since this is - // precisely one byte past the end of the object. - let right_start = buf_dat.offset(cmp::min(start + width, len) as isize); - // end of the second. Similar reasoning to the above re safety. - let right_end_idx = cmp::min(start + 2 * width, len); - let right_end = buf_dat.offset(right_end_idx as isize); - - // the pointers to the elements under consideration - // from the two runs. - - // both of these are in bounds. - let mut left = buf_dat.offset(start as isize); - let mut right = right_start; - - // where we're putting the results, it is a run of - // length `2*width`, so we step it once for each step - // of either `left` or `right`. `buf_tmp` has length - // `len`, so these are in bounds. - let mut out = buf_tmp.offset(start as isize); - let out_end = buf_tmp.offset(right_end_idx as isize); - - // If left[last] <= right[0], they are already in order: - // fast-forward the left side (the right side is handled - // in the loop). - // If `right` is not empty then left is not empty, and - // the offsets are in bounds. 
- if right != right_end && compare(&*right.offset(-1), &*right) != Greater { - let elems = (right_start as usize - left as usize) / mem::size_of::<T>(); - ptr::copy_nonoverlapping(&*left, out, elems); - out = out.offset(elems as isize); - left = right_start; + // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it + // shallow copies of the contents of `v` without risking the dtors running on copies if + // `compare` panics. When merging two sorted runs, this buffer holds a copy of the shorter run, + // which will always have length at most `len / 2`. + let mut buf = Vec::with_capacity(len / 2); + + // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a + // strange decision, but consider the fact that merges more often go in the opposite direction + // (forwards). According to benchmarks, merging forwards is slightly faster than merging + // backwards. To conclude, identifying runs by traversing backwards improves performance. + let mut runs = vec![]; + let mut end = len; + while end > 0 { + // Find the next natural run, and reverse it if it's strictly descending. + let mut start = end - 1; + if start > 0 { + start -= 1; + if compare(&v[start], &v[start + 1]) == Greater { + while start > 0 && compare(&v[start - 1], &v[start]) == Greater { + start -= 1; } - - while out < out_end { - // Either the left or the right run are exhausted, - // so just copy the remainder from the other run - // and move on; this gives a huge speed-up (order - // of 25%) for mostly sorted vectors (the best - // case). - if left == right_start { - // the number remaining in this run. 
- let elems = (right_end as usize - right as usize) / mem::size_of::<T>(); - ptr::copy_nonoverlapping(&*right, out, elems); - break; - } else if right == right_end { - let elems = (right_start as usize - left as usize) / mem::size_of::<T>(); - ptr::copy_nonoverlapping(&*left, out, elems); - break; - } - - // check which side is smaller, and that's the - // next element for the new run. - - // `left < right_start` and `right < right_end`, - // so these are valid. - let to_copy = if compare(&*left, &*right) == Greater { - step(&mut right) - } else { - step(&mut left) - }; - ptr::copy_nonoverlapping(&*to_copy, out, 1); - step(&mut out); + v[start..end].reverse(); + } else { + while start > 0 && compare(&v[start - 1], &v[start]) != Greater { + start -= 1; } } } - mem::swap(&mut buf_dat, &mut buf_tmp); + // Insert some more elements into the run if it's too short. Insertion sort is faster than + // merge sort on short sequences, so this significantly improves performance. + while start > 0 && end - start < min_run { + start -= 1; + insert_head(&mut v[start..end], &mut compare); + } - width *= 2; + // Push this run onto the stack. + runs.push(Run { + start: start, + len: end - start, + }); + end = start; + + // Merge some pairs of adjacent runs to satisfy the invariants. + while let Some(r) = collapse(&runs) { + let left = runs[r + 1]; + let right = runs[r]; + unsafe { + merge(&mut v[left.start .. right.start + right.len], left.len, buf.as_mut_ptr(), + &mut compare); + } + runs[r] = Run { + start: left.start, + len: left.len + right.len, + }; + runs.remove(r + 1); + } } - // write the result to `v` in one go, so that there are never two copies - // of the same object in `v`. - unsafe { - ptr::copy_nonoverlapping(&*buf_dat, v.as_mut_ptr(), len); + // Finally, exactly one run must remain in the stack. + debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len); + + // Examines the stack of runs and identifies the next pair of runs to merge. 
More specifically,
+    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+    // algorithm should continue building a new run instead, `None` is returned.
+    //
+    // TimSort is infamous for its buggy implementations, as described here:
+    // http://envisage-project.eu/timsort-specification-and-verification/
+    //
+    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+    // Enforcing them on just top three is not sufficient to ensure that the invariants will still
+    // hold for *all* runs in the stack.
+    //
+    // This function correctly checks invariants for the top four runs. Additionally, if the top
+    // run starts at index 0, it will always demand a merge operation until the stack is fully
+    // collapsed, in order to complete the sort.
+    #[inline]
+    fn collapse(runs: &[Run]) -> Option<usize> {
+        let n = runs.len();
+        if n >= 2 && (runs[n - 1].start == 0 ||
+                      runs[n - 2].len <= runs[n - 1].len ||
+                      (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
+                      (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) {
+            if n >= 3 && runs[n - 3].len < runs[n - 1].len {
+                Some(n - 3)
+            } else {
+                Some(n - 2)
+            }
+        } else {
+            None
+        }
     }

-    // increment the pointer, 
- #[inline(always)] - unsafe fn step<T>(ptr: &mut *mut T) -> *mut T { - let old = *ptr; - *ptr = ptr.offset(1); - old + #[derive(Clone, Copy)] + struct Run { + start: usize, + len: usize, } } diff --git a/src/libcollections/str.rs b/src/libcollections/str.rs index 48a74bdecbb..d4be0914f15 100644 --- a/src/libcollections/str.rs +++ b/src/libcollections/str.rs @@ -24,12 +24,12 @@ use core::str::pattern::Pattern; use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; use core::iter::FusedIterator; -use rustc_unicode::str::{UnicodeStr, Utf16Encoder}; +use std_unicode::str::{UnicodeStr, Utf16Encoder}; use vec_deque::VecDeque; use borrow::{Borrow, ToOwned}; use string::String; -use rustc_unicode; +use std_unicode; use vec::Vec; use slice::SliceConcatExt; use boxed::Box; @@ -54,7 +54,7 @@ pub use core::str::{from_utf8, Chars, CharIndices, Bytes}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{from_utf8_unchecked, ParseBoolError}; #[stable(feature = "rust1", since = "1.0.0")] -pub use rustc_unicode::str::SplitWhitespace; +pub use std_unicode::str::SplitWhitespace; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::pattern; @@ -1705,7 +1705,7 @@ impl str { } fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool { - use rustc_unicode::derived_property::{Cased, Case_Ignorable}; + use std_unicode::derived_property::{Cased, Case_Ignorable}; match iter.skip_while(|&c| Case_Ignorable(c)).next() { Some(c) => Cased(c), None => false, diff --git a/src/libcollections/string.rs b/src/libcollections/string.rs index fff7c160e31..b4c41a99a6b 100644 --- a/src/libcollections/string.rs +++ b/src/libcollections/string.rs @@ -63,8 +63,8 @@ use core::mem; use core::ops::{self, Add, AddAssign, Index, IndexMut}; use core::ptr; use core::str::pattern::Pattern; -use rustc_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER}; -use rustc_unicode::str as unicode_str; +use std_unicode::char::{decode_utf16, 
REPLACEMENT_CHARACTER}; +use std_unicode::str as unicode_str; use borrow::{Cow, ToOwned}; use range::RangeArgument; diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index f2632412700..c9f9e513ef3 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -1244,7 +1244,7 @@ impl<T: Clone> Vec<T> { /// ``` #[stable(feature = "vec_extend_from_slice", since = "1.6.0")] pub fn extend_from_slice(&mut self, other: &[T]) { - self.extend(other.iter().cloned()) + self.spec_extend(other.iter()) } } @@ -1499,7 +1499,7 @@ impl<T> ops::DerefMut for Vec<T> { impl<T> FromIterator<T> for Vec<T> { #[inline] fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> { - <Self as SpecExtend<_>>::from_iter(iter.into_iter()) + <Self as SpecExtend<_, _>>::from_iter(iter.into_iter()) } } @@ -1572,12 +1572,12 @@ impl<T> Extend<T> for Vec<T> { } // Specialization trait used for Vec::from_iter and Vec::extend -trait SpecExtend<I> { +trait SpecExtend<T, I> { fn from_iter(iter: I) -> Self; fn spec_extend(&mut self, iter: I); } -impl<I, T> SpecExtend<I> for Vec<T> +impl<T, I> SpecExtend<T, I> for Vec<T> where I: Iterator<Item=T>, { default fn from_iter(mut iterator: I) -> Self { @@ -1607,7 +1607,7 @@ impl<I, T> SpecExtend<I> for Vec<T> } } -impl<I, T> SpecExtend<I> for Vec<T> +impl<T, I> SpecExtend<T, I> for Vec<T> where I: TrustedLen<Item=T>, { fn from_iter(iterator: I) -> Self { @@ -1642,6 +1642,33 @@ impl<I, T> SpecExtend<I> for Vec<T> } } +impl<'a, T: 'a, I> SpecExtend<&'a T, I> for Vec<T> + where I: Iterator<Item=&'a T>, + T: Clone, +{ + default fn from_iter(iterator: I) -> Self { + SpecExtend::from_iter(iterator.cloned()) + } + + default fn spec_extend(&mut self, iterator: I) { + self.spec_extend(iterator.cloned()) + } +} + +impl<'a, T: 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T> + where T: Copy, +{ + fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) { + let slice = iterator.as_slice(); + self.reserve(slice.len()); + unsafe { + let len = 
self.len(); + self.set_len(len + slice.len()); + self.get_unchecked_mut(len..).copy_from_slice(slice); + } + } +} + impl<T> Vec<T> { fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) { // This is the case for a general iterator. @@ -1669,7 +1696,7 @@ impl<T> Vec<T> { #[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, T: 'a + Copy> Extend<&'a T> for Vec<T> { fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) { - self.extend(iter.into_iter().map(|&x| x)) + self.spec_extend(iter.into_iter()) } } diff --git a/src/libcollectionstest/lib.rs b/src/libcollectionstest/lib.rs index 58ce78eab9a..0fe0a1bad64 100644 --- a/src/libcollectionstest/lib.rs +++ b/src/libcollectionstest/lib.rs @@ -33,7 +33,7 @@ extern crate collections; extern crate test; -extern crate rustc_unicode; +extern crate std_unicode; use std::hash::{Hash, Hasher}; use std::collections::hash_map::DefaultHasher; diff --git a/src/libcollectionstest/slice.rs b/src/libcollectionstest/slice.rs index 0e63e8d4a1e..1b52214dee6 100644 --- a/src/libcollectionstest/slice.rs +++ b/src/libcollectionstest/slice.rs @@ -383,7 +383,7 @@ fn test_reverse() { #[test] fn test_sort() { - for len in 4..25 { + for len in (2..25).chain(500..510) { for _ in 0..100 { let mut v: Vec<_> = thread_rng().gen_iter::<i32>().take(len).collect(); let mut v1 = v.clone(); @@ -410,7 +410,7 @@ fn test_sort() { #[test] fn test_sort_stability() { - for len in 4..25 { + for len in (2..25).chain(500..510) { for _ in 0..10 { let mut counts = [0; 10]; @@ -442,6 +442,13 @@ fn test_sort_stability() { } #[test] +fn test_sort_zero_sized_type() { + // Should not panic. 
+ [(); 10].sort(); + [(); 100].sort(); +} + +#[test] fn test_concat() { let v: [Vec<i32>; 0] = []; let c = v.concat(); @@ -1338,89 +1345,104 @@ mod bench { }) } - #[bench] - fn sort_random_small(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { - let mut v: Vec<_> = rng.gen_iter::<u64>().take(5).collect(); - v.sort(); - }); - b.bytes = 5 * mem::size_of::<u64>() as u64; + fn gen_ascending(len: usize) -> Vec<u64> { + (0..len as u64).collect() } - #[bench] - fn sort_random_medium(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { - let mut v: Vec<_> = rng.gen_iter::<u64>().take(100).collect(); - v.sort(); - }); - b.bytes = 100 * mem::size_of::<u64>() as u64; + fn gen_descending(len: usize) -> Vec<u64> { + (0..len as u64).rev().collect() } - #[bench] - fn sort_random_large(b: &mut Bencher) { + fn gen_random(len: usize) -> Vec<u64> { let mut rng = thread_rng(); - b.iter(|| { - let mut v: Vec<_> = rng.gen_iter::<u64>().take(10000).collect(); - v.sort(); - }); - b.bytes = 10000 * mem::size_of::<u64>() as u64; + rng.gen_iter::<u64>().take(len).collect() } - #[bench] - fn sort_sorted(b: &mut Bencher) { - let mut v: Vec<_> = (0..10000).collect(); - b.iter(|| { - v.sort(); - }); - b.bytes = (v.len() * mem::size_of_val(&v[0])) as u64; + fn gen_mostly_ascending(len: usize) -> Vec<u64> { + let mut rng = thread_rng(); + let mut v = gen_ascending(len); + for _ in (0usize..).take_while(|x| x * x <= len) { + let x = rng.gen::<usize>() % len; + let y = rng.gen::<usize>() % len; + v.swap(x, y); + } + v } - type BigSortable = (u64, u64, u64, u64); - - #[bench] - fn sort_big_random_small(b: &mut Bencher) { + fn gen_mostly_descending(len: usize) -> Vec<u64> { let mut rng = thread_rng(); - b.iter(|| { - let mut v = rng.gen_iter::<BigSortable>() - .take(5) - .collect::<Vec<BigSortable>>(); - v.sort(); - }); - b.bytes = 5 * mem::size_of::<BigSortable>() as u64; + let mut v = gen_descending(len); + for _ in (0usize..).take_while(|x| x * x <= len) { + let x = 
rng.gen::<usize>() % len; + let y = rng.gen::<usize>() % len; + v.swap(x, y); + } + v } - #[bench] - fn sort_big_random_medium(b: &mut Bencher) { + fn gen_big_random(len: usize) -> Vec<[u64; 16]> { let mut rng = thread_rng(); - b.iter(|| { - let mut v = rng.gen_iter::<BigSortable>() - .take(100) - .collect::<Vec<BigSortable>>(); - v.sort(); - }); - b.bytes = 100 * mem::size_of::<BigSortable>() as u64; + rng.gen_iter().map(|x| [x; 16]).take(len).collect() } - #[bench] - fn sort_big_random_large(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { - let mut v = rng.gen_iter::<BigSortable>() - .take(10000) - .collect::<Vec<BigSortable>>(); - v.sort(); - }); - b.bytes = 10000 * mem::size_of::<BigSortable>() as u64; + fn gen_big_ascending(len: usize) -> Vec<[u64; 16]> { + (0..len as u64).map(|x| [x; 16]).take(len).collect() } + fn gen_big_descending(len: usize) -> Vec<[u64; 16]> { + (0..len as u64).rev().map(|x| [x; 16]).take(len).collect() + } + + macro_rules! sort_bench { + ($name:ident, $gen:expr, $len:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + b.iter(|| $gen($len).sort()); + b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; + } + } + } + + sort_bench!(sort_small_random, gen_random, 10); + sort_bench!(sort_small_ascending, gen_ascending, 10); + sort_bench!(sort_small_descending, gen_descending, 10); + + sort_bench!(sort_small_big_random, gen_big_random, 10); + sort_bench!(sort_small_big_ascending, gen_big_ascending, 10); + sort_bench!(sort_small_big_descending, gen_big_descending, 10); + + sort_bench!(sort_medium_random, gen_random, 100); + sort_bench!(sort_medium_ascending, gen_ascending, 100); + sort_bench!(sort_medium_descending, gen_descending, 100); + + sort_bench!(sort_large_random, gen_random, 10000); + sort_bench!(sort_large_ascending, gen_ascending, 10000); + sort_bench!(sort_large_descending, gen_descending, 10000); + sort_bench!(sort_large_mostly_ascending, gen_mostly_ascending, 10000); + sort_bench!(sort_large_mostly_descending, 
gen_mostly_descending, 10000); + + sort_bench!(sort_large_big_random, gen_big_random, 10000); + sort_bench!(sort_large_big_ascending, gen_big_ascending, 10000); + sort_bench!(sort_large_big_descending, gen_big_descending, 10000); + #[bench] - fn sort_big_sorted(b: &mut Bencher) { - let mut v: Vec<BigSortable> = (0..10000).map(|i| (i, i, i, i)).collect(); + fn sort_large_random_expensive(b: &mut Bencher) { + let len = 10000; b.iter(|| { - v.sort(); + let mut count = 0; + let cmp = move |a: &u64, b: &u64| { + count += 1; + if count % 1_000_000_000 == 0 { + panic!("should not happen"); + } + (*a as f64).cos().partial_cmp(&(*b as f64).cos()).unwrap() + }; + + let mut v = gen_random(len); + v.sort_by(cmp); + + black_box(count); }); - b.bytes = (v.len() * mem::size_of_val(&v[0])) as u64; + b.bytes = len as u64 * mem::size_of::<u64>() as u64; } } diff --git a/src/libcollectionstest/str.rs b/src/libcollectionstest/str.rs index 14a0819d381..384579ce6b8 100644 --- a/src/libcollectionstest/str.rs +++ b/src/libcollectionstest/str.rs @@ -530,7 +530,7 @@ fn from_utf8_mostly_ascii() { #[test] fn test_is_utf16() { - use rustc_unicode::str::is_utf16; + use std_unicode::str::is_utf16; macro_rules! 
pos { ($($e:expr),*) => { { $(assert!(is_utf16($e));)* } } @@ -1186,7 +1186,7 @@ fn test_rev_split_char_iterator_no_trailing() { #[test] fn test_utf16_code_units() { - use rustc_unicode::str::Utf16Encoder; + use std_unicode::str::Utf16Encoder; assert_eq!(Utf16Encoder::new(vec!['é', '\u{1F4A9}'].into_iter()).collect::<Vec<u16>>(), [0xE9, 0xD83D, 0xDCA9]) } diff --git a/src/libcollectionstest/string.rs b/src/libcollectionstest/string.rs index cb4fcb58452..a7d85d0bea1 100644 --- a/src/libcollectionstest/string.rs +++ b/src/libcollectionstest/string.rs @@ -132,7 +132,7 @@ fn test_from_utf16() { let s_as_utf16 = s.encode_utf16().collect::<Vec<u16>>(); let u_as_string = String::from_utf16(&u).unwrap(); - assert!(::rustc_unicode::str::is_utf16(&u)); + assert!(::std_unicode::str::is_utf16(&u)); assert_eq!(s_as_utf16, u); assert_eq!(u_as_string, s); diff --git a/src/libcore/char.rs b/src/libcore/char.rs index 966481e7b32..7f3ac13bac1 100644 --- a/src/libcore/char.rs +++ b/src/libcore/char.rs @@ -10,7 +10,7 @@ //! Character manipulation. //! -//! For more details, see ::rustc_unicode::char (a.k.a. std::char) +//! For more details, see ::std_unicode::char (a.k.a. 
std::char) #![allow(non_snake_case)] #![stable(feature = "core_char", since = "1.2.0")] diff --git a/src/libcoretest/lib.rs b/src/libcoretest/lib.rs index b8c01e570f5..92fb01e535c 100644 --- a/src/libcoretest/lib.rs +++ b/src/libcoretest/lib.rs @@ -40,7 +40,7 @@ extern crate core; extern crate test; extern crate libc; -extern crate rustc_unicode; +extern crate std_unicode; extern crate rand; mod any; diff --git a/src/librustc/infer/glb.rs b/src/librustc/infer/glb.rs index a5709e18808..8ccadc6b2af 100644 --- a/src/librustc/infer/glb.rs +++ b/src/librustc/infer/glb.rs @@ -13,6 +13,7 @@ use super::InferCtxt; use super::lattice::{self, LatticeDir}; use super::Subtype; +use traits::ObligationCause; use ty::{self, Ty, TyCtxt}; use ty::relate::{Relate, RelateResult, TypeRelation}; @@ -83,6 +84,10 @@ impl<'combine, 'infcx, 'gcx, 'tcx> LatticeDir<'infcx, 'gcx, 'tcx> self.fields.infcx } + fn cause(&self) -> &ObligationCause<'tcx> { + &self.fields.trace.cause + } + fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> { let mut sub = self.fields.sub(self.a_is_expected); sub.relate(&v, &a)?; diff --git a/src/librustc/infer/lattice.rs b/src/librustc/infer/lattice.rs index eda78428e61..f7b26a918b3 100644 --- a/src/librustc/infer/lattice.rs +++ b/src/librustc/infer/lattice.rs @@ -30,7 +30,9 @@ //! a lattice. use super::InferCtxt; +use super::type_variable::TypeVariableOrigin; +use traits::ObligationCause; use ty::TyVar; use ty::{self, Ty}; use ty::relate::{RelateResult, TypeRelation}; @@ -38,6 +40,8 @@ use ty::relate::{RelateResult, TypeRelation}; pub trait LatticeDir<'f, 'gcx: 'f+'tcx, 'tcx: 'f> : TypeRelation<'f, 'gcx, 'tcx> { fn infcx(&self) -> &'f InferCtxt<'f, 'gcx, 'tcx>; + fn cause(&self) -> &ObligationCause<'tcx>; + // Relates the type `v` to `a` and `b` such that `v` represents // the LUB/GLB of `a` and `b` as appropriate. 
fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>; @@ -64,14 +68,15 @@ pub fn super_lattice_tys<'a, 'gcx, 'tcx, L>(this: &mut L, match (&a.sty, &b.sty) { (&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..))) if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => { - let v = infcx.next_diverging_ty_var(); + let v = infcx.next_diverging_ty_var( + TypeVariableOrigin::LatticeVariable(this.cause().span)); this.relate_bound(v, a, b)?; Ok(v) } (&ty::TyInfer(TyVar(..)), _) | (_, &ty::TyInfer(TyVar(..))) => { - let v = infcx.next_ty_var(); + let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span)); this.relate_bound(v, a, b)?; Ok(v) } diff --git a/src/librustc/infer/lub.rs b/src/librustc/infer/lub.rs index 7d352be67d3..89571dea10c 100644 --- a/src/librustc/infer/lub.rs +++ b/src/librustc/infer/lub.rs @@ -13,6 +13,7 @@ use super::InferCtxt; use super::lattice::{self, LatticeDir}; use super::Subtype; +use traits::ObligationCause; use ty::{self, Ty, TyCtxt}; use ty::relate::{Relate, RelateResult, TypeRelation}; @@ -83,6 +84,10 @@ impl<'combine, 'infcx, 'gcx, 'tcx> LatticeDir<'infcx, 'gcx, 'tcx> self.fields.infcx } + fn cause(&self) -> &ObligationCause<'tcx> { + &self.fields.trace.cause + } + fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> { let mut sub = self.fields.sub(self.a_is_expected); sub.relate(&a, &v)?; diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 72ef987aefd..9b58334e658 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -45,6 +45,7 @@ use util::nodemap::{FxHashMap, FxHashSet, NodeMap}; use self::combine::CombineFields; use self::higher_ranked::HrMatchResult; use self::region_inference::{RegionVarBindings, RegionSnapshot}; +use self::type_variable::TypeVariableOrigin; use self::unify_key::ToType; mod bivariate; @@ -114,7 +115,7 @@ pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // We instantiate 
UnificationTable with bounds<Ty> because the // types that might instantiate a general type variable have an // order, represented by its upper and lower bounds. - type_variables: RefCell<type_variable::TypeVariableTable<'tcx>>, + pub type_variables: RefCell<type_variable::TypeVariableTable<'tcx>>, // Map from integral variable to the kind of integer it represents int_unification_table: RefCell<UnificationTable<ty::IntVid>>, @@ -1054,18 +1055,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { }) } - pub fn next_ty_var_id(&self, diverging: bool) -> TyVid { + pub fn next_ty_var_id(&self, diverging: bool, origin: TypeVariableOrigin) -> TyVid { self.type_variables .borrow_mut() - .new_var(diverging, None) + .new_var(diverging, origin, None) } - pub fn next_ty_var(&self) -> Ty<'tcx> { - self.tcx.mk_var(self.next_ty_var_id(false)) + pub fn next_ty_var(&self, origin: TypeVariableOrigin) -> Ty<'tcx> { + self.tcx.mk_var(self.next_ty_var_id(false, origin)) } - pub fn next_diverging_ty_var(&self) -> Ty<'tcx> { - self.tcx.mk_var(self.next_ty_var_id(true)) + pub fn next_diverging_ty_var(&self, origin: TypeVariableOrigin) -> Ty<'tcx> { + self.tcx.mk_var(self.next_ty_var_id(true, origin)) } pub fn next_int_var_id(&self) -> IntVid { @@ -1118,7 +1119,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let ty_var_id = self.type_variables .borrow_mut() - .new_var(false, default); + .new_var(false, + TypeVariableOrigin::TypeParameterDefinition(span, def.name), + default); self.tcx.mk_var(ty_var_id) } diff --git a/src/librustc/infer/type_variable.rs b/src/librustc/infer/type_variable.rs index 804765ec881..9c8419d9546 100644 --- a/src/librustc/infer/type_variable.rs +++ b/src/librustc/infer/type_variable.rs @@ -13,6 +13,7 @@ use self::TypeVariableValue::*; use self::UndoEntry::*; use hir::def_id::{DefId}; use syntax::util::small_vector::SmallVector; +use syntax::ast; use syntax_pos::Span; use ty::{self, Ty}; @@ -28,8 +29,24 @@ pub struct TypeVariableTable<'tcx> { eq_relations: 
ut::UnificationTable<ty::TyVid>, } +/// Reasons to create a type inference variable +pub enum TypeVariableOrigin { + MiscVariable(Span), + NormalizeProjectionType(Span), + TypeInference(Span), + TypeParameterDefinition(Span, ast::Name), + TransformedUpvar(Span), + SubstitutionPlaceholder(Span), + AutoDeref(Span), + AdjustmentType(Span), + DivergingStmt(Span), + DivergingBlockExpr(Span), + LatticeVariable(Span), +} + struct TypeVariableData<'tcx> { value: TypeVariableValue<'tcx>, + origin: TypeVariableOrigin, diverging: bool } @@ -107,6 +124,10 @@ impl<'tcx> TypeVariableTable<'tcx> { self.values.get(vid.index as usize).diverging } + pub fn var_origin(&self, vid: ty::TyVid) -> &TypeVariableOrigin { + &self.values.get(vid.index as usize).origin + } + /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`. /// /// Precondition: neither `a` nor `b` are known. @@ -173,10 +194,12 @@ impl<'tcx> TypeVariableTable<'tcx> { pub fn new_var(&mut self, diverging: bool, - default: Option<Default<'tcx>>) -> ty::TyVid { + origin: TypeVariableOrigin, + default: Option<Default<'tcx>>,) -> ty::TyVid { self.eq_relations.new_key(()); let index = self.values.push(TypeVariableData { value: Bounded { relations: vec![], default: default }, + origin: origin, diverging: diverging }); let v = ty::TyVid { index: index as u32 }; diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 78bbb03f40d..f2be97c8323 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -355,6 +355,11 @@ pub trait CrateStore<'tcx> { fn get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Mir<'tcx>; fn is_item_mir_available(&self, def: DefId) -> bool; + /// Take a look if we need to inline or monomorphize this. If so, we + /// will emit code for this item in the local crate, and thus + /// create a translation item for it. 
+ fn can_have_local_instance<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> bool; + // This is basically a 1-based range of ints, which is a little // silly - I may fix that. fn crates(&self) -> Vec<CrateNum>; @@ -528,6 +533,9 @@ impl<'tcx> CrateStore<'tcx> for DummyCrateStore { fn is_item_mir_available(&self, def: DefId) -> bool { bug!("is_item_mir_available") } + fn can_have_local_instance<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> bool { + bug!("can_have_local_instance") + } // This is basically a 1-based range of ints, which is a little // silly - I may fix that. diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 9798b2d587d..2c4710f1e45 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -323,19 +323,37 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // items of non-exported traits (or maybe all local traits?) unless their respective // trait items are used from inlinable code through method call syntax or UFCS, or their // trait is a lang item. 
-struct CollectPrivateImplItemsVisitor<'a> { +struct CollectPrivateImplItemsVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &'a privacy::AccessLevels, worklist: &'a mut Vec<ast::NodeId>, } -impl<'a, 'v> ItemLikeVisitor<'v> for CollectPrivateImplItemsVisitor<'a> { +impl<'a, 'tcx: 'a> ItemLikeVisitor<'tcx> for CollectPrivateImplItemsVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { // We need only trait impls here, not inherent impls, and only non-exported ones - if let hir::ItemImpl(.., Some(_), _, ref impl_item_refs) = item.node { + if let hir::ItemImpl(.., Some(ref trait_ref), _, ref impl_item_refs) = item.node { if !self.access_levels.is_reachable(item.id) { for impl_item_ref in impl_item_refs { self.worklist.push(impl_item_ref.id.node_id); } + + let trait_def_id = match trait_ref.path.def { + Def::Trait(def_id) => def_id, + _ => unreachable!() + }; + + if !trait_def_id.is_local() { + return + } + + for default_method in self.tcx.provided_trait_methods(trait_def_id) { + let node_id = self.tcx + .map + .as_local_node_id(default_method.def_id) + .unwrap(); + self.worklist.push(node_id); + } } } } @@ -369,6 +387,7 @@ pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } { let mut collect_private_impl_items = CollectPrivateImplItemsVisitor { + tcx: tcx, access_levels: access_levels, worklist: &mut reachable_context.worklist, }; diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 47f0de3ce57..e500c08ce6e 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -269,7 +269,6 @@ top_level_options!( test: bool [TRACKED], error_format: ErrorOutputType [UNTRACKED], - mir_opt_level: usize [TRACKED], // if Some, enable incremental compilation, using the given // directory to store intermediate results @@ -435,7 +434,6 @@ pub fn basic_options() -> Options { maybe_sysroot: None, target_triple: host_triple().to_string(), test: false, - mir_opt_level: 1, incremental: None, 
debugging_opts: basic_debugging_options(), prints: Vec::new(), @@ -916,8 +914,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "print layout information for each type encountered"), print_trans_items: Option<String> = (None, parse_opt_string, [UNTRACKED], "print the result of the translation item collection pass"), - mir_opt_level: Option<usize> = (None, parse_opt_uint, [TRACKED], - "set the MIR optimization level (0-3)"), + mir_opt_level: usize = (1, parse_uint, [TRACKED], + "set the MIR optimization level (0-3, default: 1)"), dump_mir: Option<String> = (None, parse_opt_string, [UNTRACKED], "dump MIR state at various points in translation"), dump_mir_dir: Option<String> = (None, parse_opt_string, [UNTRACKED], @@ -928,6 +926,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "print some statistics about AST and HIR"), mir_stats: bool = (false, parse_bool, [UNTRACKED], "print some statistics about MIR"), + always_encode_mir: bool = (false, parse_bool, [TRACKED], + "encode MIR of all functions into the crate metadata"), } pub fn default_lib_output() -> CrateType { @@ -1320,8 +1320,6 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) let debugging_opts = build_debugging_options(matches, error_format); - let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1); - let mut output_types = BTreeMap::new(); if !debugging_opts.parse_only { for list in matches.opt_strs("emit") { @@ -1530,7 +1528,6 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) maybe_sysroot: sysroot_opt, target_triple: target, test: test, - mir_opt_level: mir_opt_level, incremental: incremental, debugging_opts: debugging_opts, prints: prints, @@ -2473,7 +2470,7 @@ mod tests { assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); - opts.debugging_opts.mir_opt_level = Some(1); + opts.debugging_opts.mir_opt_level = 3; assert!(reference.dep_tracking_hash() != 
opts.dep_tracking_hash()); } } diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index 76a5e2764f2..0c0d0c010e2 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -27,6 +27,7 @@ use super::{ use fmt_macros::{Parser, Piece, Position}; use hir::def_id::DefId; use infer::{self, InferCtxt}; +use infer::type_variable::TypeVariableOrigin; use rustc::lint::builtin::EXTRA_REQUIREMENT_IN_IMPL; use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; use ty::error::ExpectedFound; @@ -38,7 +39,7 @@ use util::nodemap::{FxHashMap, FxHashSet}; use std::cmp; use std::fmt; use syntax::ast; -use syntax_pos::Span; +use syntax_pos::{DUMMY_SP, Span}; use errors::DiagnosticBuilder; #[derive(Debug, PartialEq, Eq, Hash)] @@ -790,9 +791,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.infcx.tcx } fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { - if let ty::TyParam(..) 
= ty.sty { + if let ty::TyParam(ty::ParamTy {name, ..}) = ty.sty { let infcx = self.infcx; - self.var_map.entry(ty).or_insert_with(|| infcx.next_ty_var()) + self.var_map.entry(ty).or_insert_with(|| + infcx.next_ty_var( + TypeVariableOrigin::TypeParameterDefinition(DUMMY_SP, name))) } else { ty.super_fold_with(self) } @@ -824,12 +827,26 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn need_type_info(&self, span: Span, ty: Ty<'tcx>) { + let ty = self.resolve_type_vars_if_possible(&ty); + let name = if let ty::TyInfer(ty::TyVar(ty_vid)) = ty.sty { + let ty_vars = self.type_variables.borrow(); + if let TypeVariableOrigin::TypeParameterDefinition(_, name) = + *ty_vars.var_origin(ty_vid) + { + name.to_string() + } else { + ty.to_string() + } + } else { + ty.to_string() + }; + let mut err = struct_span_err!(self.tcx.sess, span, E0282, "unable to infer enough type information about `{}`", - ty); + name); err.note("type annotations or generic parameter binding required"); - err.span_label(span, &format!("cannot infer type for `{}`", ty)); - err.emit() + err.span_label(span, &format!("cannot infer type for `{}`", name)); + err.emit(); } fn note_obligation_cause<T>(&self, diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index 173f2a0299d..6f645b5f94d 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -25,6 +25,7 @@ use super::util; use hir::def_id::DefId; use infer::InferOk; +use infer::type_variable::TypeVariableOrigin; use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap}; use syntax::ast; use syntax::symbol::Symbol; @@ -382,7 +383,12 @@ pub fn normalize_projection_type<'a, 'b, 'gcx, 'tcx>( // and a deferred predicate to resolve this when more type // information is available. 
- let ty_var = selcx.infcx().next_ty_var(); + let tcx = selcx.infcx().tcx; + let def_id = tcx.associated_items(projection_ty.trait_ref.def_id).find(|i| + i.name == projection_ty.item_name && i.kind == ty::AssociatedKind::Type + ).map(|i| i.def_id).unwrap(); + let ty_var = selcx.infcx().next_ty_var( + TypeVariableOrigin::NormalizeProjectionType(tcx.def_span(def_id))); let projection = ty::Binder(ty::ProjectionPredicate { projection_ty: projection_ty, ty: ty_var @@ -596,7 +602,12 @@ fn normalize_to_error<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tc let trait_obligation = Obligation { cause: cause, recursion_depth: depth, predicate: trait_ref.to_predicate() }; - let new_value = selcx.infcx().next_ty_var(); + let tcx = selcx.infcx().tcx; + let def_id = tcx.associated_items(projection_ty.trait_ref.def_id).find(|i| + i.name == projection_ty.item_name && i.kind == ty::AssociatedKind::Type + ).map(|i| i.def_id).unwrap(); + let new_value = selcx.infcx().next_ty_var( + TypeVariableOrigin::NormalizeProjectionType(tcx.def_span(def_id))); Normalized { value: new_value, obligations: vec![trait_obligation] diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs index 870494363c8..59e3d398b2f 100644 --- a/src/librustc/traits/specialize/mod.rs +++ b/src/librustc/traits/specialize/mod.rs @@ -127,6 +127,7 @@ pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let substs = substs.rebase_onto(tcx, trait_def_id, impl_data.substs); let substs = translate_substs(&infcx, impl_data.impl_def_id, substs, node_item.node); + let substs = infcx.tcx.erase_regions(&substs); tcx.lift(&substs).unwrap_or_else(|| { bug!("find_method: translate_substs \ returned {:?} which contains inference types/regions", diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs index b7cebe31073..2f8550e5acd 100644 --- a/src/librustc_driver/test.rs +++ b/src/librustc_driver/test.rs @@ -24,6 +24,7 @@ use rustc::ty::subst::{Kind, Subst}; use 
rustc::traits::{ObligationCause, Reveal}; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::infer::{self, InferOk, InferResult}; +use rustc::infer::type_variable::TypeVariableOrigin; use rustc_metadata::cstore::CStore; use rustc::hir::map as hir_map; use rustc::session::{self, config}; @@ -36,6 +37,7 @@ use errors::emitter::Emitter; use errors::{Level, DiagnosticBuilder}; use syntax::feature_gate::UnstableFeatures; use syntax::symbol::Symbol; +use syntax_pos::DUMMY_SP; use rustc::hir; @@ -489,7 +491,7 @@ fn sub_free_bound_false_infer() { //! does NOT hold for any instantiation of `_#1`. test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - let t_infer1 = env.infcx.next_ty_var(); + let t_infer1 = env.infcx.next_ty_var(TypeVariableOrigin::MiscVariable(DUMMY_SP)); let t_rptr_bound1 = env.t_rptr_late_bound(1); env.check_not_sub(env.t_fn(&[t_infer1], env.tcx().types.isize), env.t_fn(&[t_rptr_bound1], env.tcx().types.isize)); @@ -508,7 +510,7 @@ fn lub_free_bound_infer() { test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { env.create_simple_region_hierarchy(); - let t_infer1 = env.infcx.next_ty_var(); + let t_infer1 = env.infcx.next_ty_var(TypeVariableOrigin::MiscVariable(DUMMY_SP)); let t_rptr_bound1 = env.t_rptr_late_bound(1); let t_rptr_free1 = env.t_rptr_free(1, 1); env.check_lub(env.t_fn(&[t_infer1], env.tcx().types.isize), @@ -628,7 +630,7 @@ fn glb_bound_free() { fn glb_bound_free_infer() { test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { let t_rptr_bound1 = env.t_rptr_late_bound(1); - let t_infer1 = env.infcx.next_ty_var(); + let t_infer1 = env.infcx.next_ty_var(TypeVariableOrigin::MiscVariable(DUMMY_SP)); // compute GLB(fn(_) -> isize, for<'b> fn(&'b isize) -> isize), // which should yield for<'b> fn(&'b isize) -> isize diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index d7c15f550e0..09a0c7f9be4 100644 --- a/src/librustc_errors/lib.rs +++ b/src/librustc_errors/lib.rs @@ -31,7 +31,7 @@ extern crate term; extern crate log; #[macro_use] 
extern crate libc; -extern crate rustc_unicode; +extern crate std_unicode; extern crate serialize as rustc_serialize; // used by deriving extern crate syntax_pos; diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs index b72766bccea..3cb5244413b 100644 --- a/src/librustc_incremental/lib.rs +++ b/src/librustc_incremental/lib.rs @@ -48,6 +48,7 @@ pub mod ich; pub use assert_dep_graph::assert_dep_graph; pub use calculate_svh::compute_incremental_hashes_map; pub use calculate_svh::IncrementalHashesMap; +pub use calculate_svh::hasher::IchHasher; pub use persist::load_dep_graph; pub use persist::save_dep_graph; pub use persist::save_trans_partition; diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index 2882efb75b0..1a1bb1432ee 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -527,6 +527,11 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { self.get_crate_data(def.krate).is_item_mir_available(def.index) } + fn can_have_local_instance<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> bool { + self.dep_graph.read(DepNode::MetaData(def)); + def.is_local() || self.get_crate_data(def.krate).can_have_local_instance(tcx, def.index) + } + fn crates(&self) -> Vec<CrateNum> { let mut result = vec![]; diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 2a6063cc4bd..54c195b1881 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -504,6 +504,14 @@ impl<'tcx> EntryKind<'tcx> { EntryKind::Closure(_) => return None, }) } + fn is_const_fn(&self, meta: &CrateMetadata) -> bool { + let constness = match *self { + EntryKind::Method(data) => data.decode(meta).fn_data.constness, + EntryKind::Fn(data) => data.decode(meta).constness, + _ => hir::Constness::NotConst, + }; + constness == hir::Constness::Const + } } impl<'a, 'tcx> CrateMetadata { @@ -839,6 +847,29 @@ impl<'a, 'tcx> CrateMetadata { 
self.maybe_entry(id).and_then(|item| item.decode(self).mir).is_some() } + pub fn can_have_local_instance(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: DefIndex) -> bool { + self.maybe_entry(id).map_or(false, |item| { + let item = item.decode(self); + // if we don't have a MIR, then this item was never meant to be locally instantiated + // or we have a bug in the metadata serialization + item.mir.is_some() && ( + // items with generics always can have local instances if monomorphized + item.generics.map_or(false, |generics| { + let generics = generics.decode((self, tcx)); + generics.parent_types != 0 || !generics.types.is_empty() + }) || + match item.kind { + EntryKind::Closure(_) => true, + _ => false, + } || + item.kind.is_const_fn(self) || + attr::requests_inline(&self.get_attributes(&item)) + ) + }) + } + pub fn maybe_get_item_mir(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex) @@ -1051,12 +1082,7 @@ impl<'a, 'tcx> CrateMetadata { } pub fn is_const_fn(&self, id: DefIndex) -> bool { - let constness = match self.entry(id).kind { - EntryKind::Method(data) => data.decode(self).fn_data.constness, - EntryKind::Fn(data) => data.decode(self).constness, - _ => hir::Constness::NotConst, - }; - constness == hir::Constness::Const + self.entry(id).kind.is_const_fn(self) } pub fn is_foreign_item(&self, id: DefIndex) -> bool { diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 01cb0f823e8..443f3fbaa6e 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -577,7 +577,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { let types = generics.parent_types as usize + generics.types.len(); let needs_inline = types > 0 || attr::requests_inline(&ast_item.attrs); let is_const_fn = sig.constness == hir::Constness::Const; - (is_const_fn, needs_inline || is_const_fn) + let always_encode_mir = self.tcx.sess.opts.debugging_opts.always_encode_mir; + (is_const_fn, needs_inline || is_const_fn || always_encode_mir) } else { (false, 
false) }; @@ -838,11 +839,13 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { _ => None, }, mir: match item.node { + hir::ItemStatic(..) | hir::ItemConst(..) => self.encode_mir(def_id), hir::ItemFn(_, _, constness, _, ref generics, _) => { let tps_len = generics.ty_params.len(); let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); - if needs_inline || constness == hir::Constness::Const { + let always_encode_mir = self.tcx.sess.opts.debugging_opts.always_encode_mir; + if needs_inline || constness == hir::Constness::Const || always_encode_mir { self.encode_mir(def_id) } else { None diff --git a/src/librustc_mir/hair/cx/block.rs b/src/librustc_mir/hair/cx/block.rs index cb69de2cb3c..b355c8f2c4c 100644 --- a/src/librustc_mir/hair/cx/block.rs +++ b/src/librustc_mir/hair/cx/block.rs @@ -26,7 +26,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Block { extent: cx.tcx.region_maps.node_extent(self.id), span: self.span, stmts: stmts, - expr: self.expr.to_ref() + expr: self.expr.to_ref(), } } } @@ -34,39 +34,44 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Block { fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, block_id: ast::NodeId, stmts: &'tcx [hir::Stmt]) - -> Vec<StmtRef<'tcx>> -{ + -> Vec<StmtRef<'tcx>> { let mut result = vec![]; for (index, stmt) in stmts.iter().enumerate() { match stmt.node { - hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) => + hir::StmtExpr(ref expr, id) | + hir::StmtSemi(ref expr, id) => { result.push(StmtRef::Mirror(Box::new(Stmt { span: stmt.span, kind: StmtKind::Expr { scope: cx.tcx.region_maps.node_extent(id), - expr: expr.to_ref() + expr: expr.to_ref(), + }, + }))) + } + hir::StmtDecl(ref decl, id) => { + match decl.node { + hir::DeclItem(..) => { + // ignore for purposes of the MIR } - }))), - hir::StmtDecl(ref decl, id) => match decl.node { - hir::DeclItem(..) 
=> { /* ignore for purposes of the MIR */ } - hir::DeclLocal(ref local) => { - let remainder_extent = CodeExtentData::Remainder(BlockRemainder { - block: block_id, - first_statement_index: index as u32, - }); - let remainder_extent = - cx.tcx.region_maps.lookup_code_extent(remainder_extent); + hir::DeclLocal(ref local) => { + let remainder_extent = CodeExtentData::Remainder(BlockRemainder { + block: block_id, + first_statement_index: index as u32, + }); + let remainder_extent = + cx.tcx.region_maps.lookup_code_extent(remainder_extent); - let pattern = Pattern::from_hir(cx.tcx, &local.pat); - result.push(StmtRef::Mirror(Box::new(Stmt { - span: stmt.span, - kind: StmtKind::Let { - remainder_scope: remainder_extent, - init_scope: cx.tcx.region_maps.node_extent(id), - pattern: pattern, - initializer: local.init.to_ref(), - }, - }))); + let pattern = Pattern::from_hir(cx.tcx, &local.pat); + result.push(StmtRef::Mirror(Box::new(Stmt { + span: stmt.span, + kind: StmtKind::Let { + remainder_scope: remainder_extent, + init_scope: cx.tcx.region_maps.node_extent(id), + pattern: pattern, + initializer: local.init.to_ref(), + }, + }))); + } } } } diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index bd4724159b4..d579cdb042f 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -36,7 +36,8 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { let adj = cx.tcx.tables().adjustments.get(&self.id).cloned(); debug!("make_mirror: unadjusted-expr={:?} applying adjustments={:?}", - expr, adj); + expr, + adj); // Now apply adjustments, if any. 
match adj.map(|adj| (adj.kind, adj.target)) { @@ -78,41 +79,44 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { for i in 0..autoderefs { let i = i as u32; let adjusted_ty = - expr.ty.adjust_for_autoderef( - cx.tcx, - self.id, - self.span, - i, - |mc| cx.tcx.tables().method_map.get(&mc).map(|m| m.ty)); - debug!("make_mirror: autoderef #{}, adjusted_ty={:?}", i, adjusted_ty); + expr.ty.adjust_for_autoderef(cx.tcx, self.id, self.span, i, |mc| { + cx.tcx.tables().method_map.get(&mc).map(|m| m.ty) + }); + debug!("make_mirror: autoderef #{}, adjusted_ty={:?}", + i, + adjusted_ty); let method_key = ty::MethodCall::autoderef(self.id, i); - let meth_ty = - cx.tcx.tables().method_map.get(&method_key).map(|m| m.ty); + let meth_ty = cx.tcx.tables().method_map.get(&method_key).map(|m| m.ty); let kind = if let Some(meth_ty) = meth_ty { debug!("make_mirror: overloaded autoderef (meth_ty={:?})", meth_ty); let ref_ty = cx.tcx.no_late_bound_regions(&meth_ty.fn_ret()); let (region, mutbl) = match ref_ty { - Some(&ty::TyS { - sty: ty::TyRef(region, mt), .. - }) => (region, mt.mutbl), - _ => span_bug!(expr.span, "autoderef returned bad type") + Some(&ty::TyS { sty: ty::TyRef(region, mt), .. 
}) => (region, mt.mutbl), + _ => span_bug!(expr.span, "autoderef returned bad type"), }; expr = Expr { temp_lifetime: temp_lifetime, - ty: cx.tcx.mk_ref( - region, ty::TypeAndMut { ty: expr.ty, mutbl: mutbl }), + ty: cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: expr.ty, + mutbl: mutbl, + }), span: expr.span, kind: ExprKind::Borrow { region: region, borrow_kind: to_borrow_kind(mutbl), - arg: expr.to_ref() - } + arg: expr.to_ref(), + }, }; - overloaded_lvalue(cx, self, method_key, - PassArgs::ByRef, expr.to_ref(), vec![]) + overloaded_lvalue(cx, + self, + method_key, + PassArgs::ByRef, + expr.to_ref(), + vec![]) } else { debug!("make_mirror: built-in autoderef"); ExprKind::Deref { arg: expr.to_ref() } @@ -148,7 +152,11 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { let region = cx.tcx.mk_region(region); expr = Expr { temp_lifetime: temp_lifetime, - ty: cx.tcx.mk_ref(region, ty::TypeAndMut { ty: expr.ty, mutbl: m }), + ty: cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: expr.ty, + mutbl: m, + }), span: self.span, kind: ExprKind::Borrow { region: region, @@ -240,12 +248,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let sig = match method.ty.sty { ty::TyFnDef(.., fn_ty) => &fn_ty.sig, - _ => span_bug!(expr.span, "type of method is not an fn") + _ => span_bug!(expr.span, "type of method is not an fn"), }; - let sig = cx.tcx.no_late_bound_regions(sig).unwrap_or_else(|| { - span_bug!(expr.span, "method call has late-bound regions") - }); + let sig = cx.tcx + .no_late_bound_regions(sig) + .unwrap_or_else(|| span_bug!(expr.span, "method call has late-bound regions")); assert_eq!(sig.inputs().len(), 2); @@ -253,44 +261,49 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty: sig.inputs()[1], temp_lifetime: temp_lifetime, span: expr.span, - kind: ExprKind::Tuple { - fields: args.iter().map(ToRef::to_ref).collect() - } + kind: ExprKind::Tuple { fields: args.iter().map(ToRef::to_ref).collect() }, }; ExprKind::Call { ty: 
method.ty, fun: method.to_ref(), - args: vec![fun.to_ref(), tupled_args.to_ref()] + args: vec![fun.to_ref(), tupled_args.to_ref()], } } else { let adt_data = if let hir::ExprPath(hir::QPath::Resolved(_, ref path)) = fun.node { // Tuple-like ADTs are represented as ExprCall. We convert them here. - expr_ty.ty_adt_def().and_then(|adt_def|{ + expr_ty.ty_adt_def().and_then(|adt_def| { match path.def { Def::VariantCtor(variant_id, CtorKind::Fn) => { Some((adt_def, adt_def.variant_index_with_id(variant_id))) - }, - Def::StructCtor(_, CtorKind::Fn) => { - Some((adt_def, 0)) - }, - _ => None + } + Def::StructCtor(_, CtorKind::Fn) => Some((adt_def, 0)), + _ => None, } }) - } else { None }; + } else { + None + }; if let Some((adt_def, index)) = adt_data { - let substs = cx.tcx.tables().node_id_item_substs(fun.id) + let substs = cx.tcx + .tables() + .node_id_item_substs(fun.id) .unwrap_or_else(|| cx.tcx.intern_substs(&[])); - let field_refs = args.iter().enumerate().map(|(idx, e)| FieldExprRef { - name: Field::new(idx), - expr: e.to_ref() - }).collect(); + let field_refs = args.iter() + .enumerate() + .map(|(idx, e)| { + FieldExprRef { + name: Field::new(idx), + expr: e.to_ref(), + } + }) + .collect(); ExprKind::Adt { adt_def: adt_def, substs: substs, variant_index: index, fields: field_refs, - base: None + base: None, } } else { ExprKind::Call { @@ -314,9 +327,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } } - hir::ExprBlock(ref blk) => { - ExprKind::Block { body: &blk } - } + hir::ExprBlock(ref blk) => ExprKind::Block { body: &blk }, hir::ExprAssign(ref lhs, ref rhs) => { ExprKind::Assign { @@ -332,8 +343,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } else { PassArgs::ByRef }; - overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), - pass_args, lhs.to_ref(), vec![rhs]) + overloaded_operator(cx, + expr, + ty::MethodCall::expr(expr.id), + pass_args, + lhs.to_ref(), + vec![rhs]) } else { ExprKind::AssignOp { 
op: bin_op(op.node), @@ -343,9 +358,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } } - hir::ExprLit(..) => ExprKind::Literal { - literal: cx.const_eval_literal(expr) - }, + hir::ExprLit(..) => ExprKind::Literal { literal: cx.const_eval_literal(expr) }, hir::ExprBinary(op, ref lhs, ref rhs) => { if cx.tcx.tables().is_method_call(expr.id) { @@ -354,8 +367,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } else { PassArgs::ByRef }; - overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), - pass_args, lhs.to_ref(), vec![rhs]) + overloaded_operator(cx, + expr, + ty::MethodCall::expr(expr.id), + pass_args, + lhs.to_ref(), + vec![rhs]) } else { // FIXME overflow match (op.node, cx.constness) { @@ -405,8 +422,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprIndex(ref lhs, ref index) => { if cx.tcx.tables().is_method_call(expr.id) { - overloaded_lvalue(cx, expr, ty::MethodCall::expr(expr.id), - PassArgs::ByValue, lhs.to_ref(), vec![index]) + overloaded_lvalue(cx, + expr, + ty::MethodCall::expr(expr.id), + PassArgs::ByValue, + lhs.to_ref(), + vec![index]) } else { ExprKind::Index { lhs: lhs.to_ref(), @@ -417,8 +438,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprUnary(hir::UnOp::UnDeref, ref arg) => { if cx.tcx.tables().is_method_call(expr.id) { - overloaded_lvalue(cx, expr, ty::MethodCall::expr(expr.id), - PassArgs::ByValue, arg.to_ref(), vec![]) + overloaded_lvalue(cx, + expr, + ty::MethodCall::expr(expr.id), + PassArgs::ByValue, + arg.to_ref(), + vec![]) } else { ExprKind::Deref { arg: arg.to_ref() } } @@ -426,8 +451,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprUnary(hir::UnOp::UnNot, ref arg) => { if cx.tcx.tables().is_method_call(expr.id) { - overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), - PassArgs::ByValue, arg.to_ref(), vec![]) + overloaded_operator(cx, + expr, + 
ty::MethodCall::expr(expr.id), + PassArgs::ByValue, + arg.to_ref(), + vec![]) } else { ExprKind::Unary { op: UnOp::Not, @@ -438,14 +467,16 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprUnary(hir::UnOp::UnNeg, ref arg) => { if cx.tcx.tables().is_method_call(expr.id) { - overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), - PassArgs::ByValue, arg.to_ref(), vec![]) + overloaded_operator(cx, + expr, + ty::MethodCall::expr(expr.id), + PassArgs::ByValue, + arg.to_ref(), + vec![]) } else { // FIXME runtime-overflow if let hir::ExprLit(_) = arg.node { - ExprKind::Literal { - literal: cx.const_eval_literal(expr), - } + ExprKind::Literal { literal: cx.const_eval_literal(expr) } } else { ExprKind::Unary { op: UnOp::Neg, @@ -457,56 +488,54 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprStruct(ref qpath, ref fields, ref base) => { match expr_ty.sty { - ty::TyAdt(adt, substs) => match adt.adt_kind() { - AdtKind::Struct | AdtKind::Union => { - let field_refs = field_refs(&adt.variants[0], fields); - ExprKind::Adt { - adt_def: adt, - variant_index: 0, - substs: substs, - fields: field_refs, - base: base.as_ref().map(|base| { - FruInfo { - base: base.to_ref(), - field_types: - cx.tcx.tables().fru_field_types[&expr.id].clone() - } - }) + ty::TyAdt(adt, substs) => { + match adt.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + let field_refs = field_refs(&adt.variants[0], fields); + ExprKind::Adt { + adt_def: adt, + variant_index: 0, + substs: substs, + fields: field_refs, + base: base.as_ref().map(|base| { + FruInfo { + base: base.to_ref(), + field_types: cx.tcx.tables().fru_field_types[&expr.id] + .clone(), + } + }), + } } - } - AdtKind::Enum => { - let def = match *qpath { - hir::QPath::Resolved(_, ref path) => path.def, - hir::QPath::TypeRelative(..) 
=> Def::Err - }; - match def { - Def::Variant(variant_id) => { - assert!(base.is_none()); - - let index = adt.variant_index_with_id(variant_id); - let field_refs = field_refs(&adt.variants[index], fields); - ExprKind::Adt { - adt_def: adt, - variant_index: index, - substs: substs, - fields: field_refs, - base: None + AdtKind::Enum => { + let def = match *qpath { + hir::QPath::Resolved(_, ref path) => path.def, + hir::QPath::TypeRelative(..) => Def::Err, + }; + match def { + Def::Variant(variant_id) => { + assert!(base.is_none()); + + let index = adt.variant_index_with_id(variant_id); + let field_refs = field_refs(&adt.variants[index], fields); + ExprKind::Adt { + adt_def: adt, + variant_index: index, + substs: substs, + fields: field_refs, + base: None, + } + } + _ => { + span_bug!(expr.span, "unexpected def: {:?}", def); } - } - _ => { - span_bug!( - expr.span, - "unexpected def: {:?}", - def); } } } - }, + } _ => { - span_bug!( - expr.span, - "unexpected type for struct literal: {:?}", - expr_ty); + span_bug!(expr.span, + "unexpected type for struct literal: {:?}", + expr_ty); } } } @@ -516,9 +545,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let (def_id, substs) = match closure_ty.sty { ty::TyClosure(def_id, substs) => (def_id, substs), _ => { - span_bug!(expr.span, - "closure expr w/o closure type: {:?}", - closure_ty); + span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty); } }; let upvars = cx.tcx.with_freevars(expr.id, |freevars| { @@ -543,69 +570,81 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ExprKind::InlineAsm { asm: asm, outputs: outputs.to_ref(), - inputs: inputs.to_ref() + inputs: inputs.to_ref(), } } // Now comes the rote stuff: - - hir::ExprRepeat(ref v, ref c) => ExprKind::Repeat { - value: v.to_ref(), - count: TypedConstVal { - ty: cx.tcx.tables().expr_ty(c), - span: c.span, - value: match const_eval::eval_const_expr(cx.tcx.global_tcx(), c) { - 
ConstVal::Integral(ConstInt::Usize(u)) => u, - other => bug!("constant evaluation of repeat count yielded {:?}", other), + hir::ExprRepeat(ref v, ref c) => { + ExprKind::Repeat { + value: v.to_ref(), + count: TypedConstVal { + ty: cx.tcx.tables().expr_ty(c), + span: c.span, + value: match const_eval::eval_const_expr(cx.tcx.global_tcx(), c) { + ConstVal::Integral(ConstInt::Usize(u)) => u, + other => bug!("constant evaluation of repeat count yielded {:?}", other), + }, }, } - }, - hir::ExprRet(ref v) => - ExprKind::Return { value: v.to_ref() }, - hir::ExprBreak(label, ref value) => + } + hir::ExprRet(ref v) => ExprKind::Return { value: v.to_ref() }, + hir::ExprBreak(label, ref value) => { ExprKind::Break { - label: label.map(|label| { - cx.tcx.region_maps.node_extent(label.loop_id) - }), - value: value.to_ref() - }, - hir::ExprAgain(label) => + label: label.map(|label| cx.tcx.region_maps.node_extent(label.loop_id)), + value: value.to_ref(), + } + } + hir::ExprAgain(label) => { ExprKind::Continue { - label: label.map(|label| { - cx.tcx.region_maps.node_extent(label.loop_id) - }) - }, - hir::ExprMatch(ref discr, ref arms, _) => - ExprKind::Match { discriminant: discr.to_ref(), - arms: arms.iter().map(|a| convert_arm(cx, a)).collect() }, - hir::ExprIf(ref cond, ref then, ref otherwise) => - ExprKind::If { condition: cond.to_ref(), - then: block::to_expr_ref(cx, then), - otherwise: otherwise.to_ref() }, - hir::ExprWhile(ref cond, ref body, _) => - ExprKind::Loop { condition: Some(cond.to_ref()), - body: block::to_expr_ref(cx, body) }, - hir::ExprLoop(ref body, _, _) => - ExprKind::Loop { condition: None, - body: block::to_expr_ref(cx, body) }, + label: label.map(|label| cx.tcx.region_maps.node_extent(label.loop_id)), + } + } + hir::ExprMatch(ref discr, ref arms, _) => { + ExprKind::Match { + discriminant: discr.to_ref(), + arms: arms.iter().map(|a| convert_arm(cx, a)).collect(), + } + } + hir::ExprIf(ref cond, ref then, ref otherwise) => { + ExprKind::If { + condition: 
cond.to_ref(), + then: block::to_expr_ref(cx, then), + otherwise: otherwise.to_ref(), + } + } + hir::ExprWhile(ref cond, ref body, _) => { + ExprKind::Loop { + condition: Some(cond.to_ref()), + body: block::to_expr_ref(cx, body), + } + } + hir::ExprLoop(ref body, _, _) => { + ExprKind::Loop { + condition: None, + body: block::to_expr_ref(cx, body), + } + } hir::ExprField(ref source, name) => { let index = match cx.tcx.tables().expr_ty_adjusted(source).sty { - ty::TyAdt(adt_def, _) => - adt_def.variants[0].index_of_field_named(name.node), - ref ty => - span_bug!(expr.span, "field of non-ADT: {:?}", ty), + ty::TyAdt(adt_def, _) => adt_def.variants[0].index_of_field_named(name.node), + ref ty => span_bug!(expr.span, "field of non-ADT: {:?}", ty), }; - let index = index.unwrap_or_else(|| { - span_bug!( - expr.span, - "no index found for field `{}`", - name.node) - }); - ExprKind::Field { lhs: source.to_ref(), name: Field::new(index) } + let index = + index.unwrap_or_else(|| { + span_bug!(expr.span, "no index found for field `{}`", name.node) + }); + ExprKind::Field { + lhs: source.to_ref(), + name: Field::new(index), + } + } + hir::ExprTupField(ref source, index) => { + ExprKind::Field { + lhs: source.to_ref(), + name: Field::new(index.node as usize), + } } - hir::ExprTupField(ref source, index) => - ExprKind::Field { lhs: source.to_ref(), - name: Field::new(index.node as usize) }, hir::ExprCast(ref source, _) => { // Check to see if this cast is a "coercion cast", where the cast is actually done // using a coercion (or is a no-op). 
@@ -616,17 +655,15 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ExprKind::Cast { source: source.to_ref() } } } - hir::ExprType(ref source, _) => - return source.make_mirror(cx), - hir::ExprBox(ref value) => + hir::ExprType(ref source, _) => return source.make_mirror(cx), + hir::ExprBox(ref value) => { ExprKind::Box { value: value.to_ref(), - value_extents: cx.tcx.region_maps.node_extent(value.id) - }, - hir::ExprArray(ref fields) => - ExprKind::Vec { fields: fields.to_ref() }, - hir::ExprTup(ref fields) => - ExprKind::Tuple { fields: fields.to_ref() }, + value_extents: cx.tcx.region_maps.node_extent(value.id), + } + } + hir::ExprArray(ref fields) => ExprKind::Vec { fields: fields.to_ref() }, + hir::ExprTup(ref fields) => ExprKind::Tuple { fields: fields.to_ref() }, }; Expr { @@ -663,8 +700,7 @@ fn to_borrow_kind(m: hir::Mutability) -> BorrowKind { } } -fn convert_arm<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, - arm: &'tcx hir::Arm) -> Arm<'tcx> { +fn convert_arm<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, arm: &'tcx hir::Arm) -> Arm<'tcx> { Arm { patterns: arm.pats.iter().map(|p| Pattern::from_hir(cx.tcx, p)).collect(), guard: arm.guard.to_ref(), @@ -676,41 +712,48 @@ fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, expr: &'tcx hir::Expr, def: Def) -> ExprKind<'tcx> { - let substs = cx.tcx.tables().node_id_item_substs(expr.id) + let substs = cx.tcx + .tables() + .node_id_item_substs(expr.id) .unwrap_or_else(|| cx.tcx.intern_substs(&[])); let def_id = match def { // A regular function, constructor function or a constant. 
- Def::Fn(def_id) | Def::Method(def_id) | + Def::Fn(def_id) | + Def::Method(def_id) | Def::StructCtor(def_id, CtorKind::Fn) | Def::VariantCtor(def_id, CtorKind::Fn) | - Def::Const(def_id) | Def::AssociatedConst(def_id) => def_id, + Def::Const(def_id) | + Def::AssociatedConst(def_id) => def_id, Def::StructCtor(def_id, CtorKind::Const) | Def::VariantCtor(def_id, CtorKind::Const) => { match cx.tcx.tables().node_id_to_type(expr.id).sty { // A unit struct/variant which is used as a value. // We return a completely different ExprKind here to account for this special case. - ty::TyAdt(adt_def, substs) => return ExprKind::Adt { - adt_def: adt_def, - variant_index: adt_def.variant_index_with_id(def_id), - substs: substs, - fields: vec![], - base: None, - }, - ref sty => bug!("unexpected sty: {:?}", sty) + ty::TyAdt(adt_def, substs) => { + return ExprKind::Adt { + adt_def: adt_def, + variant_index: adt_def.variant_index_with_id(def_id), + substs: substs, + fields: vec![], + base: None, + } + } + ref sty => bug!("unexpected sty: {:?}", sty), } } - Def::Static(node_id, _) => return ExprKind::StaticRef { - id: node_id, - }, + Def::Static(node_id, _) => return ExprKind::StaticRef { id: node_id }, Def::Local(..) | Def::Upvar(..) 
=> return convert_var(cx, expr, def), _ => span_bug!(expr.span, "def `{:?}` not yet implemented", def), }; ExprKind::Literal { - literal: Literal::Item { def_id: def_id, substs: substs } + literal: Literal::Item { + def_id: def_id, + substs: substs, + }, } } @@ -723,14 +766,15 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, match def { Def::Local(def_id) => { let node_id = cx.tcx.map.as_local_node_id(def_id).unwrap(); - ExprKind::VarRef { - id: node_id, - } + ExprKind::VarRef { id: node_id } } Def::Upvar(def_id, index, closure_expr_id) => { let id_var = cx.tcx.map.as_local_node_id(def_id).unwrap(); - debug!("convert_var(upvar({:?}, {:?}, {:?}))", id_var, index, closure_expr_id); + debug!("convert_var(upvar({:?}, {:?}, {:?}))", + id_var, + index, + closure_expr_id); let var_ty = cx.tcx.tables().node_id_to_type(id_var); let body_id = match cx.tcx.map.find(closure_expr_id) { @@ -761,41 +805,45 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let self_expr = match cx.tcx.closure_kind(cx.tcx.map.local_def_id(closure_expr_id)) { ty::ClosureKind::Fn => { - let ref_closure_ty = - cx.tcx.mk_ref(region, - ty::TypeAndMut { ty: closure_ty, - mutbl: hir::MutImmutable }); + let ref_closure_ty = cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: closure_ty, + mutbl: hir::MutImmutable, + }); Expr { ty: closure_ty, temp_lifetime: temp_lifetime, span: expr.span, kind: ExprKind::Deref { arg: Expr { - ty: ref_closure_ty, - temp_lifetime: temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef - }.to_ref() - } + ty: ref_closure_ty, + temp_lifetime: temp_lifetime, + span: expr.span, + kind: ExprKind::SelfRef, + } + .to_ref(), + }, } } ty::ClosureKind::FnMut => { - let ref_closure_ty = - cx.tcx.mk_ref(region, - ty::TypeAndMut { ty: closure_ty, - mutbl: hir::MutMutable }); + let ref_closure_ty = cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: closure_ty, + mutbl: hir::MutMutable, + }); Expr { ty: closure_ty, temp_lifetime: temp_lifetime, span: expr.span, kind: 
ExprKind::Deref { arg: Expr { - ty: ref_closure_ty, - temp_lifetime: temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef - }.to_ref() - } + ty: ref_closure_ty, + temp_lifetime: temp_lifetime, + span: expr.span, + kind: ExprKind::SelfRef, + } + .to_ref(), + }, } } ty::ClosureKind::FnOnce => { @@ -823,10 +871,7 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let upvar_capture = match cx.tcx.tables().upvar_capture(upvar_id) { Some(c) => c, None => { - span_bug!( - expr.span, - "no upvar_capture for {:?}", - upvar_id); + span_bug!(expr.span, "no upvar_capture for {:?}", upvar_id); } }; match upvar_capture { @@ -834,15 +879,16 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty::UpvarCapture::ByRef(borrow) => { ExprKind::Deref { arg: Expr { - temp_lifetime: temp_lifetime, - ty: cx.tcx.mk_ref(borrow.region, - ty::TypeAndMut { - ty: var_ty, - mutbl: borrow.kind.to_mutbl_lossy() - }), - span: expr.span, - kind: field_kind, - }.to_ref() + temp_lifetime: temp_lifetime, + ty: cx.tcx.mk_ref(borrow.region, + ty::TypeAndMut { + ty: var_ty, + mutbl: borrow.kind.to_mutbl_lossy(), + }), + span: expr.span, + kind: field_kind, + } + .to_ref(), } } } @@ -894,30 +940,31 @@ fn overloaded_operator<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // the arguments, unfortunately, do not, so if this is a ByRef // operator, we have to gin up the autorefs (but by value is easy) match pass_args { - PassArgs::ByValue => { - argrefs.extend(args.iter().map(|arg| arg.to_ref())) - } + PassArgs::ByValue => argrefs.extend(args.iter().map(|arg| arg.to_ref())), PassArgs::ByRef => { let region = cx.tcx.node_scope_region(expr.id); let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); - argrefs.extend( - args.iter() - .map(|arg| { - let arg_ty = cx.tcx.tables().expr_ty_adjusted(arg); - let adjusted_ty = - cx.tcx.mk_ref(region, - ty::TypeAndMut { ty: arg_ty, - mutbl: hir::MutImmutable }); - Expr { + argrefs.extend(args.iter() + .map(|arg| { + let arg_ty = 
cx.tcx.tables().expr_ty_adjusted(arg); + let adjusted_ty = cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: arg_ty, + mutbl: hir::MutImmutable, + }); + Expr { temp_lifetime: temp_lifetime, ty: adjusted_ty, span: expr.span, - kind: ExprKind::Borrow { region: region, - borrow_kind: BorrowKind::Shared, - arg: arg.to_ref() } - }.to_ref() - })) + kind: ExprKind::Borrow { + region: region, + borrow_kind: BorrowKind::Shared, + arg: arg.to_ref(), + }, + } + .to_ref() + })) } } @@ -981,9 +1028,7 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, kind: convert_var(cx, closure_expr, freevar.def), }; match upvar_capture { - ty::UpvarCapture::ByValue => { - captured_var.to_ref() - } + ty::UpvarCapture::ByValue => captured_var.to_ref(), ty::UpvarCapture::ByRef(upvar_borrow) => { let borrow_kind = match upvar_borrow.kind { ty::BorrowKind::ImmBorrow => BorrowKind::Shared, @@ -991,13 +1036,16 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty::BorrowKind::MutBorrow => BorrowKind::Mut, }; Expr { - temp_lifetime: temp_lifetime, - ty: freevar_ty, - span: closure_expr.span, - kind: ExprKind::Borrow { region: upvar_borrow.region, - borrow_kind: borrow_kind, - arg: captured_var.to_ref() } - }.to_ref() + temp_lifetime: temp_lifetime, + ty: freevar_ty, + span: closure_expr.span, + kind: ExprKind::Borrow { + region: upvar_borrow.region, + borrow_kind: borrow_kind, + arg: captured_var.to_ref(), + }, + } + .to_ref() } } } @@ -1005,12 +1053,13 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, /// Converts a list of named fields (i.e. for struct-like struct/enum ADTs) into FieldExprRef. 
fn field_refs<'tcx>(variant: &'tcx VariantDef, fields: &'tcx [hir::Field]) - -> Vec<FieldExprRef<'tcx>> -{ + -> Vec<FieldExprRef<'tcx>> { fields.iter() - .map(|field| FieldExprRef { - name: Field::new(variant.index_of_field_named(field.name.node).unwrap()), - expr: field.expr.to_ref(), - }) - .collect() + .map(|field| { + FieldExprRef { + name: Field::new(variant.index_of_field_named(field.name.node).unwrap()), + expr: field.expr.to_ref(), + } + }) + .collect() } diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index e7a6b40c830..7d111fccd00 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -8,12 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * This module contains the code to convert from the wacky tcx data - * structures into the hair. The `builder` is generally ignorant of - * the tcx etc, and instead goes through the `Cx` for most of its - * work. - */ +//! This module contains the code to convert from the wacky tcx data +//! structures into the hair. The `builder` is generally ignorant of +//! the tcx etc, and instead goes through the `Cx` for most of its +//! work. +//! use hair::*; use rustc::mir::transform::MirSource; @@ -32,19 +31,17 @@ use rustc::hir; use rustc_const_math::{ConstInt, ConstUsize}; #[derive(Copy, Clone)] -pub struct Cx<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { +pub struct Cx<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, constness: hir::Constness, /// True if this constant/function needs overflow checks. - check_overflow: bool + check_overflow: bool, } impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { - pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - src: MirSource) - -> Cx<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, src: MirSource) -> Cx<'a, 'gcx, 'tcx> { let constness = match src { MirSource::Const(_) | MirSource::Static(..) 
=> hir::Constness::Const, @@ -52,7 +49,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { let fn_like = FnLikeNode::from_node(infcx.tcx.map.get(id)); fn_like.map_or(hir::Constness::NotConst, |f| f.constness()) } - MirSource::Promoted(..) => bug!() + MirSource::Promoted(..) => bug!(), }; let src_node_id = src.item_id(); @@ -70,13 +67,16 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { // Some functions always have overflow checks enabled, // however, they may not get codegen'd, depending on // the settings for the crate they are translated in. - let mut check_overflow = attrs.iter().any(|item| { - item.check_name("rustc_inherit_overflow_checks") - }); + let mut check_overflow = attrs.iter() + .any(|item| item.check_name("rustc_inherit_overflow_checks")); // Respect -Z force-overflow-checks=on and -C debug-assertions. - check_overflow |= infcx.tcx.sess.opts.debugging_opts.force_overflow_checks - .unwrap_or(infcx.tcx.sess.opts.debug_assertions); + check_overflow |= infcx.tcx + .sess + .opts + .debugging_opts + .force_overflow_checks + .unwrap_or(infcx.tcx.sess.opts.debug_assertions); // Constants and const fn's always need overflow checks. 
check_overflow |= constness == hir::Constness::Const; @@ -85,7 +85,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { tcx: infcx.tcx, infcx: infcx, constness: constness, - check_overflow: check_overflow + check_overflow: check_overflow, } } } @@ -102,7 +102,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { pub fn usize_literal(&mut self, value: u64) -> Literal<'tcx> { match ConstUsize::new(value, self.tcx.sess.target.uint_type) { - Ok(val) => Literal::Value { value: ConstVal::Integral(ConstInt::Usize(val))}, + Ok(val) => Literal::Value { value: ConstVal::Integral(ConstInt::Usize(val)) }, Err(_) => bug!("usize literal out of range for target"), } } @@ -128,9 +128,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } pub fn const_eval_literal(&mut self, e: &hir::Expr) -> Literal<'tcx> { - Literal::Value { - value: const_eval::eval_const_expr(self.tcx.global_tcx(), e) - } + Literal::Value { value: const_eval::eval_const_expr(self.tcx.global_tcx(), e) } } pub fn trait_method(&mut self, @@ -145,10 +143,11 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { if item.kind == ty::AssociatedKind::Method && item.name == method_name { let method_ty = self.tcx.item_type(item.def_id); let method_ty = method_ty.subst(self.tcx, substs); - return (method_ty, Literal::Item { - def_id: item.def_id, - substs: substs, - }); + return (method_ty, + Literal::Item { + def_id: item.def_id, + substs: substs, + }); } } @@ -168,7 +167,8 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { pub fn needs_drop(&mut self, ty: Ty<'tcx>) -> bool { let ty = self.tcx.lift_to_global(&ty).unwrap_or_else(|| { bug!("MIR: Cx::needs_drop({}) got \ - type with inference types/regions", ty); + type with inference types/regions", + ty); }); self.tcx.type_needs_drop_given_env(ty, &self.infcx.parameter_environment) } diff --git a/src/librustc_mir/hair/cx/to_ref.rs b/src/librustc_mir/hair/cx/to_ref.rs index 63dbde47438..6930a959d65 100644 --- a/src/librustc_mir/hair/cx/to_ref.rs +++ b/src/librustc_mir/hair/cx/to_ref.rs @@ -18,7 +18,7 @@ pub 
trait ToRef { fn to_ref(self) -> Self::Output; } -impl<'a,'tcx:'a> ToRef for &'tcx hir::Expr { +impl<'a, 'tcx: 'a> ToRef for &'tcx hir::Expr { type Output = ExprRef<'tcx>; fn to_ref(self) -> ExprRef<'tcx> { @@ -26,7 +26,7 @@ impl<'a,'tcx:'a> ToRef for &'tcx hir::Expr { } } -impl<'a,'tcx:'a> ToRef for &'tcx P<hir::Expr> { +impl<'a, 'tcx: 'a> ToRef for &'tcx P<hir::Expr> { type Output = ExprRef<'tcx>; fn to_ref(self) -> ExprRef<'tcx> { @@ -34,7 +34,7 @@ impl<'a,'tcx:'a> ToRef for &'tcx P<hir::Expr> { } } -impl<'a,'tcx:'a> ToRef for Expr<'tcx> { +impl<'a, 'tcx: 'a> ToRef for Expr<'tcx> { type Output = ExprRef<'tcx>; fn to_ref(self) -> ExprRef<'tcx> { @@ -42,8 +42,8 @@ impl<'a,'tcx:'a> ToRef for Expr<'tcx> { } } -impl<'a,'tcx:'a,T,U> ToRef for &'tcx Option<T> - where &'tcx T: ToRef<Output=U> +impl<'a, 'tcx: 'a, T, U> ToRef for &'tcx Option<T> + where &'tcx T: ToRef<Output = U> { type Output = Option<U>; @@ -52,8 +52,8 @@ impl<'a,'tcx:'a,T,U> ToRef for &'tcx Option<T> } } -impl<'a,'tcx:'a,T,U> ToRef for &'tcx Vec<T> - where &'tcx T: ToRef<Output=U> +impl<'a, 'tcx: 'a, T, U> ToRef for &'tcx Vec<T> + where &'tcx T: ToRef<Output = U> { type Output = Vec<U>; @@ -62,8 +62,8 @@ impl<'a,'tcx:'a,T,U> ToRef for &'tcx Vec<T> } } -impl<'a,'tcx:'a,T,U> ToRef for &'tcx P<[T]> - where &'tcx T: ToRef<Output=U> +impl<'a, 'tcx: 'a, T, U> ToRef for &'tcx P<[T]> + where &'tcx T: ToRef<Output = U> { type Output = Vec<U>; diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs index 8c8c42a1c76..d16b51adbaf 100644 --- a/src/librustc_mir/transform/copy_prop.rs +++ b/src/librustc_mir/transform/copy_prop.rs @@ -65,11 +65,10 @@ impl<'tcx> MirPass<'tcx> for CopyPropagation { } } - // We only run when the MIR optimization level is at least 1. This avoids messing up debug - // info. - match tcx.sess.opts.debugging_opts.mir_opt_level { - Some(0) | None => return, - _ => {} + // We only run when the MIR optimization level is > 1. 
+ // This avoids a slow pass, and messing up debug info. + if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 { + return; } loop { diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index fcdeae6d6c0..771f05f7bcc 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -23,84 +23,80 @@ impl<'tcx> MirPass<'tcx> for Deaggregator { let node_id = source.item_id(); let node_path = tcx.item_path_str(tcx.map.local_def_id(node_id)); debug!("running on: {:?}", node_path); - // we only run when mir_opt_level > 1 - match tcx.sess.opts.debugging_opts.mir_opt_level { - Some(0) | - Some(1) | - None => { return; }, - _ => {} - }; + // we only run when mir_opt_level > 2 + if tcx.sess.opts.debugging_opts.mir_opt_level <= 2 { + return; + } // Do not trigger on constants. Could be revised in future if let MirSource::Fn(_) = source {} else { return; } // In fact, we might not want to trigger in other cases. // Ex: when we could use SROA. 
See issue #35259 - let mut curr: usize = 0; for bb in mir.basic_blocks_mut() { - let idx = match get_aggregate_statement_index(curr, &bb.statements) { - Some(idx) => idx, - None => continue, - }; - // do the replacement - debug!("removing statement {:?}", idx); - let src_info = bb.statements[idx].source_info; - let suffix_stmts = bb.statements.split_off(idx+1); - let orig_stmt = bb.statements.pop().unwrap(); - let (lhs, rhs) = match orig_stmt.kind { - StatementKind::Assign(ref lhs, ref rhs) => (lhs, rhs), - _ => span_bug!(src_info.span, "expected assign, not {:?}", orig_stmt), - }; - let (agg_kind, operands) = match rhs { - &Rvalue::Aggregate(ref agg_kind, ref operands) => (agg_kind, operands), - _ => span_bug!(src_info.span, "expected aggregate, not {:?}", rhs), - }; - let (adt_def, variant, substs) = match agg_kind { - &AggregateKind::Adt(adt_def, variant, substs, None) => (adt_def, variant, substs), - _ => span_bug!(src_info.span, "expected struct, not {:?}", rhs), - }; - let n = bb.statements.len(); - bb.statements.reserve(n + operands.len() + suffix_stmts.len()); - for (i, op) in operands.iter().enumerate() { - let ref variant_def = adt_def.variants[variant]; - let ty = variant_def.fields[i].ty(tcx, substs); - let rhs = Rvalue::Use(op.clone()); - - let lhs_cast = if adt_def.variants.len() > 1 { - Lvalue::Projection(Box::new(LvalueProjection { - base: lhs.clone(), - elem: ProjectionElem::Downcast(adt_def, variant), - })) - } else { - lhs.clone() + let mut curr: usize = 0; + while let Some(idx) = get_aggregate_statement_index(curr, &bb.statements) { + // do the replacement + debug!("removing statement {:?}", idx); + let src_info = bb.statements[idx].source_info; + let suffix_stmts = bb.statements.split_off(idx+1); + let orig_stmt = bb.statements.pop().unwrap(); + let (lhs, rhs) = match orig_stmt.kind { + StatementKind::Assign(ref lhs, ref rhs) => (lhs, rhs), + _ => span_bug!(src_info.span, "expected assign, not {:?}", orig_stmt), }; - - let lhs_proj = 
Lvalue::Projection(Box::new(LvalueProjection { - base: lhs_cast, - elem: ProjectionElem::Field(Field::new(i), ty), - })); - let new_statement = Statement { - source_info: src_info, - kind: StatementKind::Assign(lhs_proj, rhs), + let (agg_kind, operands) = match rhs { + &Rvalue::Aggregate(ref agg_kind, ref operands) => (agg_kind, operands), + _ => span_bug!(src_info.span, "expected aggregate, not {:?}", rhs), }; - debug!("inserting: {:?} @ {:?}", new_statement, idx + i); - bb.statements.push(new_statement); - } + let (adt_def, variant, substs) = match agg_kind { + &AggregateKind::Adt(adt_def, variant, substs, None) + => (adt_def, variant, substs), + _ => span_bug!(src_info.span, "expected struct, not {:?}", rhs), + }; + let n = bb.statements.len(); + bb.statements.reserve(n + operands.len() + suffix_stmts.len()); + for (i, op) in operands.iter().enumerate() { + let ref variant_def = adt_def.variants[variant]; + let ty = variant_def.fields[i].ty(tcx, substs); + let rhs = Rvalue::Use(op.clone()); + + let lhs_cast = if adt_def.variants.len() > 1 { + Lvalue::Projection(Box::new(LvalueProjection { + base: lhs.clone(), + elem: ProjectionElem::Downcast(adt_def, variant), + })) + } else { + lhs.clone() + }; - // if the aggregate was an enum, we need to set the discriminant - if adt_def.variants.len() > 1 { - let set_discriminant = Statement { - kind: StatementKind::SetDiscriminant { - lvalue: lhs.clone(), - variant_index: variant, - }, - source_info: src_info, + let lhs_proj = Lvalue::Projection(Box::new(LvalueProjection { + base: lhs_cast, + elem: ProjectionElem::Field(Field::new(i), ty), + })); + let new_statement = Statement { + source_info: src_info, + kind: StatementKind::Assign(lhs_proj, rhs), + }; + debug!("inserting: {:?} @ {:?}", new_statement, idx + i); + bb.statements.push(new_statement); + } + + // if the aggregate was an enum, we need to set the discriminant + if adt_def.variants.len() > 1 { + let set_discriminant = Statement { + kind: 
StatementKind::SetDiscriminant { + lvalue: lhs.clone(), + variant_index: variant, + }, + source_info: src_info, + }; + bb.statements.push(set_discriminant); }; - bb.statements.push(set_discriminant); - }; - curr = bb.statements.len(); - bb.statements.extend(suffix_stmts); + curr = bb.statements.len(); + bb.statements.extend(suffix_stmts); + } } } } diff --git a/src/librustc_mir/transform/instcombine.rs b/src/librustc_mir/transform/instcombine.rs index c4a8d34bda0..3f6abb31fe9 100644 --- a/src/librustc_mir/transform/instcombine.rs +++ b/src/librustc_mir/transform/instcombine.rs @@ -38,7 +38,7 @@ impl<'tcx> MirPass<'tcx> for InstCombine { _: MirSource, mir: &mut Mir<'tcx>) { // We only run when optimizing MIR (at any level). - if tcx.sess.opts.debugging_opts.mir_opt_level == Some(0) { + if tcx.sess.opts.debugging_opts.mir_opt_level == 0 { return } diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index ec368c6bc1f..afa78a05a63 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -1326,16 +1326,18 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor<'l> for DumpVisitor<'l, 'tcx, 'll, self.process_macro_use(t.span, t.id); match t.node { ast::TyKind::Path(_, ref path) => { + if self.span.filter_generated(None, t.span) { + return; + } + if let Some(id) = self.lookup_def_id(t.id) { let sub_span = self.span.sub_span_for_type_name(t.span); - if !self.span.filter_generated(sub_span, t.span) { - self.dumper.type_ref(TypeRefData { - span: sub_span.expect("No span found for type ref"), - ref_id: Some(id), - scope: self.cur_scope, - qualname: String::new() - }.lower(self.tcx)); - } + self.dumper.type_ref(TypeRefData { + span: sub_span.expect("No span found for type ref"), + ref_id: Some(id), + scope: self.cur_scope, + qualname: String::new() + }.lower(self.tcx)); } self.write_sub_paths_truncated(path, false); diff --git a/src/librustc_save_analysis/lib.rs 
b/src/librustc_save_analysis/lib.rs index b5cf8141da2..0c910240b60 100644 --- a/src/librustc_save_analysis/lib.rs +++ b/src/librustc_save_analysis/lib.rs @@ -250,8 +250,8 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { match typ.node { // Common case impl for a struct or something basic. ast::TyKind::Path(None, ref path) => { + filter!(self.span_utils, None, path.span, None); sub_span = self.span_utils.sub_span_for_type_name(path.span); - filter!(self.span_utils, sub_span, path.span, None); type_data = self.lookup_ref_id(typ.id).map(|id| { TypeRefData { span: sub_span.unwrap(), diff --git a/src/librustc_trans/back/symbol_export.rs b/src/librustc_trans/back/symbol_export.rs index f99f543d9b7..eef464eb7f4 100644 --- a/src/librustc_trans/back/symbol_export.rs +++ b/src/librustc_trans/back/symbol_export.rs @@ -51,8 +51,10 @@ impl ExportedSymbols { scx.tcx().map.local_def_id(node_id) }) .map(|def_id| { - (symbol_for_def_id(scx, def_id, symbol_map), - export_level(scx, def_id)) + let name = symbol_for_def_id(scx, def_id, symbol_map); + let export_level = export_level(scx, def_id); + debug!("EXPORTED SYMBOL (local): {} ({:?})", name, export_level); + (name, export_level) }) .collect(); @@ -90,9 +92,10 @@ impl ExportedSymbols { .exported_symbols(cnum) .iter() .map(|&def_id| { - debug!("EXTERN-SYMBOL: {:?}", def_id); let name = Instance::mono(scx, def_id).symbol_name(scx); - (name, export_level(scx, def_id)) + let export_level = export_level(scx, def_id); + debug!("EXPORTED SYMBOL (re-export): {} ({:?})", name, export_level); + (name, export_level) }) .collect(); diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 5b79f040d0f..f70c24c3ccb 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -1610,7 +1610,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let symbol_map = Rc::new(symbol_map); - let previous_work_products = trans_reuse_previous_work_products(tcx, + let previous_work_products = 
trans_reuse_previous_work_products(&shared_ccx, &codegen_units, &symbol_map); @@ -1630,7 +1630,9 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ModuleTranslation { name: String::from(ccx.codegen_unit().name()), - symbol_name_hash: ccx.codegen_unit().compute_symbol_name_hash(tcx, &symbol_map), + symbol_name_hash: ccx.codegen_unit() + .compute_symbol_name_hash(&shared_ccx, + &symbol_map), source: source, } }) @@ -1962,7 +1964,7 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { /// For each CGU, identify if we can reuse an existing object file (or /// maybe other context). -fn trans_reuse_previous_work_products(tcx: TyCtxt, +fn trans_reuse_previous_work_products(scx: &SharedCrateContext, codegen_units: &[CodegenUnit], symbol_map: &SymbolMap) -> Vec<Option<WorkProduct>> { @@ -1972,16 +1974,16 @@ fn trans_reuse_previous_work_products(tcx: TyCtxt, .map(|cgu| { let id = cgu.work_product_id(); - let hash = cgu.compute_symbol_name_hash(tcx, symbol_map); + let hash = cgu.compute_symbol_name_hash(scx, symbol_map); debug!("trans_reuse_previous_work_products: id={:?} hash={}", id, hash); - if let Some(work_product) = tcx.dep_graph.previous_work_product(&id) { + if let Some(work_product) = scx.dep_graph().previous_work_product(&id) { if work_product.input_hash == hash { debug!("trans_reuse_previous_work_products: reusing {:?}", work_product); return Some(work_product); } else { - if tcx.sess.opts.debugging_opts.incremental_info { + if scx.sess().opts.debugging_opts.incremental_info { println!("incremental: CGU `{}` invalidated because of \ changed partitioning hash.", cgu.name()); diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index 087fe4decbf..3af3ada66b3 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -706,10 +706,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { fn can_have_local_instance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { - 
// Take a look if we have the definition available. If not, we - // will not emit code for this item in the local crate, and thus - // don't create a translation item for it. - def_id.is_local() || tcx.sess.cstore.is_item_mir_available(def_id) + tcx.sess.cstore.can_have_local_instance(tcx, def_id) } fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 262b8362397..8b98eb57814 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -10,7 +10,8 @@ use llvm; use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef}; -use rustc::dep_graph::{DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct}; +use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, + WorkProduct}; use middle::cstore::LinkMeta; use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; @@ -551,6 +552,10 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { &self.tcx.sess } + pub fn dep_graph<'a>(&'a self) -> &'a DepGraph { + &self.tcx.dep_graph + } + pub fn stats<'a>(&'a self) -> &'a Stats { &self.stats } diff --git a/src/librustc_trans/partitioning.rs b/src/librustc_trans/partitioning.rs index a36960993e4..d93bbec7efa 100644 --- a/src/librustc_trans/partitioning.rs +++ b/src/librustc_trans/partitioning.rs @@ -126,10 +126,10 @@ use rustc::hir::map::DefPathData; use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER; use rustc::ty::TyCtxt; use rustc::ty::item_path::characteristic_def_id_of_type; +use rustc_incremental::IchHasher; use std::cmp::Ordering; -use std::hash::{Hash, Hasher}; +use std::hash::Hash; use std::sync::Arc; -use std::collections::hash_map::DefaultHasher; use symbol_map::SymbolMap; use syntax::ast::NodeId; use syntax::symbol::{Symbol, InternedString}; @@ -188,14 +188,30 @@ impl<'tcx> CodegenUnit<'tcx> { DepNode::WorkProduct(self.work_product_id()) } - pub fn compute_symbol_name_hash(&self, tcx: TyCtxt, symbol_map: &SymbolMap) -> 
u64 { - let mut state = DefaultHasher::new(); - let all_items = self.items_in_deterministic_order(tcx, symbol_map); + pub fn compute_symbol_name_hash(&self, + scx: &SharedCrateContext, + symbol_map: &SymbolMap) -> u64 { + let mut state = IchHasher::new(); + let exported_symbols = scx.exported_symbols(); + let all_items = self.items_in_deterministic_order(scx.tcx(), symbol_map); for (item, _) in all_items { let symbol_name = symbol_map.get(item).unwrap(); + symbol_name.len().hash(&mut state); symbol_name.hash(&mut state); + let exported = match item { + TransItem::Fn(ref instance) => { + let node_id = scx.tcx().map.as_local_node_id(instance.def); + node_id.map(|node_id| exported_symbols.contains(&node_id)) + .unwrap_or(false) + } + TransItem::Static(node_id) => { + exported_symbols.contains(&node_id) + } + TransItem::DropGlue(..) => false, + }; + exported.hash(&mut state); } - state.finish() + state.finish().to_smaller_hash() } pub fn items_in_deterministic_order(&self, diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index 15f383c5787..624201eaab6 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -12,6 +12,7 @@ use rustc::hir::{self, PatKind}; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::pat_util::EnumerateAndAdjustIterator; use rustc::infer; +use rustc::infer::type_variable::TypeVariableOrigin; use rustc::traits::ObligationCauseCode; use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference}; use check::{FnCtxt, Expectation, Diverges}; @@ -162,7 +163,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } let max_len = cmp::max(expected_len, elements.len()); - let element_tys_iter = (0..max_len).map(|_| self.next_ty_var()); + let element_tys_iter = (0..max_len).map(|_| self.next_ty_var( + // FIXME: MiscVariable for now, obtaining the span and name information + // from all tuple elements isn't trivial. 
+ TypeVariableOrigin::TypeInference(pat.span))); let element_tys = tcx.mk_type_list(element_tys_iter); let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys)); self.demand_eqtype(pat.span, expected, pat_ty); @@ -172,7 +176,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pat_ty } PatKind::Box(ref inner) => { - let inner_ty = self.next_ty_var(); + let inner_ty = self.next_ty_var(TypeVariableOrigin::TypeInference(inner.span)); let uniq_ty = tcx.mk_box(inner_ty); if self.check_dereferencable(pat.span, expected, &inner) { @@ -203,7 +207,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (expected, mt.ty) } _ => { - let inner_ty = self.next_ty_var(); + let inner_ty = self.next_ty_var( + TypeVariableOrigin::TypeInference(inner.span)); let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl }; let region = self.next_region_var(infer::PatternRegion(pat.span)); let rptr_ty = tcx.mk_ref(region, mt); @@ -379,7 +384,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // ...but otherwise we want to use any supertype of the // discriminant. This is sort of a workaround, see note (*) in // `check_pat` for some details. 
- discrim_ty = self.next_ty_var(); + discrim_ty = self.next_ty_var(TypeVariableOrigin::TypeInference(discrim.span)); self.check_expr_has_type(discrim, discrim_ty); }; let discrim_diverges = self.diverges.get(); @@ -407,7 +412,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // of execution reach it, we will panic, so bottom is an appropriate // type in that case) let expected = expected.adjust_for_branches(self); - let mut result_ty = self.next_diverging_ty_var(); + let mut result_ty = self.next_diverging_ty_var( + TypeVariableOrigin::DivergingBlockExpr(expr.span)); let mut all_arms_diverge = Diverges::WarnedAlways; let coerce_first = match expected { // We don't coerce to `()` so that if the match expression is a diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index 142a8b97111..1d81ed7d359 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -13,6 +13,7 @@ use super::{check_fn, Expectation, FnCtxt}; use astconv::AstConv; +use rustc::infer::type_variable::TypeVariableOrigin; use rustc::ty::{self, ToPolyTraitRef, Ty}; use std::cmp; use std::iter; @@ -66,7 +67,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let closure_type = self.tcx.mk_closure(expr_def_id, self.parameter_environment.free_substs.extend_to(self.tcx, expr_def_id, |_, _| span_bug!(expr.span, "closure has region param"), - |_, _| self.infcx.next_ty_var() + |_, _| self.infcx.next_ty_var(TypeVariableOrigin::TransformedUpvar(expr.span)) ) ); diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index e85dac1a44c..478de167317 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -376,7 +376,7 @@ fn compare_predicate_entailment<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, &infcx.parameter_environment.caller_bounds); infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id); } else { - let fcx = FnCtxt::new(&inh, 
tcx.types.err, impl_m_body_id); + let fcx = FnCtxt::new(&inh, Some(tcx.types.err), impl_m_body_id); fcx.regionck_item(impl_m_body_id, impl_m_span, &[]); } diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index b0787d75c9c..5cb0804b1bc 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -20,6 +20,7 @@ use rustc::infer::InferOk; use rustc::ty::subst::{Subst, Substs}; use rustc::traits::{self, ObligationCause}; use rustc::ty::{self, Ty, ToPolyTraitRef, TraitRef, TypeFoldable}; +use rustc::infer::type_variable::TypeVariableOrigin; use rustc::util::nodemap::FxHashSet; use syntax::ast; use syntax_pos::Span; @@ -1225,7 +1226,9 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let substs = Substs::for_item(self.tcx, impl_def_id, |_, _| self.tcx.mk_region(ty::ReErased), - |_, _| self.next_ty_var()); + |_, _| self.next_ty_var( + TypeVariableOrigin::SubstitutionPlaceholder( + self.tcx.def_span(impl_def_id)))); (impl_ty, substs) } diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 7cfefefc0d9..86bfede87b3 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -28,6 +28,7 @@ use syntax_pos::Span; use rustc::hir::print as pprust; use rustc::hir; +use rustc::infer::type_variable::TypeVariableOrigin; use std::cell; use std::cmp::Ordering; @@ -53,7 +54,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.autoderef(span, ty).any(|(ty, _)| { self.probe(|_| { - let fn_once_substs = tcx.mk_substs_trait(ty, &[self.next_ty_var()]); + let fn_once_substs = tcx.mk_substs_trait(ty, + &[self.next_ty_var(TypeVariableOrigin::MiscVariable(span))]); let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs); let poly_trait_ref = trait_ref.to_poly_trait_ref(); let obligation = diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 
1b35081d524..58dff935a16 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -85,8 +85,8 @@ use dep_graph::DepNode; use fmt_macros::{Parser, Piece, Position}; use hir::def::{Def, CtorKind}; use hir::def_id::{DefId, LOCAL_CRATE}; -use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin, - TypeTrace, type_variable}; +use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin, TypeTrace}; +use rustc::infer::type_variable::{self, TypeVariableOrigin}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; use rustc::ty::{ParamTy, ParameterEnvironment}; @@ -117,7 +117,7 @@ use syntax::feature_gate::{GateIssue, emit_feature_err}; use syntax::ptr::P; use syntax::symbol::{Symbol, InternedString, keywords}; use syntax::util::lev_distance::find_best_match_for_name; -use syntax_pos::{self, BytePos, Span}; +use syntax_pos::{self, BytePos, Span, DUMMY_SP}; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir::itemlikevisit::ItemLikeVisitor; @@ -451,7 +451,7 @@ pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // expects the types within the function to be consistent. err_count_on_creation: usize, - ret_ty: Ty<'tcx>, + ret_ty: Option<Ty<'tcx>>, ps: RefCell<UnsafetyState>, @@ -683,11 +683,11 @@ struct GatherLocalsVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> GatherLocalsVisitor<'a, 'gcx, 'tcx> { - fn assign(&mut self, _span: Span, nid: ast::NodeId, ty_opt: Option<Ty<'tcx>>) -> Ty<'tcx> { + fn assign(&mut self, span: Span, nid: ast::NodeId, ty_opt: Option<Ty<'tcx>>) -> Ty<'tcx> { match ty_opt { None => { // infer the variable's type - let var_ty = self.fcx.next_ty_var(); + let var_ty = self.fcx.next_ty_var(TypeVariableOrigin::TypeInference(span)); self.fcx.locals.borrow_mut().insert(nid, var_ty); var_ty } @@ -785,12 +785,14 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, // Create the function context. 
This is either derived from scratch or, // in the case of function expressions, based on the outer context. - let mut fcx = FnCtxt::new(inherited, fn_sig.output(), body.id); + let mut fcx = FnCtxt::new(inherited, None, body.id); + let ret_ty = fn_sig.output(); *fcx.ps.borrow_mut() = UnsafetyState::function(unsafety, unsafety_id); - fcx.require_type_is_sized(fcx.ret_ty, decl.output.span(), traits::ReturnType); - fcx.ret_ty = fcx.instantiate_anon_types(&fcx.ret_ty); - fn_sig = fcx.tcx.mk_fn_sig(fn_sig.inputs().iter().cloned(), &fcx.ret_ty, fn_sig.variadic); + fcx.require_type_is_sized(ret_ty, decl.output.span(), traits::ReturnType); + fcx.ret_ty = fcx.instantiate_anon_types(&Some(ret_ty)); + fn_sig = fcx.tcx.mk_fn_sig(fn_sig.inputs().iter().cloned(), &fcx.ret_ty.unwrap(), + fn_sig.variadic); { let mut visit = GatherLocalsVisitor { fcx: &fcx, }; @@ -821,7 +823,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, inherited.tables.borrow_mut().liberated_fn_sigs.insert(fn_id, fn_sig); - fcx.check_expr_coercable_to_type(body, fcx.ret_ty); + fcx.check_expr_coercable_to_type(body, fcx.ret_ty.unwrap()); fcx } @@ -1245,7 +1247,7 @@ fn check_const_with_type<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>, expected_type: Ty<'tcx>, id: ast::NodeId) { ccx.inherited(id).enter(|inh| { - let fcx = FnCtxt::new(&inh, expected_type, expr.id); + let fcx = FnCtxt::new(&inh, None, expr.id); fcx.require_type_is_sized(expected_type, expr.span, traits::ConstSized); // Gather locals in statics (because of block expressions). 
@@ -1442,8 +1444,8 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { Ok(r) } - fn ty_infer(&self, _span: Span) -> Ty<'tcx> { - self.next_ty_var() + fn ty_infer(&self, span: Span) -> Ty<'tcx> { + self.next_ty_var(TypeVariableOrigin::TypeInference(span)) } fn ty_infer_for_def(&self, @@ -1530,7 +1532,7 @@ enum TupleArgumentsFlag { impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn new(inh: &'a Inherited<'a, 'gcx, 'tcx>, - rty: Ty<'tcx>, + rty: Option<Ty<'tcx>>, body_id: ast::NodeId) -> FnCtxt<'a, 'gcx, 'tcx> { FnCtxt { @@ -1749,13 +1751,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(ty_var) = self.anon_types.borrow().get(&def_id) { return ty_var; } - let ty_var = self.next_ty_var(); + let span = self.tcx.def_span(def_id); + let ty_var = self.next_ty_var(TypeVariableOrigin::TypeInference(span)); self.anon_types.borrow_mut().insert(def_id, ty_var); let item_predicates = self.tcx.item_predicates(def_id); let bounds = item_predicates.instantiate(self.tcx, substs); - let span = self.tcx.def_span(def_id); for predicate in bounds.predicates { // Change the predicate to refer to the type variable, // which will be the concrete type, instead of the TyAnon. @@ -2202,7 +2204,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let conflicting_default = self.find_conflicting_default(&unbound_tyvars, &default_map, conflict) .unwrap_or(type_variable::Default { - ty: self.next_ty_var(), + ty: self.next_ty_var( + TypeVariableOrigin::MiscVariable(syntax_pos::DUMMY_SP)), origin_span: syntax_pos::DUMMY_SP, // what do I put here? def_id: self.tcx.map.local_def_id(ast::CRATE_NODE_ID) @@ -2396,7 +2399,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { unsize, index_ty); - let input_ty = self.next_ty_var(); + let input_ty = self.next_ty_var(TypeVariableOrigin::AutoDeref(base_expr.span)); // First, try built-in indexing. 
match (adjusted_ty.builtin_index(), &index_ty.sty) { @@ -3484,8 +3487,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Add adjustments to !-expressions if ty.is_never() { - if let Some(hir::map::NodeExpr(_)) = self.tcx.map.find(expr.id) { - let adj_ty = self.next_diverging_ty_var(); + if let Some(hir::map::NodeExpr(node_expr)) = self.tcx.map.find(expr.id) { + let adj_ty = self.next_diverging_ty_var( + TypeVariableOrigin::AdjustmentType(node_expr.span)); self.write_adjustment(expr.id, adjustment::Adjustment { kind: adjustment::Adjust::NeverToAny, target: adj_ty @@ -3705,14 +3709,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } hir::ExprAgain(_) => { tcx.types.never } hir::ExprRet(ref expr_opt) => { - if let Some(ref e) = *expr_opt { - self.check_expr_coercable_to_type(&e, self.ret_ty); + if self.ret_ty.is_none() { + struct_span_err!(self.tcx.sess, expr.span, E0572, + "return statement outside of function body").emit(); + } else if let Some(ref e) = *expr_opt { + self.check_expr_coercable_to_type(&e, self.ret_ty.unwrap()); } else { match self.eq_types(false, &self.misc(expr.span), - self.ret_ty, - tcx.mk_nil()) - { + self.ret_ty.unwrap(), + tcx.mk_nil()) { Ok(ok) => self.register_infer_ok_obligations(ok), Err(_) => { struct_span_err!(tcx.sess, expr.span, E0069, @@ -3777,7 +3783,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } hir::ExprLoop(ref body, _, _) => { - let unified = self.next_ty_var(); + let unified = self.next_ty_var(TypeVariableOrigin::TypeInference(body.span)); let coerce_to = expected.only_has_type(self).unwrap_or(unified); let ctxt = LoopCtxt { unified: unified, @@ -3856,7 +3862,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }); - let mut unified = self.next_ty_var(); + let mut unified = self.next_ty_var(TypeVariableOrigin::TypeInference(expr.span)); let coerce_to = uty.unwrap_or(unified); for (i, e) in args.iter().enumerate() { @@ -3901,7 +3907,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (uty, uty) } None => { - let t: Ty 
= self.next_ty_var(); + let t: Ty = self.next_ty_var(TypeVariableOrigin::MiscVariable(element.span)); let element_ty = self.check_expr_has_type(&element, t); (element_ty, t) } @@ -4150,31 +4156,35 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.diverges.set(Diverges::Maybe); self.has_errors.set(false); - let node_id = match stmt.node { + let (node_id, span) = match stmt.node { hir::StmtDecl(ref decl, id) => { - match decl.node { + let span = match decl.node { hir::DeclLocal(ref l) => { self.check_decl_local(&l); + l.span } - hir::DeclItem(_) => {/* ignore for now */ } - } - id + hir::DeclItem(_) => {/* ignore for now */ + DUMMY_SP + } + }; + (id, span) } hir::StmtExpr(ref expr, id) => { // Check with expected type of () self.check_expr_has_type(&expr, self.tcx.mk_nil()); - id + (id, expr.span) } hir::StmtSemi(ref expr, id) => { self.check_expr(&expr); - id + (id, expr.span) } }; if self.has_errors.get() { self.write_error(node_id); } else if self.diverges.get().always() { - self.write_ty(node_id, self.next_diverging_ty_var()); + self.write_ty(node_id, self.next_diverging_ty_var( + TypeVariableOrigin::DivergingStmt(span))); } else { self.write_nil(node_id); } @@ -4220,7 +4230,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - ty = self.next_diverging_ty_var(); + ty = self.next_diverging_ty_var(TypeVariableOrigin::DivergingBlockExpr(blk.span)); } else if let ExpectHasType(ety) = expected { if let Some(ref e) = blk.expr { // Coerce the tail expression to the right type. 
diff --git a/src/librustc_typeck/check/op.rs b/src/librustc_typeck/check/op.rs index adb8c6be42b..d1a9b8ef85a 100644 --- a/src/librustc_typeck/check/op.rs +++ b/src/librustc_typeck/check/op.rs @@ -13,6 +13,7 @@ use super::FnCtxt; use hir::def_id::DefId; use rustc::ty::{Ty, TypeFoldable, PreferMutLvalue}; +use rustc::infer::type_variable::TypeVariableOrigin; use syntax::ast; use syntax::symbol::Symbol; use rustc::hir; @@ -179,7 +180,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // using this variable as the expected type, which sometimes lets // us do better coercions than we would be able to do otherwise, // particularly for things like `String + &String`. - let rhs_ty_var = self.next_ty_var(); + let rhs_ty_var = self.next_ty_var(TypeVariableOrigin::MiscVariable(rhs_expr.span)); let return_ty = match self.lookup_op_method(expr, lhs_ty, vec![rhs_ty_var], Symbol::intern(name), trait_def_id, diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 7f35c8efeff..ffdb56753fd 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -51,7 +51,7 @@ impl<'a, 'gcx, 'tcx> CheckWfFcxBuilder<'a, 'gcx, 'tcx> { let id = self.id; let span = self.span; self.inherited.enter(|inh| { - let fcx = FnCtxt::new(&inh, inh.ccx.tcx.types.never, id); + let fcx = FnCtxt::new(&inh, Some(inh.ccx.tcx.types.never), id); let wf_tys = f(&fcx, &mut CheckTypeWellFormedVisitor { ccx: fcx.ccx, code: code diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 01e99a296e8..71507063ffc 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -4164,6 +4164,33 @@ target / ABI combination is currently unsupported by llvm. If necessary, you can circumvent this check using custom target specifications. "##, +E0572: r##" +A return statement was found outside of a function body. 
+ +Erroneous code example: + +```compile_fail,E0572 +const FOO: u32 = return 0; // error: return statement outside of function body + +fn main() {} +``` + +To fix this issue, just remove the return keyword or move the expression into a +function. Example: + +``` +const FOO: u32 = 0; + +fn some_fn() -> u32 { + return FOO; +} + +fn main() { + some_fn(); +} +``` +"##, + } register_diagnostics! { diff --git a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 7ee184c089c..15912b41d59 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -52,13 +52,15 @@ font-family: 'Source Code Pro'; font-style: normal; font-weight: 400; - src: local('Source Code Pro'), url("SourceCodePro-Regular.woff") format('woff'); + /* Avoid using locally installed font because bad versions are in circulation: + * see https://github.com/rust-lang/rust/issues/24355 */ + src: url("SourceCodePro-Regular.woff") format('woff'); } @font-face { font-family: 'Source Code Pro'; font-style: normal; font-weight: 600; - src: local('Source Code Pro Semibold'), url("SourceCodePro-Semibold.woff") format('woff'); + src: url("SourceCodePro-Semibold.woff") format('woff'); } * { diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs index afa5d66b113..74c7bc10194 100644 --- a/src/librustdoc/lib.rs +++ b/src/librustdoc/lib.rs @@ -45,7 +45,7 @@ extern crate serialize; #[macro_use] extern crate syntax; extern crate syntax_pos; extern crate test as testing; -extern crate rustc_unicode; +extern crate std_unicode; #[macro_use] extern crate log; extern crate rustc_errors as errors; diff --git a/src/librustdoc/test.rs b/src/librustdoc/test.rs index 009330065f3..b96a737ed00 100644 --- a/src/librustdoc/test.rs +++ b/src/librustdoc/test.rs @@ -345,7 +345,7 @@ pub fn maketest(s: &str, cratename: Option<&str>, dont_insert_main: bool, } fn partition_source(s: &str) -> (String, String) { - use rustc_unicode::str::UnicodeStr; + use 
std_unicode::str::UnicodeStr; let mut after_header = false; let mut before = String::new(); @@ -537,4 +537,8 @@ impl<'a, 'hir> intravisit::Visitor<'hir> for HirCollector<'a, 'hir> { intravisit::walk_struct_field(this, f); }); } + + fn visit_macro_def(&mut self, macro_def: &'hir hir::MacroDef) { + self.visit_testable(macro_def.name.to_string(), ¯o_def.attrs, |_| ()); + } } diff --git a/src/libserialize/lib.rs b/src/libserialize/lib.rs index d432ed42066..ad2304e1556 100644 --- a/src/libserialize/lib.rs +++ b/src/libserialize/lib.rs @@ -41,7 +41,7 @@ Core encoding and decoding interfaces. #[cfg(test)] extern crate test; #[macro_use] extern crate log; -extern crate rustc_unicode; +extern crate std_unicode; extern crate collections; pub use self::serialize::{Decoder, Encoder, Decodable, Encodable}; diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml index b9f52e20fdd..fcf84cb7169 100644 --- a/src/libstd/Cargo.toml +++ b/src/libstd/Cargo.toml @@ -20,7 +20,7 @@ core = { path = "../libcore" } libc = { path = "../rustc/libc_shim" } rand = { path = "../librand" } compiler_builtins = { path = "../libcompiler_builtins" } -rustc_unicode = { path = "../librustc_unicode" } +std_unicode = { path = "../libstd_unicode" } unwind = { path = "../libunwind" } [build-dependencies] diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs index ad9ae5638b6..b3b89213df1 100644 --- a/src/libstd/io/mod.rs +++ b/src/libstd/io/mod.rs @@ -256,7 +256,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use cmp; -use rustc_unicode::str as core_str; +use std_unicode::str as core_str; use error as std_error; use fmt; use result; diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 1f40d3fd1d3..414f25fa5eb 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -323,7 +323,7 @@ extern crate collections as core_collections; #[allow(deprecated)] extern crate rand as core_rand; extern crate alloc; -extern crate rustc_unicode; +extern crate std_unicode; extern crate libc; // We always need an 
unwinder currently for backtraces @@ -420,7 +420,7 @@ pub use core_collections::string; #[stable(feature = "rust1", since = "1.0.0")] pub use core_collections::vec; #[stable(feature = "rust1", since = "1.0.0")] -pub use rustc_unicode::char; +pub use std_unicode::char; pub mod f32; pub mod f64; diff --git a/src/libstd/net/udp.rs b/src/libstd/net/udp.rs index b280f466dd4..f8a5ec0b379 100644 --- a/src/libstd/net/udp.rs +++ b/src/libstd/net/udp.rs @@ -499,6 +499,19 @@ impl UdpSocket { /// This will retrieve the stored error in the underlying socket, clearing /// the field in the process. This can be useful for checking errors between /// calls. + /// + /// # Examples + /// + /// ```no_run + /// use std::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address"); + /// match socket.take_error() { + /// Ok(Some(error)) => println!("UdpSocket error: {:?}", error), + /// Ok(None) => println!("No error"), + /// Err(error) => println!("UdpSocket.take_error failed: {:?}", error), + /// } + /// ``` #[stable(feature = "net2_mutators", since = "1.9.0")] pub fn take_error(&self) -> io::Result<Option<io::Error>> { self.0.take_error() @@ -507,6 +520,15 @@ impl UdpSocket { /// Connects this UDP socket to a remote address, allowing the `send` and /// `recv` syscalls to be used to send data and also applies filters to only /// receive data from the specified address. + /// + /// # Examples + /// + /// ```no_run + /// use std::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address"); + /// socket.connect("127.0.0.1:8080").expect("connect function failed"); + /// ``` #[stable(feature = "net2_mutators", since = "1.9.0")] pub fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> { super::each_addr(addr, |addr| self.0.connect(addr)) @@ -514,8 +536,20 @@ impl UdpSocket { /// Sends data on the socket to the remote address to which it is connected. 
/// - /// The `connect` method will connect this socket to a remote address. This + /// The [`connect()`] method will connect this socket to a remote address. This /// method will fail if the socket is not connected. + /// + /// [`connect()`]: #method.connect + /// + /// # Examples + /// + /// ```no_run + /// use std::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address"); + /// socket.connect("127.0.0.1:8080").expect("connect function failed"); + /// socket.send(&[0, 1, 2]).expect("couldn't send message"); + /// ``` #[stable(feature = "net2_mutators", since = "1.9.0")] pub fn send(&self, buf: &[u8]) -> io::Result<usize> { self.0.send(buf) @@ -526,6 +560,20 @@ impl UdpSocket { /// /// The `connect` method will connect this socket to a remote address. This /// method will fail if the socket is not connected. + /// + /// # Examples + /// + /// ```no_run + /// use std::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address"); + /// socket.connect("127.0.0.1:8080").expect("connect function failed"); + /// let mut buf = [0; 10]; + /// match socket.recv(&mut buf) { + /// Ok(received) => println!("received {} bytes", received), + /// Err(e) => println!("recv function failed: {:?}", e), + /// } + /// ``` #[stable(feature = "net2_mutators", since = "1.9.0")] pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { self.0.recv(buf) @@ -535,6 +583,15 @@ impl UdpSocket { /// /// On Unix this corresponds to calling fcntl, and on Windows this /// corresponds to calling ioctlsocket. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address"); + /// socket.set_nonblocking(true).expect("set_nonblocking call failed"); + /// ``` #[stable(feature = "net2_mutators", since = "1.9.0")] pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { self.0.set_nonblocking(nonblocking) diff --git a/src/libstd/sync/mpsc/mod.rs b/src/libstd/sync/mpsc/mod.rs index ca6e46eb15a..9f51d3e87f3 100644 --- a/src/libstd/sync/mpsc/mod.rs +++ b/src/libstd/sync/mpsc/mod.rs @@ -454,10 +454,16 @@ impl<T> UnsafeFlavor<T> for Receiver<T> { } /// Creates a new asynchronous channel, returning the sender/receiver halves. -/// /// All data sent on the sender will become available on the receiver, and no /// send will block the calling thread (this channel has an "infinite buffer"). /// +/// If the [`Receiver`] is disconnected while trying to [`send()`] with the +/// [`Sender`], the [`send()`] method will return an error. +/// +/// [`send()`]: ../../../std/sync/mpsc/struct.Sender.html#method.send +/// [`Sender`]: ../../../std/sync/mpsc/struct.Sender.html +/// [`Receiver`]: ../../../std/sync/mpsc/struct.Receiver.html +/// /// # Examples /// /// ``` @@ -487,18 +493,23 @@ pub fn channel<T>() -> (Sender<T>, Receiver<T>) { /// Creates a new synchronous, bounded channel. /// -/// Like asynchronous channels, the `Receiver` will block until a message +/// Like asynchronous channels, the [`Receiver`] will block until a message /// becomes available. These channels differ greatly in the semantics of the /// sender from asynchronous channels, however. /// -/// This channel has an internal buffer on which messages will be queued. `bound` -/// specifies the buffer size. When the internal buffer becomes full, future sends -/// will *block* waiting for the buffer to open up. 
Note that a buffer size of 0 -/// is valid, in which case this becomes "rendezvous channel" where each send will -/// not return until a recv is paired with it. +/// This channel has an internal buffer on which messages will be queued. +/// `bound` specifies the buffer size. When the internal buffer becomes full, +/// future sends will *block* waiting for the buffer to open up. Note that a +/// buffer size of 0 is valid, in which case this becomes "rendezvous channel" +/// where each [`send()`] will not return until a recv is paired with it. +/// +/// Like asynchronous channels, if the [`Receiver`] is disconnected while +/// trying to [`send()`] with the [`SyncSender`], the [`send()`] method will +/// return an error. /// -/// As with asynchronous channels, all senders will panic in `send` if the -/// `Receiver` has been destroyed. +/// [`send()`]: ../../../std/sync/mpsc/struct.SyncSender.html#method.send +/// [`SyncSender`]: ../../../std/sync/mpsc/struct.SyncSender.html +/// [`Receiver`]: ../../../std/sync/mpsc/struct.Receiver.html /// /// # Examples /// diff --git a/src/libstd/sys/unix/args.rs b/src/libstd/sys/unix/args.rs index c04fd863674..0f447ff4ec4 100644 --- a/src/libstd/sys/unix/args.rs +++ b/src/libstd/sys/unix/args.rs @@ -172,10 +172,23 @@ mod imp { extern { fn sel_registerName(name: *const libc::c_uchar) -> Sel; - fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId; fn objc_getClass(class_name: *const libc::c_uchar) -> NsId; } + #[cfg(target_arch="aarch64")] + extern { + fn objc_msgSend(obj: NsId, sel: Sel) -> NsId; + #[link_name="objc_msgSend"] + fn objc_msgSend_ul(obj: NsId, sel: Sel, i: libc::c_ulong) -> NsId; + } + + #[cfg(not(target_arch="aarch64"))] + extern { + fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId; + #[link_name="objc_msgSend"] + fn objc_msgSend_ul(obj: NsId, sel: Sel, ...) 
-> NsId; + } + #[link(name = "Foundation", kind = "framework")] #[link(name = "objc")] #[cfg(not(cargobuild))] @@ -199,7 +212,7 @@ mod imp { let cnt: usize = mem::transmute(objc_msgSend(args, count_sel)); for i in 0..cnt { - let tmp = objc_msgSend(args, object_at_sel, i); + let tmp = objc_msgSend_ul(args, object_at_sel, i as libc::c_ulong); let utf_c_str: *const libc::c_char = mem::transmute(objc_msgSend(tmp, utf8_sel)); let bytes = CStr::from_ptr(utf_c_str).to_bytes(); diff --git a/src/libstd/sys/unix/os.rs b/src/libstd/sys/unix/os.rs index e591f25cac1..6992a17832e 100644 --- a/src/libstd/sys/unix/os.rs +++ b/src/libstd/sys/unix/os.rs @@ -78,7 +78,7 @@ pub fn errno() -> i32 { static errno: c_int; } - errno as i32 + unsafe { errno as i32 } } /// Gets a detailed string description for the given error number. @@ -193,7 +193,7 @@ impl StdError for JoinPathsError { fn description(&self) -> &str { "failed to join paths" } } -#[cfg(target_os = "freebsd")] +#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] pub fn current_exe() -> io::Result<PathBuf> { unsafe { let mut mib = [libc::CTL_KERN as c_int, @@ -218,11 +218,6 @@ pub fn current_exe() -> io::Result<PathBuf> { } } -#[cfg(target_os = "dragonfly")] -pub fn current_exe() -> io::Result<PathBuf> { - ::fs::read_link("/proc/curproc/file") -} - #[cfg(target_os = "netbsd")] pub fn current_exe() -> io::Result<PathBuf> { ::fs::read_link("/proc/curproc/exe") diff --git a/src/librustc_unicode/Cargo.toml b/src/libstd_unicode/Cargo.toml index e2b4afb2a51..28fbd3c1aa9 100644 --- a/src/librustc_unicode/Cargo.toml +++ b/src/libstd_unicode/Cargo.toml @@ -1,10 +1,10 @@ [package] authors = ["The Rust Project Developers"] -name = "rustc_unicode" +name = "std_unicode" version = "0.0.0" [lib] -name = "rustc_unicode" +name = "std_unicode" path = "lib.rs" test = false bench = false diff --git a/src/librustc_unicode/char.rs b/src/libstd_unicode/char.rs index 94599216db6..94599216db6 100644 --- a/src/librustc_unicode/char.rs +++ 
b/src/libstd_unicode/char.rs diff --git a/src/librustc_unicode/lib.rs b/src/libstd_unicode/lib.rs index 65bd717e01a..b086658ee0d 100644 --- a/src/librustc_unicode/lib.rs +++ b/src/libstd_unicode/lib.rs @@ -20,7 +20,7 @@ //! provide for basic string-related manipulations. This crate does not //! (yet) aim to provide a full set of Unicode tables. -#![crate_name = "rustc_unicode"] +#![crate_name = "std_unicode"] #![unstable(feature = "unicode", issue = "27783")] #![crate_type = "rlib"] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", diff --git a/src/librustc_unicode/tables.rs b/src/libstd_unicode/tables.rs index 21543e2ad07..21543e2ad07 100644 --- a/src/librustc_unicode/tables.rs +++ b/src/libstd_unicode/tables.rs diff --git a/src/librustc_unicode/u_str.rs b/src/libstd_unicode/u_str.rs index 1c7894794c9..1c7894794c9 100644 --- a/src/librustc_unicode/u_str.rs +++ b/src/libstd_unicode/u_str.rs diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index 3e8dfda4a9a..b3b0ee6093d 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -41,7 +41,7 @@ extern crate term; extern crate libc; #[macro_use] extern crate log; #[macro_use] #[no_link] extern crate rustc_bitflags; -extern crate rustc_unicode; +extern crate std_unicode; pub extern crate rustc_errors as errors; extern crate syntax_pos; extern crate rustc_data_structures; diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 681dec0ab56..818742e4492 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -16,7 +16,7 @@ use ext::tt::transcribe::tt_next_token; use parse::token; use str::char_at; use symbol::{Symbol, keywords}; -use rustc_unicode::property::Pattern_White_Space; +use std_unicode::property::Pattern_White_Space; use std::borrow::Cow; use std::char; diff --git a/src/llvm b/src/llvm -Subproject 3ec14daffb4b8c0604df50b7fb0ab552f456e38 +Subproject d7342a9a957470bb62c890cf88fc655ccfb755c diff --git 
a/src/rustllvm/ArchiveWrapper.cpp b/src/rustllvm/ArchiveWrapper.cpp index 12cd81ec700..c7f426fbfa3 100644 --- a/src/rustllvm/ArchiveWrapper.cpp +++ b/src/rustllvm/ArchiveWrapper.cpp @@ -37,6 +37,8 @@ struct RustArchiveIterator { Archive::child_iterator end; #if LLVM_VERSION_GE(3, 9) Error err; + + RustArchiveIterator() : err(Error::success()) { } #endif }; @@ -163,9 +165,20 @@ LLVMRustArchiveIteratorFree(LLVMRustArchiveIteratorRef rai) { extern "C" const char* LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef child, size_t *size) { +#if LLVM_VERSION_GE(4, 0) + Expected<StringRef> name_or_err = child->getName(); + if (!name_or_err) { + // rustc_llvm currently doesn't use this error string, but it might be useful + // in the future, and in the mean time this tells LLVM that the error was + // not ignored and that it shouldn't abort the process. + LLVMRustSetLastError(toString(name_or_err.takeError()).c_str()); + return NULL; + } +#else ErrorOr<StringRef> name_or_err = child->getName(); if (name_or_err.getError()) return NULL; +#endif StringRef name = name_or_err.get(); *size = name.size(); return name.data(); @@ -174,11 +187,19 @@ LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef child, size_t *size) { extern "C" const char* LLVMRustArchiveChildData(LLVMRustArchiveChildRef child, size_t *size) { StringRef buf; +#if LLVM_VERSION_GE(4, 0) + Expected<StringRef> buf_or_err = child->getBuffer(); + if (!buf_or_err) { + LLVMRustSetLastError(toString(buf_or_err.takeError()).c_str()); + return NULL; + } +#else ErrorOr<StringRef> buf_or_err = child->getBuffer(); if (buf_or_err.getError()) { LLVMRustSetLastError(buf_or_err.getError().message().c_str()); return NULL; } +#endif buf = buf_or_err.get(); *size = buf.size(); return buf.data(); diff --git a/src/rustllvm/PassWrapper.cpp b/src/rustllvm/PassWrapper.cpp index d1eb261abd3..c45d1c2d088 100644 --- a/src/rustllvm/PassWrapper.cpp +++ b/src/rustllvm/PassWrapper.cpp @@ -533,8 +533,11 @@ LLVMRustPrintPasses() { StringRef 
PassArg = info->getPassArgument(); StringRef PassName = info->getPassName(); if (!PassArg.empty()) { - printf("%15.*s - %.*s\n", PassArg.size(), PassArg.data(), - PassName.size(), PassName.data()); + // These unsigned->signed casts could theoretically overflow, but + // realistically never will (and even if they did, the result is implementation + // defined rather than plain UB). + printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(), + (int)PassName.size(), PassName.data()); } #else if (info->getPassArgument() && *info->getPassArgument()) { diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index 6a95b65d5e9..ae2ab932a61 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -892,19 +892,34 @@ extern "C" void LLVMRustWriteValueToString(LLVMValueRef Value, RustStringRef str extern "C" bool LLVMRustLinkInExternalBitcode(LLVMModuleRef dst, char *bc, size_t len) { Module *Dst = unwrap(dst); + std::unique_ptr<MemoryBuffer> buf = MemoryBuffer::getMemBufferCopy(StringRef(bc, len)); + +#if LLVM_VERSION_GE(4, 0) + Expected<std::unique_ptr<Module>> SrcOrError = + llvm::getLazyBitcodeModule(buf->getMemBufferRef(), Dst->getContext()); + if (!SrcOrError) { + LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str()); + return false; + } + + auto Src = std::move(*SrcOrError); +#else ErrorOr<std::unique_ptr<Module>> Src = llvm::getLazyBitcodeModule(std::move(buf), Dst->getContext()); if (!Src) { LLVMRustSetLastError(Src.getError().message().c_str()); return false; } +#endif std::string Err; raw_string_ostream Stream(Err); DiagnosticPrinterRawOStream DP(Stream); -#if LLVM_VERSION_GE(3, 8) +#if LLVM_VERSION_GE(4, 0) + if (Linker::linkModules(*Dst, std::move(Src))) { +#elif LLVM_VERSION_GE(3, 8) if (Linker::linkModules(*Dst, std::move(Src.get()))) { #else if (Linker::LinkModules(Dst, Src->get(), [&](const DiagnosticInfo &DI) { DI.print(DP); })) { diff --git a/src/rustllvm/llvm-auto-clean-trigger b/src/rustllvm/llvm-auto-clean-trigger
index 2d832fcdf2a..88dd04b6172 100644 --- a/src/rustllvm/llvm-auto-clean-trigger +++ b/src/rustllvm/llvm-auto-clean-trigger @@ -1,4 +1,4 @@ # If this file is modified, then llvm will be forcibly cleaned and then rebuilt. # The actual contents of this file do not matter, but to trigger a change on the # build bots then the contents should be changed so git updates the mtime. -2016-12-06 +2016-12-08 diff --git a/src/rustllvm/rustllvm.h b/src/rustllvm/rustllvm.h index 346153d578c..b8c4076f4ce 100644 --- a/src/rustllvm/rustllvm.h +++ b/src/rustllvm/rustllvm.h @@ -39,7 +39,6 @@ #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Vectorize.h" -#include "llvm/Bitcode/ReaderWriter.h" #include "llvm-c/Core.h" #include "llvm-c/BitReader.h" #include "llvm-c/ExecutionEngine.h" @@ -60,6 +59,13 @@ #include "llvm/PassManager.h" #endif +#if LLVM_VERSION_GE(4, 0) +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/Bitcode/BitcodeWriter.h" +#else +#include "llvm/Bitcode/ReaderWriter.h" +#endif + #include "llvm/IR/IRPrintingPasses.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DIBuilder.h" diff --git a/src/test/compile-fail/E0572.rs b/src/test/compile-fail/E0572.rs new file mode 100644 index 00000000000..bbaab102de7 --- /dev/null +++ b/src/test/compile-fail/E0572.rs @@ -0,0 +1,13 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +const FOO: u32 = return 0; //~ ERROR E0572 + +fn main() {} diff --git a/src/test/compile-fail/issue-23046.rs b/src/test/compile-fail/issue-23046.rs index dba9c32f9b4..c274665530f 100644 --- a/src/test/compile-fail/issue-23046.rs +++ b/src/test/compile-fail/issue-23046.rs @@ -25,6 +25,6 @@ pub fn let_<'var, VAR, F: for<'v: 'var> Fn(Expr<'v, VAR>) -> Expr<'v, VAR>> fn main() { let ex = |x| { - let_(add(x,x), |y| { //~ ERROR unable to infer enough type information about `_` + let_(add(x,x), |y| { //~ ERROR unable to infer enough type information about `VAR` let_(add(x, x), |x|x)})}; } diff --git a/src/test/compile-fail/issue-5062.rs b/src/test/compile-fail/issue-5062.rs index f5aa4fadbed..cf78d6d8c0a 100644 --- a/src/test/compile-fail/issue-5062.rs +++ b/src/test/compile-fail/issue-5062.rs @@ -9,4 +9,4 @@ // except according to those terms. fn main() { format!("{:?}", None); } - //~^ ERROR unable to infer enough type information about `_` [E0282] + //~^ ERROR unable to infer enough type information about `T` [E0282] diff --git a/src/test/compile-fail/issue-6458-2.rs b/src/test/compile-fail/issue-6458-2.rs index 71f28054579..3816896d43d 100644 --- a/src/test/compile-fail/issue-6458-2.rs +++ b/src/test/compile-fail/issue-6458-2.rs @@ -11,5 +11,5 @@ fn main() { // Unconstrained type: format!("{:?}", None); - //~^ ERROR unable to infer enough type information about `_` [E0282] + //~^ ERROR unable to infer enough type information about `T` [E0282] } diff --git a/src/test/compile-fail/issue-6458-3.rs b/src/test/compile-fail/issue-6458-3.rs index e397805565b..8029522f5d3 100644 --- a/src/test/compile-fail/issue-6458-3.rs +++ b/src/test/compile-fail/issue-6458-3.rs @@ -12,7 +12,7 @@ use std::mem; fn main() { mem::transmute(0); - //~^ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + //~^ ERROR unable to infer enough type information about `U` [E0282] + //~| NOTE cannot infer type for `U` //~| NOTE type annotations or 
generic parameter binding } diff --git a/src/test/compile-fail/issue-6458.rs b/src/test/compile-fail/issue-6458.rs index a64522a0e5b..f8354ddbf12 100644 --- a/src/test/compile-fail/issue-6458.rs +++ b/src/test/compile-fail/issue-6458.rs @@ -17,8 +17,8 @@ pub fn foo<State>(_: TypeWithState<State>) {} pub fn bar() { foo(TypeWithState(marker::PhantomData)); - //~^ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + //~^ ERROR unable to infer enough type information about `State` [E0282] + //~| NOTE cannot infer type for `State` //~| NOTE type annotations or generic parameter binding } diff --git a/src/test/compile-fail/issue-7813.rs b/src/test/compile-fail/issue-7813.rs index e3cb1d0c7da..e37a8816423 100644 --- a/src/test/compile-fail/issue-7813.rs +++ b/src/test/compile-fail/issue-7813.rs @@ -10,7 +10,7 @@ fn main() { let v = &[]; - let it = v.iter(); //~ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + let it = v.iter(); //~ ERROR unable to infer enough type information about `T` [E0282] + //~| NOTE cannot infer type for `T` //~| NOTE type annotations or generic parameter binding } diff --git a/src/test/compile-fail/method-ambig-one-trait-unknown-int-type.rs b/src/test/compile-fail/method-ambig-one-trait-unknown-int-type.rs index 4f86909765e..1cf41f95a2d 100644 --- a/src/test/compile-fail/method-ambig-one-trait-unknown-int-type.rs +++ b/src/test/compile-fail/method-ambig-one-trait-unknown-int-type.rs @@ -32,7 +32,7 @@ impl foo for Vec<isize> { fn m1() { // we couldn't infer the type of the vector just based on calling foo()... 
let mut x = Vec::new(); - //~^ ERROR unable to infer enough type information about `_` [E0282] + //~^ ERROR unable to infer enough type information about `T` [E0282] x.foo(); } diff --git a/src/test/compile-fail/traits-multidispatch-convert-ambig-dest.rs b/src/test/compile-fail/traits-multidispatch-convert-ambig-dest.rs index e6545063dbd..ed2ffa995e5 100644 --- a/src/test/compile-fail/traits-multidispatch-convert-ambig-dest.rs +++ b/src/test/compile-fail/traits-multidispatch-convert-ambig-dest.rs @@ -34,8 +34,8 @@ where T : Convert<U> fn a() { test(22, std::default::Default::default()); - //~^ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + //~^ ERROR unable to infer enough type information about `U` [E0282] + //~| NOTE cannot infer type for `U` //~| NOTE type annotations or generic parameter binding } diff --git a/src/test/compile-fail/unconstrained-none.rs b/src/test/compile-fail/unconstrained-none.rs index 380cdd266cd..88080bc70ca 100644 --- a/src/test/compile-fail/unconstrained-none.rs +++ b/src/test/compile-fail/unconstrained-none.rs @@ -11,7 +11,7 @@ // Issue #5062 fn main() { - None; //~ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + None; //~ ERROR unable to infer enough type information about `T` [E0282] + //~| NOTE cannot infer type for `T` //~| NOTE type annotations or generic parameter binding } diff --git a/src/test/compile-fail/unconstrained-ref.rs b/src/test/compile-fail/unconstrained-ref.rs index ba94bf613d2..12278549215 100644 --- a/src/test/compile-fail/unconstrained-ref.rs +++ b/src/test/compile-fail/unconstrained-ref.rs @@ -13,7 +13,7 @@ struct S<'a, T:'a> { } fn main() { - S { o: &None }; //~ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + S { o: &None }; //~ ERROR unable to infer enough type information about `T` [E0282] + //~| NOTE cannot infer type for `T` //~| NOTE type 
annotations or generic parameter binding } diff --git a/src/test/compile-fail/vector-no-ann.rs b/src/test/compile-fail/vector-no-ann.rs index 25709f35246..d559caf77a1 100644 --- a/src/test/compile-fail/vector-no-ann.rs +++ b/src/test/compile-fail/vector-no-ann.rs @@ -11,7 +11,7 @@ fn main() { let _foo = Vec::new(); - //~^ ERROR unable to infer enough type information about `_` [E0282] - //~| NOTE cannot infer type for `_` + //~^ ERROR unable to infer enough type information about `T` [E0282] + //~| NOTE cannot infer type for `T` //~| NOTE type annotations or generic parameter binding } diff --git a/src/test/incremental/change_symbol_export_status.rs b/src/test/incremental/change_symbol_export_status.rs new file mode 100644 index 00000000000..71f46c641bf --- /dev/null +++ b/src/test/incremental/change_symbol_export_status.rs @@ -0,0 +1,42 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// revisions: rpass1 rpass2 + +#![feature(rustc_attrs)] +#![allow(private_no_mangle_fns)] + +#![rustc_partition_reused(module="change_symbol_export_status", cfg="rpass2")] +#![rustc_partition_translated(module="change_symbol_export_status-mod1", cfg="rpass2")] + + +// This test case makes sure that a change in symbol visibility is detected by +// our dependency tracking. We do this by changing a module's visibility to +// `private` in rpass2, causing the contained function to go from `default` to +// `hidden` visibility. +// The function is marked with #[no_mangle] so it is considered for exporting +// even from an executable. 
Plain Rust functions are only exported from Rust +// libraries, which our test infrastructure does not support. + +#[cfg(rpass1)] +pub mod mod1 { + #[no_mangle] + pub fn foo() {} +} + +#[cfg(rpass2)] +mod mod1 { + #[no_mangle] + pub fn foo() {} +} + +fn main() { + mod1::foo(); +} diff --git a/src/test/incremental/hashes/closure_expressions.rs b/src/test/incremental/hashes/closure_expressions.rs new file mode 100644 index 00000000000..38fe5cdffeb --- /dev/null +++ b/src/test/incremental/hashes/closure_expressions.rs @@ -0,0 +1,144 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +// This test case tests the incremental compilation hash (ICH) implementation +// for closure expression. + +// The general pattern followed here is: Change one thing between rev1 and rev2 +// and make sure that the hash has changed, then change nothing between rev2 and +// rev3 and make sure that the hash has not changed. 
+ +// must-compile-successfully +// revisions: cfail1 cfail2 cfail3 +// compile-flags: -Z query-dep-graph + +#![allow(warnings)] +#![feature(rustc_attrs)] +#![crate_type="rlib"] + + +// Change closure body --------------------------------------------------------- +#[cfg(cfail1)] +fn change_closure_body() { + let _ = || 1u32; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_closure_body() { + let _ = || 3u32; +} + + + +// Add parameter --------------------------------------------------------------- +#[cfg(cfail1)] +fn add_parameter() { + let x = 0u32; + let _ = || x + 1; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn add_parameter() { + let x = 0u32; + let _ = |x: u32| x + 1; +} + + + +// Change parameter pattern ---------------------------------------------------- +#[cfg(cfail1)] +fn change_parameter_pattern() { + let _ = |x: &u32| x; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_parameter_pattern() { + let _ = |&x: &u32| x; +} + + + +// Add `move` to closure ------------------------------------------------------- +#[cfg(cfail1)] +fn add_move() { + let _ = || 1; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] 
+#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn add_move() { + let _ = move || 1; +} + + + +// Add type ascription to parameter -------------------------------------------- +#[cfg(cfail1)] +fn add_type_ascription_to_parameter() { + let closure = |x| x + 1u32; + let _: u32 = closure(1); +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn add_type_ascription_to_parameter() { + let closure = |x: u32| x + 1u32; + let _: u32 = closure(1); +} + + + +// Change parameter type ------------------------------------------------------- +#[cfg(cfail1)] +fn change_parameter_type() { + let closure = |x: u32| (x as u64) + 1; + let _ = closure(1); +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_parameter_type() { + let closure = |x: u16| (x as u64) + 1; + let _ = closure(1); +} diff --git a/src/test/incremental/hashes/enum_constructors.rs b/src/test/incremental/hashes/enum_constructors.rs new file mode 100644 index 00000000000..7f991b30fc4 --- /dev/null +++ b/src/test/incremental/hashes/enum_constructors.rs @@ -0,0 +1,387 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + + +// This test case tests the incremental compilation hash (ICH) implementation +// for struct constructor expressions. + +// The general pattern followed here is: Change one thing between rev1 and rev2 +// and make sure that the hash has changed, then change nothing between rev2 and +// rev3 and make sure that the hash has not changed. + +// must-compile-successfully +// revisions: cfail1 cfail2 cfail3 +// compile-flags: -Z query-dep-graph + +#![allow(warnings)] +#![feature(rustc_attrs)] +#![crate_type="rlib"] + + +enum Enum { + Struct { + x: i32, + y: i64, + z: i16, + }, + Tuple(i32, i64, i16) +} + +// Change field value (struct-like) ----------------------------------------- +#[cfg(cfail1)] +fn change_field_value_struct_like() -> Enum { + Enum::Struct { + x: 0, + y: 1, + z: 2, + } +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_field_value_struct_like() -> Enum { + Enum::Struct { + x: 0, + y: 2, + z: 2, + } +} + + + +// Change field order (struct-like) ----------------------------------------- +#[cfg(cfail1)] +fn change_field_order_struct_like() -> Enum { + Enum::Struct { + x: 3, + y: 4, + z: 5, + } +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_field_order_struct_like() -> Enum { + Enum::Struct { + y: 4, + x: 3, + z: 5, + } +} + + +enum Enum2 { + Struct { + x: i8, + y: i8, + z: i8, + }, + Struct2 { + x: i8, + y: i8, + z: i8, + }, + Tuple(u16, u16, u16), + Tuple2(u64, u64, u64), +} + 
+// Change constructor path (struct-like) ------------------------------------ +#[cfg(cfail1)] +fn change_constructor_path_struct_like() { + let _ = Enum::Struct { + x: 0, + y: 1, + z: 2, + }; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_constructor_path_struct_like() { + let _ = Enum2::Struct { + x: 0, + y: 1, + z: 2, + }; +} + + + +// Change variant (regular struct) ------------------------------------ +#[cfg(cfail1)] +fn change_constructor_variant_struct_like() { + let _ = Enum2::Struct { + x: 0, + y: 1, + z: 2, + }; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_constructor_variant_struct_like() { + let _ = Enum2::Struct2 { + x: 0, + y: 1, + z: 2, + }; +} + + +// Change constructor path indirectly (struct-like) ------------------------- +mod change_constructor_path_indirectly_struct_like { + #[cfg(cfail1)] + use super::Enum as TheEnum; + #[cfg(not(cfail1))] + use super::Enum2 as TheEnum; + + #[rustc_dirty(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_dirty(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] + fn function() -> TheEnum { + TheEnum::Struct { + x: 0, + y: 1, + z: 2, + } + } +} + + +// Change constructor variant indirectly (struct-like) --------------------------- +mod change_constructor_variant_indirectly_struct_like { + use super::Enum2; + #[cfg(cfail1)] + use super::Enum2::Struct as Variant; + #[cfg(not(cfail1))] + 
use super::Enum2::Struct2 as Variant; + + #[rustc_clean(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_clean(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] + fn function() -> Enum2 { + Variant { + x: 0, + y: 1, + z: 2, + } + } +} + + +// Change field value (tuple-like) ------------------------------------------- +#[cfg(cfail1)] +fn change_field_value_tuple_like() -> Enum { + Enum::Tuple(0, 1, 2) +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_field_value_tuple_like() -> Enum { + Enum::Tuple(0, 1, 3) +} + + + +// Change constructor path (tuple-like) -------------------------------------- +#[cfg(cfail1)] +fn change_constructor_path_tuple_like() { + let _ = Enum::Tuple(0, 1, 2); +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_constructor_path_tuple_like() { + let _ = Enum2::Tuple(0, 1, 2); +} + + + +// Change constructor variant (tuple-like) -------------------------------------- +#[cfg(cfail1)] +fn change_constructor_variant_tuple_like() { + let _ = Enum2::Tuple(0, 1, 2); +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_constructor_variant_tuple_like() { + let _ = Enum2::Tuple2(0, 1, 2); +} + + 
+// Change constructor path indirectly (tuple-like) --------------------------- +mod change_constructor_path_indirectly_tuple_like { + #[cfg(cfail1)] + use super::Enum as TheEnum; + #[cfg(not(cfail1))] + use super::Enum2 as TheEnum; + + #[rustc_dirty(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_dirty(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] + fn function() -> TheEnum { + TheEnum::Tuple(0, 1, 2) + } +} + + + +// Change constructor variant indirectly (tuple-like) --------------------------- +mod change_constructor_variant_indirectly_tuple_like { + use super::Enum2; + #[cfg(cfail1)] + use super::Enum2::Tuple as Variant; + #[cfg(not(cfail1))] + use super::Enum2::Tuple2 as Variant; + + #[rustc_clean(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_clean(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] + fn function() -> Enum2 { + Variant(0, 1, 2) + } +} + + +enum Clike { + A, + B, + C +} + +enum Clike2 { + B, + C, + D +} + +// Change constructor path (C-like) -------------------------------------- +#[cfg(cfail1)] +fn change_constructor_path_c_like() { + let _ = Clike::B; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_constructor_path_c_like() { + let _ = Clike2::B; +} + + + +// Change constructor variant (C-like) -------------------------------------- +#[cfg(cfail1)] +fn change_constructor_variant_c_like() { + let _ = Clike::A; +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", 
cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_constructor_variant_c_like() { + let _ = Clike::C; +} + + +// Change constructor path indirectly (C-like) --------------------------- +mod change_constructor_path_indirectly_c_like { + #[cfg(cfail1)] + use super::Clike as TheEnum; + #[cfg(not(cfail1))] + use super::Clike2 as TheEnum; + + #[rustc_dirty(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_dirty(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] + fn function() -> TheEnum { + TheEnum::B + } +} + + + +// Change constructor variant indirectly (C-like) --------------------------- +mod change_constructor_variant_indirectly_c_like { + use super::Clike; + #[cfg(cfail1)] + use super::Clike::A as Variant; + #[cfg(not(cfail1))] + use super::Clike::B as Variant; + + #[rustc_clean(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_clean(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] + fn function() -> Clike { + Variant + } +} diff --git a/src/test/incremental/hashes/exported_vs_not.rs b/src/test/incremental/hashes/exported_vs_not.rs new file mode 100644 index 00000000000..082badacc6c --- /dev/null +++ b/src/test/incremental/hashes/exported_vs_not.rs @@ -0,0 +1,86 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// must-compile-successfully +// revisions: cfail1 cfail2 cfail3 +// compile-flags: -Z query-dep-graph + +#![allow(warnings)] +#![feature(rustc_attrs)] +#![crate_type="rlib"] + +// Case 1: The function body is not exported to metadata. If the body changes, +// the hash of the HirBody node should change, but not the hash of +// either the Hir or the Metadata node. + +#[cfg(cfail1)] +pub fn body_not_exported_to_metadata() -> u32 { + 1 +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +pub fn body_not_exported_to_metadata() -> u32 { + 2 +} + + + +// Case 2: The function body *is* exported to metadata because the function is +// marked as #[inline]. Only the hash of the Hir depnode should be +// unaffected by a change to the body. + +#[cfg(cfail1)] +#[inline] +pub fn body_exported_to_metadata_because_of_inline() -> u32 { + 1 +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_dirty(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +#[inline] +pub fn body_exported_to_metadata_because_of_inline() -> u32 { + 2 +} + + + +// Case 2: The function body *is* exported to metadata because the function is +// generic. Only the hash of the Hir depnode should be +// unaffected by a change to the body. 
+ +#[cfg(cfail1)] +#[inline] +pub fn body_exported_to_metadata_because_of_generic() -> u32 { + 1 +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_dirty(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +#[inline] +pub fn body_exported_to_metadata_because_of_generic() -> u32 { + 2 +} + diff --git a/src/test/incremental/hashes/indexing_expressions.rs b/src/test/incremental/hashes/indexing_expressions.rs new file mode 100644 index 00000000000..bb31982d93f --- /dev/null +++ b/src/test/incremental/hashes/indexing_expressions.rs @@ -0,0 +1,157 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +// This test case tests the incremental compilation hash (ICH) implementation +// for closure expression. + +// The general pattern followed here is: Change one thing between rev1 and rev2 +// and make sure that the hash has changed, then change nothing between rev2 and +// rev3 and make sure that the hash has not changed. 
+ +// must-compile-successfully +// revisions: cfail1 cfail2 cfail3 +// compile-flags: -Z query-dep-graph + +#![allow(warnings)] +#![feature(rustc_attrs)] +#![crate_type="rlib"] +#![feature(inclusive_range_syntax)] + +// Change simple index --------------------------------------------------------- +#[cfg(cfail1)] +fn change_simple_index(slice: &[u32]) -> u32 { + slice[3] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_simple_index(slice: &[u32]) -> u32 { + slice[4] +} + + + +// Change lower bound ---------------------------------------------------------- +#[cfg(cfail1)] +fn change_lower_bound(slice: &[u32]) -> &[u32] { + &slice[3..5] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_lower_bound(slice: &[u32]) -> &[u32] { + &slice[2..5] +} + + + +// Change upper bound ---------------------------------------------------------- +#[cfg(cfail1)] +fn change_upper_bound(slice: &[u32]) -> &[u32] { + &slice[3..5] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_upper_bound(slice: &[u32]) -> &[u32] { + &slice[3..7] +} + + + +// Add lower bound ------------------------------------------------------------- +#[cfg(cfail1)] +fn add_lower_bound(slice: &[u32]) -> &[u32] { + &slice[..4] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] 
+#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn add_lower_bound(slice: &[u32]) -> &[u32] { + &slice[3..4] +} + + + +// Add upper bound ------------------------------------------------------------- +#[cfg(cfail1)] +fn add_upper_bound(slice: &[u32]) -> &[u32] { + &slice[3..] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn add_upper_bound(slice: &[u32]) -> &[u32] { + &slice[3..7] +} + + + +// Change mutability ----------------------------------------------------------- +#[cfg(cfail1)] +fn change_mutability(slice: &mut [u32]) -> u32 { + (&mut slice[3..5])[0] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn change_mutability(slice: &mut [u32]) -> u32 { + (&slice[3..5])[0] +} + + + +// Exclusive to inclusive range ------------------------------------------------ +#[cfg(cfail1)] +fn exclusive_to_inclusive_range(slice: &[u32]) -> &[u32] { + &slice[3..7] +} + +#[cfg(not(cfail1))] +#[rustc_clean(label="Hir", cfg="cfail2")] +#[rustc_clean(label="Hir", cfg="cfail3")] +#[rustc_dirty(label="HirBody", cfg="cfail2")] +#[rustc_clean(label="HirBody", cfg="cfail3")] +#[rustc_metadata_clean(cfg="cfail2")] +#[rustc_metadata_clean(cfg="cfail3")] +fn exclusive_to_inclusive_range(slice: &[u32]) -> &[u32] { + &slice[3...7] +} diff --git a/src/test/incremental/hashes/struct_constructors.rs b/src/test/incremental/hashes/struct_constructors.rs index 
6a9f4698bf8..0e23d953baf 100644 --- a/src/test/incremental/hashes/struct_constructors.rs +++ b/src/test/incremental/hashes/struct_constructors.rs @@ -202,6 +202,12 @@ mod change_constructor_path_indirectly_regular_struct { #[cfg(not(cfail1))] use super::RegularStruct2 as Struct; + #[rustc_dirty(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_dirty(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] fn function() -> Struct { Struct { x: 0, @@ -262,6 +268,12 @@ mod change_constructor_path_indirectly_tuple_struct { #[cfg(not(cfail1))] use super::TupleStruct2 as Struct; + #[rustc_dirty(label="Hir", cfg="cfail2")] + #[rustc_clean(label="Hir", cfg="cfail3")] + #[rustc_dirty(label="HirBody", cfg="cfail2")] + #[rustc_clean(label="HirBody", cfg="cfail3")] + #[rustc_metadata_dirty(cfg="cfail2")] + #[rustc_metadata_clean(cfg="cfail3")] fn function() -> Struct { Struct(0, 1, 2) } diff --git a/src/test/mir-opt/deaggregator_test_enum_2.rs b/src/test/mir-opt/deaggregator_test_enum_2.rs new file mode 100644 index 00000000000..02d496b2901 --- /dev/null +++ b/src/test/mir-opt/deaggregator_test_enum_2.rs @@ -0,0 +1,57 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Test that deaggregate fires in more than one basic block + +enum Foo { + A(i32), + B(i32), +} + +fn test1(x: bool, y: i32) -> Foo { + if x { + Foo::A(y) + } else { + Foo::B(y) + } +} + +fn main() {} + +// END RUST SOURCE +// START rustc.node12.Deaggregator.before.mir +// bb1: { +// _6 = _4; +// _0 = Foo::A(_6,); +// goto -> bb3; +// } +// +// bb2: { +// _7 = _4; +// _0 = Foo::B(_7,); +// goto -> bb3; +// } +// END rustc.node12.Deaggregator.before.mir +// START rustc.node12.Deaggregator.after.mir +// bb1: { +// _6 = _4; +// ((_0 as A).0: i32) = _6; +// discriminant(_0) = 0; +// goto -> bb3; +// } +// +// bb2: { +// _7 = _4; +// ((_0 as B).0: i32) = _7; +// discriminant(_0) = 1; +// goto -> bb3; +// } +// END rustc.node12.Deaggregator.after.mir +// diff --git a/src/test/mir-opt/deaggregator_test_multiple.rs b/src/test/mir-opt/deaggregator_test_multiple.rs new file mode 100644 index 00000000000..a180a69be55 --- /dev/null +++ b/src/test/mir-opt/deaggregator_test_multiple.rs @@ -0,0 +1,48 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Test that deaggregate fires more than once per block + +enum Foo { + A(i32), + B, +} + +fn test(x: i32) -> [Foo; 2] { + [Foo::A(x), Foo::A(x)] +} + +fn main() { } + +// END RUST SOURCE +// START rustc.node10.Deaggregator.before.mir +// bb0: { +// _2 = _1; +// _4 = _2; +// _3 = Foo::A(_4,); +// _6 = _2; +// _5 = Foo::A(_6,); +// _0 = [_3, _5]; +// return; +// } +// END rustc.node10.Deaggregator.before.mir +// START rustc.node10.Deaggregator.after.mir +// bb0: { +// _2 = _1; +// _4 = _2; +// ((_3 as A).0: i32) = _4; +// discriminant(_3) = 0; +// _6 = _2; +// ((_5 as A).0: i32) = _6; +// discriminant(_5) = 0; +// _0 = [_3, _5]; +// return; +// } +// END rustc.node10.Deaggregator.after.mir diff --git a/src/test/run-make/llvm-pass/llvm-function-pass.so.cc b/src/test/run-make/llvm-pass/llvm-function-pass.so.cc index 4470c400760..880c9bce562 100644 --- a/src/test/run-make/llvm-pass/llvm-function-pass.so.cc +++ b/src/test/run-make/llvm-pass/llvm-function-pass.so.cc @@ -28,7 +28,12 @@ namespace { bool runOnFunction(Function &F) override; - const char *getPassName() const override { +#if LLVM_VERSION_MAJOR >= 4 + StringRef +#else + const char * +#endif + getPassName() const override { return "Some LLVM pass"; } diff --git a/src/test/run-make/llvm-pass/llvm-module-pass.so.cc b/src/test/run-make/llvm-pass/llvm-module-pass.so.cc index 510375a5e66..280eca7e8f0 100644 --- a/src/test/run-make/llvm-pass/llvm-module-pass.so.cc +++ b/src/test/run-make/llvm-pass/llvm-module-pass.so.cc @@ -27,7 +27,12 @@ namespace { bool runOnModule(Module &M) override; - const char *getPassName() const override { +#if LLVM_VERSION_MAJOR >= 4 + StringRef +#else + const char * +#endif + getPassName() const override { return "Some LLVM pass"; } diff --git a/src/test/run-pass/auxiliary/issue_38226_aux.rs b/src/test/run-pass/auxiliary/issue_38226_aux.rs new file mode 100644 index 00000000000..d48a9733685 --- /dev/null +++ b/src/test/run-pass/auxiliary/issue_38226_aux.rs @@ -0,0 +1,33 @@ +// Copyright 
2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![crate_type="rlib"] + +#[inline(never)] +pub fn foo<T>() { + let _: Box<SomeTrait> = Box::new(SomeTraitImpl); +} + +pub fn bar() { + SomeTraitImpl.bar(); +} + +mod submod { + pub trait SomeTrait { + fn bar(&self) { + panic!("NO") + } + } +} + +use self::submod::SomeTrait; + +pub struct SomeTraitImpl; +impl SomeTrait for SomeTraitImpl {} diff --git a/src/test/run-pass/issue-38226.rs b/src/test/run-pass/issue-38226.rs new file mode 100644 index 00000000000..33604212af9 --- /dev/null +++ b/src/test/run-pass/issue-38226.rs @@ -0,0 +1,24 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This test makes sure that we don't run into a linker error because of the +// middle::reachable pass missing trait methods with default impls. 
+ +// aux-build:issue_38226_aux.rs + +// Need -Cno-prepopulate-passes to really disable inlining, otherwise the faulty +// code gets optimized out: +// compile-flags: -Cno-prepopulate-passes + +extern crate issue_38226_aux; + +fn main() { + issue_38226_aux::foo::<()>(); +} diff --git a/src/test/run-pass/specialization/specialization-translate-projections-with-lifetimes.rs b/src/test/run-pass/specialization/specialization-translate-projections-with-lifetimes.rs new file mode 100644 index 00000000000..9702f632413 --- /dev/null +++ b/src/test/run-pass/specialization/specialization-translate-projections-with-lifetimes.rs @@ -0,0 +1,41 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(specialization)] + +trait Iterator { + fn next(&self); +} + +trait WithAssoc { + type Item; +} + +impl<'a> WithAssoc for &'a () { + type Item = &'a u32; +} + +struct Cloned<I>(I); + +impl<'a, I, T: 'a> Iterator for Cloned<I> + where I: WithAssoc<Item=&'a T>, T: Clone +{ + fn next(&self) {} +} + +impl<'a, I, T: 'a> Iterator for Cloned<I> + where I: WithAssoc<Item=&'a T>, T: Copy +{ + +} + +fn main() { + Cloned(&()).next(); +} diff --git a/src/test/run-pass/vector-sort-panic-safe.rs b/src/test/run-pass/vector-sort-panic-safe.rs index 911bfc7454c..87f1968918c 100644 --- a/src/test/run-pass/vector-sort-panic-safe.rs +++ b/src/test/run-pass/vector-sort-panic-safe.rs @@ -17,86 +17,111 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::__rand::{thread_rng, Rng}; use std::thread; -const REPEATS: usize = 5; -const MAX_LEN: usize = 32; -static drop_counts: [AtomicUsize; MAX_LEN] = +const MAX_LEN: usize = 80; + +static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [ // FIXME #5244: AtomicUsize is not Copy. 
- [ - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), - AtomicUsize::new(0), AtomicUsize::new(0), - ]; - -static creation_count: AtomicUsize = AtomicUsize::new(0); + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), 
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), +]; #[derive(Clone, PartialEq, PartialOrd, Eq, Ord)] -struct DropCounter { x: u32, creation_id: usize } +struct DropCounter { + x: u32, + id: usize, +} impl Drop for DropCounter { fn drop(&mut self) { - drop_counts[self.creation_id].fetch_add(1, Ordering::Relaxed); + DROP_COUNTS[self.id].fetch_add(1, Ordering::Relaxed); } } -pub fn main() { - // len can't go above 64. - for len in 2..MAX_LEN { - for _ in 0..REPEATS { - // reset the count for these new DropCounters, so their - // IDs start from 0. - creation_count.store(0, Ordering::Relaxed); +fn test(input: &[DropCounter]) { + let len = input.len(); - let mut rng = thread_rng(); - let main = (0..len).map(|_| { - DropCounter { - x: rng.next_u32(), - creation_id: creation_count.fetch_add(1, Ordering::Relaxed), - } - }).collect::<Vec<_>>(); - - // work out the total number of comparisons required to sort - // this array... - let mut count = 0_usize; - main.clone().sort_by(|a, b| { count += 1; a.cmp(b) }); - - // ... and then panic on each and every single one. - for panic_countdown in 0..count { - // refresh the counters. - for c in &drop_counts { - c.store(0, Ordering::Relaxed); - } + // Work out the total number of comparisons required to sort + // this array... 
+ let mut count = 0usize; + input.to_owned().sort_by(|a, b| { count += 1; a.cmp(b) }); - let v = main.clone(); - - let _ = thread::spawn(move|| { - let mut v = v; - let mut panic_countdown = panic_countdown; - v.sort_by(|a, b| { - if panic_countdown == 0 { - panic!() - } - panic_countdown -= 1; - a.cmp(b) - }) - }).join(); - - // check that the number of things dropped is exactly - // what we expect (i.e. the contents of `v`). - for (i, c) in drop_counts.iter().enumerate().take(len) { - let count = c.load(Ordering::Relaxed); - assert!(count == 1, - "found drop count == {} for i == {}, len == {}", - count, i, len); + // ... and then panic on each and every single one. + for panic_countdown in 0..count { + // Refresh the counters. + for i in 0..len { + DROP_COUNTS[i].store(0, Ordering::Relaxed); + } + + let v = input.to_owned(); + let _ = thread::spawn(move || { + let mut v = v; + let mut panic_countdown = panic_countdown; + v.sort_by(|a, b| { + if panic_countdown == 0 { + panic!(); } + panic_countdown -= 1; + a.cmp(b) + }) + }).join(); + + // Check that the number of things dropped is exactly + // what we expect (i.e. the contents of `v`). + for (i, c) in DROP_COUNTS.iter().enumerate().take(len) { + let count = c.load(Ordering::Relaxed); + assert!(count == 1, + "found drop count == {} for i == {}, len == {}", + count, i, len); + } + } +} + +fn main() { + for len in (1..20).chain(70..MAX_LEN) { + // Test on a random array. + let mut rng = thread_rng(); + let input = (0..len).map(|id| { + DropCounter { + x: rng.next_u32(), + id: id, } + }).collect::<Vec<_>>(); + test(&input); + + // Test on a sorted array with two elements randomly swapped, creating several natural + // runs of random lengths. Such arrays have very high chances of hitting all code paths in + // the merge procedure. 
+ for _ in 0..5 { + let mut input = (0..len).map(|i| + DropCounter { + x: i as u32, + id: i, + } + ).collect::<Vec<_>>(); + + let a = rng.gen::<usize>() % len; + let b = rng.gen::<usize>() % len; + input.swap(a, b); + + test(&input); } } } diff --git a/src/test/rustdoc/issue-38219.rs b/src/test/rustdoc/issue-38219.rs new file mode 100644 index 00000000000..19b338bf560 --- /dev/null +++ b/src/test/rustdoc/issue-38219.rs @@ -0,0 +1,18 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// compile-flags:--test +// should-fail + +/// ``` +/// fail +/// ``` +#[macro_export] +macro_rules! 
foo { () => {} } diff --git a/src/test/ui/codemap_tests/repair_span_std_macros.stderr b/src/test/ui/codemap_tests/repair_span_std_macros.stderr index 73a1c5bae85..7e0d778a3b2 100644 --- a/src/test/ui/codemap_tests/repair_span_std_macros.stderr +++ b/src/test/ui/codemap_tests/repair_span_std_macros.stderr @@ -1,8 +1,8 @@ -error[E0282]: unable to infer enough type information about `_` +error[E0282]: unable to infer enough type information about `T` --> $DIR/repair_span_std_macros.rs:12:13 | 12 | let x = vec![]; - | ^^^^^^ cannot infer type for `_` + | ^^^^^^ cannot infer type for `T` | = note: type annotations or generic parameter binding required = note: this error originates in a macro outside of the current crate diff --git a/src/test/ui/missing-items/missing-type-parameter.rs b/src/test/ui/missing-items/missing-type-parameter.rs new file mode 100644 index 00000000000..3671abd6624 --- /dev/null +++ b/src/test/ui/missing-items/missing-type-parameter.rs @@ -0,0 +1,15 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +fn foo<X>() { } + +fn main() { + foo(); +} diff --git a/src/test/ui/missing-items/missing-type-parameter.stderr b/src/test/ui/missing-items/missing-type-parameter.stderr new file mode 100644 index 00000000000..2d007af4980 --- /dev/null +++ b/src/test/ui/missing-items/missing-type-parameter.stderr @@ -0,0 +1,10 @@ +error[E0282]: unable to infer enough type information about `X` + --> $DIR/missing-type-parameter.rs:14:5 + | +14 | foo(); + | ^^^ cannot infer type for `X` + | + = note: type annotations or generic parameter binding required + +error: aborting due to previous error + diff --git a/src/tools/tidy/src/deps.rs b/src/tools/tidy/src/deps.rs new file mode 100644 index 00000000000..7592c09a913 --- /dev/null +++ b/src/tools/tidy/src/deps.rs @@ -0,0 +1,73 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
Check license of third-party deps by inspecting src/vendor + +use std::fs::File; +use std::io::Read; +use std::path::Path; + +static LICENSES: &'static [&'static str] = &[ + "MIT/Apache-2.0" +]; + +pub fn check(path: &Path, bad: &mut bool) { + let path = path.join("vendor"); + assert!(path.exists(), "vendor directory missing"); + let mut saw_dir = false; + for dir in t!(path.read_dir()) { + saw_dir = true; + let dir = t!(dir); + let toml = dir.path().join("Cargo.toml"); + if !check_license(&toml) { + *bad = true; + } + } + assert!(saw_dir, "no vendored source"); +} + +fn check_license(path: &Path) -> bool { + if !path.exists() { + panic!("{} does not exist", path.display()); + } + let mut contents = String::new(); + t!(t!(File::open(path)).read_to_string(&mut contents)); + + let mut found_license = false; + for line in contents.lines() { + if !line.starts_with("license") { + continue; + } + let license = extract_license(line); + if !LICENSES.contains(&&*license) { + println!("invalid license {} in {}", license, path.display()); + return false; + } + found_license = true; + break; + } + if !found_license { + println!("no license in {}", path.display()); + return false; + } + + true +} + +fn extract_license(line: &str) -> String { + let first_quote = line.find('"'); + let last_quote = line.rfind('"'); + if let (Some(f), Some(l)) = (first_quote, last_quote) { + let license = &line[f + 1 .. 
l]; + license.into() + } else { + "bad-license-parse".into() + } +} diff --git a/src/tools/tidy/src/main.rs b/src/tools/tidy/src/main.rs index cb11fe261c4..7566580b1a5 100644 --- a/src/tools/tidy/src/main.rs +++ b/src/tools/tidy/src/main.rs @@ -36,6 +36,7 @@ mod errors; mod features; mod cargo; mod pal; +mod deps; fn main() { let path = env::args_os().skip(1).next().expect("need an argument"); @@ -48,6 +49,7 @@ fn main() { cargo::check(&path, &mut bad); features::check(&path, &mut bad); pal::check(&path, &mut bad); + deps::check(&path, &mut bad); if bad { panic!("some tidy checks failed"); |
