diff options
| author | Alex Crichton <alex@alexcrichton.com> | 2017-08-07 22:30:39 -0700 |
|---|---|---|
| committer | Alex Crichton <alex@alexcrichton.com> | 2017-08-09 11:44:21 -0700 |
| commit | c25ddf21f18c3eeeaea2a4dffd70d2f6183068b5 (patch) | |
| tree | 9715e57405ae14bd7877dec129bce733daf72dc1 | |
| parent | cc4ff8f4d169562ff4ae22b94197a191215e6d56 (diff) | |
| parent | c5e2051f070c01241f68720a92a0957bcb070597 (diff) | |
| download | rust-c25ddf21f18c3eeeaea2a4dffd70d2f6183068b5.tar.gz rust-c25ddf21f18c3eeeaea2a4dffd70d2f6183068b5.zip | |
Merge remote-tracking branch 'origin/master' into gen
280 files changed, 17748 insertions, 2232 deletions
diff --git a/configure b/configure index 2b82b5e405b..664b473b2c9 100755 --- a/configure +++ b/configure @@ -437,7 +437,6 @@ opt local-rust 0 "use an installed rustc rather than downloading a snapshot" opt local-rebuild 0 "assume local-rust matches the current version, for rebuilds; implies local-rust, and is implied if local-rust already matches the current version" opt llvm-static-stdcpp 0 "statically link to libstdc++ for LLVM" opt llvm-link-shared 0 "prefer shared linking to LLVM (llvm-config --link-shared)" -opt llvm-clean-rebuild 0 "delete LLVM build directory on rebuild" opt rpath 1 "build rpaths into rustc itself" opt stage0-landing-pads 1 "enable landing pads during bootstrap with stage0" # This is used by the automation to produce single-target nightlies diff --git a/src/Cargo.lock b/src/Cargo.lock index e193cc612c5..398b23061ea 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -187,7 +187,7 @@ dependencies = [ "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -323,7 +323,7 @@ name = "crates-io" version = "0.11.0" dependencies = [ "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", 
"serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -437,6 +437,14 @@ dependencies = [ ] [[package]] +name = "error-chain" +version = "0.11.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "error_index_generator" version = "0.0.0" dependencies = [ @@ -1132,8 +1140,8 @@ dependencies = [ "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "racer 2.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustfmt-nightly 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1147,23 +1155,25 @@ dependencies = [ [[package]] name = "rls-analysis" -version = "0.4.5" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "derive-new 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-data" -version = "0.9.0" +version = "0.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1243,6 +1253,13 @@ dependencies = [ ] [[package]] +name = "rustc_apfloat" +version = "0.0.0" +dependencies = [ + "rustc_bitflags 0.0.0", +] + +[[package]] name = "rustc_asan" version = "0.0.0" dependencies = [ @@ -1299,6 +1316,7 @@ dependencies = [ name = "rustc_const_math" version = "0.0.0" dependencies = [ + "rustc_apfloat 0.0.0", "serialize 0.0.0", "syntax 0.0.0", ] @@ -1496,10 +1514,11 @@ name = "rustc_save_analysis" version = "0.0.0" dependencies = [ "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_data_structures 0.0.0", "rustc_typeck 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -1509,11 +1528,11 @@ dependencies = [ name = "rustc_trans" version = "0.0.0" dependencies = [ - "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", "jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-demangle 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -2145,6 +2164,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" "checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" "checksum error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8" +"checksum error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38d3a55d9a7a456748f2a3912c0941a5d9a68006eb15b3c3c9836b8420dc102d" "checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922" "checksum flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "36df0166e856739905cd3d7e0b210fe818592211a008862599845e012d8d304c" "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" @@ -2209,8 +2229,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" -"checksum rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0127cfae9c726461facbbbc8327e782adf8afd61f7fcc6adf8ea9ad8fc428ed0" -"checksum rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" 
= "f274ec7f966337dc2601fe9bde060b551d1293c277af782dc65cd7200ca070c0" +"checksum rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d2cb40c0371765897ae428b5706bb17135705ad4f6d1b8b6afbaabcf8c9b5cff" +"checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534" "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" "checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff" "checksum rustc-demangle 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3058a43ada2c2d0b92b3ae38007a2d0fa5e9db971be260e0171408a4ff471c95" diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index ac2e3bc402a..f6ed4ee91b3 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -188,7 +188,7 @@ fn main() { cmd.arg("-Zsave-analysis"); cmd.env("RUST_SAVE_ANALYSIS_CONFIG", "{\"output_file\": null,\"full_docs\": false,\"pub_only\": true,\ - \"signatures\": false,\"borrow_data\": false}"); + \"distro_crate\": true,\"signatures\": false,\"borrow_data\": false}"); } // Dealing with rpath here is a little special, so let's go into some diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 2f6e3ca9253..811c7df5d99 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -28,6 +28,7 @@ use check; use flags::Subcommand; use doc; use tool; +use native; pub use Compiler; @@ -256,7 +257,8 @@ impl<'a> Builder<'a> { compile::StartupObjects, tool::BuildManifest, tool::Rustbook, tool::ErrorIndex, tool::UnstableBookGen, tool::Tidy, tool::Linkchecker, tool::CargoTest, tool::Compiletest, tool::RemoteTestServer, tool::RemoteTestClient, - tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc), + tool::RustInstaller, tool::Cargo, 
tool::Rls, tool::Rustdoc, + native::Llvm), Kind::Test => describe!(check::Tidy, check::Bootstrap, check::DefaultCompiletest, check::HostCompiletest, check::Crate, check::CrateLibrustc, check::Linkcheck, check::Cargotest, check::Cargo, check::Rls, check::Docs, check::ErrorIndex, diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index b04e4de7744..c65f5a9fb48 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -1050,11 +1050,8 @@ impl Step for Crate { dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target))); cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap()); - if target.contains("emscripten") || build.remote_tested(target) { - cargo.arg("--no-run"); - } - cargo.arg("--"); + cargo.args(&build.flags.cmd.test_args()); if build.config.quiet_tests { cargo.arg("--quiet"); @@ -1063,75 +1060,24 @@ impl Step for Crate { let _time = util::timeit(); if target.contains("emscripten") { - build.run(&mut cargo); - krate_emscripten(build, compiler, target, mode); + cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), + build.config.nodejs.as_ref().expect("nodejs not configured")); } else if build.remote_tested(target) { - build.run(&mut cargo); - krate_remote(builder, compiler, target, mode); - } else { - cargo.args(&build.flags.cmd.test_args()); - try_run(build, &mut cargo); - } - } -} - -fn krate_emscripten(build: &Build, - compiler: Compiler, - target: Interned<String>, - mode: Mode) { - let out_dir = build.cargo_out(compiler, mode, target); - let tests = find_tests(&out_dir.join("deps"), target); - - let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured"); - for test in tests { - println!("running {}", test.display()); - let mut cmd = Command::new(nodejs); - cmd.arg(&test); - if build.config.quiet_tests { - cmd.arg("--quiet"); - } - try_run(build, &mut cmd); - } -} - -fn krate_remote(builder: &Builder, - compiler: Compiler, - target: Interned<String>, - mode: Mode) { - let build = 
builder.build; - let out_dir = build.cargo_out(compiler, mode, target); - let tests = find_tests(&out_dir.join("deps"), target); - - let tool = builder.tool_exe(Tool::RemoteTestClient); - for test in tests { - let mut cmd = Command::new(&tool); - cmd.arg("run") - .arg(&test); - if build.config.quiet_tests { - cmd.arg("--quiet"); + cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), + format!("{} run", + builder.tool_exe(Tool::RemoteTestClient).display())); } - cmd.args(&build.flags.cmd.test_args()); - try_run(build, &mut cmd); + try_run(build, &mut cargo); } } -fn find_tests(dir: &Path, target: Interned<String>) -> Vec<PathBuf> { - let mut dst = Vec::new(); - for e in t!(dir.read_dir()).map(|e| t!(e)) { - let file_type = t!(e.file_type()); - if !file_type.is_file() { - continue - } - let filename = e.file_name().into_string().unwrap(); - if (target.contains("windows") && filename.ends_with(".exe")) || - (!target.contains("windows") && !filename.contains(".")) || - (target.contains("emscripten") && - filename.ends_with(".js") && - !filename.ends_with(".asm.js")) { - dst.push(e.path()); +fn envify(s: &str) -> String { + s.chars().map(|c| { + match c { + '-' => '_', + c => c, } - } - dst + }).flat_map(|c| c.to_uppercase()).collect() } /// Some test suites are run inside emulators or on remote devices, and most diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 7b8af436d5a..5d898cb716d 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -62,7 +62,6 @@ pub struct Config { pub llvm_targets: Option<String>, pub llvm_experimental_targets: Option<String>, pub llvm_link_jobs: Option<u32>, - pub llvm_clean_rebuild: bool, // rust codegen options pub rust_optimize: bool, @@ -203,7 +202,6 @@ struct Llvm { targets: Option<String>, experimental_targets: Option<String>, link_jobs: Option<u32>, - clean_rebuild: Option<bool>, } #[derive(Deserialize, Default, Clone)] @@ -352,7 +350,6 @@ impl Config { set(&mut config.llvm_release_debuginfo, 
llvm.release_debuginfo); set(&mut config.llvm_version_check, llvm.version_check); set(&mut config.llvm_static_stdcpp, llvm.static_libstdcpp); - set(&mut config.llvm_clean_rebuild, llvm.clean_rebuild); config.llvm_targets = llvm.targets.clone(); config.llvm_experimental_targets = llvm.experimental_targets.clone(); config.llvm_link_jobs = llvm.link_jobs; @@ -477,7 +474,6 @@ impl Config { ("LLVM_VERSION_CHECK", self.llvm_version_check), ("LLVM_STATIC_STDCPP", self.llvm_static_stdcpp), ("LLVM_LINK_SHARED", self.llvm_link_shared), - ("LLVM_CLEAN_REBUILD", self.llvm_clean_rebuild), ("OPTIMIZE", self.rust_optimize), ("DEBUG_ASSERTIONS", self.rust_debug_assertions), ("DEBUGINFO", self.rust_debuginfo), diff --git a/src/bootstrap/config.toml.example b/src/bootstrap/config.toml.example index 7a52222e46e..9314135050f 100644 --- a/src/bootstrap/config.toml.example +++ b/src/bootstrap/config.toml.example @@ -69,11 +69,6 @@ # controlled by rustbuild's -j parameter. #link-jobs = 0 -# Delete LLVM build directory on LLVM rebuild. -# This option defaults to `false` for local development, but CI may want to -# always perform clean full builds (possibly accelerated by (s)ccache). -#clean-rebuild = false - # ============================================================================= # General build configuration options # ============================================================================= @@ -208,7 +203,7 @@ #codegen-units = 1 # Whether or not debug assertions are enabled for the compiler and standard -# library +# library. Also enables compilation of debug! and trace! logging macros. 
#debug-assertions = false # Whether or not debuginfo is emitted diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index f0dfd857ab6..ee0eca5d482 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -48,6 +48,10 @@ impl Step for Llvm { run.path("src/llvm") } + fn make_run(run: RunConfig) { + run.builder.ensure(Llvm { target: run.target }) + } + /// Compile LLVM for `target`. fn run(self, builder: &Builder) { let build = builder.build; @@ -76,9 +80,6 @@ impl Step for Llvm { return } } - if build.config.llvm_clean_rebuild { - drop(fs::remove_dir_all(&out_dir)); - } let _folder = build.fold_output(|| "llvm"); println!("Building LLVM for {}", target); @@ -128,6 +129,15 @@ impl Step for Llvm { .define("LLVM_TARGET_ARCH", target.split('-').next().unwrap()) .define("LLVM_DEFAULT_TARGET_TRIPLE", target); + + // This setting makes the LLVM tools link to the dynamic LLVM library, + // which saves both memory during parallel links and overall disk space + // for the tools. We don't distribute any of those tools, so this is + // just a local concern. However, it doesn't work well everywhere. 
+ if target.contains("linux-gnu") || target.contains("apple-darwin") { + cfg.define("LLVM_LINK_LLVM_DYLIB", "ON"); + } + if target.contains("msvc") { cfg.define("LLVM_USE_CRT_DEBUG", "MT"); cfg.define("LLVM_USE_CRT_RELEASE", "MT"); @@ -154,6 +164,14 @@ impl Step for Llvm { let host = build.llvm_out(build.build).join("bin/llvm-tblgen"); cfg.define("CMAKE_CROSSCOMPILING", "True") .define("LLVM_TABLEGEN", &host); + + if target.contains("netbsd") { + cfg.define("CMAKE_SYSTEM_NAME", "NetBSD"); + } else if target.contains("freebsd") { + cfg.define("CMAKE_SYSTEM_NAME", "FreeBSD"); + } + + cfg.define("LLVM_NATIVE_BUILD", build.llvm_out(build.build).join("build")); } let sanitize_cc = |cc: &Path| { diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs index 7011261ab6c..8b4c7f2ac31 100644 --- a/src/build_helper/lib.rs +++ b/src/build_helper/lib.rs @@ -13,7 +13,6 @@ extern crate filetime; use std::fs::File; -use std::io; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::{fs, env}; @@ -211,7 +210,7 @@ pub fn native_lib_boilerplate(src_name: &str, let out_dir = env::var_os("RUSTBUILD_NATIVE_DIR").unwrap_or(env::var_os("OUT_DIR").unwrap()); let out_dir = PathBuf::from(out_dir).join(out_name); - t!(create_dir_racy(&out_dir)); + t!(fs::create_dir_all(&out_dir)); if link_name.contains('=') { println!("cargo:rustc-link-lib={}", link_name); } else { @@ -260,21 +259,3 @@ fn fail(s: &str) -> ! 
{ println!("\n\n{}\n\n", s); std::process::exit(1); } - -fn create_dir_racy(path: &Path) -> io::Result<()> { - match fs::create_dir(path) { - Ok(()) => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} - Err(e) => return Err(e), - } - match path.parent() { - Some(p) => try!(create_dir_racy(p)), - None => return Err(io::Error::new(io::ErrorKind::Other, "failed to create whole tree")), - } - match fs::create_dir(path) { - Ok(()) => Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), - Err(e) => Err(e), - } -} diff --git a/src/ci/docker/dist-i686-freebsd/build-toolchain.sh b/src/ci/docker/dist-i686-freebsd/build-toolchain.sh index 5642e6fc937..8343327c33b 100755 --- a/src/ci/docker/dist-i686-freebsd/build-toolchain.sh +++ b/src/ci/docker/dist-i686-freebsd/build-toolchain.sh @@ -13,7 +13,7 @@ set -ex ARCH=$1 BINUTILS=2.25.1 -GCC=5.3.0 +GCC=6.4.0 hide_output() { set +x @@ -86,7 +86,7 @@ rm -rf freebsd # Finally, download and build gcc to target FreeBSD mkdir gcc cd gcc -curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf - cd gcc-$GCC ./contrib/download_prerequisites diff --git a/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh b/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh index 5642e6fc937..8343327c33b 100755 --- a/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh +++ b/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh @@ -13,7 +13,7 @@ set -ex ARCH=$1 BINUTILS=2.25.1 -GCC=5.3.0 +GCC=6.4.0 hide_output() { set +x @@ -86,7 +86,7 @@ rm -rf freebsd # Finally, download and build gcc to target FreeBSD mkdir gcc cd gcc -curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf - cd gcc-$GCC ./contrib/download_prerequisites diff --git a/src/ci/docker/run.sh 
b/src/ci/docker/run.sh index da74ffb41ff..d3f339bc15f 100755 --- a/src/ci/docker/run.sh +++ b/src/ci/docker/run.sh @@ -67,6 +67,13 @@ else args="$args --env SCCACHE_DIR=/sccache --volume $HOME/.cache/sccache:/sccache" fi +# Run containers as privileged as it should give them access to some more +# syscalls such as ptrace and whatnot. In the upgrade to LLVM 5.0 it was +# discovered that the leak sanitizer apparently needs these syscalls nowadays so +# we'll need `--privileged` for at least the `x86_64-gnu` builder, so this just +# goes ahead and sets it for all builders. +args="$args --privileged" + exec docker \ run \ --volume "$root_dir:/checkout:ro" \ diff --git a/src/ci/run.sh b/src/ci/run.sh index ccf0bb1ffb7..39fb4e44078 100755 --- a/src/ci/run.sh +++ b/src/ci/run.sh @@ -31,7 +31,6 @@ RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-sccache" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-manage-submodules" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-locked-deps" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-cargo-openssl-static" -RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-clean-rebuild" if [ "$DIST_SRC" = "" ]; then RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-dist-src" diff --git a/src/doc/book b/src/doc/book -Subproject 4ee596df22f8ecaa9a0b2ddc0624b0104540dbf +Subproject 6f1a03dae6bcea44976918186f2d554186b3499 diff --git a/src/doc/nomicon b/src/doc/nomicon -Subproject f8fd6710399a1a557155cb5be4922fe6a6f694c +Subproject f570bcb681771d691aa4fdb8dfcfad1939844bf diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index 5a7e986b532..7f01aaa3ac1 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -3,8 +3,15 @@ "intrinsic_prefix": "_vec_", "llvm_prefix": "llvm.ppc.altivec.", "number_info": { - "unsigned": {}, - "signed": {} + "unsigned": { + "kind" : "u", + "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } + }, + 
"signed": { + "kind" : "s", + "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } + }, + "float": {} }, "width_info": { "128": { "width": "" } @@ -16,6 +23,55 @@ "llvm": "vperm", "ret": "s32", "args": ["0", "0", "s8"] + }, + { + "intrinsic": "mradds", + "width": [128], + "llvm": "vmhraddshs", + "ret": "s16", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "cmpb", + "width": [128], + "llvm": "vcmpbfp", + "ret": "s32", + "args": ["f32", "f32"] + }, + { + "intrinsic": "cmpeq{0.data_type_short}", + "width": [128], + "llvm": "vcmpequ{0.data_type_short}", + "ret": "s(8-32)", + "args": ["0", "0"] + }, + { + "intrinsic": "cmpgt{1.kind}{1.data_type_short}", + "width": [128], + "llvm": "vcmpgt{1.kind}{1.data_type_short}", + "ret": "s(8-32)", + "args": ["0u", "1"] + }, + { + "intrinsic": "cmpgt{1.kind}{1.data_type_short}", + "width": [128], + "llvm": "vcmpgt{1.kind}{1.data_type_short}", + "ret": "s(8-32)", + "args": ["0", "1"] + }, + { + "intrinsic": "max{0.kind}{0.data_type_short}", + "width": [128], + "llvm": "vmax{0.kind}{0.data_type_short}", + "ret": "i(8-32)", + "args": ["0", "0"] + }, + { + "intrinsic": "min{0.kind}{0.data_type_short}", + "width": [128], + "llvm": "vmin{0.kind}{0.data_type_short}", + "ret": "i(8-32)", + "args": ["0", "0"] } ] } diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs index 66e0bf81c90..42111301a9f 100644 --- a/src/liballoc/allocator.rs +++ b/src/liballoc/allocator.rs @@ -215,6 +215,7 @@ impl Layout { /// of each element in the array. /// /// On arithmetic overflow, returns `None`. 
+ #[inline] pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { let padded_size = match self.size.checked_add(self.padding_needed_for(self.align)) { None => return None, diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index 4df13c509a8..80317cd763b 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -273,7 +273,10 @@ impl str { core_str::StrExt::is_char_boundary(self, index) } - /// Converts a string slice to a byte slice. + /// Converts a string slice to a byte slice. To convert the byte slice back + /// into a string slice, use the [`str::from_utf8`] function. + /// + /// [`str::from_utf8`]: ./str/fn.from_utf8.html /// /// # Examples /// @@ -289,7 +292,11 @@ impl str { core_str::StrExt::as_bytes(self) } - /// Converts a mutable string slice to a mutable byte slice. + /// Converts a mutable string slice to a mutable byte slice. To convert the + /// mutable byte slice back into a mutable string slice, use the + /// [`str::from_utf8_mut`] function. + /// + /// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html #[stable(feature = "str_mut_extras", since = "1.20.0")] #[inline(always)] pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { @@ -328,11 +335,16 @@ impl str { /// # Examples /// /// ``` - /// let v = "🗻∈🌏"; + /// let mut v = String::from("🗻∈🌏"); + /// /// assert_eq!(Some("🗻"), v.get(0..4)); - /// assert!(v.get(1..).is_none()); - /// assert!(v.get(..8).is_none()); - /// assert!(v.get(..42).is_none()); + /// + /// // indices not on UTF-8 sequence boundaries + /// assert!(v.get_mut(1..).is_none()); + /// assert!(v.get_mut(..8).is_none()); + /// + /// // out of bounds + /// assert!(v.get_mut(..42).is_none()); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] #[inline] @@ -351,9 +363,14 @@ impl str { /// /// ``` /// let mut v = String::from("🗻∈🌏"); + /// /// assert_eq!(Some("🗻"), v.get_mut(0..4).map(|v| &*v)); + /// + /// // indices not on UTF-8 sequence boundaries /// assert!(v.get_mut(1..).is_none()); /// 
assert!(v.get_mut(..8).is_none()); + /// + /// // out of bounds /// assert!(v.get_mut(..42).is_none()); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] @@ -563,12 +580,16 @@ impl str { /// Basic usage: /// /// ``` - /// let mut s = "Per Martin-Löf".to_string(); - /// - /// let (first, last) = s.split_at_mut(3); + /// use std::ascii::AsciiExt; /// - /// assert_eq!("Per", first); - /// assert_eq!(" Martin-Löf", last); + /// let mut s = "Per Martin-Löf".to_string(); + /// { + /// let (first, last) = s.split_at_mut(3); + /// first.make_ascii_uppercase(); + /// assert_eq!("PER", first); + /// assert_eq!(" Martin-Löf", last); + /// } + /// assert_eq!("PER Martin-Löf", s); /// ``` #[inline] #[stable(feature = "str_split_at", since = "1.4.0")] diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 35744f3f16b..21b5557db99 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -188,6 +188,34 @@ use ptr; /// A mutable memory location. /// +/// # Examples +/// +/// Here you can see how using `Cell<T>` allows to use mutable field inside +/// immutable struct (which is also called 'interior mutability'). +/// +/// ``` +/// use std::cell::Cell; +/// +/// struct SomeStruct { +/// regular_field: u8, +/// special_field: Cell<u8>, +/// } +/// +/// let my_struct = SomeStruct { +/// regular_field: 0, +/// special_field: Cell::new(1), +/// }; +/// +/// let new_value = 100; +/// +/// // ERROR, because my_struct is immutable +/// // my_struct.regular_field = new_value; +/// +/// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell +/// my_struct.special_field.set(new_value); +/// assert_eq!(my_struct.special_field.get(), new_value); +/// ``` +/// /// See the [module-level documentation](index.html) for more. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct Cell<T> { diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index 1685dba3c5a..2472efa14b3 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -1247,7 +1247,7 @@ pub trait Iterator { /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// - /// Because `collect()` cares about what you're collecting into, you can + /// Because `collect()` only cares about what you're collecting into, you can /// still use a partial type hint, `_`, with the turbofish: /// /// ``` diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index d35aa026685..ccfeb91aff1 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -147,22 +147,13 @@ pub trait FromIterator<A>: Sized { /// /// ``` /// let v = vec![1, 2, 3]; -/// /// let mut iter = v.into_iter(); /// -/// let n = iter.next(); -/// assert_eq!(Some(1), n); -/// -/// let n = iter.next(); -/// assert_eq!(Some(2), n); -/// -/// let n = iter.next(); -/// assert_eq!(Some(3), n); -/// -/// let n = iter.next(); -/// assert_eq!(None, n); +/// assert_eq!(Some(1), iter.next()); +/// assert_eq!(Some(2), iter.next()); +/// assert_eq!(Some(3), iter.next()); +/// assert_eq!(None, iter.next()); /// ``` -/// /// Implementing `IntoIterator` for your type: /// /// ``` @@ -227,20 +218,12 @@ pub trait IntoIterator { /// /// ``` /// let v = vec![1, 2, 3]; - /// /// let mut iter = v.into_iter(); /// - /// let n = iter.next(); - /// assert_eq!(Some(1), n); - /// - /// let n = iter.next(); - /// assert_eq!(Some(2), n); - /// - /// let n = iter.next(); - /// assert_eq!(Some(3), n); - /// - /// let n = iter.next(); - /// assert_eq!(None, n); + /// assert_eq!(Some(1), iter.next()); + /// assert_eq!(Some(2), iter.next()); + /// assert_eq!(Some(3), iter.next()); + /// assert_eq!(None, iter.next()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn into_iter(self) -> Self::IntoIter; diff --git a/src/libcore/num/mod.rs 
b/src/libcore/num/mod.rs index d533310625e..c5175287ccf 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -131,6 +131,10 @@ macro_rules! int_impl { /// /// Leading and trailing whitespace represent an error. /// + /// # Panics + /// + /// This function panics if `radix` is not in the range from 2 to 36. + /// /// # Examples /// /// Basic usage: diff --git a/src/liblibc b/src/liblibc -Subproject ec1e5ab1ef8baca57f8776bbebd9343572a8708 +Subproject 2a5b50b7f7f539a0fd201331d6c1e0534aa332f diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index 12ed4fba402..1bffffd6c9e 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -509,14 +509,49 @@ impl TokenTree { Ident(ident) | Lifetime(ident) => TokenNode::Term(Term(ident.name)), Literal(..) | DocComment(..) => TokenNode::Literal(self::Literal(token)), - Interpolated(ref nt) => __internal::with_sess(|(sess, _)| { - TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| { - // FIXME(jseyfried): Avoid this pretty-print + reparse hack - let name = "<macro expansion>".to_owned(); - let source = pprust::token_to_string(&token); - parse_stream_from_source_str(name, source, sess, Some(span)) - }))) - }), + Interpolated(ref nt) => { + // An `Interpolated` token means that we have a `Nonterminal` + // which is often a parsed AST item. At this point we now need + // to convert the parsed AST to an actual token stream, e.g. + // un-parse it basically. + // + // Unfortunately there's not really a great way to do that in a + // guaranteed lossless fashion right now. The fallback here is + // to just stringify the AST node and reparse it, but this loses + // all span information. + // + // As a result, some AST nodes are annotated with the token + // stream they came from. Attempt to extract these lossless + // token streams before we fall back to the stringification. 
+ let mut tokens = None; + + match nt.0 { + Nonterminal::NtItem(ref item) => { + tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); + } + Nonterminal::NtTraitItem(ref item) => { + tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); + } + Nonterminal::NtImplItem(ref item) => { + tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); + } + _ => {} + } + + tokens.map(|tokens| { + TokenNode::Group(Delimiter::None, + TokenStream(tokens.clone())) + }).unwrap_or_else(|| { + __internal::with_sess(|(sess, _)| { + TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| { + // FIXME(jseyfried): Avoid this pretty-print + reparse hack + let name = "<macro expansion>".to_owned(); + let source = pprust::token_to_string(&token); + parse_stream_from_source_str(name, source, sess, Some(span)) + }))) + }) + }) + } OpenDelim(..) | CloseDelim(..) => unreachable!(), Whitespace | Comment | Shebang(..) | Eof => unreachable!(), @@ -580,6 +615,34 @@ impl TokenTree { } } +fn prepend_attrs(attrs: &[ast::Attribute], + tokens: Option<&tokenstream::TokenStream>, + span: syntax_pos::Span) + -> Option<tokenstream::TokenStream> +{ + let tokens = match tokens { + Some(tokens) => tokens, + None => return None, + }; + if attrs.len() == 0 { + return Some(tokens.clone()) + } + let mut builder = tokenstream::TokenStreamBuilder::new(); + for attr in attrs { + assert_eq!(attr.style, ast::AttrStyle::Outer, + "inner attributes should prevent cached tokens from existing"); + let stream = __internal::with_sess(|(sess, _)| { + // FIXME: Avoid this pretty-print + reparse hack as bove + let name = "<macro expansion>".to_owned(); + let source = pprust::attr_to_string(attr); + parse_stream_from_source_str(name, source, sess, Some(span)) + }); + builder.push(stream); + } + builder.push(tokens.clone()); + Some(builder.build()) +} + /// Permanently unstable internal implementation details of this crate. This /// should not be used. 
/// diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 8e2c44a427b..c6f9cb2fcea 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -66,7 +66,6 @@ use hir::map::DefPathHash; use ich::Fingerprint; use ty::{TyCtxt, Instance, InstanceDef}; use ty::fast_reject::SimplifiedType; -use ty::subst::Substs; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; use ich::StableHashingContext; use std::fmt; @@ -104,6 +103,8 @@ macro_rules! define_dep_nodes { match *self { $( DepKind :: $variant => { + $(return !anon_attr_to_bool!($anon);)* + // tuple args $({ return <( $($tuple_arg,)* ) as DepNodeParams> @@ -112,6 +113,7 @@ macro_rules! define_dep_nodes { // struct args $({ + return <( $($struct_arg_ty,)* ) as DepNodeParams> ::CAN_RECONSTRUCT_QUERY_KEY; })* @@ -394,6 +396,7 @@ define_dep_nodes!( <'tcx> // Represents different phases in the compiler. [] RegionMaps(DefId), [] Coherence, + [] CoherenceInherentImplOverlapCheck, [] Resolve, [] CoherenceCheckTrait(DefId), [] PrivacyAccessLevels(CrateNum), @@ -444,17 +447,17 @@ define_dep_nodes!( <'tcx> [] TypeckBodiesKrate, [] TypeckTables(DefId), [] HasTypeckTables(DefId), - [] ConstEval { def_id: DefId, substs: &'tcx Substs<'tcx> }, + [anon] ConstEval, [] SymbolName(DefId), [] InstanceSymbolName { instance: Instance<'tcx> }, [] SpecializationGraph(DefId), [] ObjectSafety(DefId), - [anon] IsCopy(DefId), - [anon] IsSized(DefId), - [anon] IsFreeze(DefId), - [anon] NeedsDrop(DefId), - [anon] Layout(DefId), + [anon] IsCopy, + [anon] IsSized, + [anon] IsFreeze, + [anon] NeedsDrop, + [anon] Layout, // The set of impls for a given trait. 
[] TraitImpls(DefId), diff --git a/src/librustc/dep_graph/edges.rs b/src/librustc/dep_graph/edges.rs index 277b69262c9..9aa634770df 100644 --- a/src/librustc/dep_graph/edges.rs +++ b/src/librustc/dep_graph/edges.rs @@ -23,6 +23,11 @@ pub struct DepGraphEdges { edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>, task_stack: Vec<OpenTask>, forbidden_edge: Option<EdgeFilter>, + + // A set to help assert that no two tasks use the same DepNode. This is a + // temporary measure. Once we load the previous dep-graph as readonly, this + // check will fall out of the graph implementation naturally. + opened_once: FxHashSet<DepNode>, } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] @@ -80,6 +85,7 @@ impl DepGraphEdges { edges: FxHashSet(), task_stack: Vec::new(), forbidden_edge, + opened_once: FxHashSet(), } } @@ -97,6 +103,10 @@ impl DepGraphEdges { } pub fn push_task(&mut self, key: DepNode) { + if !self.opened_once.insert(key) { + bug!("Re-opened node {:?}", key) + } + self.task_stack.push(OpenTask::Regular { node: key, reads: Vec::new(), diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index cd2bb32bbf8..c9b5aaf3877 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -2222,7 +2222,7 @@ impl<'a> LoweringContext<'a> { let next_ident = self.str_to_ident("__next"); let next_pat = self.pat_ident_binding_mode(e.span, next_ident, - hir::BindByValue(hir::MutMutable)); + hir::BindingAnnotation::Mutable); // `::std::option::Option::Some(val) => next = val` let pat_arm = { @@ -2246,8 +2246,9 @@ impl<'a> LoweringContext<'a> { }; // `mut iter` - let iter_pat = self.pat_ident_binding_mode(e.span, iter, - hir::BindByValue(hir::MutMutable)); + let iter_pat = self.pat_ident_binding_mode(e.span, + iter, + hir::BindingAnnotation::Mutable); // `match ::std::iter::Iterator::next(&mut iter) { ... 
}` let match_expr = { @@ -2534,10 +2535,13 @@ impl<'a> LoweringContext<'a> { } } - fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingMode { + fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation { match *b { - BindingMode::ByRef(m) => hir::BindByRef(self.lower_mutability(m)), - BindingMode::ByValue(m) => hir::BindByValue(self.lower_mutability(m)), + BindingMode::ByValue(Mutability::Immutable) => + hir::BindingAnnotation::Unannotated, + BindingMode::ByRef(Mutability::Immutable) => hir::BindingAnnotation::Ref, + BindingMode::ByValue(Mutability::Mutable) => hir::BindingAnnotation::Mutable, + BindingMode::ByRef(Mutability::Mutable) => hir::BindingAnnotation::RefMut, } } @@ -2678,7 +2682,7 @@ impl<'a> LoweringContext<'a> { fn stmt_let(&mut self, sp: Span, mutbl: bool, ident: Name, ex: P<hir::Expr>) -> (hir::Stmt, NodeId) { let pat = if mutbl { - self.pat_ident_binding_mode(sp, ident, hir::BindByValue(hir::MutMutable)) + self.pat_ident_binding_mode(sp, ident, hir::BindingAnnotation::Mutable) } else { self.pat_ident(sp, ident) }; @@ -2734,10 +2738,10 @@ impl<'a> LoweringContext<'a> { } fn pat_ident(&mut self, span: Span, name: Name) -> P<hir::Pat> { - self.pat_ident_binding_mode(span, name, hir::BindByValue(hir::MutImmutable)) + self.pat_ident_binding_mode(span, name, hir::BindingAnnotation::Unannotated) } - fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingMode) + fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingAnnotation) -> P<hir::Pat> { let id = self.next_id(); let parent_def = self.parent_def.unwrap(); diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs index a7cb1f3232b..d2888dcf6aa 100644 --- a/src/librustc/hir/map/blocks.rs +++ b/src/librustc/hir/map/blocks.rs @@ -192,6 +192,18 @@ impl<'a> FnLikeNode<'a> { } } + pub fn unsafety(self) -> ast::Unsafety { + match self.kind() { + FnKind::ItemFn(_, _, unsafety, ..) 
=> { + unsafety + } + FnKind::Method(_, m, ..) => { + m.unsafety + } + _ => ast::Unsafety::Normal + } + } + pub fn kind(self) -> FnKind<'a> { let item = |p: ItemFnParts<'a>| -> FnKind<'a> { FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis, p.attrs) diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index 91bce64243e..cdd5a6e3da7 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -18,7 +18,7 @@ use hir; use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace, CRATE_DEF_INDEX}; use ich::Fingerprint; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::stable_hasher::StableHasher; use serialize::{Encodable, Decodable, Encoder, Decoder}; @@ -153,7 +153,7 @@ pub struct Definitions { pub(super) node_to_hir_id: IndexVec<ast::NodeId, hir::HirId>, macro_def_scopes: FxHashMap<Mark, DefId>, expansions: FxHashMap<DefIndex, Mark>, - keys_created: FxHashSet<DefKey>, + next_disambiguator: FxHashMap<(DefIndex, DefPathData), u32>, } // Unfortunately we have to provide a manual impl of Clone because of the @@ -170,7 +170,7 @@ impl Clone for Definitions { node_to_hir_id: self.node_to_hir_id.clone(), macro_def_scopes: self.macro_def_scopes.clone(), expansions: self.expansions.clone(), - keys_created: self.keys_created.clone(), + next_disambiguator: self.next_disambiguator.clone(), } } } @@ -402,7 +402,7 @@ impl Definitions { node_to_hir_id: IndexVec::new(), macro_def_scopes: FxHashMap(), expansions: FxHashMap(), - keys_created: FxHashSet(), + next_disambiguator: FxHashMap(), } } @@ -516,21 +516,21 @@ impl Definitions { // The root node must be created with create_root_def() assert!(data != DefPathData::CrateRoot); - // Find a unique DefKey. This basically means incrementing the disambiguator - // until we get no match. 
- let mut key = DefKey { + // Find the next free disambiguator for this key. + let disambiguator = { + let next_disamb = self.next_disambiguator.entry((parent, data.clone())).or_insert(0); + let disambiguator = *next_disamb; + *next_disamb = next_disamb.checked_add(1).expect("disambiguator overflow"); + disambiguator + }; + + let key = DefKey { parent: Some(parent), disambiguated_data: DisambiguatedDefPathData { - data, - disambiguator: 0 + data, disambiguator } }; - while self.keys_created.contains(&key) { - key.disambiguated_data.disambiguator += 1; - } - self.keys_created.insert(key.clone()); - let parent_hash = self.table.def_path_hash(parent); let def_path_hash = key.compute_stable_hash(parent_hash); diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index 6bbff608be4..f4ca536d370 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -555,7 +555,9 @@ impl<'hir> Map<'hir> { } /// Similar to get_parent, returns the parent node id or id if there is no - /// parent. + /// parent. Note that the parent may be CRATE_NODE_ID, which is not itself + /// present in the map -- so passing the return value of get_parent_node to + /// get may actually panic. /// This function returns the immediate parent in the AST, whereas get_parent /// returns the enclosing item. Note that this might not be the actual parent /// node in the AST - some kinds of nodes are not in the map and these will @@ -631,7 +633,7 @@ impl<'hir> Map<'hir> { } /// Retrieve the NodeId for `id`'s enclosing method, unless there's a - /// `while` or `loop` before reacing it, as block tail returns are not + /// `while` or `loop` before reaching it, as block tail returns are not /// available in them. /// /// ``` diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index d1cc6b5d3e4..10d34e49da4 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -10,7 +10,6 @@ // The Rust HIR. 
-pub use self::BindingMode::*; pub use self::BinOp_::*; pub use self::BlockCheckMode::*; pub use self::CaptureClause::*; @@ -49,7 +48,7 @@ use rustc_data_structures::indexed_vec; use std::collections::BTreeMap; use std::fmt; -/// HIR doesn't commit to a concrete storage type and have its own alias for a vector. +/// HIR doesn't commit to a concrete storage type and has its own alias for a vector. /// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar /// behavior. Unlike AST, HIR is mostly a static structure, so we can use an owned slice instead /// of `Vec` to avoid keeping extra capacity. @@ -76,14 +75,14 @@ pub mod pat_util; pub mod print; pub mod svh; -/// A HirId uniquely identifies a node in the HIR of then current crate. It is +/// A HirId uniquely identifies a node in the HIR of the current crate. It is /// composed of the `owner`, which is the DefIndex of the directly enclosing /// hir::Item, hir::TraitItem, or hir::ImplItem (i.e. the closest "item-like"), /// and the `local_id` which is unique within the given owner. /// /// This two-level structure makes for more stable values: One can move an item /// around within the source code, or add or remove stuff before it, without -/// the local_id part of the HirId changing, which is a very useful property +/// the local_id part of the HirId changing, which is a very useful property in /// incremental compilation where we have to persist things through changes to /// the code base. #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug, @@ -628,10 +627,28 @@ pub struct FieldPat { pub is_shorthand: bool, } +/// Explicit binding annotations given in the HIR for a binding. Note +/// that this is not the final binding *mode* that we infer after type +/// inference. 
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum BindingMode { - BindByRef(Mutability), - BindByValue(Mutability), +pub enum BindingAnnotation { + /// No binding annotation given: this means that the final binding mode + /// will depend on whether we have skipped through a `&` reference + /// when matching. For example, the `x` in `Some(x)` will have binding + /// mode `None`; if you do `let Some(x) = &Some(22)`, it will + /// ultimately be inferred to be by-reference. + /// + /// Note that implicit reference skipping is not implemented yet (#42640). + Unannotated, + + /// Annotated with `mut x` -- could be either ref or not, similar to `None`. + Mutable, + + /// Annotated as `ref`, like `ref x` + Ref, + + /// Annotated as `ref mut x`. + RefMut, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -647,7 +664,7 @@ pub enum PatKind { /// A fresh binding `ref mut binding @ OPT_SUBPATTERN`. /// The `DefId` is for the definition of the variable being bound. - Binding(BindingMode, DefId, Spanned<Name>, Option<P<Pat>>), + Binding(BindingAnnotation, DefId, Spanned<Name>, Option<P<Pat>>), /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`. /// The `bool` is `true` in the presence of a `..`. @@ -684,6 +701,16 @@ pub enum Mutability { MutImmutable, } +impl Mutability { + /// Return MutMutable only if both arguments are mutable. 
+ pub fn and(self, other: Self) -> Self { + match self { + MutMutable => other, + MutImmutable => MutImmutable, + } + } +} + #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] pub enum BinOp_ { /// The `+` operator (addition) @@ -892,6 +919,13 @@ impl Decl_ { DeclItem(_) => &[] } } + + pub fn is_local(&self) -> bool { + match *self { + Decl_::DeclLocal(_) => true, + _ => false, + } + } } /// represents one arm of a 'match' @@ -1686,7 +1720,7 @@ pub struct Item { #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub enum Item_ { - /// An`extern crate` item, with optional original crate name, + /// An `extern crate` item, with optional original crate name, /// /// e.g. `extern crate foo` or `extern crate foo_bar as foo` ItemExternCrate(Option<Name>), diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs index 0190e74df69..144cb34ee35 100644 --- a/src/librustc/hir/pat_util.rs +++ b/src/librustc/hir/pat_util.rs @@ -87,7 +87,7 @@ impl hir::Pat { /// Call `f` on every "binding" in a pattern, e.g., on `a` in /// `match foo() { Some(a) => (), None => () }` pub fn each_binding<F>(&self, mut f: F) - where F: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned<ast::Name>), + where F: FnMut(hir::BindingAnnotation, ast::NodeId, Span, &Spanned<ast::Name>), { self.walk(|p| { if let PatKind::Binding(binding_mode, _, ref pth, _) = p.node { @@ -130,12 +130,10 @@ impl hir::Pat { pub fn simple_name(&self) -> Option<ast::Name> { match self.node { - PatKind::Binding(hir::BindByValue(..), _, ref path1, None) => { - Some(path1.node) - } - _ => { - None - } + PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ref path1, None) | + PatKind::Binding(hir::BindingAnnotation::Mutable, _, ref path1, None) => + Some(path1.node), + _ => None, } } @@ -163,16 +161,22 @@ impl hir::Pat { } /// Checks if the pattern contains any `ref` or `ref mut` bindings, - /// and if yes whether its containing mutable ones or just 
immutables ones. - pub fn contains_ref_binding(&self) -> Option<hir::Mutability> { + /// and if yes whether it contains mutable or just immutables ones. + /// + /// FIXME(tschottdorf): this is problematic as the HIR is being scraped, + /// but ref bindings may be implicit after #42640. + pub fn contains_explicit_ref_binding(&self) -> Option<hir::Mutability> { let mut result = None; - self.each_binding(|mode, _, _, _| { - if let hir::BindingMode::BindByRef(m) = mode { - // Pick Mutable as maximum - match result { - None | Some(hir::MutImmutable) => result = Some(m), - _ => (), + self.each_binding(|annotation, _, _, _| { + match annotation { + hir::BindingAnnotation::Ref => { + match result { + None | Some(hir::MutImmutable) => result = Some(hir::MutImmutable), + _ => (), + } } + hir::BindingAnnotation::RefMut => result = Some(hir::MutMutable), + _ => (), } }); result @@ -182,9 +186,11 @@ impl hir::Pat { impl hir::Arm { /// Checks if the patterns for this arm contain any `ref` or `ref mut` /// bindings, and if yes whether its containing mutable ones or just immutables ones. - pub fn contains_ref_binding(&self) -> Option<hir::Mutability> { + pub fn contains_explicit_ref_binding(&self) -> Option<hir::Mutability> { + // FIXME(tschottdorf): contains_explicit_ref_binding() must be removed + // for #42640. 
self.pats.iter() - .filter_map(|pat| pat.contains_ref_binding()) + .filter_map(|pat| pat.contains_explicit_ref_binding()) .max_by_key(|m| match *m { hir::MutMutable => 1, hir::MutImmutable => 0, diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs index f4d7b101e9c..d819fc2f779 100644 --- a/src/librustc/hir/print.rs +++ b/src/librustc/hir/print.rs @@ -1655,12 +1655,16 @@ impl<'a> State<'a> { PatKind::Wild => self.s.word("_")?, PatKind::Binding(binding_mode, _, ref path1, ref sub) => { match binding_mode { - hir::BindByRef(mutbl) => { + hir::BindingAnnotation::Ref => { self.word_nbsp("ref")?; - self.print_mutability(mutbl)?; + self.print_mutability(hir::MutImmutable)?; } - hir::BindByValue(hir::MutImmutable) => {} - hir::BindByValue(hir::MutMutable) => { + hir::BindingAnnotation::RefMut => { + self.word_nbsp("ref")?; + self.print_mutability(hir::MutMutable)?; + } + hir::BindingAnnotation::Unannotated => {} + hir::BindingAnnotation::Mutable => { self.word_nbsp("mut")?; } } diff --git a/src/librustc/ich/impls_const_math.rs b/src/librustc/ich/impls_const_math.rs index 6d11f2a87a4..6790c2ac7de 100644 --- a/src/librustc/ich/impls_const_math.rs +++ b/src/librustc/ich/impls_const_math.rs @@ -11,9 +11,9 @@ //! This module contains `HashStable` implementations for various data types //! from `rustc_const_math` in no particular order. 
-impl_stable_hash_for!(enum ::rustc_const_math::ConstFloat { - F32(val), - F64(val) +impl_stable_hash_for!(struct ::rustc_const_math::ConstFloat { + ty, + bits }); impl_stable_hash_for!(enum ::rustc_const_math::ConstInt { diff --git a/src/librustc/ich/impls_hir.rs b/src/librustc/ich/impls_hir.rs index 140c9c6ae9a..ea3f04ff8c2 100644 --- a/src/librustc/ich/impls_hir.rs +++ b/src/librustc/ich/impls_hir.rs @@ -442,9 +442,11 @@ impl_stable_hash_for!(struct hir::FieldPat { is_shorthand }); -impl_stable_hash_for!(enum hir::BindingMode { - BindByRef(mutability), - BindByValue(mutability) +impl_stable_hash_for!(enum hir::BindingAnnotation { + Unannotated, + Mutable, + Ref, + RefMut }); impl_stable_hash_for!(enum hir::RangeEnd { diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index 9b9a6a0273e..e277f40a26d 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -239,8 +239,12 @@ for mir::StatementKind<'tcx> { mir::StatementKind::StorageDead(ref lvalue) => { lvalue.hash_stable(hcx, hasher); } - mir::StatementKind::EndRegion(ref extents) => { - extents.hash_stable(hcx, hasher); + mir::StatementKind::EndRegion(ref extent) => { + extent.hash_stable(hcx, hasher); + } + mir::StatementKind::Validate(ref op, ref lvalues) => { + op.hash_stable(hcx, hasher); + lvalues.hash_stable(hcx, hasher); } mir::StatementKind::Nop => {} mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { @@ -252,6 +256,23 @@ for mir::StatementKind<'tcx> { } } +impl<'a, 'gcx, 'tcx, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> + for mir::ValidationOperand<'tcx, T> + where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> +{ + fn hash_stable<W: StableHasherResult>(&self, + hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hasher: &mut StableHasher<W>) + { + self.lval.hash_stable(hcx, hasher); + self.ty.hash_stable(hcx, hasher); + self.re.hash_stable(hcx, hasher); + self.mutbl.hash_stable(hcx, hasher); + } +} + 
+impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(extent) }); + impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Lvalue<'tcx> { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, @@ -271,10 +292,11 @@ impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::L } } -impl<'a, 'gcx, 'tcx, B, V> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> -for mir::Projection<'tcx, B, V> +impl<'a, 'gcx, 'tcx, B, V, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> +for mir::Projection<'tcx, B, V, T> where B: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>, - V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> + V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>, + T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, @@ -289,9 +311,10 @@ for mir::Projection<'tcx, B, V> } } -impl<'a, 'gcx, 'tcx, V> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> -for mir::ProjectionElem<'tcx, V> - where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> +impl<'a, 'gcx, 'tcx, V, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> +for mir::ProjectionElem<'tcx, V, T> + where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>, + T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, @@ -299,7 +322,7 @@ for mir::ProjectionElem<'tcx, V> mem::discriminant(self).hash_stable(hcx, hasher); match *self { mir::ProjectionElem::Deref => {} - mir::ProjectionElem::Field(field, ty) => { + mir::ProjectionElem::Field(field, ref ty) => { field.hash_stable(hcx, hasher); ty.hash_stable(hcx, hasher); } diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index 7042f3993e1..9286d3c73ed 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -375,7 +375,6 @@ for 
ty::RegionParameterDef { name, def_id, index, - issue_32330: _, pure_wrt_drop } = *self; @@ -630,6 +629,7 @@ for ty::TypeckTables<'tcx> { ref node_types, ref node_substs, ref adjustments, + ref pat_binding_modes, ref upvar_capture_map, ref closure_tys, ref closure_kinds, @@ -652,6 +652,7 @@ for ty::TypeckTables<'tcx> { ich::hash_stable_nodemap(hcx, hasher, node_types); ich::hash_stable_nodemap(hcx, hasher, node_substs); ich::hash_stable_nodemap(hcx, hasher, adjustments); + ich::hash_stable_nodemap(hcx, hasher, pat_binding_modes); ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| { let ty::UpvarId { var_id, diff --git a/src/librustc/infer/error_reporting/anon_anon_conflict.rs b/src/librustc/infer/error_reporting/anon_anon_conflict.rs index 1017f2bd0e6..2e910968818 100644 --- a/src/librustc/infer/error_reporting/anon_anon_conflict.rs +++ b/src/librustc/infer/error_reporting/anon_anon_conflict.rs @@ -77,10 +77,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { struct_span_err!(self.tcx.sess, span, E0623, "lifetime mismatch") .span_label(ty1.span, - format!("these references must have the same lifetime")) + format!("these references are not declared with the same lifetime...")) .span_label(ty2.span, format!("")) .span_label(span, - format!("data{}flows{}here", span_label_var1, span_label_var2)) + format!("...but data{}flows{}here", span_label_var1, span_label_var2)) .emit(); } else { return false; diff --git a/src/librustc/infer/error_reporting/mod.rs b/src/librustc/infer/error_reporting/mod.rs index 77ec866dc80..8e8576b83e4 100644 --- a/src/librustc/infer/error_reporting/mod.rs +++ b/src/librustc/infer/error_reporting/mod.rs @@ -66,8 +66,7 @@ use hir::map as hir_map; use hir::def_id::DefId; use middle::region; use traits::{ObligationCause, ObligationCauseCode}; -use ty::{self, TyCtxt, TypeFoldable}; -use ty::{Region, Issue32330}; +use ty::{self, Region, TyCtxt, TypeFoldable}; use ty::error::TypeError; use syntax::ast::DUMMY_NODE_ID; use 
syntax_pos::{Pos, Span}; @@ -713,35 +712,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.note_and_explain_type_err(diag, terr, span); } - pub fn note_issue_32330(&self, - diag: &mut DiagnosticBuilder<'tcx>, - terr: &TypeError<'tcx>) - { - debug!("note_issue_32330: terr={:?}", terr); - match *terr { - TypeError::RegionsInsufficientlyPolymorphic(_, _, Some(box Issue32330 { - fn_def_id, region_name - })) | - TypeError::RegionsOverlyPolymorphic(_, _, Some(box Issue32330 { - fn_def_id, region_name - })) => { - diag.note( - &format!("lifetime parameter `{0}` declared on fn `{1}` \ - appears only in the return type, \ - but here is required to be higher-ranked, \ - which means that `{0}` must appear in both \ - argument and return types", - region_name, - self.tcx.item_path_str(fn_def_id))); - diag.note( - &format!("this error is the result of a recent bug fix; \ - for more information, see issue #33685 \ - <https://github.com/rust-lang/rust/issues/33685>")); - } - _ => {} - } - } - pub fn report_and_explain_type_error(&self, trace: TypeTrace<'tcx>, terr: &TypeError<'tcx>) @@ -761,7 +731,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } }; self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr); - self.note_issue_32330(&mut diag, terr); diag } @@ -934,7 +903,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { format!(" for lifetime parameter {}in trait containing associated type `{}`", br_string(br), type_name) } - infer::EarlyBoundRegion(_, name, _) => { + infer::EarlyBoundRegion(_, name) => { format!(" for lifetime parameter `{}`", name) } diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs index 541a9978341..9ecc8b0e66b 100644 --- a/src/librustc/infer/higher_ranked/mod.rs +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -13,9 +13,7 @@ use super::{CombinedSnapshot, InferCtxt, - LateBoundRegion, HigherRankedType, - RegionVariableOrigin, SubregionOrigin, SkolemizationMap}; use 
super::combine::CombineFields; @@ -29,15 +27,6 @@ use util::nodemap::{FxHashMap, FxHashSet}; pub struct HrMatchResult<U> { pub value: U, - - /// Normally, when we do a higher-ranked match operation, we - /// expect all higher-ranked regions to be constrained as part of - /// the match operation. However, in the transition period for - /// #32330, it can happen that we sometimes have unconstrained - /// regions that get instantiated with fresh variables. In that - /// case, we collect the set of unconstrained bound regions here - /// and replace them with fresh variables. - pub unconstrained_regions: Vec<ty::BoundRegion>, } impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { @@ -108,7 +97,6 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { /// that do not appear in `T`. If that happens, those regions are /// unconstrained, and this routine replaces them with `'static`. pub fn higher_ranked_match<T, U>(&mut self, - span: Span, a_pair: &Binder<(T, U)>, b_match: &T, a_is_expected: bool) @@ -158,28 +146,16 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // be any region from the sets above, except for other members of // `skol_map`. There should always be a representative if things // are properly well-formed. - let mut unconstrained_regions = vec![]; let skol_representatives: FxHashMap<_, _> = skol_resolution_map .iter() - .map(|(&skol, &(br, ref regions))| { + .map(|(&skol, &(_, ref regions))| { let representative = regions.iter() .filter(|&&r| !skol_resolution_map.contains_key(r)) .cloned() .next() - .unwrap_or_else(|| { // [1] - unconstrained_regions.push(br); - self.infcx.next_region_var( - LateBoundRegion(span, br, HigherRankedType)) - }); - - // [1] There should always be a representative, - // unless the higher-ranked region did not appear - // in the values being matched. We should reject - // as ill-formed cases that can lead to this, but - // right now we sometimes issue warnings (see - // #32330). 
+ .expect("no representative region"); (skol, representative) }) @@ -216,10 +192,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // We are now done with these skolemized variables. self.infcx.pop_skolemized(skol_map, snapshot); - Ok(HrMatchResult { - value: a_value, - unconstrained_regions, - }) + Ok(HrMatchResult { value: a_value }) }); } @@ -657,28 +630,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { skol_br, tainted_region); - let issue_32330 = if let &ty::ReVar(vid) = tainted_region { - match self.region_vars.var_origin(vid) { - RegionVariableOrigin::EarlyBoundRegion(_, _, issue_32330) => { - issue_32330.map(Box::new) - } - _ => None - } - } else { - None - }; - - if overly_polymorphic { + return Err(if overly_polymorphic { debug!("Overly polymorphic!"); - return Err(TypeError::RegionsOverlyPolymorphic(skol_br, - tainted_region, - issue_32330)); + TypeError::RegionsOverlyPolymorphic(skol_br, tainted_region) } else { debug!("Not as polymorphic!"); - return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br, - tainted_region, - issue_32330)); - } + TypeError::RegionsInsufficientlyPolymorphic(skol_br, tainted_region) + }) } } diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 2d7ce4a82da..6b60a248267 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -299,7 +299,7 @@ pub enum RegionVariableOrigin { Coercion(Span), // Region variables created as the values for early-bound regions - EarlyBoundRegion(Span, ast::Name, Option<ty::Issue32330>), + EarlyBoundRegion(Span, ast::Name), // Region variables created for bound regions // in a function or method that is called @@ -989,7 +989,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span: Span, def: &ty::RegionParameterDef) -> ty::Region<'tcx> { - self.next_region_var(EarlyBoundRegion(span, def.name, def.issue_32330)) + self.next_region_var(EarlyBoundRegion(span, def.name)) } /// Create a type inference variable for the given @@ -1278,14 +1278,13 @@ 
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { -> InferResult<'tcx, HrMatchResult<Ty<'tcx>>> { let match_pair = match_a.map_bound(|p| (p.projection_ty.trait_ref(self.tcx), p.ty)); - let span = cause.span; let trace = TypeTrace { cause, values: TraitRefs(ExpectedFound::new(true, match_pair.skip_binder().0, match_b)) }; let mut combine = self.combine_fields(trace, param_env); - let result = combine.higher_ranked_match(span, &match_pair, &match_b, true)?; + let result = combine.higher_ranked_match(&match_pair, &match_b, true)?; Ok(InferOk { value: result, obligations: combine.obligations }) } diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 48bddf2f717..288390cb5e7 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -18,9 +18,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// the rustc crate store interface. This also includes types that -// are *mostly* used as a part of that interface, but these should -// probably get a better home if someone can find one. +//! the rustc crate store interface. This also includes types that +//! are *mostly* used as a part of that interface, but these should +//! probably get a better home if someone can find one. use hir::def; use hir::def_id::{CrateNum, DefId, DefIndex}; @@ -50,13 +50,13 @@ pub use self::NativeLibraryKind::*; // lonely orphan structs and enums looking for a better home -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Copy)] pub struct LinkMeta { pub crate_hash: Svh, } -// Where a crate came from on the local filesystem. One of these three options -// must be non-None. +/// Where a crate came from on the local filesystem. One of these three options +/// must be non-None. 
#[derive(PartialEq, Clone, Debug)] pub struct CrateSource { pub dylib: Option<(PathBuf, PathKind)>, @@ -120,10 +120,14 @@ pub enum LinkagePreference { #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub enum NativeLibraryKind { - NativeStatic, // native static library (.a archive) - NativeStaticNobundle, // native static library, which doesn't get bundled into .rlibs - NativeFramework, // macOS-specific - NativeUnknown, // default way to specify a dynamic library + /// native static library (.a archive) + NativeStatic, + /// native static library, which doesn't get bundled into .rlibs + NativeStaticNobundle, + /// macOS-specific + NativeFramework, + /// default way to specify a dynamic library + NativeUnknown, } #[derive(Clone, Hash, RustcEncodable, RustcDecodable)] @@ -161,15 +165,13 @@ pub struct ExternCrate { } pub struct EncodedMetadata { - pub raw_data: Vec<u8>, - pub hashes: EncodedMetadataHashes, + pub raw_data: Vec<u8> } impl EncodedMetadata { pub fn new() -> EncodedMetadata { EncodedMetadata { raw_data: Vec::new(), - hashes: EncodedMetadataHashes::new(), } } } @@ -294,7 +296,7 @@ pub trait CrateStore { tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, reachable: &NodeSet) - -> EncodedMetadata; + -> (EncodedMetadata, EncodedMetadataHashes); fn metadata_encoding_version(&self) -> &[u8]; } @@ -424,7 +426,7 @@ impl CrateStore for DummyCrateStore { tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, reachable: &NodeSet) - -> EncodedMetadata { + -> (EncodedMetadata, EncodedMetadataHashes) { bug!("encode_metadata") } fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") } diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs index f6be7090091..d394c0f0c87 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc/middle/dataflow.rs @@ -22,6 +22,9 @@ use std::mem; use std::usize; use syntax::ast; use syntax::print::pprust::PrintState; + +use 
rustc_data_structures::graph::OUTGOING; + use util::nodemap::NodeMap; use hir; use hir::intravisit::{self, IdRange}; @@ -523,12 +526,16 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> { changed: true }; + let nodes_po = cfg.graph.nodes_in_postorder(OUTGOING, cfg.entry); let mut temp = vec![0; words_per_id]; + let mut num_passes = 0; while propcx.changed { + num_passes += 1; propcx.changed = false; propcx.reset(&mut temp); - propcx.walk_cfg(cfg, &mut temp); + propcx.walk_cfg(cfg, &nodes_po, &mut temp); } + debug!("finished in {} iterations", num_passes); } debug!("Dataflow result for {}:", self.analysis_name); @@ -543,12 +550,15 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> { impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> { fn walk_cfg(&mut self, cfg: &cfg::CFG, + nodes_po: &[CFGIndex], in_out: &mut [usize]) { debug!("DataFlowContext::walk_cfg(in_out={}) {}", bits_to_string(in_out), self.dfcx.analysis_name); assert!(self.dfcx.bits_per_id > 0); - cfg.graph.each_node(|node_index, node| { + // Iterate over nodes in reverse postorder + for &node_index in nodes_po.iter().rev() { + let node = cfg.graph.node(node_index); debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}", node_index, node.data.id(), bits_to_string(in_out)); @@ -563,8 +573,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> { // Propagate state on-exit from node into its successors. self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index); - true // continue to next node - }); + } } fn reset(&mut self, bits: &mut [usize]) { diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 2238e464cbc..a525b4e13b7 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -13,7 +13,7 @@ // from live codes are live, and everything else is dead. 
use hir::map as hir_map; -use hir::{self, PatKind}; +use hir::{self, Item_, PatKind}; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use hir::itemlikevisit::ItemLikeVisitor; @@ -189,6 +189,22 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { self.struct_has_extern_repr = had_extern_repr; self.inherited_pub_visibility = had_inherited_pub_visibility; } + + fn mark_as_used_if_union(&mut self, did: DefId, fields: &hir::HirVec<hir::Field>) { + if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { + if let Some(hir_map::NodeItem(item)) = self.tcx.hir.find(node_id) { + if let Item_::ItemUnion(ref variant, _) = item.node { + if variant.fields().len() > 1 { + for field in variant.fields() { + if fields.iter().find(|x| x.name.node == field.name).is_some() { + self.live_symbols.insert(field.id); + } + } + } + } + } + } + } } impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { @@ -231,6 +247,13 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { hir::ExprTupField(ref lhs, idx) => { self.handle_tup_field_access(&lhs, idx.node); } + hir::ExprStruct(_, ref fields, _) => { + if let ty::TypeVariants::TyAdt(ref def, _) = self.tables.expr_ty(expr).sty { + if def.is_union() { + self.mark_as_used_if_union(def.did, fields); + } + } + } _ => () } @@ -561,7 +584,6 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { self.warn_dead_code(field.id, field.span, field.name, "field"); } - intravisit::walk_struct_field(self, field); } @@ -603,6 +625,9 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); let krate = tcx.hir.krate(); let live_symbols = find_live(tcx, access_levels, krate); - let mut visitor = DeadVisitor { tcx: tcx, live_symbols: live_symbols }; + let mut visitor = DeadVisitor { + tcx: tcx, + live_symbols: live_symbols, + }; intravisit::walk_crate(&mut visitor, krate); } diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs 
index 899068a2b3b..1ea5b29b15b 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -800,16 +800,19 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { debug!("determine_pat_move_mode cmt_discr={:?} pat={:?}", cmt_discr, pat); return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |cmt_pat, pat| { - match pat.node { - PatKind::Binding(hir::BindByRef(..), ..) => - mode.lub(BorrowingMatch), - PatKind::Binding(hir::BindByValue(..), ..) => { - match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) { - Copy => mode.lub(CopyingMatch), - Move(..) => mode.lub(MovingMatch), + if let PatKind::Binding(..) = pat.node { + let bm = *self.mc.tables.pat_binding_modes.get(&pat.id) + .expect("missing binding mode"); + match bm { + ty::BindByReference(..) => + mode.lub(BorrowingMatch), + ty::BindByValue(..) => { + match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) { + Copy => mode.lub(CopyingMatch), + Move(..) => mode.lub(MovingMatch), + } } } - _ => {} } })); } @@ -822,8 +825,9 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let ExprUseVisitor { ref mc, ref mut delegate, param_env } = *self; return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| { - if let PatKind::Binding(bmode, def_id, ..) = pat.node { + if let PatKind::Binding(_, def_id, ..) = pat.node { debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode); + let bm = *mc.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"); // pat_ty: the type of the binding being produced. let pat_ty = return_if_err!(mc.node_ty(pat.id)); @@ -836,14 +840,14 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { } // It is also a borrow or copy/move of the value being matched. 
- match bmode { - hir::BindByRef(m) => { + match bm { + ty::BindByReference(m) => { if let ty::TyRef(r, _) = pat_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding); } } - hir::BindByValue(..) => { + ty::BindByValue(..) => { let mode = copy_or_move(mc, param_env, &cmt_pat, PatBindingMove); debug!("walk_pat binding consuming pat"); delegate.consume_pat(pat, cmt_pat, mode); diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index f06d4a3d9e3..3abd63fccdb 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -330,11 +330,12 @@ impl MutabilityCategory { ret } - fn from_local(tcx: TyCtxt, id: ast::NodeId) -> MutabilityCategory { + fn from_local(tcx: TyCtxt, tables: &ty::TypeckTables, id: ast::NodeId) -> MutabilityCategory { let ret = match tcx.hir.get(id) { hir_map::NodeLocal(p) => match p.node { - PatKind::Binding(bind_mode, ..) => { - if bind_mode == hir::BindByValue(hir::MutMutable) { + PatKind::Binding(..) => { + let bm = *tables.pat_binding_modes.get(&p.id).expect("missing binding mode"); + if bm == ty::BindByValue(hir::MutMutable) { McDeclared } else { McImmutable @@ -475,16 +476,21 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // *being borrowed* is. But ideally we would put in a more // fundamental fix to this conflated use of the node id. let ret_ty = match pat.node { - PatKind::Binding(hir::BindByRef(_), ..) => { - // a bind-by-ref means that the base_ty will be the type of the ident itself, - // but what we want here is the type of the underlying value being borrowed. - // So peel off one-level, turning the &T into T. - match base_ty.builtin_deref(false, ty::NoPreference) { - Some(t) => t.ty, - None => { - debug!("By-ref binding of non-derefable type {:?}", base_ty); - return Err(()); + PatKind::Binding(..) 
=> { + let bm = *self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"); + if let ty::BindByReference(_) = bm { + // a bind-by-ref means that the base_ty will be the type of the ident itself, + // but what we want here is the type of the underlying value being borrowed. + // So peel off one-level, turning the &T into T. + match base_ty.builtin_deref(false, ty::NoPreference) { + Some(t) => t.ty, + None => { + debug!("By-ref binding of non-derefable type {:?}", base_ty); + return Err(()); + } } + } else { + base_ty } } _ => base_ty, @@ -659,7 +665,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { id, span, cat: Categorization::Local(vid), - mutbl: MutabilityCategory::from_local(self.tcx, vid), + mutbl: MutabilityCategory::from_local(self.tcx, self.tables, vid), ty: expr_ty, note: NoteNone })) @@ -717,7 +723,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let var_ty = self.node_ty(var_id)?; // Mutability of original variable itself - let var_mutbl = MutabilityCategory::from_local(self.tcx, var_id); + let var_mutbl = MutabilityCategory::from_local(self.tcx, self.tables, var_id); // Construct the upvar. This represents access to the field // from the environment (perhaps we should eventually desugar diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 63a9731e546..82e2a2114da 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -459,10 +459,10 @@ impl<'tcx> RegionMaps { -> CodeExtent { if scope_a == scope_b { return scope_a; } - /// [1] The initial values for `a_buf` and `b_buf` are not used. - /// The `ancestors_of` function will return some prefix that - /// is re-initialized with new values (or else fallback to a - /// heap-allocated vector). + // [1] The initial values for `a_buf` and `b_buf` are not used. 
+ // The `ancestors_of` function will return some prefix that + // is re-initialized with new values (or else fallback to a + // heap-allocated vector). let mut a_buf: [CodeExtent; 32] = [scope_a /* [1] */; 32]; let mut a_vec: Vec<CodeExtent> = vec![]; let mut b_buf: [CodeExtent; 32] = [scope_b /* [1] */; 32]; @@ -890,8 +890,32 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, /// | ( ..., P&, ... ) /// | box P& fn is_binding_pat(pat: &hir::Pat) -> bool { + // Note that the code below looks for *explicit* refs only, that is, it won't + // know about *implicit* refs as introduced in #42640. + // + // This is not a problem. For example, consider + // + // let (ref x, ref y) = (Foo { .. }, Bar { .. }); + // + // Due to the explicit refs on the left hand side, the below code would signal + // that the temporary value on the right hand side should live until the end of + // the enclosing block (as opposed to being dropped after the let is complete). + // + // To create an implicit ref, however, you must have a borrowed value on the RHS + // already, as in this example (which won't compile before #42640): + // + // let Foo { x, .. } = &Foo { x: ..., ... }; + // + // in place of + // + // let Foo { ref x, .. } = Foo { ... }; + // + // In the former case (the implicit ref version), the temporary is created by the + // & expression, and its lifetime would be extended to the end of the block (due + // to a different rule, not the below code). match pat.node { - PatKind::Binding(hir::BindByRef(_), ..) => true, + PatKind::Binding(hir::BindingAnnotation::Ref, ..) | + PatKind::Binding(hir::BindingAnnotation::RefMut, ..) 
=> true, PatKind::Struct(_, ref field_pats, _) => { field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat)) diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index c4f785757ce..13efa94a5c9 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -153,10 +153,6 @@ pub struct NamedRegionMap { // (b) it DOES appear in the arguments. pub late_bound: NodeSet, - // Contains the node-ids for lifetimes that were (incorrectly) categorized - // as late-bound, until #32330 was fixed. - pub issue_32330: NodeMap<ty::Issue32330>, - // For each type and trait definition, maps type parameters // to the trait object lifetime defaults computed from them. pub object_lifetime_defaults: NodeMap<Vec<ObjectLifetimeDefault>>, @@ -261,7 +257,6 @@ pub fn krate(sess: &Session, let mut map = NamedRegionMap { defs: NodeMap(), late_bound: NodeSet(), - issue_32330: NodeMap(), object_lifetime_defaults: compute_object_lifetime_defaults(sess, hir_map), }; sess.track_errors(|| { @@ -303,7 +298,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { match item.node { hir::ItemFn(ref decl, _, _, _, ref generics, _) => { - self.visit_early_late(item.id, None, decl, generics, |this| { + self.visit_early_late(None, decl, generics, |this| { intravisit::walk_item(this, item); }); } @@ -355,7 +350,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { match item.node { hir::ForeignItemFn(ref decl, _, ref generics) => { - self.visit_early_late(item.id, None, decl, generics, |this| { + self.visit_early_late(None, decl, generics, |this| { intravisit::walk_foreign_item(this, item); }) } @@ -406,7 +401,6 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { if let hir::TraitItemKind::Method(ref sig, _) = 
trait_item.node { self.visit_early_late( - trait_item.id, Some(self.hir_map.get_parent(trait_item.id)), &sig.decl, &sig.generics, |this| intravisit::walk_trait_item(this, trait_item)) @@ -418,7 +412,6 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node { self.visit_early_late( - impl_item.id, Some(self.hir_map.get_parent(impl_item.id)), &sig.decl, &sig.generics, |this| intravisit::walk_impl_item(this, impl_item)) @@ -811,18 +804,13 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the /// ordering is not important there. fn visit_early_late<F>(&mut self, - fn_id: ast::NodeId, parent_id: Option<ast::NodeId>, decl: &'tcx hir::FnDecl, generics: &'tcx hir::Generics, walk: F) where F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>), { - let fn_def_id = self.hir_map.local_def_id(fn_id); - insert_late_bound_lifetimes(self.map, - fn_def_id, - decl, - generics); + insert_late_bound_lifetimes(self.map, decl, generics); // Find the start of nested early scopes, e.g. in methods. let mut index = 0; @@ -1549,7 +1537,6 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// not amongst the inputs to a projection. In other words, `<&'a /// T as Trait<''b>>::Foo` does not constrain `'a` or `'b`. fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, - fn_def_id: DefId, decl: &hir::FnDecl, generics: &hir::Generics) { debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics); @@ -1607,22 +1594,9 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, // any `impl Trait` in the return type? early-bound. if appears_in_output.impl_trait { continue; } - // does not appear in the inputs, but appears in the return - // type? eventually this will be early-bound, but for now we - // just mark it so we can issue warnings. 
- let constrained_by_input = constrained_by_input.regions.contains(&name); - let appears_in_output = appears_in_output.regions.contains(&name); - if !constrained_by_input && appears_in_output { - debug!("inserting issue_32330 entry for {:?}, {:?} on {:?}", - lifetime.lifetime.id, - name, - fn_def_id); - map.issue_32330.insert( - lifetime.lifetime.id, - ty::Issue32330 { - fn_def_id, - region_name: name, - }); + // does not appear in the inputs, but appears in the return type? early-bound. + if !constrained_by_input.regions.contains(&name) && + appears_in_output.regions.contains(&name) { continue; } diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 7b71bc0e678..4063609474b 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -25,7 +25,7 @@ use ty::{self, AdtDef, ClosureSubsts, Region, Ty}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use util::ppaux; use rustc_back::slice; -use hir::InlineAsm; +use hir::{self, InlineAsm}; use std::ascii; use std::borrow::{Cow}; use std::cell::Ref; @@ -894,12 +894,18 @@ pub enum StatementKind<'tcx> { /// End the current live range for the storage of the local. StorageDead(Lvalue<'tcx>), + /// Execute a piece of inline Assembly. InlineAsm { asm: Box<InlineAsm>, outputs: Vec<Lvalue<'tcx>>, inputs: Vec<Operand<'tcx>> }, + /// Assert the given lvalues to be valid inhabitants of their type. These statements are + /// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed. + /// See <https://internals.rust-lang.org/t/types-as-contracts/5562/73> for more details. + Validate(ValidationOp, Vec<ValidationOperand<'tcx, Lvalue<'tcx>>>), + /// Mark one terminating point of an extent (i.e. static region). /// (The starting point(s) arise implicitly from borrows.) EndRegion(CodeExtent), @@ -908,6 +914,57 @@ pub enum StatementKind<'tcx> { Nop, } +/// The `ValidationOp` describes what happens with each of the operands of a +/// `Validate` statement. 
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)] +pub enum ValidationOp { + /// Recursively traverse the lvalue following the type and validate that all type + /// invariants are maintained. Furthermore, acquire exclusive/read-only access to the + /// memory reachable from the lvalue. + Acquire, + /// Recursive traverse the *mutable* part of the type and relinquish all exclusive + /// access. + Release, + /// Recursive traverse the *mutable* part of the type and relinquish all exclusive + /// access *until* the given region ends. Then, access will be recovered. + Suspend(CodeExtent), +} + +impl Debug for ValidationOp { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::ValidationOp::*; + match *self { + Acquire => write!(fmt, "Acquire"), + Release => write!(fmt, "Release"), + // (reuse lifetime rendering policy from ppaux.) + Suspend(ref ce) => write!(fmt, "Suspend({})", ty::ReScope(*ce)), + } + } +} + +// This is generic so that it can be reused by miri +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct ValidationOperand<'tcx, T> { + pub lval: T, + pub ty: Ty<'tcx>, + pub re: Option<CodeExtent>, + pub mutbl: hir::Mutability, +} + +impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!(fmt, "{:?}: {:?}", self.lval, self.ty)?; + if let Some(ce) = self.re { + // (reuse lifetime rendering policy from ppaux.) + write!(fmt, "/{}", ty::ReScope(ce))?; + } + if let hir::MutImmutable = self.mutbl { + write!(fmt, " (imm)")?; + } + Ok(()) + } +} + impl<'tcx> Debug for Statement<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::StatementKind::*; @@ -915,6 +972,7 @@ impl<'tcx> Debug for Statement<'tcx> { Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv), // (reuse lifetime rendering policy from ppaux.) 
EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)), + Validate(ref op, ref lvalues) => write!(fmt, "Validate({:?}, {:?})", op, lvalues), StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv), StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv), SetDiscriminant{lvalue: ref lv, variant_index: index} => { @@ -963,15 +1021,15 @@ impl_stable_hash_for!(struct Static<'tcx> { /// shared between `Constant` and `Lvalue`. See the aliases /// `LvalueProjection` etc below. #[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct Projection<'tcx, B, V> { +pub struct Projection<'tcx, B, V, T> { pub base: B, - pub elem: ProjectionElem<'tcx, V>, + pub elem: ProjectionElem<'tcx, V, T>, } #[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub enum ProjectionElem<'tcx, V> { +pub enum ProjectionElem<'tcx, V, T> { Deref, - Field(Field, Ty<'tcx>), + Field(Field, T), Index(V), /// These indices are generated by slice patterns. Easiest to explain @@ -1008,11 +1066,11 @@ pub enum ProjectionElem<'tcx, V> { /// Alias for projections as they appear in lvalues, where the base is an lvalue /// and the index is an operand. -pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>>; +pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>, Ty<'tcx>>; /// Alias for projections as they appear in lvalues, where the base is an lvalue /// and the index is an operand. 
-pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>>; +pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>, Ty<'tcx>>; newtype_index!(Field, "field"); @@ -1606,6 +1664,21 @@ impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { } } +impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Lvalue<'tcx>> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ValidationOperand { + lval: self.lval.fold_with(folder), + ty: self.ty.fold_with(folder), + re: self.re, + mutbl: self.mutbl, + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.lval.visit_with(visitor) || self.ty.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { use mir::StatementKind::*; @@ -1630,6 +1703,10 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { // trait with a `fn fold_extent`. EndRegion(ref extent) => EndRegion(extent.clone()), + Validate(ref op, ref lvals) => + Validate(op.clone(), + lvals.iter().map(|operand| operand.fold_with(folder)).collect()), + Nop => Nop, }; Statement { @@ -1655,6 +1732,9 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { // trait with a `fn visit_extent`. 
EndRegion(ref _extent) => false, + Validate(ref _op, ref lvalues) => + lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)), + Nop => false, } } @@ -1857,8 +1937,8 @@ impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { } } -impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> - where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx> +impl<'tcx, B, V, T> TypeFoldable<'tcx> for Projection<'tcx, B, V, T> + where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>, T: TypeFoldable<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { use mir::ProjectionElem::*; @@ -1866,7 +1946,7 @@ impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> let base = self.base.fold_with(folder); let elem = match self.elem { Deref => Deref, - Field(f, ty) => Field(f, ty.fold_with(folder)), + Field(f, ref ty) => Field(f, ty.fold_with(folder)), Index(ref v) => Index(v.fold_with(folder)), ref elem => elem.clone() }; @@ -1882,7 +1962,7 @@ impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> self.base.visit_with(visitor) || match self.elem { - Field(_, ty) => ty.visit_with(visitor), + Field(_, ref ty) => ty.visit_with(visitor), Index(ref v) => v.visit_with(visitor), _ => false } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 903d70add6f..8cc5b6cab11 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -14,7 +14,6 @@ use ty::subst::Substs; use ty::{ClosureSubsts, Region, Ty}; use mir::*; use rustc_const_math::ConstUsize; -use rustc_data_structures::indexed_vec::Idx; use syntax_pos::Span; // # The MIR Visitor @@ -264,9 +263,15 @@ macro_rules! 
make_mir_visitor { fn super_mir(&mut self, mir: & $($mutability)* Mir<'tcx>) { - for index in 0..mir.basic_blocks().len() { - let block = BasicBlock::new(index); - self.visit_basic_block_data(block, &$($mutability)* mir[block]); + // for best performance, we want to use an iterator rather + // than a for-loop, to avoid calling Mir::invalidate for + // each basic block. + macro_rules! basic_blocks { + (mut) => (mir.basic_blocks_mut().iter_enumerated_mut()); + () => (mir.basic_blocks().iter_enumerated()); + }; + for (bb, data) in basic_blocks!($($mutability)*) { + self.visit_basic_block_data(bb, data); } for scope in &$($mutability)* mir.visibility_scopes { @@ -337,6 +342,13 @@ macro_rules! make_mir_visitor { self.visit_assign(block, lvalue, rvalue, location); } StatementKind::EndRegion(_) => {} + StatementKind::Validate(_, ref $($mutability)* lvalues) => { + for operand in lvalues { + self.visit_lvalue(& $($mutability)* operand.lval, + LvalueContext::Validate, location); + self.visit_ty(& $($mutability)* operand.ty, Lookup::Loc(location)); + } + } StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => { self.visit_lvalue(lvalue, LvalueContext::Store, location); } @@ -807,6 +819,9 @@ pub enum LvalueContext<'tcx> { // Starting and ending a storage live range StorageLive, StorageDead, + + // Validation command + Validate, } impl<'tcx> LvalueContext<'tcx> { @@ -853,7 +868,8 @@ impl<'tcx> LvalueContext<'tcx> { LvalueContext::Borrow { kind: BorrowKind::Shared, .. } | LvalueContext::Borrow { kind: BorrowKind::Unique, .. } | LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume | - LvalueContext::StorageLive | LvalueContext::StorageDead => false, + LvalueContext::StorageLive | LvalueContext::StorageDead | + LvalueContext::Validate => false, } } @@ -865,7 +881,8 @@ impl<'tcx> LvalueContext<'tcx> { LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume => true, LvalueContext::Borrow { kind: BorrowKind::Mut, .. 
} | LvalueContext::Store | LvalueContext::Call | LvalueContext::Projection(Mutability::Mut) | - LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead => false, + LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead | + LvalueContext::Validate => false, } } diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 8b55eb4c099..6995f099677 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1025,6 +1025,9 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "the directory the MIR is dumped into"), dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED], "if set, exclude the pass number when dumping MIR (used in tests)"), + mir_emit_validate: usize = (0, parse_uint, [TRACKED], + "emit Validate MIR statements, interpreted e.g. by miri (0: do not emit; 1: if function \ + contains unsafe block, only validate arguments; 2: always emit full validation)"), perf_stats: bool = (false, parse_bool, [UNTRACKED], "print some performance-related statistics"), hir_stats: bool = (false, parse_bool, [UNTRACKED], @@ -1059,6 +1062,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "choose which RELRO level to use"), nll: bool = (false, parse_bool, [UNTRACKED], "run the non-lexical lifetimes MIR pass"), + trans_time_graph: bool = (false, parse_bool, [UNTRACKED], + "generate a graphical HTML report of time spent in trans and LLVM"), } pub fn default_lib_output() -> CrateType { @@ -1498,6 +1503,23 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) early_error(error_format, "Value for codegen units must be a positive nonzero integer"); } + // It's possible that we have `codegen_units > 1` but only one item in + // `trans.modules`. 
We could theoretically proceed and do LTO in that + // case, but it would be confusing to have the validity of + // `-Z lto -C codegen-units=2` depend on details of the crate being + // compiled, so we complain regardless. + if cg.lto && cg.codegen_units > 1 { + // This case is impossible to handle because LTO expects to be able + // to combine the entire crate and all its dependencies into a + // single compilation unit, but each codegen unit is in a separate + // LLVM context, so they can't easily be combined. + early_error(error_format, "can't perform LTO when using multiple codegen units"); + } + + if cg.lto && debugging_opts.incremental.is_some() { + early_error(error_format, "can't perform LTO when compiling incrementally"); + } + let mut prints = Vec::<PrintRequest>::new(); if cg.target_cpu.as_ref().map_or(false, |s| s == "help") { prints.push(PrintRequest::TargetCPUs); diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index b97c2b77d10..71f4c8441b2 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -463,13 +463,19 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( selcx.infcx().report_overflow_error(&obligation, false); } Err(ProjectionCacheEntry::NormalizedTy(ty)) => { - // If we find the value in the cache, then the obligations - // have already been returned from the previous entry (and - // should therefore have been honored). + // If we find the value in the cache, then return it along + // with the obligations that went along with it. Note + // that, when using a fulfillment context, these + // obligations could in principle be ignored: they have + // already been registered when the cache entry was + // created (and hence the new ones will quickly be + // discarded as duplicated). But when doing trait + // evaluation this is not the case, and dropping the trait + // evaluations can causes ICEs (e.g. #43132). 
debug!("opt_normalize_projection_type: \ found normalized ty `{:?}`", ty); - return Some(NormalizedTy { value: ty, obligations: vec![] }); + return Some(ty); } Err(ProjectionCacheEntry::Error) => { debug!("opt_normalize_projection_type: \ @@ -480,9 +486,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty); match project_type(selcx, &obligation) { - Ok(ProjectedTy::Progress(Progress { ty: projected_ty, - mut obligations, - cacheable })) => { + Ok(ProjectedTy::Progress(Progress { ty: projected_ty, mut obligations })) => { // if projection succeeded, then what we get out of this // is also non-normalized (consider: it was derived from // an impl, where-clause etc) and hence we must @@ -491,12 +495,10 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( debug!("opt_normalize_projection_type: \ projected_ty={:?} \ depth={} \ - obligations={:?} \ - cacheable={:?}", + obligations={:?}", projected_ty, depth, - obligations, - cacheable); + obligations); let result = if projected_ty.has_projection_types() { let mut normalizer = AssociatedTypeNormalizer::new(selcx, @@ -521,8 +523,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( obligations, } }; - infcx.projection_cache.borrow_mut() - .complete(projection_ty, &result, cacheable); + infcx.projection_cache.borrow_mut().complete(projection_ty, &result); Some(result) } Ok(ProjectedTy::NoProgress(projected_ty)) => { @@ -533,8 +534,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( value: projected_ty, obligations: vec![] }; - infcx.projection_cache.borrow_mut() - .complete(projection_ty, &result, true); + infcx.projection_cache.borrow_mut().complete(projection_ty, &result); Some(result) } Err(ProjectionTyError::TooManyCandidates) => { @@ -607,7 +607,6 @@ enum ProjectedTy<'tcx> { struct Progress<'tcx> { ty: Ty<'tcx>, obligations: Vec<PredicateObligation<'tcx>>, - cacheable: bool, } impl<'tcx> Progress<'tcx> { @@ 
-615,7 +614,6 @@ impl<'tcx> Progress<'tcx> { Progress { ty: tcx.types.err, obligations: vec![], - cacheable: true } } @@ -1286,7 +1284,6 @@ fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>( Progress { ty: ty_match.value, obligations, - cacheable: ty_match.unconstrained_regions.is_empty(), } } Err(e) => { @@ -1330,7 +1327,6 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( Progress { ty: ty.subst(tcx, substs), obligations: nested, - cacheable: true } } @@ -1394,7 +1390,7 @@ enum ProjectionCacheEntry<'tcx> { InProgress, Ambiguous, Error, - NormalizedTy(Ty<'tcx>), + NormalizedTy(NormalizedTy<'tcx>), } // NB: intentionally not Clone @@ -1438,22 +1434,11 @@ impl<'tcx> ProjectionCache<'tcx> { Ok(()) } - /// Indicates that `key` was normalized to `value`. If `cacheable` is false, - /// then this result is sadly not cacheable. - fn complete(&mut self, - key: ty::ProjectionTy<'tcx>, - value: &NormalizedTy<'tcx>, - cacheable: bool) { - let fresh_key = if cacheable { - debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}", - key, value); - self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value)) - } else { - debug!("ProjectionCacheEntry::complete: cannot cache: key={:?}, value={:?}", - key, value); - !self.map.remove(key) - }; - + /// Indicates that `key` was normalized to `value`. + fn complete(&mut self, key: ty::ProjectionTy<'tcx>, value: &NormalizedTy<'tcx>) { + debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}", + key, value); + let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.clone())); assert!(!fresh_key, "never started projecting `{:?}`", key); } diff --git a/src/librustc/ty/binding.rs b/src/librustc/ty/binding.rs new file mode 100644 index 00000000000..3db61b76cc5 --- /dev/null +++ b/src/librustc/ty/binding.rs @@ -0,0 +1,35 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::BindingAnnotation::*; +use hir::BindingAnnotation; +use hir::Mutability; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum BindingMode { + BindByReference(Mutability), + BindByValue(Mutability), +} + +impl BindingMode { + pub fn convert(ba: BindingAnnotation) -> BindingMode { + match ba { + Unannotated => BindingMode::BindByValue(Mutability::MutImmutable), + Mutable => BindingMode::BindByValue(Mutability::MutMutable), + Ref => BindingMode::BindByReference(Mutability::MutImmutable), + RefMut => BindingMode::BindByReference(Mutability::MutMutable), + } + } +} + +impl_stable_hash_for!(enum self::BindingMode { + BindByReference(mutability), + BindByValue(mutability) +}); diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 5e9caab60c1..0c79b2f23b9 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -40,6 +40,7 @@ use ty::layout::{Layout, TargetDataLayout}; use ty::inhabitedness::DefIdForest; use ty::maps; use ty::steal::Steal; +use ty::BindingMode; use util::nodemap::{NodeMap, NodeSet, DefIdSet}; use util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::accumulate_vec::AccumulateVec; @@ -223,6 +224,9 @@ pub struct TypeckTables<'tcx> { pub adjustments: NodeMap<Vec<ty::adjustment::Adjustment<'tcx>>>, + // Stores the actual binding mode for all instances of hir::BindingAnnotation. 
+ pub pat_binding_modes: NodeMap<BindingMode>, + /// Borrows pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>, @@ -278,6 +282,7 @@ impl<'tcx> TypeckTables<'tcx> { node_types: FxHashMap(), node_substs: NodeMap(), adjustments: NodeMap(), + pat_binding_modes: NodeMap(), upvar_capture_map: FxHashMap(), generator_sigs: NodeMap(), generator_interiors: NodeMap(), diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index 695c9427001..802994ae094 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -39,8 +39,8 @@ pub enum TypeError<'tcx> { RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>), RegionsNotSame(Region<'tcx>, Region<'tcx>), RegionsNoOverlap(Region<'tcx>, Region<'tcx>), - RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>, Option<Box<ty::Issue32330>>), - RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>, Option<Box<ty::Issue32330>>), + RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>), + RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>), Sorts(ExpectedFound<Ty<'tcx>>), IntMismatch(ExpectedFound<ty::IntVarValue>), FloatMismatch(ExpectedFound<ast::FloatTy>), @@ -116,13 +116,13 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { RegionsNoOverlap(..) 
=> { write!(f, "lifetimes do not intersect") } - RegionsInsufficientlyPolymorphic(br, _, _) => { + RegionsInsufficientlyPolymorphic(br, _) => { write!(f, "expected bound lifetime parameter{}{}, found concrete lifetime", if br.is_named() { " " } else { "" }, br) } - RegionsOverlyPolymorphic(br, _, _) => { + RegionsOverlyPolymorphic(br, _) => { write!(f, "expected concrete lifetime, found bound lifetime parameter{}{}", if br.is_named() { " " } else { "" }, @@ -258,15 +258,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.note_and_explain_region(db, "...does not overlap ", region2, ""); } - RegionsInsufficientlyPolymorphic(_, conc_region, _) => { + RegionsInsufficientlyPolymorphic(_, conc_region) => { self.note_and_explain_region(db, "concrete lifetime that was found is ", conc_region, ""); } - RegionsOverlyPolymorphic(_, &ty::ReVar(_), _) => { + RegionsOverlyPolymorphic(_, &ty::ReVar(_)) => { // don't bother to print out the message below for // inference variables, it's not very illuminating. } - RegionsOverlyPolymorphic(_, conc_region, _) => { + RegionsOverlyPolymorphic(_, conc_region) => { self.note_and_explain_region(db, "expected concrete lifetime is ", conc_region, ""); } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index df6fa05a962..54e15ed01f0 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -581,14 +581,14 @@ pub struct Struct { pub min_size: Size, } -// Info required to optimize struct layout. +/// Info required to optimize struct layout. #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] enum StructKind { - // A tuple, closure, or univariant which cannot be coerced to unsized. + /// A tuple, closure, or univariant which cannot be coerced to unsized. AlwaysSizedUnivariant, - // A univariant, the last field of which may be coerced to unsized. + /// A univariant, the last field of which may be coerced to unsized. MaybeUnsizedUnivariant, - // A univariant, but part of an enum. 
+ /// A univariant, but part of an enum. EnumVariant, } @@ -1020,7 +1020,7 @@ pub enum Layout { /// TyRawPtr or TyRef with a !Sized pointee. FatPointer { metadata: Primitive, - // If true, the pointer cannot be null. + /// If true, the pointer cannot be null. non_zero: bool }, @@ -1031,8 +1031,8 @@ pub enum Layout { discr: Integer, signed: bool, non_zero: bool, - // Inclusive discriminant range. - // If min > max, it represents min...u64::MAX followed by 0...max. + /// Inclusive discriminant range. + /// If min > max, it represents min...u64::MAX followed by 0...max. // FIXME(eddyb) always use the shortest range, e.g. by finding // the largest space between two consecutive discriminants and // taking everything else as the (shortest) discriminant range. @@ -1043,7 +1043,7 @@ pub enum Layout { /// Single-case enums, and structs/tuples. Univariant { variant: Struct, - // If true, the structure is NonZero. + /// If true, the structure is NonZero. // FIXME(eddyb) use a newtype Layout kind for this. non_zero: bool }, @@ -1084,9 +1084,9 @@ pub enum Layout { StructWrappedNullablePointer { nndiscr: u64, nonnull: Struct, - // N.B. There is a 0 at the start, for LLVM GEP through a pointer. + /// N.B. There is a 0 at the start, for LLVM GEP through a pointer. discrfield: FieldPath, - // Like discrfield, but in source order. For debuginfo. + /// Like discrfield, but in source order. For debuginfo. discrfield_source: FieldPath } } @@ -1954,11 +1954,11 @@ pub enum SizeSkeleton<'tcx> { /// A potentially-fat pointer. Pointer { - // If true, this pointer is never null. + /// If true, this pointer is never null. non_zero: bool, - // The type which determines the unsized metadata, if any, - // of this pointer. Either a type parameter or a projection - // depending on one, with regions erased. + /// The type which determines the unsized metadata, if any, + /// of this pointer. Either a type parameter or a projection + /// depending on one, with regions erased. 
tail: Ty<'tcx> } } diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs index 2a9480ab06a..88a2ce0ab8a 100644 --- a/src/librustc/ty/maps.rs +++ b/src/librustc/ty/maps.rs @@ -9,7 +9,7 @@ // except according to those terms. use dep_graph::{DepConstructor, DepNode, DepNodeIndex}; -use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE}; +use hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use hir::def::Def; use hir; use middle::const_val; @@ -942,7 +942,7 @@ define_maps! { <'tcx> /// Checks all types in the krate for overlap in their inherent impls. Reports errors. /// Not meant to be used directly outside of coherence. /// (Defined only for LOCAL_CRATE) - [] crate_inherent_impls_overlap_check: crate_inherent_impls_dep_node(CrateNum) -> (), + [] crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) -> (), /// Results of evaluating const items or constants embedded in /// other items (such as enum variant explicit discriminants). @@ -1025,6 +1025,10 @@ fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::Coherence } +fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CoherenceInherentImplOverlapCheck +} + fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::Reachability } @@ -1043,10 +1047,9 @@ fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::TypeckBodiesKrate } -fn const_eval_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) +fn const_eval_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) -> DepConstructor<'tcx> { - let (def_id, substs) = key.value; - DepConstructor::ConstEval { def_id, substs } + DepConstructor::ConstEval } fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { @@ -1061,32 +1064,22 @@ fn relevant_trait_impls_for<'tcx>((def_id, t): (DefId, SimplifiedType)) -> DepCo 
DepConstructor::RelevantTraitImpls(def_id, t) } -fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::IsCopy(def_id) +fn is_copy_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsCopy } -fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::IsSized(def_id) +fn is_sized_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsSized } -fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::IsFreeze(def_id) +fn is_freeze_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsFreeze } -fn needs_drop_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::NeedsDrop(def_id) +fn needs_drop_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::NeedsDrop } -fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::Layout(def_id) +fn layout_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::Layout } diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 85218c6baa5..3ac189ae047 100644 --- a/src/librustc/ty/mod.rs +++ 
b/src/librustc/ty/mod.rs @@ -68,13 +68,15 @@ pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; pub use self::sty::{ExistentialProjection, PolyExistentialProjection}; pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; pub use self::sty::RegionKind; -pub use self::sty::Issue32330; pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid}; pub use self::sty::BoundRegion::*; pub use self::sty::InferTy::*; pub use self::sty::RegionKind::*; pub use self::sty::TypeVariants::*; +pub use self::binding::BindingMode; +pub use self::binding::BindingMode::*; + pub use self::context::{TyCtxt, GlobalArenas, tls}; pub use self::context::{Lift, TypeckTables}; @@ -85,6 +87,7 @@ pub use self::trait_def::TraitDef; pub use self::maps::queries; pub mod adjustment; +pub mod binding; pub mod cast; pub mod error; pub mod fast_reject; @@ -158,7 +161,7 @@ pub struct ImplHeader<'tcx> { pub predicates: Vec<Predicate<'tcx>>, } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct AssociatedItem { pub def_id: DefId, pub name: Name, @@ -172,7 +175,7 @@ pub struct AssociatedItem { pub method_has_self_argument: bool, } -#[derive(Copy, Clone, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, RustcEncodable, RustcDecodable)] pub enum AssociatedKind { Const, Method, @@ -679,7 +682,6 @@ pub struct RegionParameterDef { pub name: Name, pub def_id: DefId, pub index: u32, - pub issue_32330: Option<ty::Issue32330>, /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute /// on generic parameter `'a`, asserts data of lifetime `'a` diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index f93626b7cb1..087c41a7883 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -377,13 +377,11 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { RegionsNoOverlap(a, b) => { return 
tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b)) } - RegionsInsufficientlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b, c)) + RegionsInsufficientlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b)) } - RegionsOverlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b, c)) + RegionsOverlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)) } IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ -1065,13 +1063,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { RegionsNoOverlap(a, b) => { RegionsNoOverlap(a.fold_with(folder), b.fold_with(folder)) }, - RegionsInsufficientlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - RegionsInsufficientlyPolymorphic(a, b.fold_with(folder), c) + RegionsInsufficientlyPolymorphic(a, b) => { + RegionsInsufficientlyPolymorphic(a, b.fold_with(folder)) }, - RegionsOverlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - RegionsOverlyPolymorphic(a, b.fold_with(folder), c) + RegionsOverlyPolymorphic(a, b) => { + RegionsOverlyPolymorphic(a, b.fold_with(folder)) }, IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ -1097,8 +1093,8 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { RegionsNoOverlap(a, b) => { a.visit_with(visitor) || b.visit_with(visitor) }, - RegionsInsufficientlyPolymorphic(_, b, _) | - RegionsOverlyPolymorphic(_, b, _) => { + RegionsInsufficientlyPolymorphic(_, b) | + RegionsOverlyPolymorphic(_, b) => { b.visit_with(visitor) }, Sorts(x) => x.visit_with(visitor), diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 9a180b3552d..2f3fd5244ae 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -77,20 +77,6 @@ impl BoundRegion { } } -/// When a region changed from late-bound to early-bound when #32330 -/// 
was fixed, its `RegionParameterDef` will have one of these -/// structures that we can use to give nicer errors. -#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable)] -pub struct Issue32330 { - /// fn where is region declared - pub fn_def_id: DefId, - - /// name of region; duplicates the info in BrNamed but convenient - /// to have it here, and this code is only temporary - pub region_name: ast::Name, -} - /// NB: If you change this, you'll probably want to change the corresponding /// AST structure in libsyntax/ast.rs as well. #[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index 17564671a1e..244b7f35968 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -57,6 +57,32 @@ pub fn time<T, F>(do_it: bool, what: &str, f: F) -> T where let rv = f(); let dur = start.elapsed(); + print_time_passes_entry_internal(what, dur); + + TIME_DEPTH.with(|slot| slot.set(old)); + + rv +} + +pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) { + if !do_it { + return + } + + let old = TIME_DEPTH.with(|slot| { + let r = slot.get(); + slot.set(r + 1); + r + }); + + print_time_passes_entry_internal(what, dur); + + TIME_DEPTH.with(|slot| slot.set(old)); +} + +fn print_time_passes_entry_internal(what: &str, dur: Duration) { + let indentation = TIME_DEPTH.with(|slot| slot.get()); + let mem_string = match get_resident() { Some(n) => { let mb = n as f64 / 1_000_000.0; @@ -65,14 +91,10 @@ pub fn time<T, F>(do_it: bool, what: &str, f: F) -> T where None => "".to_owned(), }; println!("{}time: {}{}\t{}", - repeat(" ").take(old).collect::<String>(), + repeat(" ").take(indentation).collect::<String>(), duration_to_secs_str(dur), mem_string, what); - - TIME_DEPTH.with(|slot| slot.set(old)); - - rv } // Hack up our own formatting for the duration to make it easier for scripts diff --git 
a/src/librustc_apfloat/Cargo.toml b/src/librustc_apfloat/Cargo.toml new file mode 100644 index 00000000000..b8f8488e302 --- /dev/null +++ b/src/librustc_apfloat/Cargo.toml @@ -0,0 +1,11 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_apfloat" +version = "0.0.0" + +[lib] +name = "rustc_apfloat" +path = "lib.rs" + +[dependencies] +rustc_bitflags = { path = "../librustc_bitflags" } diff --git a/src/librustc_apfloat/ieee.rs b/src/librustc_apfloat/ieee.rs new file mode 100644 index 00000000000..3545a77c75d --- /dev/null +++ b/src/librustc_apfloat/ieee.rs @@ -0,0 +1,2733 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use {Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; +use {Float, FloatConvert, ParseError, Round, Status, StatusAnd}; + +use std::cmp::{self, Ordering}; +use std::convert::TryFrom; +use std::fmt::{self, Write}; +use std::marker::PhantomData; +use std::mem; +use std::ops::Neg; + +#[must_use] +pub struct IeeeFloat<S> { + /// Absolute significand value (including the integer bit). + sig: [Limb; 1], + + /// The signed unbiased exponent of the value. + exp: ExpInt, + + /// What kind of floating point number this is. + category: Category, + + /// Sign bit of the number. + sign: bool, + + marker: PhantomData<S>, +} + +/// Fundamental unit of big integer arithmetic, but also +/// large to store the largest significands by itself. 
+type Limb = u128; +const LIMB_BITS: usize = 128; +fn limbs_for_bits(bits: usize) -> usize { + (bits + LIMB_BITS - 1) / LIMB_BITS +} + +/// Enum that represents what fraction of the LSB truncated bits of an fp number +/// represent. +/// +/// This essentially combines the roles of guard and sticky bits. +#[must_use] +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum Loss { + // Example of truncated bits: + ExactlyZero, // 000000 + LessThanHalf, // 0xxxxx x's not all zero + ExactlyHalf, // 100000 + MoreThanHalf, // 1xxxxx x's not all zero +} + +/// Represents floating point arithmetic semantics. +pub trait Semantics: Sized { + /// Total number of bits in the in-memory format. + const BITS: usize; + + /// Number of bits in the significand. This includes the integer bit. + const PRECISION: usize; + + /// The largest E such that 2^E is representable; this matches the + /// definition of IEEE 754. + const MAX_EXP: ExpInt; + + /// The smallest E such that 2^E is a normalized number; this + /// matches the definition of IEEE 754. + const MIN_EXP: ExpInt = -Self::MAX_EXP + 1; + + /// The significand bit that marks NaN as quiet. + const QNAN_BIT: usize = Self::PRECISION - 2; + + /// The significand bitpattern to mark a NaN as quiet. + /// NOTE: for X87DoubleExtended we need to set two bits instead of 2. + const QNAN_SIGNIFICAND: Limb = 1 << Self::QNAN_BIT; + + fn from_bits(bits: u128) -> IeeeFloat<Self> { + assert!(Self::BITS > Self::PRECISION); + + let sign = bits & (1 << (Self::BITS - 1)); + let exponent = (bits & !sign) >> (Self::PRECISION - 1); + let mut r = IeeeFloat { + sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)], + // Convert the exponent from its bias representation to a signed integer. + exp: (exponent as ExpInt) - Self::MAX_EXP, + category: Category::Zero, + sign: sign != 0, + marker: PhantomData, + }; + + if r.exp == Self::MIN_EXP - 1 && r.sig == [0] { + // Exponent, significand meaningless. 
+ r.category = Category::Zero; + } else if r.exp == Self::MAX_EXP + 1 && r.sig == [0] { + // Exponent, significand meaningless. + r.category = Category::Infinity; + } else if r.exp == Self::MAX_EXP + 1 && r.sig != [0] { + // Sign, exponent, significand meaningless. + r.category = Category::NaN; + } else { + r.category = Category::Normal; + if r.exp == Self::MIN_EXP - 1 { + // Denormal. + r.exp = Self::MIN_EXP; + } else { + // Set integer bit. + sig::set_bit(&mut r.sig, Self::PRECISION - 1); + } + } + + r + } + + fn to_bits(x: IeeeFloat<Self>) -> u128 { + assert!(Self::BITS > Self::PRECISION); + + // Split integer bit from significand. + let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1); + let mut significand = x.sig[0] & ((1 << (Self::PRECISION - 1)) - 1); + let exponent = match x.category { + Category::Normal => { + if x.exp == Self::MIN_EXP && !integer_bit { + // Denormal. + Self::MIN_EXP - 1 + } else { + x.exp + } + } + Category::Zero => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 0; + Self::MIN_EXP - 1 + } + Category::Infinity => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 0; + Self::MAX_EXP + 1 + } + Category::NaN => Self::MAX_EXP + 1, + }; + + // Convert the exponent from a signed integer to its bias representation. + let exponent = (exponent + Self::MAX_EXP) as u128; + + ((x.sign as u128) << (Self::BITS - 1)) | (exponent << (Self::PRECISION - 1)) | significand + } +} + +impl<S> Copy for IeeeFloat<S> {} +impl<S> Clone for IeeeFloat<S> { + fn clone(&self) -> Self { + *self + } +} + +macro_rules! ieee_semantics { + ($($name:ident = $sem:ident($bits:tt : $exp_bits:tt)),*) => { + $(pub struct $sem;)* + $(pub type $name = IeeeFloat<$sem>;)* + $(impl Semantics for $sem { + const BITS: usize = $bits; + const PRECISION: usize = ($bits - 1 - $exp_bits) + 1; + const MAX_EXP: ExpInt = (1 << ($exp_bits - 1)) - 1; + })* + } +} + +ieee_semantics! 
{ + Half = HalfS(16:5), + Single = SingleS(32:8), + Double = DoubleS(64:11), + Quad = QuadS(128:15) +} + +pub struct X87DoubleExtendedS; +pub type X87DoubleExtended = IeeeFloat<X87DoubleExtendedS>; +impl Semantics for X87DoubleExtendedS { + const BITS: usize = 80; + const PRECISION: usize = 64; + const MAX_EXP: ExpInt = (1 << (15 - 1)) - 1; + + /// For x87 extended precision, we want to make a NaN, not a + /// pseudo-NaN. Maybe we should expose the ability to make + /// pseudo-NaNs? + const QNAN_SIGNIFICAND: Limb = 0b11 << Self::QNAN_BIT; + + /// Integer bit is explicit in this format. Intel hardware (387 and later) + /// does not support these bit patterns: + /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity") + /// exponent = all 1's, integer bit 0, significand nonzero ("pseudoNaN") + /// exponent = 0, integer bit 1 ("pseudodenormal") + /// exponent!=0 nor all 1's, integer bit 0 ("unnormal") + /// At the moment, the first two are treated as NaNs, the second two as Normal. + fn from_bits(bits: u128) -> IeeeFloat<Self> { + let sign = bits & (1 << (Self::BITS - 1)); + let exponent = (bits & !sign) >> Self::PRECISION; + let mut r = IeeeFloat { + sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)], + // Convert the exponent from its bias representation to a signed integer. + exp: (exponent as ExpInt) - Self::MAX_EXP, + category: Category::Zero, + sign: sign != 0, + marker: PhantomData, + }; + + if r.exp == Self::MIN_EXP - 1 && r.sig == [0] { + // Exponent, significand meaningless. + r.category = Category::Zero; + } else if r.exp == Self::MAX_EXP + 1 && r.sig == [1 << (Self::PRECISION - 1)] { + // Exponent, significand meaningless. + r.category = Category::Infinity; + } else if r.exp == Self::MAX_EXP + 1 && r.sig != [1 << (Self::PRECISION - 1)] { + // Sign, exponent, significand meaningless. + r.category = Category::NaN; + } else { + r.category = Category::Normal; + if r.exp == Self::MIN_EXP - 1 { + // Denormal. 
+ r.exp = Self::MIN_EXP; + } + } + + r + } + + fn to_bits(x: IeeeFloat<Self>) -> u128 { + // Get integer bit from significand. + let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1); + let mut significand = x.sig[0] & ((1 << Self::PRECISION) - 1); + let exponent = match x.category { + Category::Normal => { + if x.exp == Self::MIN_EXP && !integer_bit { + // Denormal. + Self::MIN_EXP - 1 + } else { + x.exp + } + } + Category::Zero => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 0; + Self::MIN_EXP - 1 + } + Category::Infinity => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 1 << (Self::PRECISION - 1); + Self::MAX_EXP + 1 + } + Category::NaN => Self::MAX_EXP + 1, + }; + + // Convert the exponent from a signed integer to its bias representation. + let exponent = (exponent + Self::MAX_EXP) as u128; + + ((x.sign as u128) << (Self::BITS - 1)) | (exponent << Self::PRECISION) | significand + } +} + +float_common_impls!(IeeeFloat<S>); + +impl<S: Semantics> PartialEq for IeeeFloat<S> { + fn eq(&self, rhs: &Self) -> bool { + self.partial_cmp(rhs) == Some(Ordering::Equal) + } +} + +impl<S: Semantics> PartialOrd for IeeeFloat<S> { + fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { + match (self.category, rhs.category) { + (Category::NaN, _) | + (_, Category::NaN) => None, + + (Category::Infinity, Category::Infinity) => Some((!self.sign).cmp(&(!rhs.sign))), + + (Category::Zero, Category::Zero) => Some(Ordering::Equal), + + (Category::Infinity, _) | + (Category::Normal, Category::Zero) => Some((!self.sign).cmp(&self.sign)), + + (_, Category::Infinity) | + (Category::Zero, Category::Normal) => Some(rhs.sign.cmp(&(!rhs.sign))), + + (Category::Normal, Category::Normal) => { + // Two normal numbers. Do they have the same sign? + Some((!self.sign).cmp(&(!rhs.sign)).then_with(|| { + // Compare absolute values; invert result if negative. 
+ let result = self.cmp_abs_normal(*rhs); + + if self.sign { result.reverse() } else { result } + })) + } + } + } +} + +impl<S> Neg for IeeeFloat<S> { + type Output = Self; + fn neg(mut self) -> Self { + self.sign = !self.sign; + self + } +} + +/// Prints this value as a decimal string. +/// +/// \param precision The maximum number of digits of +/// precision to output. If there are fewer digits available, +/// zero padding will not be used unless the value is +/// integral and small enough to be expressed in +/// precision digits. 0 means to use the natural +/// precision of the number. +/// \param width The maximum number of zeros to +/// consider inserting before falling back to scientific +/// notation. 0 means to always use scientific notation. +/// +/// \param alternate Indicate whether to remove the trailing zero in +/// fraction part or not. Also setting this parameter to true forces +/// producing of output more similar to default printf behavior. +/// Specifically the lower e is used as exponent delimiter and exponent +/// always contains no less than two digits. 
+/// +/// Number precision width Result +/// ------ --------- ----- ------ +/// 1.01E+4 5 2 10100 +/// 1.01E+4 4 2 1.01E+4 +/// 1.01E+4 5 1 1.01E+4 +/// 1.01E-2 5 2 0.0101 +/// 1.01E-2 4 2 0.0101 +/// 1.01E-2 4 1 1.01E-2 +impl<S: Semantics> fmt::Display for IeeeFloat<S> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let width = f.width().unwrap_or(3); + let alternate = f.alternate(); + + match self.category { + Category::Infinity => { + if self.sign { + return f.write_str("-Inf"); + } else { + return f.write_str("+Inf"); + } + } + + Category::NaN => return f.write_str("NaN"), + + Category::Zero => { + if self.sign { + f.write_char('-')?; + } + + if width == 0 { + if alternate { + f.write_str("0.0")?; + if let Some(n) = f.precision() { + for _ in 1..n { + f.write_char('0')?; + } + } + f.write_str("e+00")?; + } else { + f.write_str("0.0E+0")?; + } + } else { + f.write_char('0')?; + } + return Ok(()); + } + + Category::Normal => {} + } + + if self.sign { + f.write_char('-')?; + } + + // We use enough digits so the number can be round-tripped back to an + // APFloat. The formula comes from "How to Print Floating-Point Numbers + // Accurately" by Steele and White. + // FIXME: Using a formula based purely on the precision is conservative; + // we can print fewer digits depending on the actual value being printed. + + // precision = 2 + floor(S::PRECISION / lg_2(10)) + let precision = f.precision().unwrap_or(2 + S::PRECISION * 59 / 196); + + // Decompose the number into an APInt and an exponent. + let mut exp = self.exp - (S::PRECISION as ExpInt - 1); + let mut sig = vec![self.sig[0]]; + + // Ignore trailing binary zeros. + let trailing_zeros = sig[0].trailing_zeros(); + let _: Loss = sig::shift_right(&mut sig, &mut exp, trailing_zeros as usize); + + // Change the exponent from 2^e to 10^e. + if exp == 0 { + // Nothing to do. + } else if exp > 0 { + // Just shift left. 
+ let shift = exp as usize; + sig.resize(limbs_for_bits(S::PRECISION + shift), 0); + sig::shift_left(&mut sig, &mut exp, shift); + } else { + // exp < 0 + let mut texp = -exp as usize; + + // We transform this using the identity: + // (N)(2^-e) == (N)(5^e)(10^-e) + + // Multiply significand by 5^e. + // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8) + let mut sig_scratch = vec![]; + let mut p5 = vec![]; + let mut p5_scratch = vec![]; + while texp != 0 { + if p5.is_empty() { + p5.push(5); + } else { + p5_scratch.resize(p5.len() * 2, 0); + let _: Loss = + sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS); + while p5_scratch.last() == Some(&0) { + p5_scratch.pop(); + } + mem::swap(&mut p5, &mut p5_scratch); + } + if texp & 1 != 0 { + sig_scratch.resize(sig.len() + p5.len(), 0); + let _: Loss = sig::mul( + &mut sig_scratch, + &mut 0, + &sig, + &p5, + (sig.len() + p5.len()) * LIMB_BITS, + ); + while sig_scratch.last() == Some(&0) { + sig_scratch.pop(); + } + mem::swap(&mut sig, &mut sig_scratch); + } + texp >>= 1; + } + } + + // Fill the buffer. + let mut buffer = vec![]; + + // Ignore digits from the significand until it is no more + // precise than is required for the desired precision. + // 196/59 is a very slight overestimate of lg_2(10). + let required = (precision * 196 + 58) / 59; + let mut discard_digits = sig::omsb(&sig).saturating_sub(required) * 59 / 196; + let mut in_trail = true; + while !sig.is_empty() { + // Perform short division by 10 to extract the rightmost digit. + // rem <- sig % 10 + // sig <- sig / 10 + let mut rem = 0; + for limb in sig.iter_mut().rev() { + // We don't have an integer doubly wide than Limb, + // so we have to split the divrem on two halves. 
+ const HALF_BITS: usize = LIMB_BITS / 2; + let mut halves = [*limb & ((1 << HALF_BITS) - 1), *limb >> HALF_BITS]; + for half in halves.iter_mut().rev() { + *half |= rem << HALF_BITS; + rem = *half % 10; + *half /= 10; + } + *limb = halves[0] | (halves[1] << HALF_BITS); + } + // Reduce the sigificand to avoid wasting time dividing 0's. + while sig.last() == Some(&0) { + sig.pop(); + } + + let digit = rem; + + // Ignore digits we don't need. + if discard_digits > 0 { + discard_digits -= 1; + exp += 1; + continue; + } + + // Drop trailing zeros. + if in_trail && digit == 0 { + exp += 1; + } else { + in_trail = false; + buffer.push(b'0' + digit as u8); + } + } + + assert!(!buffer.is_empty(), "no characters in buffer!"); + + // Drop down to precision. + // FIXME: don't do more precise calculations above than are required. + if buffer.len() > precision { + // The most significant figures are the last ones in the buffer. + let mut first_sig = buffer.len() - precision; + + // Round. + // FIXME: this probably shouldn't use 'round half up'. + + // Rounding down is just a truncation, except we also want to drop + // trailing zeros from the new result. + if buffer[first_sig - 1] < b'5' { + while first_sig < buffer.len() && buffer[first_sig] == b'0' { + first_sig += 1; + } + } else { + // Rounding up requires a decimal add-with-carry. If we continue + // the carry, the newly-introduced zeros will just be truncated. + for x in &mut buffer[first_sig..] { + if *x == b'9' { + first_sig += 1; + } else { + *x += 1; + break; + } + } + } + + exp += first_sig as ExpInt; + buffer.drain(..first_sig); + + // If we carried through, we have exactly one digit of precision. + if buffer.is_empty() { + buffer.push(b'1'); + } + } + + let digits = buffer.len(); + + // Check whether we should use scientific notation. + let scientific = if width == 0 { + true + } else { + if exp >= 0 { + // 765e3 --> 765000 + // ^^^ + // But we shouldn't make the number look more precise than it is. 
+ exp as usize > width || digits + exp as usize > precision + } else { + // Power of the most significant digit. + let msd = exp + (digits - 1) as ExpInt; + if msd >= 0 { + // 765e-2 == 7.65 + false + } else { + // 765e-5 == 0.00765 + // ^ ^^ + -msd as usize > width + } + } + }; + + // Scientific formatting is pretty straightforward. + if scientific { + exp += digits as ExpInt - 1; + + f.write_char(buffer[digits - 1] as char)?; + f.write_char('.')?; + let truncate_zero = !alternate; + if digits == 1 && truncate_zero { + f.write_char('0')?; + } else { + for &d in buffer[..digits - 1].iter().rev() { + f.write_char(d as char)?; + } + } + // Fill with zeros up to precision. + if !truncate_zero && precision > digits - 1 { + for _ in 0..precision - digits + 1 { + f.write_char('0')?; + } + } + // For alternate we use lower 'e'. + f.write_char(if alternate { 'e' } else { 'E' })?; + + // Exponent always at least two digits if we do not truncate zeros. + if truncate_zero { + write!(f, "{:+}", exp)?; + } else { + write!(f, "{:+03}", exp)?; + } + + return Ok(()); + } + + // Non-scientific, positive exponents. + if exp >= 0 { + for &d in buffer.iter().rev() { + f.write_char(d as char)?; + } + for _ in 0..exp { + f.write_char('0')?; + } + return Ok(()); + } + + // Non-scientific, negative exponents. 
+ let unit_place = -exp as usize; + if unit_place < digits { + for &d in buffer[unit_place..].iter().rev() { + f.write_char(d as char)?; + } + f.write_char('.')?; + for &d in buffer[..unit_place].iter().rev() { + f.write_char(d as char)?; + } + } else { + f.write_str("0.")?; + for _ in digits..unit_place { + f.write_char('0')?; + } + for &d in buffer.iter().rev() { + f.write_char(d as char)?; + } + } + + Ok(()) + } +} + +impl<S: Semantics> fmt::Debug for IeeeFloat<S> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}({:?} | {}{:?} * 2^{})", + self, self.category, + if self.sign { "-" } else { "+" }, + self.sig, + self.exp) + } +} + +impl<S: Semantics> Float for IeeeFloat<S> { + const BITS: usize = S::BITS; + const PRECISION: usize = S::PRECISION; + const MAX_EXP: ExpInt = S::MAX_EXP; + const MIN_EXP: ExpInt = S::MIN_EXP; + + const ZERO: Self = IeeeFloat { + sig: [0], + exp: S::MIN_EXP - 1, + category: Category::Zero, + sign: false, + marker: PhantomData, + }; + + const INFINITY: Self = IeeeFloat { + sig: [0], + exp: S::MAX_EXP + 1, + category: Category::Infinity, + sign: false, + marker: PhantomData, + }; + + // FIXME(eddyb) remove when qnan becomes const fn. + const NAN: Self = IeeeFloat { + sig: [S::QNAN_SIGNIFICAND], + exp: S::MAX_EXP + 1, + category: Category::NaN, + sign: false, + marker: PhantomData, + }; + + fn qnan(payload: Option<u128>) -> Self { + IeeeFloat { + sig: [ + S::QNAN_SIGNIFICAND | + payload.map_or(0, |payload| { + // Zero out the excess bits of the significand. + payload & ((1 << S::QNAN_BIT) - 1) + }), + ], + exp: S::MAX_EXP + 1, + category: Category::NaN, + sign: false, + marker: PhantomData, + } + } + + fn snan(payload: Option<u128>) -> Self { + let mut snan = Self::qnan(payload); + + // We always have to clear the QNaN bit to make it an SNaN. 
+ sig::clear_bit(&mut snan.sig, S::QNAN_BIT); + + // If there are no bits set in the payload, we have to set + // *something* to make it a NaN instead of an infinity; + // conventionally, this is the next bit down from the QNaN bit. + if snan.sig[0] & !S::QNAN_SIGNIFICAND == 0 { + sig::set_bit(&mut snan.sig, S::QNAN_BIT - 1); + } + + snan + } + + fn largest() -> Self { + // We want (in interchange format): + // exponent = 1..10 + // significand = 1..1 + IeeeFloat { + sig: [!0 & ((1 << S::PRECISION) - 1)], + exp: S::MAX_EXP, + category: Category::Normal, + sign: false, + marker: PhantomData, + } + } + + // We want (in interchange format): + // exponent = 0..0 + // significand = 0..01 + const SMALLEST: Self = IeeeFloat { + sig: [1], + exp: S::MIN_EXP, + category: Category::Normal, + sign: false, + marker: PhantomData, + }; + + fn smallest_normalized() -> Self { + // We want (in interchange format): + // exponent = 0..0 + // significand = 10..0 + IeeeFloat { + sig: [1 << (S::PRECISION - 1)], + exp: S::MIN_EXP, + category: Category::Normal, + sign: false, + marker: PhantomData, + } + } + + fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> { + let status = match (self.category, rhs.category) { + (Category::Infinity, Category::Infinity) => { + // Differently signed infinities can only be validly + // subtracted. + if self.sign != rhs.sign { + self = Self::NAN; + Status::INVALID_OP + } else { + Status::OK + } + } + + // Sign may depend on rounding mode; handled below. + (_, Category::Zero) | + (Category::NaN, _) | + (Category::Infinity, Category::Normal) => Status::OK, + + (Category::Zero, _) | + (_, Category::NaN) | + (_, Category::Infinity) => { + self = rhs; + Status::OK + } + + // This return code means it was not a simple case. 
+ (Category::Normal, Category::Normal) => { + let loss = sig::add_or_sub( + &mut self.sig, + &mut self.exp, + &mut self.sign, + &mut [rhs.sig[0]], + rhs.exp, + rhs.sign, + ); + let status; + self = unpack!(status=, self.normalize(round, loss)); + + // Can only be zero if we lost no fraction. + assert!(self.category != Category::Zero || loss == Loss::ExactlyZero); + + status + } + }; + + // If two numbers add (exactly) to zero, IEEE 754 decrees it is a + // positive zero unless rounding to minus infinity, except that + // adding two like-signed zeroes gives that zero. + if self.category == Category::Zero && + (rhs.category != Category::Zero || self.sign != rhs.sign) + { + self.sign = round == Round::TowardNegative; + } + + status.and(self) + } + + fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> { + self.sign ^= rhs.sign; + + match (self.category, rhs.category) { + (Category::NaN, _) => { + self.sign = false; + Status::OK.and(self) + } + + (_, Category::NaN) => { + self.sign = false; + self.category = Category::NaN; + self.sig = rhs.sig; + Status::OK.and(self) + } + + (Category::Zero, Category::Infinity) | + (Category::Infinity, Category::Zero) => Status::INVALID_OP.and(Self::NAN), + + (_, Category::Infinity) | + (Category::Infinity, _) => { + self.category = Category::Infinity; + Status::OK.and(self) + } + + (Category::Zero, _) | + (_, Category::Zero) => { + self.category = Category::Zero; + Status::OK.and(self) + } + + (Category::Normal, Category::Normal) => { + self.exp += rhs.exp; + let mut wide_sig = [0; 2]; + let loss = sig::mul( + &mut wide_sig, + &mut self.exp, + &self.sig, + &rhs.sig, + S::PRECISION, + ); + self.sig = [wide_sig[0]]; + let mut status; + self = unpack!(status=, self.normalize(round, loss)); + if loss != Loss::ExactlyZero { + status |= Status::INEXACT; + } + status.and(self) + } + } + } + + fn mul_add_r(mut self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> { + // If and only if all arguments are normal do 
we need to do an + // extended-precision calculation. + if !self.is_finite_non_zero() || !multiplicand.is_finite_non_zero() || !addend.is_finite() { + let mut status; + self = unpack!(status=, self.mul_r(multiplicand, round)); + + // FS can only be Status::OK or Status::INVALID_OP. There is no more work + // to do in the latter case. The IEEE-754R standard says it is + // implementation-defined in this case whether, if ADDEND is a + // quiet NaN, we raise invalid op; this implementation does so. + // + // If we need to do the addition we can do so with normal + // precision. + if status == Status::OK { + self = unpack!(status=, self.add_r(addend, round)); + } + return status.and(self); + } + + // Post-multiplication sign, before addition. + self.sign ^= multiplicand.sign; + + // Allocate space for twice as many bits as the original significand, plus one + // extra bit for the addition to overflow into. + assert!(limbs_for_bits(S::PRECISION * 2 + 1) <= 2); + let mut wide_sig = sig::widening_mul(self.sig[0], multiplicand.sig[0]); + + let mut loss = Loss::ExactlyZero; + let mut omsb = sig::omsb(&wide_sig); + self.exp += multiplicand.exp; + + // Assume the operands involved in the multiplication are single-precision + // FP, and the two multiplicants are: + // lhs = a23 . a22 ... a0 * 2^e1 + // rhs = b23 . b22 ... b0 * 2^e2 + // the result of multiplication is: + // lhs = c48 c47 c46 . c45 ... c0 * 2^(e1+e2) + // Note that there are three significant bits at the left-hand side of the + // radix point: two for the multiplication, and an overflow bit for the + // addition (that will always be zero at this point). Move the radix point + // toward left by two bits, and adjust exponent accordingly. + self.exp += 2; + + if addend.is_non_zero() { + // Normalize our MSB to one below the top bit to allow for overflow. 
+ let ext_precision = 2 * S::PRECISION + 1; + if omsb != ext_precision - 1 { + assert!(ext_precision > omsb); + sig::shift_left(&mut wide_sig, &mut self.exp, (ext_precision - 1) - omsb); + } + + // The intermediate result of the multiplication has "2 * S::PRECISION" + // signicant bit; adjust the addend to be consistent with mul result. + let mut ext_addend_sig = [addend.sig[0], 0]; + + // Extend the addend significand to ext_precision - 1. This guarantees + // that the high bit of the significand is zero (same as wide_sig), + // so the addition will overflow (if it does overflow at all) into the top bit. + sig::shift_left( + &mut ext_addend_sig, + &mut 0, + ext_precision - 1 - S::PRECISION, + ); + loss = sig::add_or_sub( + &mut wide_sig, + &mut self.exp, + &mut self.sign, + &mut ext_addend_sig, + addend.exp + 1, + addend.sign, + ); + + omsb = sig::omsb(&wide_sig); + } + + // Convert the result having "2 * S::PRECISION" significant-bits back to the one + // having "S::PRECISION" significant-bits. First, move the radix point from + // poision "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent need to be + // adjusted by "2*S::PRECISION - 1" - "S::PRECISION - 1" = "S::PRECISION". + self.exp -= S::PRECISION as ExpInt + 1; + + // In case MSB resides at the left-hand side of radix point, shift the + // mantissa right by some amount to make sure the MSB reside right before + // the radix point (i.e. "MSB . rest-significant-bits"). + if omsb > S::PRECISION { + let bits = omsb - S::PRECISION; + loss = sig::shift_right(&mut wide_sig, &mut self.exp, bits).combine(loss); + } + + self.sig[0] = wide_sig[0]; + + let mut status; + self = unpack!(status=, self.normalize(round, loss)); + if loss != Loss::ExactlyZero { + status |= Status::INEXACT; + } + + // If two numbers add (exactly) to zero, IEEE 754 decrees it is a + // positive zero unless rounding to minus infinity, except that + // adding two like-signed zeroes gives that zero. 
+ if self.category == Category::Zero && !status.intersects(Status::UNDERFLOW) && + self.sign != addend.sign + { + self.sign = round == Round::TowardNegative; + } + + status.and(self) + } + + fn div_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> { + self.sign ^= rhs.sign; + + match (self.category, rhs.category) { + (Category::NaN, _) => { + self.sign = false; + Status::OK.and(self) + } + + (_, Category::NaN) => { + self.category = Category::NaN; + self.sig = rhs.sig; + self.sign = false; + Status::OK.and(self) + } + + (Category::Infinity, Category::Infinity) | + (Category::Zero, Category::Zero) => Status::INVALID_OP.and(Self::NAN), + + (Category::Infinity, _) | + (Category::Zero, _) => Status::OK.and(self), + + (Category::Normal, Category::Infinity) => { + self.category = Category::Zero; + Status::OK.and(self) + } + + (Category::Normal, Category::Zero) => { + self.category = Category::Infinity; + Status::DIV_BY_ZERO.and(self) + } + + (Category::Normal, Category::Normal) => { + self.exp -= rhs.exp; + let dividend = self.sig[0]; + let loss = sig::div( + &mut self.sig, + &mut self.exp, + &mut [dividend], + &mut [rhs.sig[0]], + S::PRECISION, + ); + let mut status; + self = unpack!(status=, self.normalize(round, loss)); + if loss != Loss::ExactlyZero { + status |= Status::INEXACT; + } + status.and(self) + } + } + } + + fn c_fmod(mut self, rhs: Self) -> StatusAnd<Self> { + match (self.category, rhs.category) { + (Category::NaN, _) | + (Category::Zero, Category::Infinity) | + (Category::Zero, Category::Normal) | + (Category::Normal, Category::Infinity) => Status::OK.and(self), + + (_, Category::NaN) => { + self.sign = false; + self.category = Category::NaN; + self.sig = rhs.sig; + Status::OK.and(self) + } + + (Category::Infinity, _) | + (_, Category::Zero) => Status::INVALID_OP.and(Self::NAN), + + (Category::Normal, Category::Normal) => { + while self.is_finite_non_zero() && rhs.is_finite_non_zero() && + self.cmp_abs_normal(rhs) != Ordering::Less + { + let mut v = 
rhs.scalbn(self.ilogb() - rhs.ilogb()); + if self.cmp_abs_normal(v) == Ordering::Less { + v = v.scalbn(-1); + } + v.sign = self.sign; + + let status; + self = unpack!(status=, self - v); + assert_eq!(status, Status::OK); + } + Status::OK.and(self) + } + } + } + + fn round_to_integral(self, round: Round) -> StatusAnd<Self> { + // If the exponent is large enough, we know that this value is already + // integral, and the arithmetic below would potentially cause it to saturate + // to +/-Inf. Bail out early instead. + if self.is_finite_non_zero() && self.exp + 1 >= S::PRECISION as ExpInt { + return Status::OK.and(self); + } + + // The algorithm here is quite simple: we add 2^(p-1), where p is the + // precision of our format, and then subtract it back off again. The choice + // of rounding modes for the addition/subtraction determines the rounding mode + // for our integral rounding as well. + // NOTE: When the input value is negative, we do subtraction followed by + // addition instead. + assert!(S::PRECISION <= 128); + let mut status; + let magic_const = unpack!(status=, Self::from_u128(1 << (S::PRECISION - 1))); + let magic_const = magic_const.copy_sign(self); + + if status != Status::OK { + return status.and(self); + } + + let mut r = self; + r = unpack!(status=, r.add_r(magic_const, round)); + if status != Status::OK && status != Status::INEXACT { + return status.and(self); + } + + // Restore the input sign to handle 0.0/-0.0 cases correctly. + r.sub_r(magic_const, round).map(|r| r.copy_sign(self)) + } + + fn next_up(mut self) -> StatusAnd<Self> { + // Compute nextUp(x), handling each float category separately. + match self.category { + Category::Infinity => { + if self.sign { + // nextUp(-inf) = -largest + Status::OK.and(-Self::largest()) + } else { + // nextUp(+inf) = +inf + Status::OK.and(self) + } + } + Category::NaN => { + // IEEE-754R 2008 6.2 Par 2: nextUp(sNaN) = qNaN. Set Invalid flag. + // IEEE-754R 2008 6.2: nextUp(qNaN) = qNaN. 
Must be identity so we do not + // change the payload. + if self.is_signaling() { + // For consistency, propagate the sign of the sNaN to the qNaN. + Status::INVALID_OP.and(Self::NAN.copy_sign(self)) + } else { + Status::OK.and(self) + } + } + Category::Zero => { + // nextUp(pm 0) = +smallest + Status::OK.and(Self::SMALLEST) + } + Category::Normal => { + // nextUp(-smallest) = -0 + if self.is_smallest() && self.sign { + return Status::OK.and(-Self::ZERO); + } + + // nextUp(largest) == INFINITY + if self.is_largest() && !self.sign { + return Status::OK.and(Self::INFINITY); + } + + // Excluding the integral bit. This allows us to test for binade boundaries. + let sig_mask = (1 << (S::PRECISION - 1)) - 1; + + // nextUp(normal) == normal + inc. + if self.sign { + // If we are negative, we need to decrement the significand. + + // We only cross a binade boundary that requires adjusting the exponent + // if: + // 1. exponent != S::MIN_EXP. This implies we are not in the + // smallest binade or are dealing with denormals. + // 2. Our significand excluding the integral bit is all zeros. + let crossing_binade_boundary = self.exp != S::MIN_EXP && + self.sig[0] & sig_mask == 0; + + // Decrement the significand. + // + // We always do this since: + // 1. If we are dealing with a non-binade decrement, by definition we + // just decrement the significand. + // 2. If we are dealing with a normal -> normal binade decrement, since + // we have an explicit integral bit the fact that all bits but the + // integral bit are zero implies that subtracting one will yield a + // significand with 0 integral bit and 1 in all other spots. Thus we + // must just adjust the exponent and set the integral bit to 1. + // 3. If we are dealing with a normal -> denormal binade decrement, + // since we set the integral bit to 0 when we represent denormals, we + // just decrement the significand. + sig::decrement(&mut self.sig); + + if crossing_binade_boundary { + // Our result is a normal number. 
Do the following: + // 1. Set the integral bit to 1. + // 2. Decrement the exponent. + sig::set_bit(&mut self.sig, S::PRECISION - 1); + self.exp -= 1; + } + } else { + // If we are positive, we need to increment the significand. + + // We only cross a binade boundary that requires adjusting the exponent if + // the input is not a denormal and all of said input's significand bits + // are set. If all of said conditions are true: clear the significand, set + // the integral bit to 1, and increment the exponent. If we have a + // denormal always increment since moving denormals and the numbers in the + // smallest normal binade have the same exponent in our representation. + let crossing_binade_boundary = !self.is_denormal() && + self.sig[0] & sig_mask == sig_mask; + + if crossing_binade_boundary { + self.sig = [0]; + sig::set_bit(&mut self.sig, S::PRECISION - 1); + assert_ne!( + self.exp, + S::MAX_EXP, + "We can not increment an exponent beyond the MAX_EXP \ + allowed by the given floating point semantics." + ); + self.exp += 1; + } else { + sig::increment(&mut self.sig); + } + } + Status::OK.and(self) + } + } + } + + fn from_bits(input: u128) -> Self { + // Dispatch to semantics. + S::from_bits(input) + } + + fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> { + IeeeFloat { + sig: [input], + exp: S::PRECISION as ExpInt - 1, + category: Category::Normal, + sign: false, + marker: PhantomData, + }.normalize(round, Loss::ExactlyZero) + } + + fn from_str_r(mut s: &str, mut round: Round) -> Result<StatusAnd<Self>, ParseError> { + if s.is_empty() { + return Err(ParseError("Invalid string length")); + } + + // Handle special cases. + match s { + "inf" | "INFINITY" => return Ok(Status::OK.and(Self::INFINITY)), + "-inf" | "-INFINITY" => return Ok(Status::OK.and(-Self::INFINITY)), + "nan" | "NaN" => return Ok(Status::OK.and(Self::NAN)), + "-nan" | "-NaN" => return Ok(Status::OK.and(-Self::NAN)), + _ => {} + } + + // Handle a leading minus sign. 
+ let minus = s.starts_with("-"); + if minus || s.starts_with("+") { + s = &s[1..]; + if s.is_empty() { + return Err(ParseError("String has no digits")); + } + } + + // Adjust the rounding mode for the absolute value below. + if minus { + round = -round; + } + + let r = if s.starts_with("0x") || s.starts_with("0X") { + s = &s[2..]; + if s.is_empty() { + return Err(ParseError("Invalid string")); + } + Self::from_hexadecimal_string(s, round)? + } else { + Self::from_decimal_string(s, round)? + }; + + Ok(r.map(|r| if minus { -r } else { r })) + } + + fn to_bits(self) -> u128 { + // Dispatch to semantics. + S::to_bits(self) + } + + fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> { + // The result of trying to convert a number too large. + let overflow = if self.sign { + // Negative numbers cannot be represented as unsigned. + 0 + } else { + // Largest unsigned integer of the given width. + !0 >> (128 - width) + }; + + *is_exact = false; + + match self.category { + Category::NaN => Status::INVALID_OP.and(0), + + Category::Infinity => Status::INVALID_OP.and(overflow), + + Category::Zero => { + // Negative zero can't be represented as an int. + *is_exact = !self.sign; + Status::OK.and(0) + } + + Category::Normal => { + let mut r = 0; + + // Step 1: place our absolute value, with any fraction truncated, in + // the destination. + let truncated_bits = if self.exp < 0 { + // Our absolute value is less than one; truncate everything. + // For exponent -1 the integer bit represents .5, look at that. + // For smaller exponents leftmost truncated bit is 0. + S::PRECISION - 1 + (-self.exp) as usize + } else { + // We want the most significant (exponent + 1) bits; the rest are + // truncated. + let bits = self.exp as usize + 1; + + // Hopelessly large in magnitude? + if bits > width { + return Status::INVALID_OP.and(overflow); + } + + if bits < S::PRECISION { + // We truncate (S::PRECISION - bits) bits. 
+ r = self.sig[0] >> (S::PRECISION - bits); + S::PRECISION - bits + } else { + // We want at least as many bits as are available. + r = self.sig[0] << (bits - S::PRECISION); + 0 + } + }; + + // Step 2: work out any lost fraction, and increment the absolute + // value if we would round away from zero. + let mut loss = Loss::ExactlyZero; + if truncated_bits > 0 { + loss = Loss::through_truncation(&self.sig, truncated_bits); + if loss != Loss::ExactlyZero && + self.round_away_from_zero(round, loss, truncated_bits) + { + r = r.wrapping_add(1); + if r == 0 { + return Status::INVALID_OP.and(overflow); // Overflow. + } + } + } + + // Step 3: check if we fit in the destination. + if r > overflow { + return Status::INVALID_OP.and(overflow); + } + + if loss == Loss::ExactlyZero { + *is_exact = true; + Status::OK.and(r) + } else { + Status::INEXACT.and(r) + } + } + } + } + + fn cmp_abs_normal(self, rhs: Self) -> Ordering { + assert!(self.is_finite_non_zero()); + assert!(rhs.is_finite_non_zero()); + + // If exponents are equal, do an unsigned comparison of the significands. + self.exp.cmp(&rhs.exp).then_with( + || sig::cmp(&self.sig, &rhs.sig), + ) + } + + fn bitwise_eq(self, rhs: Self) -> bool { + if self.category != rhs.category || self.sign != rhs.sign { + return false; + } + + if self.category == Category::Zero || self.category == Category::Infinity { + return true; + } + + if self.is_finite_non_zero() && self.exp != rhs.exp { + return false; + } + + self.sig == rhs.sig + } + + fn is_negative(self) -> bool { + self.sign + } + + fn is_denormal(self) -> bool { + self.is_finite_non_zero() && self.exp == S::MIN_EXP && + !sig::get_bit(&self.sig, S::PRECISION - 1) + } + + fn is_signaling(self) -> bool { + // IEEE-754R 2008 6.2.1: A signaling NaN bit string should be encoded with the + // first bit of the trailing significand being 0. 
+ self.is_nan() && !sig::get_bit(&self.sig, S::QNAN_BIT) + } + + fn category(self) -> Category { + self.category + } + + fn get_exact_inverse(self) -> Option<Self> { + // Special floats and denormals have no exact inverse. + if !self.is_finite_non_zero() { + return None; + } + + // Check that the number is a power of two by making sure that only the + // integer bit is set in the significand. + if self.sig != [1 << (S::PRECISION - 1)] { + return None; + } + + // Get the inverse. + let mut reciprocal = Self::from_u128(1).value; + let status; + reciprocal = unpack!(status=, reciprocal / self); + if status != Status::OK { + return None; + } + + // Avoid multiplication with a denormal, it is not safe on all platforms and + // may be slower than a normal division. + if reciprocal.is_denormal() { + return None; + } + + assert!(reciprocal.is_finite_non_zero()); + assert_eq!(reciprocal.sig, [1 << (S::PRECISION - 1)]); + + Some(reciprocal) + } + + fn ilogb(mut self) -> ExpInt { + if self.is_nan() { + return IEK_NAN; + } + if self.is_zero() { + return IEK_ZERO; + } + if self.is_infinite() { + return IEK_INF; + } + if !self.is_denormal() { + return self.exp; + } + + let sig_bits = (S::PRECISION - 1) as ExpInt; + self.exp += sig_bits; + self = self.normalize(Round::NearestTiesToEven, Loss::ExactlyZero) + .value; + self.exp - sig_bits + } + + fn scalbn_r(mut self, exp: ExpInt, round: Round) -> Self { + // If exp is wildly out-of-scale, simply adding it to self.exp will + // overflow; clamp it to a safe range before adding, but ensure that the range + // is large enough that the clamp does not change the result. The range we + // need to support is the difference between the largest possible exponent and + // the normalized exponent of half the smallest denormal. + + let sig_bits = (S::PRECISION - 1) as i32; + let max_change = S::MAX_EXP as i32 - (S::MIN_EXP as i32 - sig_bits) + 1; + + // Clamp to one past the range ends to let normalize handle overflow. 
+ let exp_change = cmp::min(cmp::max(exp as i32, (-max_change - 1)), max_change); + self.exp = self.exp.saturating_add(exp_change as ExpInt); + self = self.normalize(round, Loss::ExactlyZero).value; + if self.is_nan() { + sig::set_bit(&mut self.sig, S::QNAN_BIT); + } + self + } + + fn frexp_r(mut self, exp: &mut ExpInt, round: Round) -> Self { + *exp = self.ilogb(); + + // Quiet signalling nans. + if *exp == IEK_NAN { + sig::set_bit(&mut self.sig, S::QNAN_BIT); + return self; + } + + if *exp == IEK_INF { + return self; + } + + // 1 is added because frexp is defined to return a normalized fraction in + // +/-[0.5, 1.0), rather than the usual +/-[1.0, 2.0). + if *exp == IEK_ZERO { + *exp = 0; + } else { + *exp += 1; + } + self.scalbn_r(-*exp, round) + } +} + +impl<S: Semantics, T: Semantics> FloatConvert<IeeeFloat<T>> for IeeeFloat<S> { + fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<IeeeFloat<T>> { + let mut r = IeeeFloat { + sig: self.sig, + exp: self.exp, + category: self.category, + sign: self.sign, + marker: PhantomData, + }; + + // x86 has some unusual NaNs which cannot be represented in any other + // format; note them here. + fn is_x87_double_extended<S: Semantics>() -> bool { + S::QNAN_SIGNIFICAND == X87DoubleExtendedS::QNAN_SIGNIFICAND + } + let x87_special_nan = is_x87_double_extended::<S>() && !is_x87_double_extended::<T>() && + r.category == Category::NaN && + (r.sig[0] & S::QNAN_SIGNIFICAND) != S::QNAN_SIGNIFICAND; + + // If this is a truncation of a denormal number, and the target semantics + // has larger exponent range than the source semantics (this can happen + // when truncating from PowerPC double-double to double format), the + // right shift could lose result mantissa bits. Adjust exponent instead + // of performing excessive shift. 
+ let mut shift = T::PRECISION as ExpInt - S::PRECISION as ExpInt; + if shift < 0 && r.is_finite_non_zero() { + let mut exp_change = sig::omsb(&r.sig) as ExpInt - S::PRECISION as ExpInt; + if r.exp + exp_change < T::MIN_EXP { + exp_change = T::MIN_EXP - r.exp; + } + if exp_change < shift { + exp_change = shift; + } + if exp_change < 0 { + shift -= exp_change; + r.exp += exp_change; + } + } + + // If this is a truncation, perform the shift. + let mut loss = Loss::ExactlyZero; + if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { + loss = sig::shift_right(&mut r.sig, &mut 0, -shift as usize); + } + + // If this is an extension, perform the shift. + if shift > 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { + sig::shift_left(&mut r.sig, &mut 0, shift as usize); + } + + let status; + if r.is_finite_non_zero() { + r = unpack!(status=, r.normalize(round, loss)); + *loses_info = status != Status::OK; + } else if r.category == Category::NaN { + *loses_info = loss != Loss::ExactlyZero || x87_special_nan; + + // For x87 extended precision, we want to make a NaN, not a special NaN if + // the input wasn't special either. + if !x87_special_nan && is_x87_double_extended::<T>() { + sig::set_bit(&mut r.sig, T::PRECISION - 1); + } + + // gcc forces the Quiet bit on, which means (float)(double)(float_sNan) + // does not give you back the same bits. This is dubious, and we + // don't currently do it. You're really supposed to get + // an invalid operation signal at runtime, but nobody does that. + status = Status::OK; + } else { + *loses_info = false; + status = Status::OK; + } + + status.and(r) + } +} + +impl<S: Semantics> IeeeFloat<S> { + /// Handle positive overflow. We either return infinity or + /// the largest finite number. For negative overflow, + /// negate the `round` argument before calling. + fn overflow_result(round: Round) -> StatusAnd<Self> { + match round { + // Infinity? 
+ Round::NearestTiesToEven | Round::NearestTiesToAway | Round::TowardPositive => { + (Status::OVERFLOW | Status::INEXACT).and(Self::INFINITY) + } + // Otherwise we become the largest finite number. + Round::TowardNegative | Round::TowardZero => Status::INEXACT.and(Self::largest()), + } + } + + /// Returns TRUE if, when truncating the current number, with BIT the + /// new LSB, with the given lost fraction and rounding mode, the result + /// would need to be rounded away from zero (i.e., by increasing the + /// signficand). This routine must work for Category::Zero of both signs, and + /// Category::Normal numbers. + fn round_away_from_zero(&self, round: Round, loss: Loss, bit: usize) -> bool { + // NaNs and infinities should not have lost fractions. + assert!(self.is_finite_non_zero() || self.is_zero()); + + // Current callers never pass this so we don't handle it. + assert_ne!(loss, Loss::ExactlyZero); + + match round { + Round::NearestTiesToAway => loss == Loss::ExactlyHalf || loss == Loss::MoreThanHalf, + Round::NearestTiesToEven => { + if loss == Loss::MoreThanHalf { + return true; + } + + // Our zeros don't have a significand to test. + if loss == Loss::ExactlyHalf && self.category != Category::Zero { + return sig::get_bit(&self.sig, bit); + } + + false + } + Round::TowardZero => false, + Round::TowardPositive => !self.sign, + Round::TowardNegative => self.sign, + } + } + + fn normalize(mut self, round: Round, mut loss: Loss) -> StatusAnd<Self> { + if !self.is_finite_non_zero() { + return Status::OK.and(self); + } + + // Before rounding normalize the exponent of Category::Normal numbers. + let mut omsb = sig::omsb(&self.sig); + + if omsb > 0 { + // OMSB is numbered from 1. We want to place it in the integer + // bit numbered PRECISION if possible, with a compensating change in + // the exponent. 
+ let mut final_exp = self.exp.saturating_add( + omsb as ExpInt - S::PRECISION as ExpInt, + ); + + // If the resulting exponent is too high, overflow according to + // the rounding mode. + if final_exp > S::MAX_EXP { + let round = if self.sign { -round } else { round }; + return Self::overflow_result(round).map(|r| r.copy_sign(self)); + } + + // Subnormal numbers have exponent MIN_EXP, and their MSB + // is forced based on that. + if final_exp < S::MIN_EXP { + final_exp = S::MIN_EXP; + } + + // Shifting left is easy as we don't lose precision. + if final_exp < self.exp { + assert_eq!(loss, Loss::ExactlyZero); + + let exp_change = (self.exp - final_exp) as usize; + sig::shift_left(&mut self.sig, &mut self.exp, exp_change); + + return Status::OK.and(self); + } + + // Shift right and capture any new lost fraction. + if final_exp > self.exp { + let exp_change = (final_exp - self.exp) as usize; + loss = sig::shift_right(&mut self.sig, &mut self.exp, exp_change).combine(loss); + + // Keep OMSB up-to-date. + omsb = omsb.saturating_sub(exp_change); + } + } + + // Now round the number according to round given the lost + // fraction. + + // As specified in IEEE 754, since we do not trap we do not report + // underflow for exact results. + if loss == Loss::ExactlyZero { + // Canonicalize zeros. + if omsb == 0 { + self.category = Category::Zero; + } + + return Status::OK.and(self); + } + + // Increment the significand if we're rounding away from zero. + if self.round_away_from_zero(round, loss, 0) { + if omsb == 0 { + self.exp = S::MIN_EXP; + } + + // We should never overflow. + assert_eq!(sig::increment(&mut self.sig), 0); + omsb = sig::omsb(&self.sig); + + // Did the significand increment overflow? + if omsb == S::PRECISION + 1 { + // Renormalize by incrementing the exponent and shifting our + // significand right one. However if we already have the + // maximum exponent we overflow to infinity. 
+ if self.exp == S::MAX_EXP { + self.category = Category::Infinity; + + return (Status::OVERFLOW | Status::INEXACT).and(self); + } + + let _: Loss = sig::shift_right(&mut self.sig, &mut self.exp, 1); + + return Status::INEXACT.and(self); + } + } + + // The normal case - we were and are not denormal, and any + // significand increment above didn't overflow. + if omsb == S::PRECISION { + return Status::INEXACT.and(self); + } + + // We have a non-zero denormal. + assert!(omsb < S::PRECISION); + + // Canonicalize zeros. + if omsb == 0 { + self.category = Category::Zero; + } + + // The Category::Zero case is a denormal that underflowed to zero. + (Status::UNDERFLOW | Status::INEXACT).and(self) + } + + fn from_hexadecimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> { + let mut r = IeeeFloat { + sig: [0], + exp: 0, + category: Category::Normal, + sign: false, + marker: PhantomData, + }; + + let mut any_digits = false; + let mut has_exp = false; + let mut bit_pos = LIMB_BITS as isize; + let mut loss = None; + + // Without leading or trailing zeros, irrespective of the dot. + let mut first_sig_digit = None; + let mut dot = s.len(); + + for (p, c) in s.char_indices() { + // Skip leading zeros and any (hexa)decimal point. + if c == '.' { + if dot != s.len() { + return Err(ParseError("String contains multiple dots")); + } + dot = p; + } else if let Some(hex_value) = c.to_digit(16) { + any_digits = true; + + if first_sig_digit.is_none() { + if hex_value == 0 { + continue; + } + first_sig_digit = Some(p); + } + + // Store the number while we have space. + bit_pos -= 4; + if bit_pos >= 0 { + r.sig[0] |= (hex_value as Limb) << bit_pos; + } else { + // If zero or one-half (the hexadecimal digit 8) are followed + // by non-zero, they're a little more than zero or one-half. 
+ if let Some(ref mut loss) = loss { + if hex_value != 0 { + if *loss == Loss::ExactlyZero { + *loss = Loss::LessThanHalf; + } + if *loss == Loss::ExactlyHalf { + *loss = Loss::MoreThanHalf; + } + } + } else { + loss = Some(match hex_value { + 0 => Loss::ExactlyZero, + 1...7 => Loss::LessThanHalf, + 8 => Loss::ExactlyHalf, + 9...15 => Loss::MoreThanHalf, + _ => unreachable!(), + }); + } + } + } else if c == 'p' || c == 'P' { + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + if dot == s.len() { + dot = p; + } + + let mut chars = s[p + 1..].chars().peekable(); + + // Adjust for the given exponent. + let exp_minus = chars.peek() == Some(&'-'); + if exp_minus || chars.peek() == Some(&'+') { + chars.next(); + } + + for c in chars { + if let Some(value) = c.to_digit(10) { + has_exp = true; + r.exp = r.exp.saturating_mul(10).saturating_add(value as ExpInt); + } else { + return Err(ParseError("Invalid character in exponent")); + } + } + if !has_exp { + return Err(ParseError("Exponent has no digits")); + } + + if exp_minus { + r.exp = -r.exp; + } + + break; + } else { + return Err(ParseError("Invalid character in significand")); + } + } + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + // Hex floats require an exponent but not a hexadecimal point. + if !has_exp { + return Err(ParseError("Hex strings require an exponent")); + } + + // Ignore the exponent if we are zero. + let first_sig_digit = match first_sig_digit { + Some(p) => p, + None => return Ok(Status::OK.and(Self::ZERO)), + }; + + // Calculate the exponent adjustment implicit in the number of + // significant digits and adjust for writing the significand starting + // at the most significant nibble. 
+ let exp_adjustment = if dot > first_sig_digit { + ExpInt::try_from(dot - first_sig_digit).unwrap() + } else { + -ExpInt::try_from(first_sig_digit - dot - 1).unwrap() + }; + let exp_adjustment = exp_adjustment + .saturating_mul(4) + .saturating_sub(1) + .saturating_add(S::PRECISION as ExpInt) + .saturating_sub(LIMB_BITS as ExpInt); + r.exp = r.exp.saturating_add(exp_adjustment); + + Ok(r.normalize(round, loss.unwrap_or(Loss::ExactlyZero))) + } + + fn from_decimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> { + // Given a normal decimal floating point number of the form + // + // dddd.dddd[eE][+-]ddd + // + // where the decimal point and exponent are optional, fill out the + // variables below. Exponent is appropriate if the significand is + // treated as an integer, and normalized_exp if the significand + // is taken to have the decimal point after a single leading + // non-zero digit. + // + // If the value is zero, first_sig_digit is None. + + let mut any_digits = false; + let mut dec_exp = 0i32; + + // Without leading or trailing zeros, irrespective of the dot. + let mut first_sig_digit = None; + let mut last_sig_digit = 0; + let mut dot = s.len(); + + for (p, c) in s.char_indices() { + if c == '.' { + if dot != s.len() { + return Err(ParseError("String contains multiple dots")); + } + dot = p; + } else if let Some(dec_value) = c.to_digit(10) { + any_digits = true; + + if dec_value != 0 { + if first_sig_digit.is_none() { + first_sig_digit = Some(p); + } + last_sig_digit = p; + } + } else if c == 'e' || c == 'E' { + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + if dot == s.len() { + dot = p; + } + + let mut chars = s[p + 1..].chars().peekable(); + + // Adjust for the given exponent. 
+ let exp_minus = chars.peek() == Some(&'-'); + if exp_minus || chars.peek() == Some(&'+') { + chars.next(); + } + + any_digits = false; + for c in chars { + if let Some(value) = c.to_digit(10) { + any_digits = true; + dec_exp = dec_exp.saturating_mul(10).saturating_add(value as i32); + } else { + return Err(ParseError("Invalid character in exponent")); + } + } + if !any_digits { + return Err(ParseError("Exponent has no digits")); + } + + if exp_minus { + dec_exp = -dec_exp; + } + + break; + } else { + return Err(ParseError("Invalid character in significand")); + } + } + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + // Test if we have a zero number allowing for non-zero exponents. + let first_sig_digit = match first_sig_digit { + Some(p) => p, + None => return Ok(Status::OK.and(Self::ZERO)), + }; + + // Adjust the exponents for any decimal point. + if dot > last_sig_digit { + dec_exp = dec_exp.saturating_add((dot - last_sig_digit - 1) as i32); + } else { + dec_exp = dec_exp.saturating_sub((last_sig_digit - dot) as i32); + } + let significand_digits = last_sig_digit - first_sig_digit + 1 - + (dot > first_sig_digit && dot < last_sig_digit) as usize; + let normalized_exp = dec_exp.saturating_add(significand_digits as i32 - 1); + + // Handle the cases where exponents are obviously too large or too + // small. Writing L for log 10 / log 2, a number d.ddddd*10^dec_exp + // definitely overflows if + // + // (dec_exp - 1) * L >= MAX_EXP + // + // and definitely underflows to zero where + // + // (dec_exp + 1) * L <= MIN_EXP - PRECISION + // + // With integer arithmetic the tightest bounds for L are + // + // 93/28 < L < 196/59 [ numerator <= 256 ] + // 42039/12655 < L < 28738/8651 [ numerator <= 65536 ] + + // Check for MAX_EXP. + if normalized_exp.saturating_sub(1).saturating_mul(42039) >= 12655 * S::MAX_EXP as i32 { + // Overflow and round. + return Ok(Self::overflow_result(round)); + } + + // Check for MIN_EXP. 
+ if normalized_exp.saturating_add(1).saturating_mul(28738) <= + 8651 * (S::MIN_EXP as i32 - S::PRECISION as i32) + { + // Underflow to zero and round. + let r = if round == Round::TowardPositive { + IeeeFloat::SMALLEST + } else { + IeeeFloat::ZERO + }; + return Ok((Status::UNDERFLOW | Status::INEXACT).and(r)); + } + + // A tight upper bound on number of bits required to hold an + // N-digit decimal integer is N * 196 / 59. Allocate enough space + // to hold the full significand, and an extra limb required by + // tcMultiplyPart. + let max_limbs = limbs_for_bits(1 + 196 * significand_digits / 59); + let mut dec_sig = Vec::with_capacity(max_limbs); + + // Convert to binary efficiently - we do almost all multiplication + // in a Limb. When this would overflow do we do a single + // bignum multiplication, and then revert again to multiplication + // in a Limb. + let mut chars = s[first_sig_digit..last_sig_digit + 1].chars(); + loop { + let mut val = 0; + let mut multiplier = 1; + + loop { + let dec_value = match chars.next() { + Some('.') => continue, + Some(c) => c.to_digit(10).unwrap(), + None => break, + }; + + multiplier *= 10; + val = val * 10 + dec_value as Limb; + + // The maximum number that can be multiplied by ten with any + // digit added without overflowing a Limb. + if multiplier > (!0 - 9) / 10 { + break; + } + } + + // If we've consumed no digits, we're done. + if multiplier == 1 { + break; + } + + // Multiply out the current limb. + let mut carry = val; + for x in &mut dec_sig { + let [low, mut high] = sig::widening_mul(*x, multiplier); + + // Now add carry. + let (low, overflow) = low.overflowing_add(carry); + high += overflow as Limb; + + *x = low; + carry = high; + } + + // If we had carry, we need another limb (likely but not guaranteed). + if carry > 0 { + dec_sig.push(carry); + } + } + + // Calculate pow(5, abs(dec_exp)) into `pow5_full`. + // The *_calc Vec's are reused scratch space, as an optimization. 
+ let (pow5_full, mut pow5_calc, mut sig_calc, mut sig_scratch_calc) = { + let mut power = dec_exp.abs() as usize; + + const FIRST_EIGHT_POWERS: [Limb; 8] = [1, 5, 25, 125, 625, 3125, 15625, 78125]; + + let mut p5_scratch = vec![]; + let mut p5 = vec![FIRST_EIGHT_POWERS[4]]; + + let mut r_scratch = vec![]; + let mut r = vec![FIRST_EIGHT_POWERS[power & 7]]; + power >>= 3; + + while power > 0 { + // Calculate pow(5,pow(2,n+3)). + p5_scratch.resize(p5.len() * 2, 0); + let _: Loss = sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS); + while p5_scratch.last() == Some(&0) { + p5_scratch.pop(); + } + mem::swap(&mut p5, &mut p5_scratch); + + if power & 1 != 0 { + r_scratch.resize(r.len() + p5.len(), 0); + let _: Loss = sig::mul( + &mut r_scratch, + &mut 0, + &r, + &p5, + (r.len() + p5.len()) * LIMB_BITS, + ); + while r_scratch.last() == Some(&0) { + r_scratch.pop(); + } + mem::swap(&mut r, &mut r_scratch); + } + + power >>= 1; + } + + (r, r_scratch, p5, p5_scratch) + }; + + // Attempt dec_sig * 10^dec_exp with increasing precision. + let mut attempt = 1; + loop { + let calc_precision = (LIMB_BITS << attempt) - 1; + attempt += 1; + + let calc_normal_from_limbs = |sig: &mut Vec<Limb>, + limbs: &[Limb]| + -> StatusAnd<ExpInt> { + sig.resize(limbs_for_bits(calc_precision), 0); + let (mut loss, mut exp) = sig::from_limbs(sig, limbs, calc_precision); + + // Before rounding normalize the exponent of Category::Normal numbers. + let mut omsb = sig::omsb(sig); + + assert_ne!(omsb, 0); + + // OMSB is numbered from 1. We want to place it in the integer + // bit numbered PRECISION if possible, with a compensating change in + // the exponent. + let final_exp = exp.saturating_add(omsb as ExpInt - calc_precision as ExpInt); + + // Shifting left is easy as we don't lose precision. 
+ if final_exp < exp { + assert_eq!(loss, Loss::ExactlyZero); + + let exp_change = (exp - final_exp) as usize; + sig::shift_left(sig, &mut exp, exp_change); + + return Status::OK.and(exp); + } + + // Shift right and capture any new lost fraction. + if final_exp > exp { + let exp_change = (final_exp - exp) as usize; + loss = sig::shift_right(sig, &mut exp, exp_change).combine(loss); + + // Keep OMSB up-to-date. + omsb = omsb.saturating_sub(exp_change); + } + + assert_eq!(omsb, calc_precision); + + // Now round the number according to round given the lost + // fraction. + + // As specified in IEEE 754, since we do not trap we do not report + // underflow for exact results. + if loss == Loss::ExactlyZero { + return Status::OK.and(exp); + } + + // Increment the significand if we're rounding away from zero. + if loss == Loss::MoreThanHalf || loss == Loss::ExactlyHalf && sig::get_bit(sig, 0) { + // We should never overflow. + assert_eq!(sig::increment(sig), 0); + omsb = sig::omsb(sig); + + // Did the significand increment overflow? + if omsb == calc_precision + 1 { + let _: Loss = sig::shift_right(sig, &mut exp, 1); + + return Status::INEXACT.and(exp); + } + } + + // The normal case - we were and are not denormal, and any + // significand increment above didn't overflow. + Status::INEXACT.and(exp) + }; + + let status; + let mut exp = unpack!(status=, + calc_normal_from_limbs(&mut sig_calc, &dec_sig)); + let pow5_status; + let pow5_exp = unpack!(pow5_status=, + calc_normal_from_limbs(&mut pow5_calc, &pow5_full)); + + // Add dec_exp, as 10^n = 5^n * 2^n. 
+ exp += dec_exp as ExpInt; + + let mut used_bits = S::PRECISION; + let mut truncated_bits = calc_precision - used_bits; + + let half_ulp_err1 = (status != Status::OK) as Limb; + let (calc_loss, half_ulp_err2); + if dec_exp >= 0 { + exp += pow5_exp; + + sig_scratch_calc.resize(sig_calc.len() + pow5_calc.len(), 0); + calc_loss = sig::mul( + &mut sig_scratch_calc, + &mut exp, + &sig_calc, + &pow5_calc, + calc_precision, + ); + mem::swap(&mut sig_calc, &mut sig_scratch_calc); + + half_ulp_err2 = (pow5_status != Status::OK) as Limb; + } else { + exp -= pow5_exp; + + sig_scratch_calc.resize(sig_calc.len(), 0); + calc_loss = sig::div( + &mut sig_scratch_calc, + &mut exp, + &mut sig_calc, + &mut pow5_calc, + calc_precision, + ); + mem::swap(&mut sig_calc, &mut sig_scratch_calc); + + // Denormal numbers have less precision. + if exp < S::MIN_EXP { + truncated_bits += (S::MIN_EXP - exp) as usize; + used_bits = calc_precision.saturating_sub(truncated_bits); + } + // Extra half-ulp lost in reciprocal of exponent. + half_ulp_err2 = 2 * + (pow5_status != Status::OK || calc_loss != Loss::ExactlyZero) as Limb; + } + + // Both sig::mul and sig::div return the + // result with the integer bit set. + assert!(sig::get_bit(&sig_calc, calc_precision - 1)); + + // The error from the true value, in half-ulps, on multiplying two + // floating point numbers, which differ from the value they + // approximate by at most half_ulp_err1 and half_ulp_err2 half-ulps, is strictly less + // than the returned value. + // + // See "How to Read Floating Point Numbers Accurately" by William D Clinger. + assert!( + half_ulp_err1 < 2 || half_ulp_err2 < 2 || (half_ulp_err1 + half_ulp_err2 < 8) + ); + + let inexact = (calc_loss != Loss::ExactlyZero) as Limb; + let half_ulp_err = if half_ulp_err1 + half_ulp_err2 == 0 { + inexact * 2 // <= inexact half-ulps. 
+ } else { + inexact + 2 * (half_ulp_err1 + half_ulp_err2) + }; + + let ulps_from_boundary = { + let bits = calc_precision - used_bits - 1; + + let i = bits / LIMB_BITS; + let limb = sig_calc[i] & (!0 >> (LIMB_BITS - 1 - bits % LIMB_BITS)); + let boundary = match round { + Round::NearestTiesToEven | Round::NearestTiesToAway => 1 << (bits % LIMB_BITS), + _ => 0, + }; + if i == 0 { + let delta = limb.wrapping_sub(boundary); + cmp::min(delta, delta.wrapping_neg()) + } else if limb == boundary { + if !sig::is_all_zeros(&sig_calc[1..i]) { + !0 // A lot. + } else { + sig_calc[0] + } + } else if limb == boundary.wrapping_sub(1) { + if sig_calc[1..i].iter().any(|&x| x.wrapping_neg() != 1) { + !0 // A lot. + } else { + sig_calc[0].wrapping_neg() + } + } else { + !0 // A lot. + } + }; + + // Are we guaranteed to round correctly if we truncate? + if ulps_from_boundary.saturating_mul(2) >= half_ulp_err { + let mut r = IeeeFloat { + sig: [0], + exp, + category: Category::Normal, + sign: false, + marker: PhantomData, + }; + sig::extract(&mut r.sig, &sig_calc, used_bits, calc_precision - used_bits); + // If we extracted less bits above we must adjust our exponent + // to compensate for the implicit right shift. + r.exp += (S::PRECISION - used_bits) as ExpInt; + let loss = Loss::through_truncation(&sig_calc, truncated_bits); + return Ok(r.normalize(round, loss)); + } + } + } +} + +impl Loss { + /// Combine the effect of two lost fractions. + fn combine(self, less_significant: Loss) -> Loss { + let mut more_significant = self; + if less_significant != Loss::ExactlyZero { + if more_significant == Loss::ExactlyZero { + more_significant = Loss::LessThanHalf; + } else if more_significant == Loss::ExactlyHalf { + more_significant = Loss::MoreThanHalf; + } + } + + more_significant + } + + /// Return the fraction lost were a bignum truncated losing the least + /// significant `bits` bits. 
+ fn through_truncation(limbs: &[Limb], bits: usize) -> Loss { + if bits == 0 { + return Loss::ExactlyZero; + } + + let half_bit = bits - 1; + let half_limb = half_bit / LIMB_BITS; + let (half_limb, rest) = if half_limb < limbs.len() { + (limbs[half_limb], &limbs[..half_limb]) + } else { + (0, limbs) + }; + let half = 1 << (half_bit % LIMB_BITS); + let has_half = half_limb & half != 0; + let has_rest = half_limb & (half - 1) != 0 || !sig::is_all_zeros(rest); + + match (has_half, has_rest) { + (false, false) => Loss::ExactlyZero, + (false, true) => Loss::LessThanHalf, + (true, false) => Loss::ExactlyHalf, + (true, true) => Loss::MoreThanHalf, + } + } +} + +/// Implementation details of IeeeFloat significands, such as big integer arithmetic. +/// As a rule of thumb, no functions in this module should dynamically allocate. +mod sig { + use std::cmp::Ordering; + use std::mem; + use super::{ExpInt, Limb, LIMB_BITS, limbs_for_bits, Loss}; + + pub(super) fn is_all_zeros(limbs: &[Limb]) -> bool { + limbs.iter().all(|&l| l == 0) + } + + /// One, not zero, based MSB. That is, returns 0 for a zeroed significand. + pub(super) fn omsb(limbs: &[Limb]) -> usize { + for i in (0..limbs.len()).rev() { + if limbs[i] != 0 { + return (i + 1) * LIMB_BITS - limbs[i].leading_zeros() as usize; + } + } + + 0 + } + + /// Comparison (unsigned) of two significands. + pub(super) fn cmp(a: &[Limb], b: &[Limb]) -> Ordering { + assert_eq!(a.len(), b.len()); + for (a, b) in a.iter().zip(b).rev() { + match a.cmp(b) { + Ordering::Equal => {} + o => return o, + } + } + + Ordering::Equal + } + + /// Extract the given bit. + pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool { + limbs[bit / LIMB_BITS] & (1 << (bit % LIMB_BITS)) != 0 + } + + /// Set the given bit. + pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) { + limbs[bit / LIMB_BITS] |= 1 << (bit % LIMB_BITS); + } + + /// Clear the given bit. 
    pub(super) fn clear_bit(limbs: &mut [Limb], bit: usize) {
        limbs[bit / LIMB_BITS] &= !(1 << (bit % LIMB_BITS));
    }

    /// Shift `dst` left `bits` bits, subtract `bits` from its exponent.
    pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) {
        if bits > 0 {
            // Our exponent should not underflow.
            *exp = exp.checked_sub(bits as ExpInt).unwrap();

            // `jump` is the inter-limb jump; `shift` is the intra-limb shift.
            let jump = bits / LIMB_BITS;
            let shift = bits % LIMB_BITS;

            // Walk from the most significant limb down so each source limb is
            // read before it is overwritten.
            for i in (0..dst.len()).rev() {
                let mut limb;

                if i < jump {
                    // Below the jump distance only shifted-in zeros remain.
                    limb = 0;
                } else {
                    // dst[i] comes from the two limbs src[i - jump] and, if we have
                    // an intra-limb shift, src[i - jump - 1].
                    limb = dst[i - jump];
                    if shift > 0 {
                        limb <<= shift;
                        if i >= jump + 1 {
                            limb |= dst[i - jump - 1] >> (LIMB_BITS - shift);
                        }
                    }
                }

                dst[i] = limb;
            }
        }
    }

    /// Shift `dst` right `bits` bits noting lost fraction.
    pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss {
        // Record the fraction that is about to be shifted out, before
        // mutating `dst`.
        let loss = Loss::through_truncation(dst, bits);

        if bits > 0 {
            // Our exponent should not overflow.
            *exp = exp.checked_add(bits as ExpInt).unwrap();

            // `jump` is the inter-limb jump; `shift` is the intra-limb shift.
            let jump = bits / LIMB_BITS;
            let shift = bits % LIMB_BITS;

            // Perform the shift. This leaves the most significant `bits` bits
            // of the result at zero.
            for i in 0..dst.len() {
                let mut limb;

                if i + jump >= dst.len() {
                    // Reading past the top limb: shifted-in zeros.
                    limb = 0;
                } else {
                    limb = dst[i + jump];
                    if shift > 0 {
                        limb >>= shift;
                        if i + jump + 1 < dst.len() {
                            limb |= dst[i + jump + 1] << (LIMB_BITS - shift);
                        }
                    }
                }

                dst[i] = limb;
            }
        }

        loss
    }

    /// Copy the bit vector of width `src_bits` from `src`, starting at bit `src_lsb`,
    /// to `dst`, such that the bit `src_lsb` becomes the least significant bit of `dst`.
    /// All high bits above `src_bits` in `dst` are zero-filled.
    pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) {
        // Nothing to copy; `dst` is left untouched (callers must not rely on
        // high limbs being cleared in this case).
        if src_bits == 0 {
            return;
        }

        let dst_limbs = limbs_for_bits(src_bits);
        assert!(dst_limbs <= dst.len());

        // Skip over whole limbs below `src_lsb`, then copy the limbs that
        // contain the requested bit range.
        let src = &src[src_lsb / LIMB_BITS..];
        dst[..dst_limbs].copy_from_slice(&src[..dst_limbs]);

        // Shift out the remaining intra-limb offset of `src_lsb`; the exponent
        // (a throwaway `&mut 0`) and the lost fraction are irrelevant here.
        let shift = src_lsb % LIMB_BITS;
        let _: Loss = shift_right(&mut dst[..dst_limbs], &mut 0, shift);

        // We now have (dst_limbs * LIMB_BITS - shift) bits from `src`
        // in `dst`. If this is less than src_bits, append the rest, else
        // clear the high bits.
        let n = dst_limbs * LIMB_BITS - shift;
        if n < src_bits {
            // Pull the missing top bits from the next source limb.
            let mask = (1 << (src_bits - n)) - 1;
            dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << n % LIMB_BITS;
        } else if n > src_bits && src_bits % LIMB_BITS > 0 {
            // Mask off bits above `src_bits` in the top destination limb.
            dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1;
        }

        // Clear high limbs.
        for x in &mut dst[dst_limbs..] {
            *x = 0;
        }
    }

    /// We want the most significant PRECISION bits of `src`. There may not
    /// be that many; extract what we can.
    pub(super) fn from_limbs(dst: &mut [Limb], src: &[Limb], precision: usize) -> (Loss, ExpInt) {
        let omsb = omsb(src);

        if precision <= omsb {
            // More bits than fit: keep the top `precision` bits and report
            // what was truncated off the bottom as the lost fraction.
            extract(dst, src, precision, omsb - precision);
            (
                Loss::through_truncation(src, omsb - precision),
                omsb as ExpInt - 1,
            )
        } else {
            // Everything fits exactly; no loss. The exponent is reported as if
            // the MSB sat at bit `precision`, so that a later normalization
            // (shifting the MSB up to bit `precision`) yields exponent omsb - 1,
            // matching the branch above.
            extract(dst, src, omsb, 0);
            (Loss::ExactlyZero, precision as ExpInt - 1)
        }
    }

    /// Increment in-place, return the carry flag.
    pub(super) fn increment(dst: &mut [Limb]) -> Limb {
        for x in dst {
            *x = x.wrapping_add(1);
            if *x != 0 {
                // This limb did not wrap around, so the carry stops here.
                return 0;
            }
        }

        // Every limb wrapped from !0 to 0: carry out of the whole number.
        1
    }

    /// Decrement in-place, return the borrow flag.
    pub(super) fn decrement(dst: &mut [Limb]) -> Limb {
        for x in dst {
            *x = x.wrapping_sub(1);
            if *x != !0 {
                // This limb did not wrap around, so the borrow stops here.
                return 0;
            }
        }

        // Every limb wrapped from 0 to !0: borrow out of the whole number.
        1
    }

    /// `a += b + c` where `c` is zero or one. Returns the carry flag.
+ pub(super) fn add(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb { + assert!(c <= 1); + + for (a, &b) in a.iter_mut().zip(b) { + let (r, overflow) = a.overflowing_add(b); + let (r, overflow2) = r.overflowing_add(c); + *a = r; + c = (overflow | overflow2) as Limb; + } + + c + } + + /// `a -= b + c` where `c` is zero or one. Returns the borrow flag. + pub(super) fn sub(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb { + assert!(c <= 1); + + for (a, &b) in a.iter_mut().zip(b) { + let (r, overflow) = a.overflowing_sub(b); + let (r, overflow2) = r.overflowing_sub(c); + *a = r; + c = (overflow | overflow2) as Limb; + } + + c + } + + /// `a += b` or `a -= b`. Does not preserve `b`. + pub(super) fn add_or_sub( + a_sig: &mut [Limb], + a_exp: &mut ExpInt, + a_sign: &mut bool, + b_sig: &mut [Limb], + b_exp: ExpInt, + b_sign: bool, + ) -> Loss { + // Are we bigger exponent-wise than the RHS? + let bits = *a_exp - b_exp; + + // Determine if the operation on the absolute values is effectively + // an addition or subtraction. + // Subtraction is more subtle than one might naively expect. + if *a_sign ^ b_sign { + let (reverse, loss); + + if bits == 0 { + reverse = cmp(a_sig, b_sig) == Ordering::Less; + loss = Loss::ExactlyZero; + } else if bits > 0 { + loss = shift_right(b_sig, &mut 0, (bits - 1) as usize); + shift_left(a_sig, a_exp, 1); + reverse = false; + } else { + loss = shift_right(a_sig, a_exp, (-bits - 1) as usize); + shift_left(b_sig, &mut 0, 1); + reverse = true; + } + + let borrow = (loss != Loss::ExactlyZero) as Limb; + if reverse { + // The code above is intended to ensure that no borrow is necessary. + assert_eq!(sub(b_sig, a_sig, borrow), 0); + a_sig.copy_from_slice(b_sig); + *a_sign = !*a_sign; + } else { + // The code above is intended to ensure that no borrow is necessary. + assert_eq!(sub(a_sig, b_sig, borrow), 0); + } + + // Invert the lost fraction - it was on the RHS and subtracted. 
+ match loss { + Loss::LessThanHalf => Loss::MoreThanHalf, + Loss::MoreThanHalf => Loss::LessThanHalf, + _ => loss, + } + } else { + let loss = if bits > 0 { + shift_right(b_sig, &mut 0, bits as usize) + } else { + shift_right(a_sig, a_exp, -bits as usize) + }; + // We have a guard bit; generating a carry cannot happen. + assert_eq!(add(a_sig, b_sig, 0), 0); + loss + } + } + + /// `[low, high] = a * b`. + /// + /// This cannot overflow, because + /// + /// `(n - 1) * (n - 1) + 2 * (n - 1) == (n - 1) * (n + 1)` + /// + /// which is less than n^2. + pub(super) fn widening_mul(a: Limb, b: Limb) -> [Limb; 2] { + let mut wide = [0, 0]; + + if a == 0 || b == 0 { + return wide; + } + + const HALF_BITS: usize = LIMB_BITS / 2; + + let select = |limb, i| (limb >> (i * HALF_BITS)) & ((1 << HALF_BITS) - 1); + for i in 0..2 { + for j in 0..2 { + let mut x = [select(a, i) * select(b, j), 0]; + shift_left(&mut x, &mut 0, (i + j) * HALF_BITS); + assert_eq!(add(&mut wide, &x, 0), 0); + } + } + + wide + } + + /// `dst = a * b` (for normal `a` and `b`). Returns the lost fraction. + pub(super) fn mul<'a>( + dst: &mut [Limb], + exp: &mut ExpInt, + mut a: &'a [Limb], + mut b: &'a [Limb], + precision: usize, + ) -> Loss { + // Put the narrower number on the `a` for less loops below. + if a.len() > b.len() { + mem::swap(&mut a, &mut b); + } + + for x in &mut dst[..b.len()] { + *x = 0; + } + + for i in 0..a.len() { + let mut carry = 0; + for j in 0..b.len() { + let [low, mut high] = widening_mul(a[i], b[j]); + + // Now add carry. + let (low, overflow) = low.overflowing_add(carry); + high += overflow as Limb; + + // And now `dst[i + j]`, and store the new low part there. + let (low, overflow) = low.overflowing_add(dst[i + j]); + high += overflow as Limb; + + dst[i + j] = low; + carry = high; + } + dst[i + b.len()] = carry; + } + + // Assume the operands involved in the multiplication are single-precision + // FP, and the two multiplicants are: + // a = a23 . a22 ... a0 * 2^e1 + // b = b23 . 
b22 ... b0 * 2^e2 + // the result of multiplication is: + // dst = c48 c47 c46 . c45 ... c0 * 2^(e1+e2) + // Note that there are three significant bits at the left-hand side of the + // radix point: two for the multiplication, and an overflow bit for the + // addition (that will always be zero at this point). Move the radix point + // toward left by two bits, and adjust exponent accordingly. + *exp += 2; + + // Convert the result having "2 * precision" significant-bits back to the one + // having "precision" significant-bits. First, move the radix point from + // poision "2*precision - 1" to "precision - 1". The exponent need to be + // adjusted by "2*precision - 1" - "precision - 1" = "precision". + *exp -= precision as ExpInt + 1; + + // In case MSB resides at the left-hand side of radix point, shift the + // mantissa right by some amount to make sure the MSB reside right before + // the radix point (i.e. "MSB . rest-significant-bits"). + // + // Note that the result is not normalized when "omsb < precision". So, the + // caller needs to call IeeeFloat::normalize() if normalized value is + // expected. + let omsb = omsb(dst); + if omsb <= precision { + Loss::ExactlyZero + } else { + shift_right(dst, exp, omsb - precision) + } + } + + /// `quotient = dividend / divisor`. Returns the lost fraction. + /// Does not preserve `dividend` or `divisor`. + pub(super) fn div( + quotient: &mut [Limb], + exp: &mut ExpInt, + dividend: &mut [Limb], + divisor: &mut [Limb], + precision: usize, + ) -> Loss { + // Zero the quotient before setting bits in it. + for x in &mut quotient[..limbs_for_bits(precision)] { + *x = 0; + } + + // Normalize the divisor. + let bits = precision - omsb(divisor); + shift_left(divisor, &mut 0, bits); + *exp += bits as ExpInt; + + // Normalize the dividend. + let bits = precision - omsb(dividend); + shift_left(dividend, exp, bits); + + // Ensure the dividend >= divisor initially for the loop below. 
+ // Incidentally, this means that the division loop below is + // guaranteed to set the integer bit to one. + if cmp(dividend, divisor) == Ordering::Less { + shift_left(dividend, exp, 1); + assert_ne!(cmp(dividend, divisor), Ordering::Less) + } + + // Long division. + for bit in (0..precision).rev() { + if cmp(dividend, divisor) != Ordering::Less { + sub(dividend, divisor, 0); + set_bit(quotient, bit); + } + shift_left(dividend, &mut 0, 1); + } + + // Figure out the lost fraction. + match cmp(dividend, divisor) { + Ordering::Greater => Loss::MoreThanHalf, + Ordering::Equal => Loss::ExactlyHalf, + Ordering::Less => { + if is_all_zeros(dividend) { + Loss::ExactlyZero + } else { + Loss::LessThanHalf + } + } + } + } +} diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs new file mode 100644 index 00000000000..d9dbf787856 --- /dev/null +++ b/src/librustc_apfloat/lib.rs @@ -0,0 +1,693 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Port of LLVM's APFloat software floating-point implementation from the +//! following C++ sources (please update commit hash when backporting): +//! https://github.com/llvm-mirror/llvm/tree/23efab2bbd424ed13495a420ad8641cb2c6c28f9 +//! * `include/llvm/ADT/APFloat.h` -> `Float` and `FloatConvert` traits +//! * `lib/Support/APFloat.cpp` -> `ieee` and `ppc` modules +//! * `unittests/ADT/APFloatTest.cpp` -> `tests` directory +//! +//! The port contains no unsafe code, global state, or side-effects in general, +//! and the only allocations are in the conversion to/from decimal strings. +//! +//! 
Most of the API and the testcases are intact in some form or another, +//! with some ergonomic changes, such as idiomatic short names, returning +//! new values instead of mutating the receiver, and having separate method +//! variants that take a non-default rounding mode (with the suffix `_r`). +//! Comments have been preserved where possible, only slightly adapted. +//! +//! Instead of keeping a pointer to a configuration struct and inspecting it +//! dynamically on every operation, types (e.g. `ieee::Double`), traits +//! (e.g. `ieee::Semantics`) and associated constants are employed for +//! increased type safety and performance. +//! +//! On-heap bigints are replaced everywhere (except in decimal conversion), +//! with short arrays of `type Limb = u128` elements (instead of `u64`), +//! This allows fitting the largest supported significands in one integer +//! (`ieee::Quad` and `ppc::Fallback` use slightly less than 128 bits). +//! All of the functions in the `ieee::sig` module operate on slices. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. + +#![crate_name = "rustc_apfloat"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![deny(warnings)] +#![forbid(unsafe_code)] + +#![feature(const_fn)] +#![feature(i128_type)] +#![feature(slice_patterns)] +#![feature(try_from)] + +#[macro_use] +extern crate rustc_bitflags; + +use std::cmp::Ordering; +use std::fmt; +use std::ops::{Neg, Add, Sub, Mul, Div, Rem}; +use std::ops::{AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, BitOrAssign}; +use std::str::FromStr; + +bitflags! { + /// IEEE-754R 7: Default exception handling. + /// + /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT. 
+ #[must_use] + #[derive(Debug)] + flags Status: u8 { + const OK = 0x00, + const INVALID_OP = 0x01, + const DIV_BY_ZERO = 0x02, + const OVERFLOW = 0x04, + const UNDERFLOW = 0x08, + const INEXACT = 0x10 + } +} + +impl BitOrAssign for Status { + fn bitor_assign(&mut self, rhs: Self) { + *self = *self | rhs; + } +} + +#[must_use] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct StatusAnd<T> { + pub status: Status, + pub value: T, +} + +impl Status { + pub fn and<T>(self, value: T) -> StatusAnd<T> { + StatusAnd { + status: self, + value, + } + } +} + +impl<T> StatusAnd<T> { + fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> { + StatusAnd { + status: self.status, + value: f(self.value), + } + } +} + +#[macro_export] +macro_rules! unpack { + ($status:ident|=, $e:expr) => { + match $e { + $crate::StatusAnd { status, value } => { + $status |= status; + value + } + } + }; + ($status:ident=, $e:expr) => { + match $e { + $crate::StatusAnd { status, value } => { + $status = status; + value + } + } + } +} + +/// Category of internally-represented number. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Category { + Infinity, + NaN, + Normal, + Zero, +} + +/// IEEE-754R 4.3: Rounding-direction attributes. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Round { + NearestTiesToEven, + TowardPositive, + TowardNegative, + TowardZero, + NearestTiesToAway, +} + +impl Neg for Round { + type Output = Round; + fn neg(self) -> Round { + match self { + Round::TowardPositive => Round::TowardNegative, + Round::TowardNegative => Round::TowardPositive, + Round::NearestTiesToEven | Round::TowardZero | Round::NearestTiesToAway => self, + } + } +} + +/// A signed type to represent a floating point number's unbiased exponent. +pub type ExpInt = i16; + +// \c ilogb error results. 
+pub const IEK_INF: ExpInt = ExpInt::max_value(); +pub const IEK_NAN: ExpInt = ExpInt::min_value(); +pub const IEK_ZERO: ExpInt = ExpInt::min_value() + 1; + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct ParseError(pub &'static str); + +/// A self-contained host- and target-independent arbitrary-precision +/// floating-point software implementation. +/// +/// `apfloat` uses significand bignum integer arithmetic as provided by functions +/// in the `ieee::sig`. +/// +/// Written for clarity rather than speed, in particular with a view to use in +/// the front-end of a cross compiler so that target arithmetic can be correctly +/// performed on the host. Performance should nonetheless be reasonable, +/// particularly for its intended use. It may be useful as a base +/// implementation for a run-time library during development of a faster +/// target-specific one. +/// +/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all +/// implemented operations. Currently implemented operations are add, subtract, +/// multiply, divide, fused-multiply-add, conversion-to-float, +/// conversion-to-integer and conversion-from-integer. New rounding modes +/// (e.g. away from zero) can be added with three or four lines of code. +/// +/// Four formats are built-in: IEEE single precision, double precision, +/// quadruple precision, and x87 80-bit extended double (when operating with +/// full extended precision). Adding a new format that obeys IEEE semantics +/// only requires adding two lines of code: a declaration and definition of the +/// format. +/// +/// All operations return the status of that operation as an exception bit-mask, +/// so multiple operations can be done consecutively with their results or-ed +/// together. 
The returned status can be useful for compiler diagnostics; e.g., +/// inexact, underflow and overflow can be easily diagnosed on constant folding, +/// and compiler optimizers can determine what exceptions would be raised by +/// folding operations and optimize, or perhaps not optimize, accordingly. +/// +/// At present, underflow tininess is detected after rounding; it should be +/// straight forward to add support for the before-rounding case too. +/// +/// The library reads hexadecimal floating point numbers as per C99, and +/// correctly rounds if necessary according to the specified rounding mode. +/// Syntax is required to have been validated by the caller. +/// +/// It also reads decimal floating point numbers and correctly rounds according +/// to the specified rounding mode. +/// +/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit +/// signed exponent, and the significand as an array of integer limbs. After +/// normalization of a number of precision P the exponent is within the range of +/// the format, and if the number is not denormal the P-th bit of the +/// significand is set as an explicit integer bit. For denormals the most +/// significant bit is shifted right so that the exponent is maintained at the +/// format's minimum, so that the smallest denormal has just the least +/// significant bit of the significand set. The sign of zeros and infinities +/// is significant; the exponent and significand of such numbers is not stored, +/// but has a known implicit (deterministic) value: 0 for the significands, 0 +/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and +/// significand are deterministic, although not really meaningful, and preserved +/// in non-conversion operations. The exponent is implicitly all 1 bits. +/// +/// `apfloat` does not provide any exception handling beyond default exception +/// handling. 
We represent Signaling NaNs via IEEE-754R 2008 6.2.1 should clause +/// by encoding Signaling NaNs with the first bit of its trailing significand as +/// 0. +/// +/// Future work +/// =========== +/// +/// Some features that may or may not be worth adding: +/// +/// Optional ability to detect underflow tininess before rounding. +/// +/// New formats: x87 in single and double precision mode (IEEE apart from +/// extended exponent range) (hard). +/// +/// New operations: sqrt, nexttoward. +/// +pub trait Float + : Copy + + Default + + FromStr<Err = ParseError> + + PartialOrd + + fmt::Display + + Neg<Output = Self> + + AddAssign + + SubAssign + + MulAssign + + DivAssign + + RemAssign + + Add<Output = StatusAnd<Self>> + + Sub<Output = StatusAnd<Self>> + + Mul<Output = StatusAnd<Self>> + + Div<Output = StatusAnd<Self>> + + Rem<Output = StatusAnd<Self>> { + /// Total number of bits in the in-memory format. + const BITS: usize; + + /// Number of bits in the significand. This includes the integer bit. + const PRECISION: usize; + + /// The largest E such that 2^E is representable; this matches the + /// definition of IEEE 754. + const MAX_EXP: ExpInt; + + /// The smallest E such that 2^E is a normalized number; this + /// matches the definition of IEEE 754. + const MIN_EXP: ExpInt; + + /// Positive Zero. + const ZERO: Self; + + /// Positive Infinity. + const INFINITY: Self; + + /// NaN (Not a Number). + // FIXME(eddyb) provide a default when qnan becomes const fn. + const NAN: Self; + + /// Factory for QNaN values. + // FIXME(eddyb) should be const fn. + fn qnan(payload: Option<u128>) -> Self; + + /// Factory for SNaN values. + // FIXME(eddyb) should be const fn. + fn snan(payload: Option<u128>) -> Self; + + /// Largest finite number. + // FIXME(eddyb) should be const (but FloatPair::largest is nontrivial). + fn largest() -> Self; + + /// Smallest (by magnitude) finite number. + /// Might be denormalized, which implies a relative loss of precision. 
+ const SMALLEST: Self; + + /// Smallest (by magnitude) normalized finite number. + // FIXME(eddyb) should be const (but FloatPair::smallest_normalized is nontrivial). + fn smallest_normalized() -> Self; + + // Arithmetic + + fn add_r(self, rhs: Self, round: Round) -> StatusAnd<Self>; + fn sub_r(self, rhs: Self, round: Round) -> StatusAnd<Self> { + self.add_r(-rhs, round) + } + fn mul_r(self, rhs: Self, round: Round) -> StatusAnd<Self>; + fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self>; + fn mul_add(self, multiplicand: Self, addend: Self) -> StatusAnd<Self> { + self.mul_add_r(multiplicand, addend, Round::NearestTiesToEven) + } + fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self>; + /// IEEE remainder. + // This is not currently correct in all cases. + fn ieee_rem(self, rhs: Self) -> StatusAnd<Self> { + let mut v = self; + + let status; + v = unpack!(status=, v / rhs); + if status == Status::DIV_BY_ZERO { + return status.and(self); + } + + assert!(Self::PRECISION < 128); + + let status; + let x = unpack!(status=, v.to_i128_r(128, Round::NearestTiesToEven, &mut false)); + if status == Status::INVALID_OP { + return status.and(self); + } + + let status; + let mut v = unpack!(status=, Self::from_i128(x)); + assert_eq!(status, Status::OK); // should always work + + let status; + v = unpack!(status=, v * rhs); + assert_eq!(status - Status::INEXACT, Status::OK); // should not overflow or underflow + + let status; + v = unpack!(status=, self - v); + assert_eq!(status - Status::INEXACT, Status::OK); // likewise + + if v.is_zero() { + status.and(v.copy_sign(self)) // IEEE754 requires this + } else { + status.and(v) + } + } + /// C fmod, or llvm frem. + fn c_fmod(self, rhs: Self) -> StatusAnd<Self>; + fn round_to_integral(self, round: Round) -> StatusAnd<Self>; + + /// IEEE-754R 2008 5.3.1: nextUp. + fn next_up(self) -> StatusAnd<Self>; + + /// IEEE-754R 2008 5.3.1: nextDown. 
+ /// + /// *NOTE* since nextDown(x) = -nextUp(-x), we only implement nextUp with + /// appropriate sign switching before/after the computation. + fn next_down(self) -> StatusAnd<Self> { + (-self).next_up().map(|r| -r) + } + + fn abs(self) -> Self { + if self.is_negative() { -self } else { self } + } + fn copy_sign(self, rhs: Self) -> Self { + if self.is_negative() != rhs.is_negative() { + -self + } else { + self + } + } + + // Conversions + fn from_bits(input: u128) -> Self; + fn from_i128_r(input: i128, round: Round) -> StatusAnd<Self> { + if input < 0 { + Self::from_u128_r(-input as u128, -round).map(|r| -r) + } else { + Self::from_u128_r(input as u128, round) + } + } + fn from_i128(input: i128) -> StatusAnd<Self> { + Self::from_i128_r(input, Round::NearestTiesToEven) + } + fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self>; + fn from_u128(input: u128) -> StatusAnd<Self> { + Self::from_u128_r(input, Round::NearestTiesToEven) + } + fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError>; + fn to_bits(self) -> u128; + + /// Convert a floating point number to an integer according to the + /// rounding mode. In case of an invalid operation exception, + /// deterministic values are returned, namely zero for NaNs and the + /// minimal or maximal value respectively for underflow or overflow. + /// If the rounded value is in range but the floating point number is + /// not the exact integer, the C standard doesn't require an inexact + /// exception to be raised. IEEE-854 does require it so we do that. + /// + /// Note that for conversions to integer type the C standard requires + /// round-to-zero to always be used. + /// + /// The *is_exact output tells whether the result is exact, in the sense + /// that converting it back to the original floating point type produces + /// the original value. This is almost equivalent to result==Status::OK, + /// except for negative zeroes. 
+ fn to_i128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<i128> { + let status; + if self.is_negative() { + if self.is_zero() { + // Negative zero can't be represented as an int. + *is_exact = false; + } + let r = unpack!(status=, (-self).to_u128_r(width, -round, is_exact)); + + // Check for values that don't fit in the signed integer. + if r > (1 << (width - 1)) { + // Return the most negative integer for the given width. + *is_exact = false; + Status::INVALID_OP.and(-1 << (width - 1)) + } else { + status.and(r.wrapping_neg() as i128) + } + } else { + // Positive case is simpler, can pretend it's a smaller unsigned + // integer, and `to_u128` will take care of all the edge cases. + self.to_u128_r(width - 1, round, is_exact).map( + |r| r as i128, + ) + } + } + fn to_i128(self, width: usize) -> StatusAnd<i128> { + self.to_i128_r(width, Round::TowardZero, &mut true) + } + fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128>; + fn to_u128(self, width: usize) -> StatusAnd<u128> { + self.to_u128_r(width, Round::TowardZero, &mut true) + } + + fn cmp_abs_normal(self, rhs: Self) -> Ordering; + + /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0). + fn bitwise_eq(self, rhs: Self) -> bool; + + // IEEE-754R 5.7.2 General operations. + + /// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if + /// both are not NaN. If either argument is a NaN, returns the other argument. + fn min(self, other: Self) -> Self { + if self.is_nan() { + other + } else if other.is_nan() { + self + } else if other.partial_cmp(&self) == Some(Ordering::Less) { + other + } else { + self + } + } + + /// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if + /// both are not NaN. If either argument is a NaN, returns the other argument. 
+ fn max(self, other: Self) -> Self { + if self.is_nan() { + other + } else if other.is_nan() { + self + } else if self.partial_cmp(&other) == Some(Ordering::Less) { + other + } else { + self + } + } + + /// IEEE-754R isSignMinus: Returns true if and only if the current value is + /// negative. + /// + /// This applies to zeros and NaNs as well. + fn is_negative(self) -> bool; + + /// IEEE-754R isNormal: Returns true if and only if the current value is normal. + /// + /// This implies that the current value of the float is not zero, subnormal, + /// infinite, or NaN following the definition of normality from IEEE-754R. + fn is_normal(self) -> bool { + !self.is_denormal() && self.is_finite_non_zero() + } + + /// Returns true if and only if the current value is zero, subnormal, or + /// normal. + /// + /// This means that the value is not infinite or NaN. + fn is_finite(self) -> bool { + !self.is_nan() && !self.is_infinite() + } + + /// Returns true if and only if the float is plus or minus zero. + fn is_zero(self) -> bool { + self.category() == Category::Zero + } + + /// IEEE-754R isSubnormal(): Returns true if and only if the float is a + /// denormal. + fn is_denormal(self) -> bool; + + /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity. + fn is_infinite(self) -> bool { + self.category() == Category::Infinity + } + + /// Returns true if and only if the float is a quiet or signaling NaN. + fn is_nan(self) -> bool { + self.category() == Category::NaN + } + + /// Returns true if and only if the float is a signaling NaN. 
+ fn is_signaling(self) -> bool; + + // Simple Queries + + fn category(self) -> Category; + fn is_non_zero(self) -> bool { + !self.is_zero() + } + fn is_finite_non_zero(self) -> bool { + self.is_finite() && !self.is_zero() + } + fn is_pos_zero(self) -> bool { + self.is_zero() && !self.is_negative() + } + fn is_neg_zero(self) -> bool { + self.is_zero() && self.is_negative() + } + + /// Returns true if and only if the number has the smallest possible non-zero + /// magnitude in the current semantics. + fn is_smallest(self) -> bool { + Self::SMALLEST.copy_sign(self).bitwise_eq(self) + } + + /// Returns true if and only if the number has the largest possible finite + /// magnitude in the current semantics. + fn is_largest(self) -> bool { + Self::largest().copy_sign(self).bitwise_eq(self) + } + + /// Returns true if and only if the number is an exact integer. + fn is_integer(self) -> bool { + // This could be made more efficient; I'm going for obviously correct. + if !self.is_finite() { + return false; + } + self.round_to_integral(Round::TowardZero).value.bitwise_eq( + self, + ) + } + + /// If this value has an exact multiplicative inverse, return it. + fn get_exact_inverse(self) -> Option<Self>; + + /// Returns the exponent of the internal representation of the Float. + /// + /// Because the radix of Float is 2, this is equivalent to floor(log2(x)). + /// For special Float values, this returns special error codes: + /// + /// NaN -> \c IEK_NAN + /// 0 -> \c IEK_ZERO + /// Inf -> \c IEK_INF + /// + fn ilogb(self) -> ExpInt; + + /// Returns: self * 2^exp for integral exponents. + fn scalbn_r(self, exp: ExpInt, round: Round) -> Self; + fn scalbn(self, exp: ExpInt) -> Self { + self.scalbn_r(exp, Round::NearestTiesToEven) + } + + /// Equivalent of C standard library function. + /// + /// While the C standard says exp is an unspecified value for infinity and nan, + /// this returns INT_MAX for infinities, and INT_MIN for NaNs (see `ilogb`). 
+ fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self; + fn frexp(self, exp: &mut ExpInt) -> Self { + self.frexp_r(exp, Round::NearestTiesToEven) + } +} + +pub trait FloatConvert<T: Float>: Float { + /// Convert a value of one floating point type to another. + /// The return value corresponds to the IEEE754 exceptions. *loses_info + /// records whether the transformation lost information, i.e. whether + /// converting the result back to the original type will produce the + /// original value (this is almost the same as return value==Status::OK, + /// but there are edge cases where this is not so). + fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<T>; + fn convert(self, loses_info: &mut bool) -> StatusAnd<T> { + self.convert_r(Round::NearestTiesToEven, loses_info) + } +} + +macro_rules! float_common_impls { + ($ty:ident<$t:tt>) => { + impl<$t> Default for $ty<$t> where Self: Float { + fn default() -> Self { + Self::ZERO + } + } + + impl<$t> ::std::str::FromStr for $ty<$t> where Self: Float { + type Err = ParseError; + fn from_str(s: &str) -> Result<Self, ParseError> { + Self::from_str_r(s, Round::NearestTiesToEven).map(|x| x.value) + } + } + + // Rounding ties to the nearest even, by default. 
+ + impl<$t> ::std::ops::Add for $ty<$t> where Self: Float { + type Output = StatusAnd<Self>; + fn add(self, rhs: Self) -> StatusAnd<Self> { + self.add_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Sub for $ty<$t> where Self: Float { + type Output = StatusAnd<Self>; + fn sub(self, rhs: Self) -> StatusAnd<Self> { + self.sub_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Mul for $ty<$t> where Self: Float { + type Output = StatusAnd<Self>; + fn mul(self, rhs: Self) -> StatusAnd<Self> { + self.mul_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Div for $ty<$t> where Self: Float { + type Output = StatusAnd<Self>; + fn div(self, rhs: Self) -> StatusAnd<Self> { + self.div_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Rem for $ty<$t> where Self: Float { + type Output = StatusAnd<Self>; + fn rem(self, rhs: Self) -> StatusAnd<Self> { + self.c_fmod(rhs) + } + } + + impl<$t> ::std::ops::AddAssign for $ty<$t> where Self: Float { + fn add_assign(&mut self, rhs: Self) { + *self = (*self + rhs).value; + } + } + + impl<$t> ::std::ops::SubAssign for $ty<$t> where Self: Float { + fn sub_assign(&mut self, rhs: Self) { + *self = (*self - rhs).value; + } + } + + impl<$t> ::std::ops::MulAssign for $ty<$t> where Self: Float { + fn mul_assign(&mut self, rhs: Self) { + *self = (*self * rhs).value; + } + } + + impl<$t> ::std::ops::DivAssign for $ty<$t> where Self: Float { + fn div_assign(&mut self, rhs: Self) { + *self = (*self / rhs).value; + } + } + + impl<$t> ::std::ops::RemAssign for $ty<$t> where Self: Float { + fn rem_assign(&mut self, rhs: Self) { + *self = (*self % rhs).value; + } + } + } +} + +pub mod ieee; +pub mod ppc; diff --git a/src/librustc_apfloat/ppc.rs b/src/librustc_apfloat/ppc.rs new file mode 100644 index 00000000000..dec88eb62cc --- /dev/null +++ b/src/librustc_apfloat/ppc.rs @@ -0,0 +1,461 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use {Category, ExpInt, Float, FloatConvert, Round, ParseError, Status, StatusAnd};
+use ieee;
+
+use std::cmp::Ordering;
+use std::fmt;
+use std::ops::Neg;
+
+#[must_use]
+#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
+pub struct DoubleFloat<F>(F, F);
+pub type DoubleDouble = DoubleFloat<ieee::Double>;
+
+// These are legacy semantics for the Fallback, inaccurate implementation of
+// IBM double-double, if the accurate DoubleDouble doesn't handle the
+// operation. It's equivalent to having an IEEE number with consecutive 106
+// bits of mantissa and 11 bits of exponent.
+//
+// It's not equivalent to IBM double-double. For example, a legit IBM
+// double-double, 1 + epsilon:
+//
+// 1 + epsilon = 1 + (1 >> 1076)
+//
+// is not representable by a consecutive 106 bits of mantissa.
+//
+// Currently, these semantics are used in the following way:
+//
+// DoubleDouble -> (Double, Double) ->
+// DoubleDouble's Fallback -> IEEE operations
+//
+// FIXME: Implement all operations in DoubleDouble, and delete these
+// semantics.
+// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
+pub struct FallbackS<F>(F);
+type Fallback<F> = ieee::IeeeFloat<FallbackS<F>>;
+impl<F: Float> ieee::Semantics for FallbackS<F> {
+ // Forbid any conversion to/from bits.
+ const BITS: usize = 0;
+ const PRECISION: usize = F::PRECISION * 2;
+ const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
+ const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt;
+}
+
+// Convert number to F. 
To avoid spurious underflows, we re- +// normalize against the F exponent range first, and only *then* +// truncate the mantissa. The result of that second conversion +// may be inexact, but should never underflow. +// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds. +pub struct FallbackExtendedS<F>(F); +type FallbackExtended<F> = ieee::IeeeFloat<FallbackExtendedS<F>>; +impl<F: Float> ieee::Semantics for FallbackExtendedS<F> { + // Forbid any conversion to/from bits. + const BITS: usize = 0; + const PRECISION: usize = Fallback::<F>::PRECISION; + const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt; +} + +impl<F: Float> From<Fallback<F>> for DoubleFloat<F> +where + F: FloatConvert<FallbackExtended<F>>, + FallbackExtended<F>: FloatConvert<F>, +{ + fn from(x: Fallback<F>) -> Self { + let mut status; + let mut loses_info = false; + + let extended: FallbackExtended<F> = unpack!(status=, x.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + + let a = unpack!(status=, extended.convert(&mut loses_info)); + assert_eq!(status - Status::INEXACT, Status::OK); + + // If conversion was exact or resulted in a special case, we're done; + // just set the second double to zero. Otherwise, re-convert back to + // the extended format and compute the difference. This now should + // convert exactly to double. 
+ let b = if a.is_finite_non_zero() && loses_info { + let u: FallbackExtended<F> = unpack!(status=, a.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + let v = unpack!(status=, extended - u); + assert_eq!(status, Status::OK); + let v = unpack!(status=, v.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + v + } else { + F::ZERO + }; + + DoubleFloat(a, b) + } +} + +impl<F: FloatConvert<Self>> From<DoubleFloat<F>> for Fallback<F> { + fn from(DoubleFloat(a, b): DoubleFloat<F>) -> Self { + let mut status; + let mut loses_info = false; + + // Get the first F and convert to our format. + let a = unpack!(status=, a.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + + // Unless we have a special case, add in second F. + if a.is_finite_non_zero() { + let b = unpack!(status=, b.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + + (a + b).value + } else { + a + } + } +} + +float_common_impls!(DoubleFloat<F>); + +impl<F: Float> Neg for DoubleFloat<F> { + type Output = Self; + fn neg(self) -> Self { + if self.1.is_finite_non_zero() { + DoubleFloat(-self.0, -self.1) + } else { + DoubleFloat(-self.0, self.1) + } + } +} + +impl<F: FloatConvert<Fallback<F>>> fmt::Display for DoubleFloat<F> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&Fallback::from(*self), f) + } +} + +impl<F: FloatConvert<Fallback<F>>> Float for DoubleFloat<F> +where + Self: From<Fallback<F>>, +{ + const BITS: usize = F::BITS * 2; + const PRECISION: usize = Fallback::<F>::PRECISION; + const MAX_EXP: ExpInt = Fallback::<F>::MAX_EXP; + const MIN_EXP: ExpInt = Fallback::<F>::MIN_EXP; + + const ZERO: Self = DoubleFloat(F::ZERO, F::ZERO); + + const INFINITY: Self = DoubleFloat(F::INFINITY, F::ZERO); + + // FIXME(eddyb) remove when qnan becomes const fn. 
+ const NAN: Self = DoubleFloat(F::NAN, F::ZERO); + + fn qnan(payload: Option<u128>) -> Self { + DoubleFloat(F::qnan(payload), F::ZERO) + } + + fn snan(payload: Option<u128>) -> Self { + DoubleFloat(F::snan(payload), F::ZERO) + } + + fn largest() -> Self { + let status; + let mut r = DoubleFloat(F::largest(), F::largest()); + r.1 = r.1.scalbn(-(F::PRECISION as ExpInt + 1)); + r.1 = unpack!(status=, r.1.next_down()); + assert_eq!(status, Status::OK); + r + } + + const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO); + + fn smallest_normalized() -> Self { + DoubleFloat( + F::smallest_normalized().scalbn(F::PRECISION as ExpInt), + F::ZERO, + ) + } + + // Implement addition, subtraction, multiplication and division based on: + // "Software for Doubled-Precision Floating-Point Computations", + // by Seppo Linnainmaa, ACM TOMS vol 7 no 3, September 1981, pages 272-283. + + fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> { + match (self.category(), rhs.category()) { + (Category::Infinity, Category::Infinity) => { + if self.is_negative() != rhs.is_negative() { + Status::INVALID_OP.and(Self::NAN.copy_sign(self)) + } else { + Status::OK.and(self) + } + } + + (_, Category::Zero) | + (Category::NaN, _) | + (Category::Infinity, Category::Normal) => Status::OK.and(self), + + (Category::Zero, _) | + (_, Category::NaN) | + (_, Category::Infinity) => Status::OK.and(rhs), + + (Category::Normal, Category::Normal) => { + let mut status = Status::OK; + let (a, aa, c, cc) = (self.0, self.1, rhs.0, rhs.1); + let mut z = a; + z = unpack!(status|=, z.add_r(c, round)); + if !z.is_finite() { + if !z.is_infinite() { + return status.and(DoubleFloat(z, F::ZERO)); + } + status = Status::OK; + let a_cmp_c = a.cmp_abs_normal(c); + z = cc; + z = unpack!(status|=, z.add_r(aa, round)); + if a_cmp_c == Ordering::Greater { + // z = cc + aa + c + a; + z = unpack!(status|=, z.add_r(c, round)); + z = unpack!(status|=, z.add_r(a, round)); + } else { + // z = cc + aa + a + c; + z = 
unpack!(status|=, z.add_r(a, round)); + z = unpack!(status|=, z.add_r(c, round)); + } + if !z.is_finite() { + return status.and(DoubleFloat(z, F::ZERO)); + } + self.0 = z; + let mut zz = aa; + zz = unpack!(status|=, zz.add_r(cc, round)); + if a_cmp_c == Ordering::Greater { + // self.1 = a - z + c + zz; + self.1 = a; + self.1 = unpack!(status|=, self.1.sub_r(z, round)); + self.1 = unpack!(status|=, self.1.add_r(c, round)); + self.1 = unpack!(status|=, self.1.add_r(zz, round)); + } else { + // self.1 = c - z + a + zz; + self.1 = c; + self.1 = unpack!(status|=, self.1.sub_r(z, round)); + self.1 = unpack!(status|=, self.1.add_r(a, round)); + self.1 = unpack!(status|=, self.1.add_r(zz, round)); + } + } else { + // q = a - z; + let mut q = a; + q = unpack!(status|=, q.sub_r(z, round)); + + // zz = q + c + (a - (q + z)) + aa + cc; + // Compute a - (q + z) as -((q + z) - a) to avoid temporary copies. + let mut zz = q; + zz = unpack!(status|=, zz.add_r(c, round)); + q = unpack!(status|=, q.add_r(z, round)); + q = unpack!(status|=, q.sub_r(a, round)); + q = -q; + zz = unpack!(status|=, zz.add_r(q, round)); + zz = unpack!(status|=, zz.add_r(aa, round)); + zz = unpack!(status|=, zz.add_r(cc, round)); + if zz.is_zero() && !zz.is_negative() { + return Status::OK.and(DoubleFloat(z, F::ZERO)); + } + self.0 = z; + self.0 = unpack!(status|=, self.0.add_r(zz, round)); + if !self.0.is_finite() { + self.1 = F::ZERO; + return status.and(self); + } + self.1 = z; + self.1 = unpack!(status|=, self.1.sub_r(self.0, round)); + self.1 = unpack!(status|=, self.1.add_r(zz, round)); + } + status.and(self) + } + } + } + + fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> { + // Interesting observation: For special categories, finding the lowest + // common ancestor of the following layered graph gives the correct + // return category: + // + // NaN + // / \ + // Zero Inf + // \ / + // Normal + // + // e.g. 
NaN * NaN = NaN + // Zero * Inf = NaN + // Normal * Zero = Zero + // Normal * Inf = Inf + match (self.category(), rhs.category()) { + (Category::NaN, _) => Status::OK.and(self), + + (_, Category::NaN) => Status::OK.and(rhs), + + (Category::Zero, Category::Infinity) | + (Category::Infinity, Category::Zero) => Status::OK.and(Self::NAN), + + (Category::Zero, _) | + (Category::Infinity, _) => Status::OK.and(self), + + (_, Category::Zero) | + (_, Category::Infinity) => Status::OK.and(rhs), + + (Category::Normal, Category::Normal) => { + let mut status = Status::OK; + let (a, b, c, d) = (self.0, self.1, rhs.0, rhs.1); + // t = a * c + let mut t = a; + t = unpack!(status|=, t.mul_r(c, round)); + if !t.is_finite_non_zero() { + return status.and(DoubleFloat(t, F::ZERO)); + } + + // tau = fmsub(a, c, t), that is -fmadd(-a, c, t). + let mut tau = a; + tau = unpack!(status|=, tau.mul_add_r(c, -t, round)); + // v = a * d + let mut v = a; + v = unpack!(status|=, v.mul_r(d, round)); + // w = b * c + let mut w = b; + w = unpack!(status|=, w.mul_r(c, round)); + v = unpack!(status|=, v.add_r(w, round)); + // tau += v + w + tau = unpack!(status|=, tau.add_r(v, round)); + // u = t + tau + let mut u = t; + u = unpack!(status|=, u.add_r(tau, round)); + + self.0 = u; + if !u.is_finite() { + self.1 = F::ZERO; + } else { + // self.1 = (t - u) + tau + t = unpack!(status|=, t.sub_r(u, round)); + t = unpack!(status|=, t.add_r(tau, round)); + self.1 = t; + } + status.and(self) + } + } + } + + fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> { + Fallback::from(self) + .mul_add_r(Fallback::from(multiplicand), Fallback::from(addend), round) + .map(Self::from) + } + + fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self> { + Fallback::from(self).div_r(Fallback::from(rhs), round).map( + Self::from, + ) + } + + fn c_fmod(self, rhs: Self) -> StatusAnd<Self> { + Fallback::from(self).c_fmod(Fallback::from(rhs)).map( + Self::from, + ) + } + + fn 
round_to_integral(self, round: Round) -> StatusAnd<Self> { + Fallback::from(self).round_to_integral(round).map( + Self::from, + ) + } + + fn next_up(self) -> StatusAnd<Self> { + Fallback::from(self).next_up().map(Self::from) + } + + fn from_bits(input: u128) -> Self { + let (a, b) = (input, input >> F::BITS); + DoubleFloat( + F::from_bits(a & ((1 << F::BITS) - 1)), + F::from_bits(b & ((1 << F::BITS) - 1)), + ) + } + + fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> { + Fallback::from_u128_r(input, round).map(Self::from) + } + + fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> { + Fallback::from_str_r(s, round).map(|r| r.map(Self::from)) + } + + fn to_bits(self) -> u128 { + self.0.to_bits() | (self.1.to_bits() << F::BITS) + } + + fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> { + Fallback::from(self).to_u128_r(width, round, is_exact) + } + + fn cmp_abs_normal(self, rhs: Self) -> Ordering { + self.0.cmp_abs_normal(rhs.0).then_with(|| { + let result = self.1.cmp_abs_normal(rhs.1); + if result != Ordering::Equal { + let against = self.0.is_negative() ^ self.1.is_negative(); + let rhs_against = rhs.0.is_negative() ^ rhs.1.is_negative(); + (!against).cmp(&!rhs_against).then_with(|| if against { + result.reverse() + } else { + result + }) + } else { + result + } + }) + } + + fn bitwise_eq(self, rhs: Self) -> bool { + self.0.bitwise_eq(rhs.0) && self.1.bitwise_eq(rhs.1) + } + + fn is_negative(self) -> bool { + self.0.is_negative() + } + + fn is_denormal(self) -> bool { + self.category() == Category::Normal && + (self.0.is_denormal() || self.0.is_denormal() || + // (double)(Hi + Lo) == Hi defines a normal number. 
+ !(self.0 + self.1).value.bitwise_eq(self.0)) + } + + fn is_signaling(self) -> bool { + self.0.is_signaling() + } + + fn category(self) -> Category { + self.0.category() + } + + fn get_exact_inverse(self) -> Option<Self> { + Fallback::from(self).get_exact_inverse().map(Self::from) + } + + fn ilogb(self) -> ExpInt { + self.0.ilogb() + } + + fn scalbn_r(self, exp: ExpInt, round: Round) -> Self { + DoubleFloat(self.0.scalbn_r(exp, round), self.1.scalbn_r(exp, round)) + } + + fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self { + let a = self.0.frexp_r(exp, round); + let mut b = self.1; + if self.category() == Category::Normal { + b = b.scalbn_r(-*exp, round); + } + DoubleFloat(a, b) + } +} diff --git a/src/librustc_apfloat/tests/ieee.rs b/src/librustc_apfloat/tests/ieee.rs new file mode 100644 index 00000000000..aff2076e038 --- /dev/null +++ b/src/librustc_apfloat/tests/ieee.rs @@ -0,0 +1,6891 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+
+#![feature(i128_type)]
+
+#[macro_use]
+extern crate rustc_apfloat;
+
+use rustc_apfloat::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
+use rustc_apfloat::{Float, FloatConvert, ParseError, Round, Status};
+use rustc_apfloat::ieee::{Half, Single, Double, Quad, X87DoubleExtended};
+
+trait SingleExt {
+ fn from_f32(input: f32) -> Self;
+ fn to_f32(self) -> f32;
+}
+
+impl SingleExt for Single {
+ fn from_f32(input: f32) -> Self {
+ Self::from_bits(input.to_bits() as u128)
+ }
+
+ fn to_f32(self) -> f32 {
+ f32::from_bits(self.to_bits() as u32)
+ }
+}
+
+trait DoubleExt {
+ fn from_f64(input: f64) -> Self;
+ fn to_f64(self) -> f64;
+}
+
+impl DoubleExt for Double {
+ fn from_f64(input: f64) -> Self {
+ Self::from_bits(input.to_bits() as u128)
+ }
+
+ fn to_f64(self) -> f64 {
+ f64::from_bits(self.to_bits() as u64)
+ }
+}
+
+#[test]
+fn is_signaling() {
+ // We test qNaN, -qNaN, +sNaN, -sNaN with and without payloads.
+ let payload = 4;
+ assert!(!Single::qnan(None).is_signaling());
+ assert!(!(-Single::qnan(None)).is_signaling());
+ assert!(!Single::qnan(Some(payload)).is_signaling());
+ assert!(!(-Single::qnan(Some(payload))).is_signaling());
+ assert!(Single::snan(None).is_signaling());
+ assert!((-Single::snan(None)).is_signaling());
+ assert!(Single::snan(Some(payload)).is_signaling());
+ assert!((-Single::snan(Some(payload))).is_signaling());
+}
+
+#[test]
+fn next() {
+ // 1. Test Special Cases Values.
+ //
+ // Test all special values for nextUp and nextDown prescribed by IEEE-754R
+ // 2008. These are:
+ // 1. +inf
+ // 2. -inf
+ // 3. largest
+ // 4. -largest
+ // 5. smallest
+ // 6. -smallest
+ // 7. qNaN
+ // 8. sNaN
+ // 9. +0
+ // 10. -0
+
+ let mut status;
+
+ // nextUp(+inf) = +inf. 
+ let test = unpack!(status=, Quad::INFINITY.next_up()); + let expected = Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+inf) = -nextUp(-inf) = -(-largest) = largest + let test = unpack!(status=, Quad::INFINITY.next_down()); + let expected = Quad::largest(); + assert_eq!(status, Status::OK); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-inf) = -largest + let test = unpack!(status=, (-Quad::INFINITY).next_up()); + let expected = -Quad::largest(); + assert_eq!(status, Status::OK); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-inf) = -nextUp(+inf) = -(+inf) = -inf. + let test = unpack!(status=, (-Quad::INFINITY).next_down()); + let expected = -Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite() && test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(largest) = +inf + let test = unpack!(status=, Quad::largest().next_up()); + let expected = Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite() && !test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(largest) = -nextUp(-largest) + // = -(-largest + inc) + // = largest - inc. + let test = unpack!(status=, Quad::largest().next_down()); + let expected = "0x1.fffffffffffffffffffffffffffep+16383" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_infinite() && !test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-largest) = -largest + inc. + let test = unpack!(status=, (-Quad::largest()).next_up()); + let expected = "-0x1.fffffffffffffffffffffffffffep+16383" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-largest) = -nextUp(largest) = -(inf) = -inf. 
+ let test = unpack!(status=, (-Quad::largest()).next_down()); + let expected = -Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite() && test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(smallest) = smallest + inc. + let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "0x0.0000000000000000000000000002p-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(smallest) = -nextUp(-smallest) = -(-0) = +0. + let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = Quad::ZERO; + assert_eq!(status, Status::OK); + assert!(test.is_pos_zero()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-smallest) = -0. + let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = -Quad::ZERO; + assert_eq!(status, Status::OK); + assert!(test.is_neg_zero()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-smallest) = -nextUp(smallest) = -smallest - inc. 
+ let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "-0x0.0000000000000000000000000002p-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(qNaN) = qNaN + let test = unpack!(status=, Quad::qnan(None).next_up()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(qNaN) = qNaN + let test = unpack!(status=, Quad::qnan(None).next_down()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(sNaN) = qNaN + let test = unpack!(status=, Quad::snan(None).next_up()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::INVALID_OP); + assert!(test.bitwise_eq(expected)); + + // nextDown(sNaN) = qNaN + let test = unpack!(status=, Quad::snan(None).next_down()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::INVALID_OP); + assert!(test.bitwise_eq(expected)); + + // nextUp(+0) = +smallest + let test = unpack!(status=, Quad::ZERO.next_up()); + let expected = Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(+0) = -nextUp(-0) = -smallest + let test = unpack!(status=, Quad::ZERO.next_down()); + let expected = -Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(-0) = +smallest + let test = unpack!(status=, (-Quad::ZERO).next_up()); + let expected = Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-0) = -nextUp(0) = -smallest + let test = unpack!(status=, (-Quad::ZERO).next_down()); + let expected = -Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // 2. Binade Boundary Tests. + + // 2a. Test denormal <-> normal binade boundaries. 
+ // * nextUp(+Largest Denormal) -> +Smallest Normal. + // * nextDown(-Largest Denormal) -> -Smallest Normal. + // * nextUp(-Smallest Normal) -> -Largest Denormal. + // * nextDown(+Smallest Normal) -> +Largest Denormal. + + // nextUp(+Largest Denormal) -> +Smallest Normal. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "0x1.0000000000000000000000000000p-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Largest Denormal) -> -Smallest Normal. + let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "-0x1.0000000000000000000000000000p-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-Smallest Normal) -> -Largest Denormal. + let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "-0x0.ffffffffffffffffffffffffffffp-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Smallest Normal) -> +Largest Denormal. + let test = unpack!(status=, "+0x1.0000000000000000000000000000p-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "+0x0.ffffffffffffffffffffffffffffp-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // 2b. Test normal <-> normal binade boundaries. + // * nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1. + // * nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1. + // * nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary. 
+ // * nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary. + + // nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1. + let test = unpack!(status=, "-0x1p+1".parse::<Quad>().unwrap().next_up()); + let expected = "-0x1.ffffffffffffffffffffffffffffp+0" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1. + let test = unpack!(status=, "0x1p+1".parse::<Quad>().unwrap().next_down()); + let expected = "0x1.ffffffffffffffffffffffffffffp+0" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary. + let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp+0" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "0x1p+1".parse::<Quad>().unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary. + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp+0" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "-0x1p+1".parse::<Quad>().unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // 2c. Test using next at binade boundaries with a direction away from the + // binade boundary. Away from denormal <-> normal boundaries. + // + // This is to make sure that even though we are at a binade boundary, since + // we are rounding away, we do not trigger the binade boundary code. Thus we + // test: + // * nextUp(-Largest Denormal) -> -Largest Denormal + inc. + // * nextDown(+Largest Denormal) -> +Largest Denormal - inc. + // * nextUp(+Smallest Normal) -> +Smallest Normal + inc. + // * nextDown(-Smallest Normal) -> -Smallest Normal - inc. + + // nextUp(-Largest Denormal) -> -Largest Denormal + inc. 
+ let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "-0x0.fffffffffffffffffffffffffffep-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Largest Denormal) -> +Largest Denormal - inc. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "0x0.fffffffffffffffffffffffffffep-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(+Smallest Normal) -> +Smallest Normal + inc. + let test = unpack!(status=, "0x1.0000000000000000000000000000p-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "0x1.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Smallest Normal) -> -Smallest Normal - inc. + let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "-0x1.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // 2d. Test values which cause our exponent to go to min exponent. This + // is to ensure that guards in the code to check for min exponent + // trigger properly. 
+ // * nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
+ // * nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
+ // -0x1p-16381
+ // * nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
+ // * nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
+
+ // nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
+ let test = unpack!(status=, "-0x1p-16381".parse::<Quad>().unwrap().next_up());
+ let expected = "-0x1.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
+ // -0x1p-16381
+ let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x1p-16381".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x1p-16381".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
+ let test = unpack!(status=, "0x1p-16381".parse::<Quad>().unwrap().next_down());
+ let expected = "0x1.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // 3. Now we test both denormal/normal computation which will not cause us
+ // to go across binade boundaries. Specifically we test:
+ // * nextUp(+Denormal) -> +Denormal.
+ // * nextDown(+Denormal) -> +Denormal.
+ // * nextUp(-Denormal) -> -Denormal.
+ // * nextDown(-Denormal) -> -Denormal.
+ // * nextUp(+Normal) -> +Normal.
+ // * nextDown(+Normal) -> +Normal.
+ // * nextUp(-Normal) -> -Normal.
+ // * nextDown(-Normal) -> -Normal. + + // nextUp(+Denormal) -> +Denormal. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "0x0.ffffffffffffffffffffffff000dp-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Denormal) -> +Denormal. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "0x0.ffffffffffffffffffffffff000bp-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-Denormal) -> -Denormal. + let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "-0x0.ffffffffffffffffffffffff000bp-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Denormal) -> -Denormal + let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "-0x0.ffffffffffffffffffffffff000dp-16382" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(+Normal) -> +Normal. + let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "0x1.ffffffffffffffffffffffff000dp-16000" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Normal) -> +Normal. 
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "0x1.ffffffffffffffffffffffff000bp-16000" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-Normal) -> -Normal. + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000" + .parse::<Quad>() + .unwrap() + .next_up()); + let expected = "-0x1.ffffffffffffffffffffffff000bp-16000" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Normal) -> -Normal. + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000" + .parse::<Quad>() + .unwrap() + .next_down()); + let expected = "-0x1.ffffffffffffffffffffffff000dp-16000" + .parse::<Quad>() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); +} + +#[test] +fn fma() { + { + let mut f1 = Single::from_f32(14.5); + let f2 = Single::from_f32(-14.5); + let f3 = Single::from_f32(225.0); + f1 = f1.mul_add(f2, f3).value; + assert_eq!(14.75, f1.to_f32()); + } + + { + let val2 = Single::from_f32(2.0); + let mut f1 = Single::from_f32(1.17549435e-38); + let mut f2 = Single::from_f32(1.17549435e-38); + f1 /= val2; + f2 /= val2; + let f3 = Single::from_f32(12.0); + f1 = f1.mul_add(f2, f3).value; + assert_eq!(12.0, f1.to_f32()); + } + + // Test for correct zero sign when answer is exactly zero. + // fma(1.0, -1.0, 1.0) -> +ve 0. + { + let mut f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(-1.0); + let f3 = Double::from_f64(1.0); + f1 = f1.mul_add(f2, f3).value; + assert!(!f1.is_negative() && f1.is_zero()); + } + + // Test for correct zero sign when answer is exactly zero and rounding towards + // negative. 
+ // fma(1.0, -1.0, 1.0) -> +ve 0. + { + let mut f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(-1.0); + let f3 = Double::from_f64(1.0); + f1 = f1.mul_add_r(f2, f3, Round::TowardNegative).value; + assert!(f1.is_negative() && f1.is_zero()); + } + + // Test for correct (in this case -ve) sign when adding like signed zeros. + // Test fma(0.0, -0.0, -0.0) -> -ve 0. + { + let mut f1 = Double::from_f64(0.0); + let f2 = Double::from_f64(-0.0); + let f3 = Double::from_f64(-0.0); + f1 = f1.mul_add(f2, f3).value; + assert!(f1.is_negative() && f1.is_zero()); + } + + // Test -ve sign preservation when small negative results underflow. + { + let mut f1 = "-0x1p-1074".parse::<Double>().unwrap(); + let f2 = "+0x1p-1074".parse::<Double>().unwrap(); + let f3 = Double::from_f64(0.0); + f1 = f1.mul_add(f2, f3).value; + assert!(f1.is_negative() && f1.is_zero()); + } + + // Test x87 extended precision case from http://llvm.org/PR20728. + { + let mut m1 = X87DoubleExtended::from_u128(1).value; + let m2 = X87DoubleExtended::from_u128(1).value; + let a = X87DoubleExtended::from_u128(3).value; + + let mut loses_info = false; + m1 = m1.mul_add(m2, a).value; + let r: Single = m1.convert(&mut loses_info).value; + assert!(!loses_info); + assert_eq!(4.0, r.to_f32()); + } +} + +#[test] +fn min_num() { + let f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(2.0); + let nan = Double::NAN; + + assert_eq!(1.0, f1.min(f2).to_f64()); + assert_eq!(1.0, f2.min(f1).to_f64()); + assert_eq!(1.0, f1.min(nan).to_f64()); + assert_eq!(1.0, nan.min(f1).to_f64()); +} + +#[test] +fn max_num() { + let f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(2.0); + let nan = Double::NAN; + + assert_eq!(2.0, f1.max(f2).to_f64()); + assert_eq!(2.0, f2.max(f1).to_f64()); + assert_eq!(1.0, f1.max(nan).to_f64()); + assert_eq!(1.0, nan.max(f1).to_f64()); +} + +#[test] +fn denormal() { + // Test single precision + { + assert!(!Single::from_f32(0.0).is_denormal()); + + let mut t = 
"1.17549435082228750797e-38".parse::<Single>().unwrap(); + assert!(!t.is_denormal()); + + let val2 = Single::from_f32(2.0e0); + t /= val2; + assert!(t.is_denormal()); + } + + // Test double precision + { + assert!(!Double::from_f64(0.0).is_denormal()); + + let mut t = "2.22507385850720138309e-308".parse::<Double>().unwrap(); + assert!(!t.is_denormal()); + + let val2 = Double::from_f64(2.0e0); + t /= val2; + assert!(t.is_denormal()); + } + + // Test Intel double-ext + { + assert!(!X87DoubleExtended::from_u128(0).value.is_denormal()); + + let mut t = "3.36210314311209350626e-4932" + .parse::<X87DoubleExtended>() + .unwrap(); + assert!(!t.is_denormal()); + + t /= X87DoubleExtended::from_u128(2).value; + assert!(t.is_denormal()); + } + + // Test quadruple precision + { + assert!(!Quad::from_u128(0).value.is_denormal()); + + let mut t = "3.36210314311209350626267781732175260e-4932" + .parse::<Quad>() + .unwrap(); + assert!(!t.is_denormal()); + + t /= Quad::from_u128(2).value; + assert!(t.is_denormal()); + } +} + +#[test] +fn decimal_strings_without_null_terminators() { + // Make sure that we can parse strings without null terminators. + // rdar://14323230. 
+ let val = "0.00"[..3].parse::<Double>().unwrap(); + assert_eq!(val.to_f64(), 0.0); + let val = "0.01"[..3].parse::<Double>().unwrap(); + assert_eq!(val.to_f64(), 0.0); + let val = "0.09"[..3].parse::<Double>().unwrap(); + assert_eq!(val.to_f64(), 0.0); + let val = "0.095"[..4].parse::<Double>().unwrap(); + assert_eq!(val.to_f64(), 0.09); + let val = "0.00e+3"[..7].parse::<Double>().unwrap(); + assert_eq!(val.to_f64(), 0.00); + let val = "0e+3"[..4].parse::<Double>().unwrap(); + assert_eq!(val.to_f64(), 0.00); + +} + +#[test] +fn from_zero_decimal_string() { + assert_eq!(0.0, "0".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0.".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, ".0".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+.0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-.0".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0.0".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.0".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "00000.".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+00000.".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-00000.".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, ".00000".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+.00000".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-.00000".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0000.00000".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0000.00000".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0000.00000".parse::<Double>().unwrap().to_f64()); +} + +#[test] +fn from_zero_decimal_single_exponent_string() { + 
assert_eq!(0.0, "0e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0e1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0e+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0e-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "0.e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.e1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0.e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.e+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0.e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.e-1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, ".0e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+.0e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-.0e1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, ".0e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+.0e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-.0e+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, ".0e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+.0e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-.0e-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "0.0e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.0e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.0e1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, 
"0.0e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.0e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.0e+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0.0e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0.0e-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0.0e-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "000.0000e1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+000.0000e+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-000.0000e+1".parse::<Double>().unwrap().to_f64()); +} + +#[test] +fn from_zero_decimal_large_exponent_string() { + assert_eq!(0.0, "0e1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0e1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0e1234".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0e+1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0e+1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0e+1234".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0e-1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0e-1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0e-1234".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "000.0000e1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "000.0000e-1234".parse::<Double>().unwrap().to_f64()); +} + +#[test] +fn from_zero_hexadecimal_string() { + assert_eq!(0.0, "0x0p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x0p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x0p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0p-1".parse::<Double>().unwrap().to_f64()); + 
assert_eq!(-0.0, "-0x0p-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0.p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0.p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x0.p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0.p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0.p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x0.p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0.p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0.p-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "0x.0p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x.0p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x.0p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x.0p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x.0p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x.0p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x.0p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x.0p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x.0p-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "0x0.0p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0.0p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0.0p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x0.0p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0.0p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0.0p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.0, "0x0.0p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "+0x0.0p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0.0p-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(0.0, "0x00000.p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, 
"0x0000.00000p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x.00000p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x0p1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.0, "-0x0p1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x00000.p1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x0000.00000p1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x.00000p1234".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.0, "0x0.p1234".parse::<Double>().unwrap().to_f64()); +} + +#[test] +fn from_decimal_string() { + assert_eq!(1.0, "1".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.5, ".5".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.0, "1.0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-2.0, "-2".parse::<Double>().unwrap().to_f64()); + assert_eq!(-4.0, "-4.".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.5, "-.5".parse::<Double>().unwrap().to_f64()); + assert_eq!(-1.5, "-1.5".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.25e12, "1.25e12".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.25e+12, "1.25e+12".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.25e-12, "1.25e-12".parse::<Double>().unwrap().to_f64()); + assert_eq!(1024.0, "1024.".parse::<Double>().unwrap().to_f64()); + assert_eq!(1024.05, "1024.05000".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.05, ".05000".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0e2, "2.e2".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0e+2, "2.e+2".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0e-2, "2.e-2".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.05e2, "002.05000e2".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.05e+2, "002.05000e+2".parse::<Double>().unwrap().to_f64()); + 
assert_eq!(2.05e-2, "002.05000e-2".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.05e12, "002.05000e12".parse::<Double>().unwrap().to_f64()); + assert_eq!( + 2.05e+12, + "002.05000e+12".parse::<Double>().unwrap().to_f64() + ); + assert_eq!( + 2.05e-12, + "002.05000e-12".parse::<Double>().unwrap().to_f64() + ); + + // These are "carefully selected" to overflow the fast log-base + // calculations in the implementation. + assert!("99e99999".parse::<Double>().unwrap().is_infinite()); + assert!("-99e99999".parse::<Double>().unwrap().is_infinite()); + assert!("1e-99999".parse::<Double>().unwrap().is_pos_zero()); + assert!("-1e-99999".parse::<Double>().unwrap().is_neg_zero()); + + assert_eq!(2.71828, "2.71828".parse::<Double>().unwrap().to_f64()); +} + +#[test] +fn from_hexadecimal_string() { + assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.0, "+0x1p0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-1.0, "-0x1p0".parse::<Double>().unwrap().to_f64()); + + assert_eq!(1.0, "0x1p+0".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.0, "+0x1p+0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-1.0, "-0x1p+0".parse::<Double>().unwrap().to_f64()); + + assert_eq!(1.0, "0x1p-0".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.0, "+0x1p-0".parse::<Double>().unwrap().to_f64()); + assert_eq!(-1.0, "-0x1p-0".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(2.0, "0x1p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0, "+0x1p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-2.0, "-0x1p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(2.0, "0x1p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(2.0, "+0x1p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-2.0, "-0x1p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.5, "0x1p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.5, "+0x1p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.5, "-0x1p-1".parse::<Double>().unwrap().to_f64()); + 
+ + assert_eq!(3.0, "0x1.8p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(3.0, "+0x1.8p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-3.0, "-0x1.8p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(3.0, "0x1.8p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(3.0, "+0x1.8p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-3.0, "-0x1.8p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.75, "0x1.8p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.75, "+0x1.8p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.75, "-0x1.8p-1".parse::<Double>().unwrap().to_f64()); + + + assert_eq!(8192.0, "0x1000.000p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000.000p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-8192.0, "-0x1000.000p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(8192.0, "0x1000.000p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000.000p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!( + -8192.0, + "-0x1000.000p+1".parse::<Double>().unwrap().to_f64() + ); + + assert_eq!(2048.0, "0x1000.000p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(2048.0, "+0x1000.000p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!( + -2048.0, + "-0x1000.000p-1".parse::<Double>().unwrap().to_f64() + ); + + + assert_eq!(8192.0, "0x1000p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000p1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-8192.0, "-0x1000p1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(8192.0, "0x1000p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000p+1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-8192.0, "-0x1000p+1".parse::<Double>().unwrap().to_f64()); + + assert_eq!(2048.0, "0x1000p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(2048.0, "+0x1000p-1".parse::<Double>().unwrap().to_f64()); + assert_eq!(-2048.0, "-0x1000p-1".parse::<Double>().unwrap().to_f64()); + + + 
assert_eq!(16384.0, "0x10p10".parse::<Double>().unwrap().to_f64()); + assert_eq!(16384.0, "+0x10p10".parse::<Double>().unwrap().to_f64()); + assert_eq!(-16384.0, "-0x10p10".parse::<Double>().unwrap().to_f64()); + + assert_eq!(16384.0, "0x10p+10".parse::<Double>().unwrap().to_f64()); + assert_eq!(16384.0, "+0x10p+10".parse::<Double>().unwrap().to_f64()); + assert_eq!(-16384.0, "-0x10p+10".parse::<Double>().unwrap().to_f64()); + + assert_eq!(0.015625, "0x10p-10".parse::<Double>().unwrap().to_f64()); + assert_eq!(0.015625, "+0x10p-10".parse::<Double>().unwrap().to_f64()); + assert_eq!(-0.015625, "-0x10p-10".parse::<Double>().unwrap().to_f64()); + + assert_eq!(1.0625, "0x1.1p0".parse::<Double>().unwrap().to_f64()); + assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64()); + + assert_eq!( + "0x1p-150".parse::<Double>().unwrap().to_f64(), + "+0x800000000000000001.p-221" + .parse::<Double>() + .unwrap() + .to_f64() + ); + assert_eq!( + 2251799813685248.5, + "0x80000000000004000000.010p-28" + .parse::<Double>() + .unwrap() + .to_f64() + ); +} + +#[test] +fn to_string() { + let to_string = |d: f64, precision: usize, width: usize| { + let x = Double::from_f64(d); + if precision == 0 { + format!("{:1$}", x, width) + } else { + format!("{:2$.1$}", x, precision, width) + } + }; + assert_eq!("10", to_string(10.0, 6, 3)); + assert_eq!("1.0E+1", to_string(10.0, 6, 0)); + assert_eq!("10100", to_string(1.01E+4, 5, 2)); + assert_eq!("1.01E+4", to_string(1.01E+4, 4, 2)); + assert_eq!("1.01E+4", to_string(1.01E+4, 5, 1)); + assert_eq!("0.0101", to_string(1.01E-2, 5, 2)); + assert_eq!("0.0101", to_string(1.01E-2, 4, 2)); + assert_eq!("1.01E-2", to_string(1.01E-2, 5, 1)); + assert_eq!( + "0.78539816339744828", + to_string(0.78539816339744830961, 0, 3) + ); + assert_eq!( + "4.9406564584124654E-324", + to_string(4.9406564584124654e-324, 0, 3) + ); + assert_eq!("873.18340000000001", to_string(873.1834, 0, 1)); + assert_eq!("8.7318340000000001E+2", to_string(873.1834, 0, 0)); + 
assert_eq!( + "1.7976931348623157E+308", + to_string(1.7976931348623157E+308, 0, 0) + ); + + let to_string = |d: f64, precision: usize, width: usize| { + let x = Double::from_f64(d); + if precision == 0 { + format!("{:#1$}", x, width) + } else { + format!("{:#2$.1$}", x, precision, width) + } + }; + assert_eq!("10", to_string(10.0, 6, 3)); + assert_eq!("1.000000e+01", to_string(10.0, 6, 0)); + assert_eq!("10100", to_string(1.01E+4, 5, 2)); + assert_eq!("1.0100e+04", to_string(1.01E+4, 4, 2)); + assert_eq!("1.01000e+04", to_string(1.01E+4, 5, 1)); + assert_eq!("0.0101", to_string(1.01E-2, 5, 2)); + assert_eq!("0.0101", to_string(1.01E-2, 4, 2)); + assert_eq!("1.01000e-02", to_string(1.01E-2, 5, 1)); + assert_eq!( + "0.78539816339744828", + to_string(0.78539816339744830961, 0, 3) + ); + assert_eq!( + "4.94065645841246540e-324", + to_string(4.9406564584124654e-324, 0, 3) + ); + assert_eq!("873.18340000000001", to_string(873.1834, 0, 1)); + assert_eq!("8.73183400000000010e+02", to_string(873.1834, 0, 0)); + assert_eq!( + "1.79769313486231570e+308", + to_string(1.7976931348623157E+308, 0, 0) + ); +} + +#[test] +fn to_integer() { + let mut is_exact = false; + + assert_eq!( + Status::OK.and(10), + "10".parse::<Double>().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(is_exact); + + assert_eq!( + Status::INVALID_OP.and(0), + "-10".parse::<Double>().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::INVALID_OP.and(31), + "32".parse::<Double>().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::INEXACT.and(7), + "7.9".parse::<Double>().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::OK.and(-10), + "-10".parse::<Double>().unwrap().to_i128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(is_exact); + + assert_eq!( + 
Status::INVALID_OP.and(-16), + "-17".parse::<Double>().unwrap().to_i128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::INVALID_OP.and(15), + "16".parse::<Double>().unwrap().to_i128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); +} + +#[test] +fn nan() { + fn nanbits<T: Float>(signaling: bool, negative: bool, fill: u128) -> u128 { + let x = if signaling { + T::snan(Some(fill)) + } else { + T::qnan(Some(fill)) + }; + if negative { + (-x).to_bits() + } else { + x.to_bits() + } + } + + assert_eq!(0x7fc00000, nanbits::<Single>(false, false, 0)); + assert_eq!(0xffc00000, nanbits::<Single>(false, true, 0)); + assert_eq!(0x7fc0ae72, nanbits::<Single>(false, false, 0xae72)); + assert_eq!(0x7fffae72, nanbits::<Single>(false, false, 0xffffae72)); + assert_eq!(0x7fa00000, nanbits::<Single>(true, false, 0)); + assert_eq!(0xffa00000, nanbits::<Single>(true, true, 0)); + assert_eq!(0x7f80ae72, nanbits::<Single>(true, false, 0xae72)); + assert_eq!(0x7fbfae72, nanbits::<Single>(true, false, 0xffffae72)); + + assert_eq!(0x7ff8000000000000, nanbits::<Double>(false, false, 0)); + assert_eq!(0xfff8000000000000, nanbits::<Double>(false, true, 0)); + assert_eq!(0x7ff800000000ae72, nanbits::<Double>(false, false, 0xae72)); + assert_eq!( + 0x7fffffffffffae72, + nanbits::<Double>(false, false, 0xffffffffffffae72) + ); + assert_eq!(0x7ff4000000000000, nanbits::<Double>(true, false, 0)); + assert_eq!(0xfff4000000000000, nanbits::<Double>(true, true, 0)); + assert_eq!(0x7ff000000000ae72, nanbits::<Double>(true, false, 0xae72)); + assert_eq!( + 0x7ff7ffffffffae72, + nanbits::<Double>(true, false, 0xffffffffffffae72) + ); +} + +#[test] +fn string_decimal_death() { + assert_eq!( + "".parse::<Double>(), + Err(ParseError("Invalid string length")) + ); + assert_eq!( + "+".parse::<Double>(), + Err(ParseError("String has no digits")) + ); + assert_eq!( + "-".parse::<Double>(), + Err(ParseError("String has no 
digits")) + ); + + assert_eq!( + "\0".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1\0".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1\02".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1\02e1".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1e\0".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "1e1\0".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "1e1\02".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + + assert_eq!( + "1.0f".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + + assert_eq!( + "..".parse::<Double>(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "..0".parse::<Double>(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "1.0.0".parse::<Double>(), + Err(ParseError("String contains multiple dots")) + ); +} + +#[test] +fn string_decimal_significand_death() { + assert_eq!( + ".".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+.".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-.".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + + assert_eq!( + "e".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+e".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-e".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "e1".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+e1".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-e1".parse::<Double>(), + Err(ParseError("Significand has no digits")) + 
); + + assert_eq!( + ".e1".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+.e1".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-.e1".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + + assert_eq!( + ".e".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+.e".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-.e".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); +} + +#[test] +fn string_decimal_exponent_death() { + assert_eq!( + "1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "1.e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+1.e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-1.e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + ".1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+.1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-.1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "1.1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+1.1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-1.1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "1e+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "1e-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + ".1e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + 
assert_eq!( + ".1e+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + ".1e-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "1.0e".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "1.0e+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "1.0e-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); +} + +#[test] +fn string_hexadecimal_death() { + assert_eq!("0x".parse::<Double>(), Err(ParseError("Invalid string"))); + assert_eq!("+0x".parse::<Double>(), Err(ParseError("Invalid string"))); + assert_eq!("-0x".parse::<Double>(), Err(ParseError("Invalid string"))); + + assert_eq!( + "0x0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x0.".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x0.".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x0.".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x.0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x.0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x.0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x0.0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x0.0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x0.0".parse::<Double>(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x\0".parse::<Double>(), + 
Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1\0".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1\02".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1\02p1".parse::<Double>(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1p\0".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "0x1p1\0".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "0x1p1\02".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + + assert_eq!( + "0x1p0f".parse::<Double>(), + Err(ParseError("Invalid character in exponent")) + ); + + assert_eq!( + "0x..p1".parse::<Double>(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "0x..0p1".parse::<Double>(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "0x1.0.0p1".parse::<Double>(), + Err(ParseError("String contains multiple dots")) + ); +} + +#[test] +fn string_hexadecimal_significand_death() { + assert_eq!( + "0x.".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0xp".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0xp".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0xp".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0xp+".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0xp+".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0xp+".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + 
assert_eq!( + "0xp-".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0xp-".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0xp-".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + + assert_eq!( + "0x.p".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.p".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.p".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0x.p+".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.p+".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.p+".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0x.p-".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.p-".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.p-".parse::<Double>(), + Err(ParseError("Significand has no digits")) + ); +} + +#[test] +fn string_hexadecimal_exponent_death() { + assert_eq!( + "0x1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1p-".parse::<Double>(), + 
Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "0x1.p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "0x.1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x.1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x.1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x.1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x.1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x.1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x.1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x.1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x.1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "0x1.1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.1p".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.1p".parse::<Double>(), + 
Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.1p+".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.1p-".parse::<Double>(), + Err(ParseError("Exponent has no digits")) + ); +} + +#[test] +fn exact_inverse() { + // Trivial operation. + assert!( + Double::from_f64(2.0) + .get_exact_inverse() + .unwrap() + .bitwise_eq(Double::from_f64(0.5)) + ); + assert!( + Single::from_f32(2.0) + .get_exact_inverse() + .unwrap() + .bitwise_eq(Single::from_f32(0.5)) + ); + assert!( + "2.0" + .parse::<Quad>() + .unwrap() + .get_exact_inverse() + .unwrap() + .bitwise_eq("0.5".parse::<Quad>().unwrap()) + ); + assert!( + "2.0" + .parse::<X87DoubleExtended>() + .unwrap() + .get_exact_inverse() + .unwrap() + .bitwise_eq("0.5".parse::<X87DoubleExtended>().unwrap()) + ); + + // FLT_MIN + assert!( + Single::from_f32(1.17549435e-38) + .get_exact_inverse() + .unwrap() + .bitwise_eq(Single::from_f32(8.5070592e+37)) + ); + + // Large float, inverse is a denormal. 
+ assert!(Single::from_f32(1.7014118e38).get_exact_inverse().is_none()); + // Zero + assert!(Double::from_f64(0.0).get_exact_inverse().is_none()); + // Denormalized float + assert!( + Single::from_f32(1.40129846e-45) + .get_exact_inverse() + .is_none() + ); +} + +#[test] +fn round_to_integral() { + let t = Double::from_f64(-0.5); + assert_eq!(-0.0, t.round_to_integral(Round::TowardZero).value.to_f64()); + assert_eq!( + -1.0, + t.round_to_integral(Round::TowardNegative).value.to_f64() + ); + assert_eq!( + -0.0, + t.round_to_integral(Round::TowardPositive).value.to_f64() + ); + assert_eq!( + -0.0, + t.round_to_integral(Round::NearestTiesToEven).value.to_f64() + ); + + let s = Double::from_f64(3.14); + assert_eq!(3.0, s.round_to_integral(Round::TowardZero).value.to_f64()); + assert_eq!( + 3.0, + s.round_to_integral(Round::TowardNegative).value.to_f64() + ); + assert_eq!( + 4.0, + s.round_to_integral(Round::TowardPositive).value.to_f64() + ); + assert_eq!( + 3.0, + s.round_to_integral(Round::NearestTiesToEven).value.to_f64() + ); + + let r = Double::largest(); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::TowardZero).value.to_f64() + ); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::TowardNegative).value.to_f64() + ); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::TowardPositive).value.to_f64() + ); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::NearestTiesToEven).value.to_f64() + ); + + let p = Double::ZERO.round_to_integral(Round::TowardZero).value; + assert_eq!(0.0, p.to_f64()); + let p = (-Double::ZERO).round_to_integral(Round::TowardZero).value; + assert_eq!(-0.0, p.to_f64()); + let p = Double::NAN.round_to_integral(Round::TowardZero).value; + assert!(p.to_f64().is_nan()); + let p = Double::INFINITY.round_to_integral(Round::TowardZero).value; + assert!(p.to_f64().is_infinite() && p.to_f64() > 0.0); + let p = (-Double::INFINITY) + .round_to_integral(Round::TowardZero) + .value; + assert!(p.to_f64().is_infinite() && 
p.to_f64() < 0.0); +} + +#[test] +fn is_integer() { + let t = Double::from_f64(-0.0); + assert!(t.is_integer()); + let t = Double::from_f64(3.14159); + assert!(!t.is_integer()); + let t = Double::NAN; + assert!(!t.is_integer()); + let t = Double::INFINITY; + assert!(!t.is_integer()); + let t = -Double::INFINITY; + assert!(!t.is_integer()); + let t = Double::largest(); + assert!(t.is_integer()); +} + +#[test] +fn largest() { + assert_eq!(3.402823466e+38, Single::largest().to_f32()); + assert_eq!(1.7976931348623158e+308, Double::largest().to_f64()); +} + +#[test] +fn smallest() { + let test = Single::SMALLEST; + let expected = "0x0.000002p-126".parse::<Single>().unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Single::SMALLEST; + let expected = "-0x0.000002p-126".parse::<Single>().unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = Quad::SMALLEST; + let expected = "0x0.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Quad::SMALLEST; + let expected = "-0x0.0000000000000000000000000001p-16382" + .parse::<Quad>() + .unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); +} + +#[test] +fn smallest_normalized() { + let test = Single::smallest_normalized(); + let expected = "0x1p-126".parse::<Single>().unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Single::smallest_normalized(); + let expected = "-0x1p-126".parse::<Single>().unwrap(); + assert!(test.is_negative()); + 
assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = Quad::smallest_normalized(); + let expected = "0x1p-16382".parse::<Quad>().unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Quad::smallest_normalized(); + let expected = "-0x1p-16382".parse::<Quad>().unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); +} + +#[test] +fn zero() { + assert_eq!(0.0, Single::from_f32(0.0).to_f32()); + assert_eq!(-0.0, Single::from_f32(-0.0).to_f32()); + assert!(Single::from_f32(-0.0).is_negative()); + + assert_eq!(0.0, Double::from_f64(0.0).to_f64()); + assert_eq!(-0.0, Double::from_f64(-0.0).to_f64()); + assert!(Double::from_f64(-0.0).is_negative()); + + fn test<T: Float>(sign: bool, bits: u128) { + let test = if sign { -T::ZERO } else { T::ZERO }; + let pattern = if sign { "-0x0p+0" } else { "0x0p+0" }; + let expected = pattern.parse::<T>().unwrap(); + assert!(test.is_zero()); + assert_eq!(sign, test.is_negative()); + assert!(test.bitwise_eq(expected)); + assert_eq!(bits, test.to_bits()); + } + test::<Half>(false, 0); + test::<Half>(true, 0x8000); + test::<Single>(false, 0); + test::<Single>(true, 0x80000000); + test::<Double>(false, 0); + test::<Double>(true, 0x8000000000000000); + test::<Quad>(false, 0); + test::<Quad>(true, 0x8000000000000000_0000000000000000); + test::<X87DoubleExtended>(false, 0); + test::<X87DoubleExtended>(true, 0x8000_0000000000000000); +} + +#[test] +fn copy_sign() { + assert!(Double::from_f64(-42.0).bitwise_eq( + Double::from_f64(42.0).copy_sign( + Double::from_f64(-1.0), + ), + )); + assert!(Double::from_f64(42.0).bitwise_eq( + Double::from_f64(-42.0).copy_sign( + Double::from_f64(1.0), + ), + )); + assert!(Double::from_f64(-42.0).bitwise_eq( + Double::from_f64(-42.0).copy_sign( 
+ Double::from_f64(-1.0), + ), + )); + assert!(Double::from_f64(42.0).bitwise_eq( + Double::from_f64(42.0).copy_sign( + Double::from_f64(1.0), + ), + )); +} + +#[test] +fn convert() { + let mut loses_info = false; + let test = "1.0".parse::<Double>().unwrap(); + let test: Single = test.convert(&mut loses_info).value; + assert_eq!(1.0, test.to_f32()); + assert!(!loses_info); + + let mut test = "0x1p-53".parse::<X87DoubleExtended>().unwrap(); + let one = "1.0".parse::<X87DoubleExtended>().unwrap(); + test += one; + let test: Double = test.convert(&mut loses_info).value; + assert_eq!(1.0, test.to_f64()); + assert!(loses_info); + + let mut test = "0x1p-53".parse::<Quad>().unwrap(); + let one = "1.0".parse::<Quad>().unwrap(); + test += one; + let test: Double = test.convert(&mut loses_info).value; + assert_eq!(1.0, test.to_f64()); + assert!(loses_info); + + let test = "0xf.fffffffp+28".parse::<X87DoubleExtended>().unwrap(); + let test: Double = test.convert(&mut loses_info).value; + assert_eq!(4294967295.0, test.to_f64()); + assert!(!loses_info); + + let test = Single::snan(None); + let x87_snan = X87DoubleExtended::snan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_snan)); + assert!(!loses_info); + + let test = Single::qnan(None); + let x87_qnan = X87DoubleExtended::qnan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_qnan)); + assert!(!loses_info); + + let test = X87DoubleExtended::snan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_snan)); + assert!(!loses_info); + + let test = X87DoubleExtended::qnan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_qnan)); + assert!(!loses_info); +} + +#[test] +fn is_negative() { + let t = "0x1p+0".parse::<Single>().unwrap(); + assert!(!t.is_negative()); + let t = "-0x1p+0".parse::<Single>().unwrap(); + 
assert!(t.is_negative()); + + assert!(!Single::INFINITY.is_negative()); + assert!((-Single::INFINITY).is_negative()); + + assert!(!Single::ZERO.is_negative()); + assert!((-Single::ZERO).is_negative()); + + assert!(!Single::NAN.is_negative()); + assert!((-Single::NAN).is_negative()); + + assert!(!Single::snan(None).is_negative()); + assert!((-Single::snan(None)).is_negative()); +} + +#[test] +fn is_normal() { + let t = "0x1p+0".parse::<Single>().unwrap(); + assert!(t.is_normal()); + + assert!(!Single::INFINITY.is_normal()); + assert!(!Single::ZERO.is_normal()); + assert!(!Single::NAN.is_normal()); + assert!(!Single::snan(None).is_normal()); + assert!(!"0x1p-149".parse::<Single>().unwrap().is_normal()); +} + +#[test] +fn is_finite() { + let t = "0x1p+0".parse::<Single>().unwrap(); + assert!(t.is_finite()); + assert!(!Single::INFINITY.is_finite()); + assert!(Single::ZERO.is_finite()); + assert!(!Single::NAN.is_finite()); + assert!(!Single::snan(None).is_finite()); + assert!("0x1p-149".parse::<Single>().unwrap().is_finite()); +} + +#[test] +fn is_infinite() { + let t = "0x1p+0".parse::<Single>().unwrap(); + assert!(!t.is_infinite()); + assert!(Single::INFINITY.is_infinite()); + assert!(!Single::ZERO.is_infinite()); + assert!(!Single::NAN.is_infinite()); + assert!(!Single::snan(None).is_infinite()); + assert!(!"0x1p-149".parse::<Single>().unwrap().is_infinite()); +} + +#[test] +fn is_nan() { + let t = "0x1p+0".parse::<Single>().unwrap(); + assert!(!t.is_nan()); + assert!(!Single::INFINITY.is_nan()); + assert!(!Single::ZERO.is_nan()); + assert!(Single::NAN.is_nan()); + assert!(Single::snan(None).is_nan()); + assert!(!"0x1p-149".parse::<Single>().unwrap().is_nan()); +} + +#[test] +fn is_finite_non_zero() { + // Test positive/negative normal value. + assert!("0x1p+0".parse::<Single>().unwrap().is_finite_non_zero()); + assert!("-0x1p+0".parse::<Single>().unwrap().is_finite_non_zero()); + + // Test positive/negative denormal value. 
+ assert!("0x1p-149".parse::<Single>().unwrap().is_finite_non_zero()); + assert!("-0x1p-149".parse::<Single>().unwrap().is_finite_non_zero()); + + // Test +/- Infinity. + assert!(!Single::INFINITY.is_finite_non_zero()); + assert!(!(-Single::INFINITY).is_finite_non_zero()); + + // Test +/- Zero. + assert!(!Single::ZERO.is_finite_non_zero()); + assert!(!(-Single::ZERO).is_finite_non_zero()); + + // Test +/- qNaN. +/- dont mean anything with qNaN but paranoia can't hurt in + // this instance. + assert!(!Single::NAN.is_finite_non_zero()); + assert!(!(-Single::NAN).is_finite_non_zero()); + + // Test +/- sNaN. +/- dont mean anything with sNaN but paranoia can't hurt in + // this instance. + assert!(!Single::snan(None).is_finite_non_zero()); + assert!(!(-Single::snan(None)).is_finite_non_zero()); +} + +#[test] +fn add() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::<Single>().unwrap(); + let m_normal_value = "-0x1p+0".parse::<Single>().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "inf", Status::OK, Category::Infinity), + (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, p_zero, "inf", Status::OK, Category::Infinity), + (p_inf, m_zero, "inf", Status::OK, Category::Infinity), + (p_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, m_inf, "-inf", Status::OK, Category::Infinity), + (m_inf, p_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, m_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "inf", Status::OK, Category::Infinity), + (p_zero, m_inf, "-inf", Status::OK, Category::Infinity), + (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_zero, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + (m_zero, p_inf, "inf", Status::OK, Category::Infinity), + (m_zero, m_inf, "-inf", Status::OK, Category::Infinity), + (m_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (m_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity), + ( + p_normal_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_normal_value, + p_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + (p_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x1p+1", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_normal_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + (m_normal_value, p_inf, "inf", Status::OK, Category::Infinity), + ( + m_normal_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_normal_value, + p_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + (m_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_normal_value, + m_normal_value, + "-0x1p+1", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + p_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (p_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + p_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + p_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (m_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_largest_value, + m_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + p_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + (p_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_value, + "0x1p-148", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + p_smallest_normalized, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_normalized, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + p_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + (m_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + m_smallest_value, + "-0x1p-148", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_normalized, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_normalized, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + p_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_value, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x1p-125", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + p_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_value, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "-0x1p-125", + Status::OK, + Category::Normal, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x + y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap())); + } +} + +#[test] +fn subtract() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::<Single>().unwrap(); + let m_normal_value = "-0x1p+0".parse::<Single>().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, m_inf, "inf", Status::OK, Category::Infinity), + (p_inf, p_zero, "inf", Status::OK, Category::Infinity), + (p_inf, m_zero, "inf", Status::OK, Category::Infinity), + (p_inf, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. +(p_inf, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "-inf", Status::OK, Category::Infinity), + (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, p_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, m_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "-inf", Status::OK, Category::Infinity), + (p_zero, m_inf, "inf", Status::OK, Category::Infinity), + (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_zero, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + (m_zero, p_inf, "-inf", Status::OK, Category::Infinity), + (m_zero, m_inf, "inf", Status::OK, Category::Infinity), + (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (m_zero, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_normal_value, m_inf, "inf", Status::OK, Category::Infinity), + ( + p_normal_value, + p_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + (p_normal_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_normal_value, + m_normal_value, + "0x1p+1", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity), + ( + m_normal_value, + p_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + (m_normal_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "-0x1p+1", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_normal_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + p_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (p_largest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + m_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + p_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + p_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (m_largest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_largest_value, + p_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + p_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + (p_smallest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + m_smallest_value, + "0x1p-148", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_normalized, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_normalized, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + p_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + (m_smallest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_value, + "-0x1p-148", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + p_smallest_normalized, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_normalized, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + p_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + qnan, + "-nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_value, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "0x1p-125", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + p_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + qnan, + "-nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_value, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "-0x1p-125", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x - y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap())); + } +} + +#[test] +fn multiply() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::<Single>().unwrap(); + let m_normal_value = "-0x1p+0".parse::<Single>().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + let underflow_status = Status::UNDERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "inf", Status::OK, Category::Infinity), + (p_inf, m_inf, "-inf", Status::OK, Category::Infinity), + (p_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "-inf", Status::OK, Category::Infinity), + (m_inf, m_inf, "inf", Status::OK, Category::Infinity), + (m_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero), + (p_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + p_zero, + m_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (m_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + m_zero, + p_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity), + ( + p_normal_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_normal_value, p_zero, "0x0p+0", Status::OK, Category::Zero), + ( + p_normal_value, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (p_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity), + ( + m_normal_value, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_normal_value, m_zero, "0x0p+0", Status::OK, Category::Zero), + (m_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + p_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (p_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + p_smallest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_normalized, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_normalized, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_largest_value, + m_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (m_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_smallest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_normalized, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_normalized, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + p_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (p_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_largest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + m_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + p_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + m_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + m_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (m_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_largest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + m_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + p_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + m_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + p_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_largest_value, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + m_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + m_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_largest_value, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + m_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x * y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap())); + } +} + +#[test] +fn divide() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::<Single>().unwrap(); + let m_normal_value = "-0x1p+0".parse::<Single>().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + let underflow_status = Status::UNDERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, p_zero, "inf", Status::OK, Category::Infinity), + (p_inf, m_zero, "-inf", Status::OK, Category::Infinity), + (p_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, p_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, m_zero, "inf", Status::OK, Category::Infinity), + (m_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_inf, "-0x0p+0", Status::OK, Category::Zero), + (p_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + p_zero, + m_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, p_inf, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, m_inf, "0x0p+0", Status::OK, Category::Zero), + (m_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + m_zero, + p_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_normal_value, p_inf, "0x0p+0", Status::OK, Category::Zero), + (p_normal_value, m_inf, "-0x0p+0", Status::OK, Category::Zero), + ( + p_normal_value, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_normal_value, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (p_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_largest_value, + "0x1p-128", + underflow_status, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "-0x1p-128", + underflow_status, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_normal_value, + m_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p+126", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "-0x1p+126", + Status::OK, + Category::Normal, + ), + (m_normal_value, p_inf, "-0x0p+0", Status::OK, Category::Zero), + (m_normal_value, m_inf, "0x0p+0", Status::OK, Category::Zero), + ( + m_normal_value, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_normal_value, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (m_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_largest_value, + "-0x1p-128", + underflow_status, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "0x1p-128", + underflow_status, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_normal_value, + m_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p+126", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "0x1p+126", + Status::OK, + Category::Normal, + ), + (p_largest_value, p_inf, "0x0p+0", Status::OK, Category::Zero), + ( + p_largest_value, + m_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_largest_value, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (p_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_largest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + p_smallest_normalized, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_smallest_normalized, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_largest_value, m_inf, "0x0p+0", Status::OK, Category::Zero), + ( + m_largest_value, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_largest_value, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (m_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_largest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_smallest_normalized, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_smallest_normalized, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_smallest_value, + p_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + m_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_smallest_value, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (p_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + m_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + p_smallest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_normalized, + "0x1p-23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_normalized, + "-0x1p-23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + m_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_smallest_value, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (m_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + m_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + p_smallest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_normalized, + "-0x1p-23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_normalized, + "0x1p-23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + m_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + m_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x1p+23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_value, + "-0x1p+23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + m_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + m_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x1p+23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_value, + "0x1p+23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x / y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap())); + } +} + +#[test] +fn operator_overloads() { + // This is mostly testing that these operator overloads compile. 
+ let one = "0x1p+0".parse::<Single>().unwrap(); + let two = "0x2p+0".parse::<Single>().unwrap(); + assert!(two.bitwise_eq((one + one).value)); + assert!(one.bitwise_eq((two - one).value)); + assert!(two.bitwise_eq((one * two).value)); + assert!(one.bitwise_eq((two / two).value)); +} + +#[test] +fn abs() { + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let p_qnan = Single::NAN; + let m_qnan = -Single::NAN; + let p_snan = Single::snan(None); + let m_snan = -Single::snan(None); + let p_normal_value = "0x1p+0".parse::<Single>().unwrap(); + let m_normal_value = "-0x1p+0".parse::<Single>().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + assert!(p_inf.bitwise_eq(p_inf.abs())); + assert!(p_inf.bitwise_eq(m_inf.abs())); + assert!(p_zero.bitwise_eq(p_zero.abs())); + assert!(p_zero.bitwise_eq(m_zero.abs())); + assert!(p_qnan.bitwise_eq(p_qnan.abs())); + assert!(p_qnan.bitwise_eq(m_qnan.abs())); + assert!(p_snan.bitwise_eq(p_snan.abs())); + assert!(p_snan.bitwise_eq(m_snan.abs())); + assert!(p_normal_value.bitwise_eq(p_normal_value.abs())); + assert!(p_normal_value.bitwise_eq(m_normal_value.abs())); + assert!(p_largest_value.bitwise_eq(p_largest_value.abs())); + assert!(p_largest_value.bitwise_eq(m_largest_value.abs())); + assert!(p_smallest_value.bitwise_eq(p_smallest_value.abs())); + assert!(p_smallest_value.bitwise_eq(m_smallest_value.abs())); + assert!(p_smallest_normalized.bitwise_eq( + p_smallest_normalized.abs(), + )); + assert!(p_smallest_normalized.bitwise_eq( + m_smallest_normalized.abs(), + )); +} + +#[test] +fn neg() { + let one = "1.0".parse::<Single>().unwrap(); + let neg_one = "-1.0".parse::<Single>().unwrap(); + let zero = 
Single::ZERO; + let neg_zero = -Single::ZERO; + let inf = Single::INFINITY; + let neg_inf = -Single::INFINITY; + let qnan = Single::NAN; + let neg_qnan = -Single::NAN; + + assert!(neg_one.bitwise_eq(-one)); + assert!(one.bitwise_eq(-neg_one)); + assert!(neg_zero.bitwise_eq(-zero)); + assert!(zero.bitwise_eq(-neg_zero)); + assert!(neg_inf.bitwise_eq(-inf)); + assert!(inf.bitwise_eq(-neg_inf)); + assert!(neg_inf.bitwise_eq(-inf)); + assert!(inf.bitwise_eq(-neg_inf)); + assert!(neg_qnan.bitwise_eq(-qnan)); + assert!(qnan.bitwise_eq(-neg_qnan)); +} + +#[test] +fn ilogb() { + assert_eq!(-1074, Double::SMALLEST.ilogb()); + assert_eq!(-1074, (-Double::SMALLEST).ilogb()); + assert_eq!( + -1023, + "0x1.ffffffffffffep-1024".parse::<Double>().unwrap().ilogb() + ); + assert_eq!( + -1023, + "0x1.ffffffffffffep-1023".parse::<Double>().unwrap().ilogb() + ); + assert_eq!( + -1023, + "-0x1.ffffffffffffep-1023" + .parse::<Double>() + .unwrap() + .ilogb() + ); + assert_eq!(-51, "0x1p-51".parse::<Double>().unwrap().ilogb()); + assert_eq!( + -1023, + "0x1.c60f120d9f87cp-1023".parse::<Double>().unwrap().ilogb() + ); + assert_eq!(-2, "0x0.ffffp-1".parse::<Double>().unwrap().ilogb()); + assert_eq!(-1023, "0x1.fffep-1023".parse::<Double>().unwrap().ilogb()); + assert_eq!(1023, Double::largest().ilogb()); + assert_eq!(1023, (-Double::largest()).ilogb()); + + + assert_eq!(0, "0x1p+0".parse::<Single>().unwrap().ilogb()); + assert_eq!(0, "-0x1p+0".parse::<Single>().unwrap().ilogb()); + assert_eq!(42, "0x1p+42".parse::<Single>().unwrap().ilogb()); + assert_eq!(-42, "0x1p-42".parse::<Single>().unwrap().ilogb()); + + assert_eq!(IEK_INF, Single::INFINITY.ilogb()); + assert_eq!(IEK_INF, (-Single::INFINITY).ilogb()); + assert_eq!(IEK_ZERO, Single::ZERO.ilogb()); + assert_eq!(IEK_ZERO, (-Single::ZERO).ilogb()); + assert_eq!(IEK_NAN, Single::NAN.ilogb()); + assert_eq!(IEK_NAN, Single::snan(None).ilogb()); + + assert_eq!(127, Single::largest().ilogb()); + assert_eq!(127, (-Single::largest()).ilogb()); 
+ + assert_eq!(-149, Single::SMALLEST.ilogb()); + assert_eq!(-149, (-Single::SMALLEST).ilogb()); + assert_eq!(-126, Single::smallest_normalized().ilogb()); + assert_eq!(-126, (-Single::smallest_normalized()).ilogb()); +} + +#[test] +fn scalbn() { + assert!("0x1p+0".parse::<Single>().unwrap().bitwise_eq( + "0x1p+0".parse::<Single>().unwrap().scalbn(0), + )); + assert!("0x1p+42".parse::<Single>().unwrap().bitwise_eq( + "0x1p+0".parse::<Single>().unwrap().scalbn(42), + )); + assert!("0x1p-42".parse::<Single>().unwrap().bitwise_eq( + "0x1p+0".parse::<Single>().unwrap().scalbn(-42), + )); + + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let p_qnan = Single::NAN; + let m_qnan = -Single::NAN; + let snan = Single::snan(None); + + assert!(p_inf.bitwise_eq(p_inf.scalbn(0))); + assert!(m_inf.bitwise_eq(m_inf.scalbn(0))); + assert!(p_zero.bitwise_eq(p_zero.scalbn(0))); + assert!(m_zero.bitwise_eq(m_zero.scalbn(0))); + assert!(p_qnan.bitwise_eq(p_qnan.scalbn(0))); + assert!(m_qnan.bitwise_eq(m_qnan.scalbn(0))); + assert!(!snan.scalbn(0).is_signaling()); + + let scalbn_snan = snan.scalbn(1); + assert!(scalbn_snan.is_nan() && !scalbn_snan.is_signaling()); + + // Make sure highest bit of payload is preserved. 
+ let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1; + + let snan_with_payload = Double::snan(Some(payload)); + let quiet_payload = snan_with_payload.scalbn(1); + assert!(quiet_payload.is_nan() && !quiet_payload.is_signaling()); + assert_eq!(payload, quiet_payload.to_bits() & ((1 << 51) - 1)); + + assert!(p_inf.bitwise_eq( + "0x1p+0".parse::<Single>().unwrap().scalbn(128), + )); + assert!(m_inf.bitwise_eq( + "-0x1p+0".parse::<Single>().unwrap().scalbn(128), + )); + assert!(p_inf.bitwise_eq( + "0x1p+127".parse::<Single>().unwrap().scalbn(1), + )); + assert!(p_zero.bitwise_eq( + "0x1p-127".parse::<Single>().unwrap().scalbn(-127), + )); + assert!(m_zero.bitwise_eq( + "-0x1p-127".parse::<Single>().unwrap().scalbn(-127), + )); + assert!("-0x1p-149".parse::<Single>().unwrap().bitwise_eq( + "-0x1p-127".parse::<Single>().unwrap().scalbn(-22), + )); + assert!(p_zero.bitwise_eq( + "0x1p-126".parse::<Single>().unwrap().scalbn(-24), + )); + + + let smallest_f64 = Double::SMALLEST; + let neg_smallest_f64 = -Double::SMALLEST; + + let largest_f64 = Double::largest(); + let neg_largest_f64 = -Double::largest(); + + let largest_denormal_f64 = "0x1.ffffffffffffep-1023".parse::<Double>().unwrap(); + let neg_largest_denormal_f64 = "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap(); + + + assert!(smallest_f64.bitwise_eq( + "0x1p-1074".parse::<Double>().unwrap().scalbn(0), + )); + assert!(neg_smallest_f64.bitwise_eq( + "-0x1p-1074".parse::<Double>().unwrap().scalbn(0), + )); + + assert!("0x1p+1023".parse::<Double>().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2097, + ), + )); + + assert!(smallest_f64.scalbn(-2097).is_pos_zero()); + assert!(smallest_f64.scalbn(-2098).is_pos_zero()); + assert!(smallest_f64.scalbn(-2099).is_pos_zero()); + assert!("0x1p+1022".parse::<Double>().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2096, + ), + )); + assert!("0x1p+1023".parse::<Double>().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2097, + ), + )); + 
assert!(smallest_f64.scalbn(2098).is_infinite()); + assert!(smallest_f64.scalbn(2099).is_infinite()); + + // Test for integer overflows when adding to exponent. + assert!(smallest_f64.scalbn(-ExpInt::max_value()).is_pos_zero()); + assert!(largest_f64.scalbn(ExpInt::max_value()).is_infinite()); + + assert!(largest_denormal_f64.bitwise_eq( + largest_denormal_f64.scalbn(0), + )); + assert!(neg_largest_denormal_f64.bitwise_eq( + neg_largest_denormal_f64.scalbn(0), + )); + + assert!( + "0x1.ffffffffffffep-1022" + .parse::<Double>() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1)) + ); + assert!( + "-0x1.ffffffffffffep-1021" + .parse::<Double>() + .unwrap() + .bitwise_eq(neg_largest_denormal_f64.scalbn(2)) + ); + + assert!( + "0x1.ffffffffffffep+1" + .parse::<Double>() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1024)) + ); + assert!(largest_denormal_f64.scalbn(-1023).is_pos_zero()); + assert!(largest_denormal_f64.scalbn(-1024).is_pos_zero()); + assert!(largest_denormal_f64.scalbn(-2048).is_pos_zero()); + assert!(largest_denormal_f64.scalbn(2047).is_infinite()); + assert!(largest_denormal_f64.scalbn(2098).is_infinite()); + assert!(largest_denormal_f64.scalbn(2099).is_infinite()); + + assert!( + "0x1.ffffffffffffep-2" + .parse::<Double>() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1021)) + ); + assert!( + "0x1.ffffffffffffep-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1022)) + ); + assert!( + "0x1.ffffffffffffep+0" + .parse::<Double>() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1023)) + ); + assert!( + "0x1.ffffffffffffep+1023" + .parse::<Double>() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(2046)) + ); + assert!("0x1p+974".parse::<Double>().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2048, + ), + )); + + let random_denormal_f64 = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap(); + assert!( + "0x1.c60f120d9f87cp-972" + .parse::<Double>() + .unwrap() + 
.bitwise_eq(random_denormal_f64.scalbn(-1023)) + ); + assert!( + "0x1.c60f120d9f87cp-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(random_denormal_f64.scalbn(-52)) + ); + assert!( + "0x1.c60f120d9f87cp-2" + .parse::<Double>() + .unwrap() + .bitwise_eq(random_denormal_f64.scalbn(-53)) + ); + assert!( + "0x1.c60f120d9f87cp+0" + .parse::<Double>() + .unwrap() + .bitwise_eq(random_denormal_f64.scalbn(-51)) + ); + + assert!(random_denormal_f64.scalbn(-2097).is_pos_zero()); + assert!(random_denormal_f64.scalbn(-2090).is_pos_zero()); + + + assert!("-0x1p-1073".parse::<Double>().unwrap().bitwise_eq( + neg_largest_f64.scalbn(-2097), + )); + + assert!("-0x1p-1024".parse::<Double>().unwrap().bitwise_eq( + neg_largest_f64.scalbn(-2048), + )); + + assert!("0x1p-1073".parse::<Double>().unwrap().bitwise_eq( + largest_f64.scalbn( + -2097, + ), + )); + + assert!("0x1p-1074".parse::<Double>().unwrap().bitwise_eq( + largest_f64.scalbn( + -2098, + ), + )); + assert!("-0x1p-1074".parse::<Double>().unwrap().bitwise_eq( + neg_largest_f64.scalbn(-2098), + )); + assert!(neg_largest_f64.scalbn(-2099).is_neg_zero()); + assert!(largest_f64.scalbn(1).is_infinite()); + + + assert!("0x1p+0".parse::<Double>().unwrap().bitwise_eq( + "0x1p+52".parse::<Double>().unwrap().scalbn(-52), + )); + + assert!("0x1p-103".parse::<Double>().unwrap().bitwise_eq( + "0x1p-51".parse::<Double>().unwrap().scalbn(-52), + )); +} + +#[test] +fn frexp() { + let p_zero = Double::ZERO; + let m_zero = -Double::ZERO; + let one = Double::from_f64(1.0); + let m_one = Double::from_f64(-1.0); + + let largest_denormal = "0x1.ffffffffffffep-1023".parse::<Double>().unwrap(); + let neg_largest_denormal = "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap(); + + let smallest = Double::SMALLEST; + let neg_smallest = -Double::SMALLEST; + + let largest = Double::largest(); + let neg_largest = -Double::largest(); + + let p_inf = Double::INFINITY; + let m_inf = -Double::INFINITY; + + let p_qnan = Double::NAN; + let m_qnan = 
-Double::NAN; + let snan = Double::snan(None); + + // Make sure highest bit of payload is preserved. + let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1; + + let snan_with_payload = Double::snan(Some(payload)); + + let mut exp = 0; + + let frac = p_zero.frexp(&mut exp); + assert_eq!(0, exp); + assert!(frac.is_pos_zero()); + + let frac = m_zero.frexp(&mut exp); + assert_eq!(0, exp); + assert!(frac.is_neg_zero()); + + + let frac = one.frexp(&mut exp); + assert_eq!(1, exp); + assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac)); + + let frac = m_one.frexp(&mut exp); + assert_eq!(1, exp); + assert!("-0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac)); + + let frac = largest_denormal.frexp(&mut exp); + assert_eq!(-1022, exp); + assert!( + "0x1.ffffffffffffep-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(frac) + ); + + let frac = neg_largest_denormal.frexp(&mut exp); + assert_eq!(-1022, exp); + assert!( + "-0x1.ffffffffffffep-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(frac) + ); + + + let frac = smallest.frexp(&mut exp); + assert_eq!(-1073, exp); + assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac)); + + let frac = neg_smallest.frexp(&mut exp); + assert_eq!(-1073, exp); + assert!("-0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac)); + + + let frac = largest.frexp(&mut exp); + assert_eq!(1024, exp); + assert!( + "0x1.fffffffffffffp-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(frac) + ); + + let frac = neg_largest.frexp(&mut exp); + assert_eq!(1024, exp); + assert!( + "-0x1.fffffffffffffp-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(frac) + ); + + + let frac = p_inf.frexp(&mut exp); + assert_eq!(IEK_INF, exp); + assert!(frac.is_infinite() && !frac.is_negative()); + + let frac = m_inf.frexp(&mut exp); + assert_eq!(IEK_INF, exp); + assert!(frac.is_infinite() && frac.is_negative()); + + let frac = p_qnan.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan()); + + let frac = m_qnan.frexp(&mut exp); + 
assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan()); + + let frac = snan.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan() && !frac.is_signaling()); + + let frac = snan_with_payload.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan() && !frac.is_signaling()); + assert_eq!(payload, frac.to_bits() & ((1 << 51) - 1)); + + let frac = "0x0.ffffp-1".parse::<Double>().unwrap().frexp(&mut exp); + assert_eq!(-1, exp); + assert!("0x1.fffep-1".parse::<Double>().unwrap().bitwise_eq(frac)); + + let frac = "0x1p-51".parse::<Double>().unwrap().frexp(&mut exp); + assert_eq!(-50, exp); + assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac)); + + let frac = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap().frexp( + &mut exp, + ); + assert_eq!(52, exp); + assert!( + "0x1.c60f120d9f87cp-1" + .parse::<Double>() + .unwrap() + .bitwise_eq(frac) + ); +} + +#[test] +fn modulo() { + let mut status; + { + let f1 = "1.5".parse::<Double>().unwrap(); + let f2 = "1.0".parse::<Double>().unwrap(); + let expected = "0.5".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0.5".parse::<Double>().unwrap(); + let f2 = "1.0".parse::<Double>().unwrap(); + let expected = "0.5".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0x1.3333333333333p-2".parse::<Double>().unwrap(); // 0.3 + let f2 = "0x1.47ae147ae147bp-7".parse::<Double>().unwrap(); // 0.01 + // 0.009999999999999983 + let expected = "0x1.47ae147ae1471p-7".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0x1p64".parse::<Double>().unwrap(); // 1.8446744073709552e19 + let f2 = "1.5".parse::<Double>().unwrap(); + let expected = "1.0".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + 
assert_eq!(status, Status::OK); + } + { + let f1 = "0x1p1000".parse::<Double>().unwrap(); + let f2 = "0x1p-1000".parse::<Double>().unwrap(); + let expected = "0.0".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0.0".parse::<Double>().unwrap(); + let f2 = "1.0".parse::<Double>().unwrap(); + let expected = "0.0".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "1.0".parse::<Double>().unwrap(); + let f2 = "0.0".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).is_nan()); + assert_eq!(status, Status::INVALID_OP); + } + { + let f1 = "0.0".parse::<Double>().unwrap(); + let f2 = "0.0".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).is_nan()); + assert_eq!(status, Status::INVALID_OP); + } + { + let f1 = Double::INFINITY; + let f2 = "1.0".parse::<Double>().unwrap(); + assert!(unpack!(status=, f1 % f2).is_nan()); + assert_eq!(status, Status::INVALID_OP); + } +} diff --git a/src/librustc_apfloat/tests/ppc.rs b/src/librustc_apfloat/tests/ppc.rs new file mode 100644 index 00000000000..145c3ddc869 --- /dev/null +++ b/src/librustc_apfloat/tests/ppc.rs @@ -0,0 +1,655 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +extern crate rustc_apfloat; + +use rustc_apfloat::{Category, Float, Round}; +use rustc_apfloat::ppc::DoubleDouble; + +use std::cmp::Ordering; + +#[test] +fn ppc_double_double() { + let test = DoubleDouble::ZERO; + let expected = "0x0p+0".parse::<DoubleDouble>().unwrap(); + assert!(test.is_zero()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + assert_eq!(0, test.to_bits()); + + let test = -DoubleDouble::ZERO; + let expected = "-0x0p+0".parse::<DoubleDouble>().unwrap(); + assert!(test.is_zero()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + assert_eq!(0x8000000000000000, test.to_bits()); + + let test = "1.0".parse::<DoubleDouble>().unwrap(); + assert_eq!(0x3ff0000000000000, test.to_bits()); + + // LDBL_MAX + let test = "1.79769313486231580793728971405301e+308" + .parse::<DoubleDouble>() + .unwrap(); + assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, test.to_bits()); + + // LDBL_MIN + let test = "2.00416836000897277799610805135016e-292" + .parse::<DoubleDouble>() + .unwrap(); + assert_eq!(0x0000000000000000_0360000000000000, test.to_bits()); +} + +#[test] +fn ppc_double_double_add_special() { + let data = [ + // (1 + 0) + (-1 + 0) = Category::Zero + ( + 0x3ff0000000000000, + 0xbff0000000000000, + Category::Zero, + Round::NearestTiesToEven, + ), + // LDBL_MAX + (1.1 >> (1023 - 106) + 0)) = Category::Infinity + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x7948000000000000, + Category::Infinity, + Round::NearestTiesToEven, + ), + // FIXME: change the 4th 0x75effffffffffffe to 0x75efffffffffffff when + // DoubleDouble's fallback is gone. + // LDBL_MAX + (1.011111... 
>> (1023 - 106) + (1.1111111...0 >> (1023 - + // 160))) = Category::Normal + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x75effffffffffffe_7947ffffffffffff, + Category::Normal, + Round::NearestTiesToEven, + ), + // LDBL_MAX + (1.1 >> (1023 - 106) + 0)) = Category::Infinity + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x7c8ffffffffffffe_7fefffffffffffff, + Category::Infinity, + Round::NearestTiesToEven, + ), + // NaN + (1 + 0) = Category::NaN + ( + 0x7ff8000000000000, + 0x3ff0000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.add_r(a2, round).value; + + assert_eq!(expected, a1.category(), "{:#x} + {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.add_r(a1, round).value; + + assert_eq!(expected, a2.category(), "{:#x} + {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_add() { + let data = [ + // (1 + 0) + (1e-105 + 0) = (1 + 1e-105) + ( + 0x3ff0000000000000, + 0x3960000000000000, + 0x3960000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 0) + (1e-106 + 0) = (1 + 1e-106) + ( + 0x3ff0000000000000, + 0x3950000000000000, + 0x3950000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 1e-106) + (1e-106 + 0) = (1 + 1e-105) + ( + 0x3950000000000000_3ff0000000000000, + 0x3950000000000000, + 0x3960000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 0) + (epsilon + 0) = (1 + epsilon) + ( + 0x3ff0000000000000, + 0x0000000000000001, + 0x0000000000000001_3ff0000000000000, + Round::NearestTiesToEven, + ), + // FIXME: change 0xf950000000000000 to 0xf940000000000000, when + // DoubleDouble's fallback is gone. + // (DBL_MAX - 1 << (1023 - 105)) + (1 << (1023 - 53) + 0) = DBL_MAX + + // 1.11111... 
<< (1023 - 52) + ( + 0xf950000000000000_7fefffffffffffff, + 0x7c90000000000000, + 0x7c8ffffffffffffe_7fefffffffffffff, + Round::NearestTiesToEven, + ), + // FIXME: change 0xf950000000000000 to 0xf940000000000000, when + // DoubleDouble's fallback is gone. + // (1 << (1023 - 53) + 0) + (DBL_MAX - 1 << (1023 - 105)) = DBL_MAX + + // 1.11111... << (1023 - 52) + ( + 0x7c90000000000000, + 0xf950000000000000_7fefffffffffffff, + 0x7c8ffffffffffffe_7fefffffffffffff, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.add_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} + {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.add_r(a1, round).value; + + assert_eq!(expected, a2.to_bits(), "{:#x} + {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_subtract() { + let data = [ + // (1 + 0) - (-1e-105 + 0) = (1 + 1e-105) + ( + 0x3ff0000000000000, + 0xb960000000000000, + 0x3960000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 0) - (-1e-106 + 0) = (1 + 1e-106) + ( + 0x3ff0000000000000, + 0xb950000000000000, + 0x3950000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.sub_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} - {:#x}", op1, op2); + } +} + +#[test] +fn ppc_double_double_multiply_special() { + let data = [ + // Category::NaN * Category::NaN = Category::NaN + ( + 0x7ff8000000000000, + 0x7ff8000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::NaN * Category::Zero = Category::NaN + ( + 0x7ff8000000000000, + 0, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::NaN * Category::Infinity = Category::NaN + ( + 
0x7ff8000000000000, + 0x7ff0000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::NaN * Category::Normal = Category::NaN + ( + 0x7ff8000000000000, + 0x3ff0000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::Infinity * Category::Infinity = Category::Infinity + ( + 0x7ff0000000000000, + 0x7ff0000000000000, + Category::Infinity, + Round::NearestTiesToEven, + ), + // Category::Infinity * Category::Zero = Category::NaN + ( + 0x7ff0000000000000, + 0, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::Infinity * Category::Normal = Category::Infinity + ( + 0x7ff0000000000000, + 0x3ff0000000000000, + Category::Infinity, + Round::NearestTiesToEven, + ), + // Category::Zero * Category::Zero = Category::Zero + (0, 0, Category::Zero, Round::NearestTiesToEven), + // Category::Zero * Category::Normal = Category::Zero + ( + 0, + 0x3ff0000000000000, + Category::Zero, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.mul_r(a2, round).value; + + assert_eq!(expected, a1.category(), "{:#x} * {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.mul_r(a1, round).value; + + assert_eq!(expected, a2.category(), "{:#x} * {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_multiply() { + let data = [ + // 1/3 * 3 = 1.0 + ( + 0x3c75555555555556_3fd5555555555555, + 0x4008000000000000, + 0x3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + epsilon) * (1 + 0) = Category::Zero + ( + 0x0000000000000001_3ff0000000000000, + 0x3ff0000000000000, + 0x0000000000000001_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + epsilon) * (1 + epsilon) = 1 + 2 * epsilon + ( + 0x0000000000000001_3ff0000000000000, + 0x0000000000000001_3ff0000000000000, + 0x0000000000000002_3ff0000000000000, + Round::NearestTiesToEven, 
+ ), + // -(1 + epsilon) * (1 + epsilon) = -1 + ( + 0x0000000000000001_bff0000000000000, + 0x0000000000000001_3ff0000000000000, + 0xbff0000000000000, + Round::NearestTiesToEven, + ), + // (0.5 + 0) * (1 + 2 * epsilon) = 0.5 + epsilon + ( + 0x3fe0000000000000, + 0x0000000000000002_3ff0000000000000, + 0x0000000000000001_3fe0000000000000, + Round::NearestTiesToEven, + ), + // (0.5 + 0) * (1 + epsilon) = 0.5 + ( + 0x3fe0000000000000, + 0x0000000000000001_3ff0000000000000, + 0x3fe0000000000000, + Round::NearestTiesToEven, + ), + // __LDBL_MAX__ * (1 + 1 << 106) = inf + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x3950000000000000_3ff0000000000000, + 0x7ff0000000000000, + Round::NearestTiesToEven, + ), + // __LDBL_MAX__ * (1 + 1 << 107) > __LDBL_MAX__, but not inf, yes =_=||| + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x3940000000000000_3ff0000000000000, + 0x7c8fffffffffffff_7fefffffffffffff, + Round::NearestTiesToEven, + ), + // __LDBL_MAX__ * (1 + 1 << 108) = __LDBL_MAX__ + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x3930000000000000_3ff0000000000000, + 0x7c8ffffffffffffe_7fefffffffffffff, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.mul_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} * {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.mul_r(a1, round).value; + + assert_eq!(expected, a2.to_bits(), "{:#x} * {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_divide() { + // FIXME: Only a sanity check for now. Add more edge cases when the + // double-double algorithm is implemented. 
+ let data = [ + // 1 / 3 = 1/3 + ( + 0x3ff0000000000000, + 0x4008000000000000, + 0x3c75555555555556_3fd5555555555555, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.div_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} / {:#x}", op1, op2); + } +} + +#[test] +fn ppc_double_double_remainder() { + let data = [ + // ieee_rem(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53) + ( + 0x3cb8000000000000_4008000000000000, + 0x3ca4000000000000_3ff4000000000000, + 0x3c90000000000000_3fe0000000000000, + ), + // ieee_rem(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (-0.5 - 0.5 << 53) + ( + 0x3cb8000000000000_4008000000000000, + 0x3cac000000000000_3ffc000000000000, + 0xbc90000000000000_bfe0000000000000, + ), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + let result = a1.ieee_rem(a2).value; + + assert_eq!( + expected, + result.to_bits(), + "ieee_rem({:#x}, {:#x})", + op1, + op2 + ); + } +} + +#[test] +fn ppc_double_double_mod() { + let data = [ + // mod(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53) + ( + 0x3cb8000000000000_4008000000000000, + 0x3ca4000000000000_3ff4000000000000, + 0x3c90000000000000_3fe0000000000000, + ), + // mod(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (1.25 + 1.25 << 53) + // 0xbc98000000000000 doesn't seem right, but it's what we currently have. + // FIXME: investigate + ( + 0x3cb8000000000000_4008000000000000, + 0x3cac000000000000_3ffc000000000000, + 0xbc98000000000000_3ff4000000000001, + ), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + let r = (a1 % a2).value; + + assert_eq!(expected, r.to_bits(), "fmod({:#x}, {:#x})", op1, op2); + } +} + +#[test] +fn ppc_double_double_fma() { + // Sanity check for now. 
+ let mut a = "2".parse::<DoubleDouble>().unwrap(); + a = a.mul_add( + "3".parse::<DoubleDouble>().unwrap(), + "4".parse::<DoubleDouble>().unwrap(), + ).value; + assert_eq!( + Some(Ordering::Equal), + "10".parse::<DoubleDouble>().unwrap().partial_cmp(&a) + ); +} + +#[test] +fn ppc_double_double_round_to_integral() { + { + let a = "1.5".parse::<DoubleDouble>().unwrap(); + let a = a.round_to_integral(Round::NearestTiesToEven).value; + assert_eq!( + Some(Ordering::Equal), + "2".parse::<DoubleDouble>().unwrap().partial_cmp(&a) + ); + } + { + let a = "2.5".parse::<DoubleDouble>().unwrap(); + let a = a.round_to_integral(Round::NearestTiesToEven).value; + assert_eq!( + Some(Ordering::Equal), + "2".parse::<DoubleDouble>().unwrap().partial_cmp(&a) + ); + } +} + +#[test] +fn ppc_double_double_compare() { + let data = [ + // (1 + 0) = (1 + 0) + ( + 0x3ff0000000000000, + 0x3ff0000000000000, + Some(Ordering::Equal), + ), + // (1 + 0) < (1.00...1 + 0) + (0x3ff0000000000000, 0x3ff0000000000001, Some(Ordering::Less)), + // (1.00...1 + 0) > (1 + 0) + ( + 0x3ff0000000000001, + 0x3ff0000000000000, + Some(Ordering::Greater), + ), + // (1 + 0) < (1 + epsilon) + ( + 0x3ff0000000000000, + 0x0000000000000001_3ff0000000000001, + Some(Ordering::Less), + ), + // NaN != NaN + (0x7ff8000000000000, 0x7ff8000000000000, None), + // (1 + 0) != NaN + (0x3ff0000000000000, 0x7ff8000000000000, None), + // Inf = Inf + ( + 0x7ff0000000000000, + 0x7ff0000000000000, + Some(Ordering::Equal), + ), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + assert_eq!( + expected, + a1.partial_cmp(&a2), + "compare({:#x}, {:#x})", + op1, + op2, + ); + } +} + +#[test] +fn ppc_double_double_bitwise_eq() { + let data = [ + // (1 + 0) = (1 + 0) + (0x3ff0000000000000, 0x3ff0000000000000, true), + // (1 + 0) != (1.00...1 + 0) + (0x3ff0000000000000, 0x3ff0000000000001, false), + // NaN = NaN + (0x7ff8000000000000, 0x7ff8000000000000, true), + // 
NaN != NaN with a different bit pattern + ( + 0x7ff8000000000000, + 0x3ff0000000000000_7ff8000000000000, + false, + ), + // Inf = Inf + (0x7ff0000000000000, 0x7ff0000000000000, true), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + assert_eq!(expected, a1.bitwise_eq(a2), "{:#x} = {:#x}", op1, op2); + } +} + +#[test] +fn ppc_double_double_change_sign() { + let float = DoubleDouble::from_bits(0xbcb0000000000000_400f000000000000); + { + let actual = float.copy_sign("1".parse::<DoubleDouble>().unwrap()); + assert_eq!(0xbcb0000000000000_400f000000000000, actual.to_bits()); + } + { + let actual = float.copy_sign("-1".parse::<DoubleDouble>().unwrap()); + assert_eq!(0x3cb0000000000000_c00f000000000000, actual.to_bits()); + } +} + +#[test] +fn ppc_double_double_factories() { + assert_eq!(0, DoubleDouble::ZERO.to_bits()); + assert_eq!( + 0x7c8ffffffffffffe_7fefffffffffffff, + DoubleDouble::largest().to_bits() + ); + assert_eq!(0x0000000000000001, DoubleDouble::SMALLEST.to_bits()); + assert_eq!( + 0x0360000000000000, + DoubleDouble::smallest_normalized().to_bits() + ); + assert_eq!( + 0x0000000000000000_8000000000000000, + (-DoubleDouble::ZERO).to_bits() + ); + assert_eq!( + 0xfc8ffffffffffffe_ffefffffffffffff, + (-DoubleDouble::largest()).to_bits() + ); + assert_eq!( + 0x0000000000000000_8000000000000001, + (-DoubleDouble::SMALLEST).to_bits() + ); + assert_eq!( + 0x0000000000000000_8360000000000000, + (-DoubleDouble::smallest_normalized()).to_bits() + ); + assert!(DoubleDouble::SMALLEST.is_smallest()); + assert!(DoubleDouble::largest().is_largest()); +} + +#[test] +fn ppc_double_double_is_denormal() { + assert!(DoubleDouble::SMALLEST.is_denormal()); + assert!(!DoubleDouble::largest().is_denormal()); + assert!(!DoubleDouble::smallest_normalized().is_denormal()); + { + // (4 + 3) is not normalized + let data = 0x4008000000000000_4010000000000000; + 
assert!(DoubleDouble::from_bits(data).is_denormal()); + } +} + +#[test] +fn ppc_double_double_exact_inverse() { + assert!( + "2.0" + .parse::<DoubleDouble>() + .unwrap() + .get_exact_inverse() + .unwrap() + .bitwise_eq("0.5".parse::<DoubleDouble>().unwrap()) + ); +} + +#[test] +fn ppc_double_double_scalbn() { + // 3.0 + 3.0 << 53 + let input = 0x3cb8000000000000_4008000000000000; + let result = DoubleDouble::from_bits(input).scalbn(1); + // 6.0 + 6.0 << 53 + assert_eq!(0x3cc8000000000000_4018000000000000, result.to_bits()); +} + +#[test] +fn ppc_double_double_frexp() { + // 3.0 + 3.0 << 53 + let input = 0x3cb8000000000000_4008000000000000; + let mut exp = 0; + // 0.75 + 0.75 << 53 + let result = DoubleDouble::from_bits(input).frexp(&mut exp); + assert_eq!(2, exp); + assert_eq!(0x3c98000000000000_3fe8000000000000, result.to_bits()); +} diff --git a/src/librustc_back/target/l4re_base.rs b/src/librustc_back/target/l4re_base.rs new file mode 100644 index 00000000000..998183d4015 --- /dev/null +++ b/src/librustc_back/target/l4re_base.rs @@ -0,0 +1,32 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use PanicStrategy; +use LinkerFlavor; +use target::{LinkArgs, TargetOptions}; +use std::default::Default; + +pub fn opts() -> TargetOptions { + let mut pre_link_args = LinkArgs::new(); + pre_link_args.insert(LinkerFlavor::Ld, vec![ + "-nostdlib".to_string(), + ]); + + TargetOptions { + executables: true, + has_elf_tls: false, + exe_allocation_crate: Some("alloc_system".to_string()), + panic_strategy: PanicStrategy::Abort, + linker: "ld".to_string(), + pre_link_args: pre_link_args, + target_family: Some("unix".to_string()), + .. Default::default() + } +} diff --git a/src/librustc_back/target/mod.rs b/src/librustc_back/target/mod.rs index 0dbfdb4d809..08b94d5a01c 100644 --- a/src/librustc_back/target/mod.rs +++ b/src/librustc_back/target/mod.rs @@ -69,6 +69,7 @@ mod solaris_base; mod windows_base; mod windows_msvc_base; mod thumb_base; +mod l4re_base; mod fuchsia_base; mod redox_base; @@ -193,6 +194,8 @@ supported_targets! { ("aarch64-unknown-fuchsia", aarch64_unknown_fuchsia), ("x86_64-unknown-fuchsia", x86_64_unknown_fuchsia), + ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc), + ("x86_64-unknown-redox", x86_64_unknown_redox), ("i386-apple-ios", i386_apple_ios), diff --git a/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs b/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs new file mode 100644 index 00000000000..b447f8a989d --- /dev/null +++ b/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs @@ -0,0 +1,31 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use LinkerFlavor; +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::l4re_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); + + Ok(Target { + llvm_target: "x86_64-unknown-l4re-uclibc".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), + arch: "x86_64".to_string(), + target_os: "l4re".to_string(), + target_env: "uclibc".to_string(), + target_vendor: "unknown".to_string(), + linker_flavor: LinkerFlavor::Ld, + options: base, + }) +} diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 0016c406b37..cf3b7508c02 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -111,19 +111,28 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) { // is not yet stolen. tcx.mir_validated(owner_def_id).borrow(); - let cfg = cfg::CFG::new(bccx.tcx, &body); - let AnalysisData { all_loans, - loans: loan_dfcx, - move_data: flowed_moves } = - build_borrowck_dataflow_data(bccx, &cfg, body_id); - - check_loans::check_loans(bccx, &loan_dfcx, &flowed_moves, &all_loans, body); + // option dance because you can't capture an uninitialized variable + // by mut-ref. 
+ let mut cfg = None; + if let Some(AnalysisData { all_loans, + loans: loan_dfcx, + move_data: flowed_moves }) = + build_borrowck_dataflow_data(bccx, false, body_id, + |bccx| { + cfg = Some(cfg::CFG::new(bccx.tcx, &body)); + cfg.as_mut().unwrap() + }) + { + check_loans::check_loans(bccx, &loan_dfcx, &flowed_moves, &all_loans, body); + } } -fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, - cfg: &cfg::CFG, - body_id: hir::BodyId) - -> AnalysisData<'a, 'tcx> +fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tcx>, + force_analysis: bool, + body_id: hir::BodyId, + get_cfg: F) + -> Option<AnalysisData<'a, 'tcx>> + where F: FnOnce(&mut BorrowckCtxt<'a, 'tcx>) -> &'c cfg::CFG { // Check the body of fn items. let tcx = this.tcx; @@ -135,6 +144,18 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, let (all_loans, move_data) = gather_loans::gather_loans_in_fn(this, body_id); + if !force_analysis && move_data.is_empty() && all_loans.is_empty() { + // large arrays of data inserted as constants can take a lot of + // time and memory to borrow-check - see issue #36799. However, + // they don't have lvalues, so no borrow-check is actually needed. + // Recognize that case and skip borrow-checking. 
+ debug!("skipping loan propagation for {:?} because of no loans", body_id); + return None; + } else { + debug!("propagating loans in {:?}", body_id); + } + + let cfg = get_cfg(this); let mut loan_dfcx = DataFlowContext::new(this.tcx, "borrowck", @@ -157,9 +178,9 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, id_range, this.body); - AnalysisData { all_loans: all_loans, - loans: loan_dfcx, - move_data:flowed_moves } + Some(AnalysisData { all_loans: all_loans, + loans: loan_dfcx, + move_data:flowed_moves }) } /// Accessor for introspective clients inspecting `AnalysisData` and @@ -177,8 +198,8 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( let body = tcx.hir.body(body_id); let mut bccx = BorrowckCtxt { tcx, tables, region_maps, owner_def_id, body }; - let dataflow_data = build_borrowck_dataflow_data(&mut bccx, cfg, body_id); - (bccx, dataflow_data) + let dataflow_data = build_borrowck_dataflow_data(&mut bccx, true, body_id, |_| cfg); + (bccx, dataflow_data.unwrap()) } // ---------------------------------------------------------------------- @@ -1072,14 +1093,15 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } - fn local_binding_mode(&self, node_id: ast::NodeId) -> hir::BindingMode { + fn local_binding_mode(&self, node_id: ast::NodeId) -> ty::BindingMode { let pat = match self.tcx.hir.get(node_id) { hir_map::Node::NodeLocal(pat) => pat, node => bug!("bad node for local: {:?}", node) }; match pat.node { - hir::PatKind::Binding(mode, ..) => mode, + hir::PatKind::Binding(..) => + *self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"), _ => bug!("local is not a binding: {:?}", pat) } } @@ -1114,7 +1136,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { Some(ImmutabilityBlame::ClosureEnv(_)) => {} Some(ImmutabilityBlame::ImmLocal(node_id)) => { let let_span = self.tcx.hir.span(node_id); - if let hir::BindingMode::BindByValue(..) = self.local_binding_mode(node_id) { + if let ty::BindByValue(..) 
= self.local_binding_mode(node_id) { if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(let_span) { let (_, is_implicit_self) = self.local_ty(node_id); if is_implicit_self && snippet != "self" { @@ -1131,7 +1153,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { Some(ImmutabilityBlame::LocalDeref(node_id)) => { let let_span = self.tcx.hir.span(node_id); match self.local_binding_mode(node_id) { - hir::BindingMode::BindByRef(..) => { + ty::BindByReference(..) => { let snippet = self.tcx.sess.codemap().span_to_snippet(let_span); if let Ok(snippet) = snippet { db.span_label( @@ -1141,7 +1163,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { ); } } - hir::BindingMode::BindByValue(..) => { + ty::BindByValue(..) => { if let (Some(local_ty), is_implicit_self) = self.local_ty(node_id) { if let Some(msg) = self.suggest_mut_for_immutable(local_ty, is_implicit_self) { diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index 0a31905c792..fd80e8320d6 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -220,6 +220,15 @@ impl<'a, 'tcx> MoveData<'tcx> { } } + /// return true if there are no trackable assignments or moves + /// in this move data - that means that there is nothing that + /// could cause a borrow error. + pub fn is_empty(&self) -> bool { + self.moves.borrow().is_empty() && + self.path_assignments.borrow().is_empty() && + self.var_assignments.borrow().is_empty() + } + pub fn path_loan_path(&self, index: MovePathIndex) -> Rc<LoanPath<'tcx>> { (*self.paths.borrow())[index.get()].loan_path.clone() } diff --git a/src/librustc_borrowck/diagnostics.rs b/src/librustc_borrowck/diagnostics.rs index 5d8998b332d..517b4e7f99b 100644 --- a/src/librustc_borrowck/diagnostics.rs +++ b/src/librustc_borrowck/diagnostics.rs @@ -1132,6 +1132,24 @@ fn main() { ``` "##, +E0595: r##" +Closures cannot mutate immutable captured variables. 
+ +Erroneous code example: + +```compile_fail,E0595 +let x = 3; // error: closure cannot assign to immutable local variable `x` +let mut c = || { x += 1 }; +``` + +Make the variable binding mutable: + +``` +let mut x = 3; // ok! +let mut c = || { x += 1 }; +``` +"##, + E0596: r##" This error occurs because you tried to mutably borrow a non-mutable variable. @@ -1275,6 +1293,5 @@ register_diagnostics! { // E0385, // {} in an aliasable location E0524, // two closures require unique access to `..` at the same time E0594, // cannot assign to {} - E0595, // closure cannot assign to {} E0598, // lifetime of {} is too short to guarantee its contents can be... } diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs index 95c8613232e..060ff503d4e 100644 --- a/src/librustc_const_eval/check_match.rs +++ b/src/librustc_const_eval/check_match.rs @@ -268,7 +268,12 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { fn check_for_bindings_named_the_same_as_variants(cx: &MatchVisitor, pat: &Pat) { pat.walk(|p| { - if let PatKind::Binding(hir::BindByValue(hir::MutImmutable), _, name, None) = p.node { + if let PatKind::Binding(_, _, name, None) = p.node { + let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode"); + if bm != ty::BindByValue(hir::MutImmutable) { + // Nothing to check. + return true; + } let pat_ty = cx.tables.pat_ty(p); if let ty::TyAdt(edef, _) = pat_ty.sty { if edef.is_enum() && edef.variants.iter().any(|variant| { @@ -452,8 +457,9 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor, pats: &[P<Pat>]) { let mut by_ref_span = None; for pat in pats { - pat.each_binding(|bm, _, span, _path| { - if let hir::BindByRef(..) = bm { + pat.each_binding(|_, id, span, _path| { + let bm = *cx.tables.pat_binding_modes.get(&id).expect("missing binding mode"); + if let ty::BindByReference(..) 
= bm { by_ref_span = Some(span); } }) @@ -484,10 +490,16 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor, for pat in pats { pat.walk(|p| { - if let PatKind::Binding(hir::BindByValue(..), _, _, ref sub) = p.node { - let pat_ty = cx.tables.node_id_to_type(p.id); - if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { - check_move(p, sub.as_ref().map(|p| &**p)); + if let PatKind::Binding(_, _, _, ref sub) = p.node { + let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode"); + match bm { + ty::BindByValue(..) => { + let pat_ty = cx.tables.node_id_to_type(p.id); + if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { + check_move(p, sub.as_ref().map(|p| &**p)); + } + } + _ => {} } } true diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index 463f256fe6c..eb45fd9c0e0 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -26,6 +26,7 @@ use rustc::util::nodemap::DefIdMap; use syntax::abi::Abi; use syntax::ast; +use syntax::attr; use rustc::hir::{self, Expr}; use syntax_pos::Span; @@ -560,8 +561,15 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::TyUint(ast::UintTy::Us) => { Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.uint_type)))) }, - ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(val.to_f64()))), - ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(val.to_f32()))), + ty::TyFloat(fty) => { + if let Some(i) = val.to_u128() { + Ok(Float(ConstFloat::from_u128(i, fty))) + } else { + // The value must be negative, go through signed integers. 
+ let i = val.to_u128_unchecked() as i128; + Ok(Float(ConstFloat::from_i128(i, fty))) + } + } ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting an address to a raw ptr")), ty::TyChar => match val { U8(u) => Ok(Char(u as char)), @@ -574,30 +582,25 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstFloat, ty: Ty<'tcx>) -> CastResult<'tcx> { + let int_width = |ty| { + ty::layout::Integer::from_attr(tcx, ty).size().bits() as usize + }; match ty.sty { - ty::TyInt(_) | ty::TyUint(_) => { - let i = match val { - F32(f) if f >= 0.0 => U128(f as u128), - F64(f) if f >= 0.0 => U128(f as u128), - - F32(f) => I128(f as i128), - F64(f) => I128(f as i128) - }; - - if let (I128(_), &ty::TyUint(_)) = (i, &ty.sty) { - return Err(CannotCast); + ty::TyInt(ity) => { + if let Some(i) = val.to_i128(int_width(attr::SignedInt(ity))) { + cast_const_int(tcx, I128(i), ty) + } else { + Err(CannotCast) + } + } + ty::TyUint(uty) => { + if let Some(i) = val.to_u128(int_width(attr::UnsignedInt(uty))) { + cast_const_int(tcx, U128(i), ty) + } else { + Err(CannotCast) } - - cast_const_int(tcx, i, ty) } - ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(match val { - F32(f) => f as f64, - F64(f) => f - }))), - ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(match val { - F64(f) => f as f32, - F32(f) => f - }))), + ty::TyFloat(fty) => Ok(Float(val.convert(fty))), _ => Err(CannotCast), } } @@ -691,11 +694,7 @@ fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, fn parse_float<'tcx>(num: &str, fty: ast::FloatTy) -> Result<ConstFloat, ErrKind<'tcx>> { - let val = match fty { - ast::FloatTy::F32 => num.parse::<f32>().map(F32), - ast::FloatTy::F64 => num.parse::<f64>().map(F64) - }; - val.map_err(|_| { + ConstFloat::from_str(num, fty).map_err(|_| { // FIXME(#31407) this is only necessary because float parsing is buggy UnimplementedConstVal("could not evaluate float literal (see issue #31407)") }) diff --git 
a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs index ab919da8152..f37a112a596 100644 --- a/src/librustc_const_eval/pattern.rs +++ b/src/librustc_const_eval/pattern.rs @@ -374,27 +374,31 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - PatKind::Binding(bm, def_id, ref ident, ref sub) => { + PatKind::Binding(_, def_id, ref ident, ref sub) => { let id = self.tcx.hir.as_local_node_id(def_id).unwrap(); let var_ty = self.tables.node_id_to_type(pat.id); let region = match var_ty.sty { ty::TyRef(r, _) => Some(r), _ => None, }; + let bm = *self.tables.pat_binding_modes.get(&pat.id) + .expect("missing binding mode"); let (mutability, mode) = match bm { - hir::BindByValue(hir::MutMutable) => + ty::BindByValue(hir::MutMutable) => (Mutability::Mut, BindingMode::ByValue), - hir::BindByValue(hir::MutImmutable) => + ty::BindByValue(hir::MutImmutable) => (Mutability::Not, BindingMode::ByValue), - hir::BindByRef(hir::MutMutable) => - (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Mut)), - hir::BindByRef(hir::MutImmutable) => - (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)), + ty::BindByReference(hir::MutMutable) => + (Mutability::Not, BindingMode::ByRef( + region.unwrap(), BorrowKind::Mut)), + ty::BindByReference(hir::MutImmutable) => + (Mutability::Not, BindingMode::ByRef( + region.unwrap(), BorrowKind::Shared)), }; // A ref x pattern is the same node used for x, and as such it has // x's type, which is &T, where we want T (the type being matched). 
- if let hir::BindByRef(_) = bm { + if let ty::BindByReference(_) = bm { if let ty::TyRef(_, mt) = ty.sty { ty = mt.ty; } else { diff --git a/src/librustc_const_math/Cargo.toml b/src/librustc_const_math/Cargo.toml index e74c1ef693c..41310ede3e0 100644 --- a/src/librustc_const_math/Cargo.toml +++ b/src/librustc_const_math/Cargo.toml @@ -9,5 +9,6 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] +rustc_apfloat = { path = "../librustc_apfloat" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } diff --git a/src/librustc_const_math/float.rs b/src/librustc_const_math/float.rs index f557edffbda..719f6b6a7b3 100644 --- a/src/librustc_const_math/float.rs +++ b/src/librustc_const_math/float.rs @@ -9,102 +9,164 @@ // except according to those terms. use std::cmp::Ordering; -use std::hash; -use std::mem::transmute; +use std::num::ParseFloatError; + +use syntax::ast; + +use rustc_apfloat::{Float, FloatConvert, Status}; +use rustc_apfloat::ieee::{Single, Double}; use super::err::*; -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] -pub enum ConstFloat { - F32(f32), - F64(f64) +// Note that equality for `ConstFloat` means that the it is the same +// constant, not that the rust values are equal. In particular, `NaN +// == NaN` (at least if it's the same NaN; distinct encodings for NaN +// are considering unequal). +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ConstFloat { + pub ty: ast::FloatTy, + + // This is a bit inefficient but it makes conversions below more + // ergonomic, and all of this will go away once `miri` is merged. 
+ pub bits: u128, } -pub use self::ConstFloat::*; impl ConstFloat { /// Description of the type, not the value pub fn description(&self) -> &'static str { - match *self { - F32(_) => "f32", - F64(_) => "f64", - } + self.ty.ty_to_string() } pub fn is_nan(&self) -> bool { - match *self { - F32(f) => f.is_nan(), - F64(f) => f.is_nan(), + match self.ty { + ast::FloatTy::F32 => Single::from_bits(self.bits).is_nan(), + ast::FloatTy::F64 => Double::from_bits(self.bits).is_nan(), } } /// Compares the values if they are of the same type pub fn try_cmp(self, rhs: Self) -> Result<Ordering, ConstMathErr> { - match (self, rhs) { - (F64(a), F64(b)) => { + match (self.ty, rhs.ty) { + (ast::FloatTy::F64, ast::FloatTy::F64) => { + let a = Double::from_bits(self.bits); + let b = Double::from_bits(rhs.bits); // This is pretty bad but it is the existing behavior. - Ok(if a == b { - Ordering::Equal - } else if a < b { - Ordering::Less - } else { - Ordering::Greater - }) + Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater)) } - (F32(a), F32(b)) => { - Ok(if a == b { - Ordering::Equal - } else if a < b { - Ordering::Less - } else { - Ordering::Greater - }) + (ast::FloatTy::F32, ast::FloatTy::F32) => { + let a = Single::from_bits(self.bits); + let b = Single::from_bits(rhs.bits); + Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater)) } _ => Err(CmpBetweenUnequalTypes), } } -} -/// Note that equality for `ConstFloat` means that the it is the same -/// constant, not that the rust values are equal. In particular, `NaN -/// == NaN` (at least if it's the same NaN; distinct encodings for NaN -/// are considering unequal). 
-impl PartialEq for ConstFloat { - fn eq(&self, other: &Self) -> bool { - match (*self, *other) { - (F64(a), F64(b)) => { - unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)} + pub fn from_i128(input: i128, ty: ast::FloatTy) -> Self { + let bits = match ty { + ast::FloatTy::F32 => Single::from_i128(input).value.to_bits(), + ast::FloatTy::F64 => Double::from_i128(input).value.to_bits() + }; + ConstFloat { bits, ty } + } + + pub fn from_u128(input: u128, ty: ast::FloatTy) -> Self { + let bits = match ty { + ast::FloatTy::F32 => Single::from_u128(input).value.to_bits(), + ast::FloatTy::F64 => Double::from_u128(input).value.to_bits() + }; + ConstFloat { bits, ty } + } + + pub fn from_str(num: &str, ty: ast::FloatTy) -> Result<Self, ParseFloatError> { + let bits = match ty { + ast::FloatTy::F32 => { + let rust_bits = num.parse::<f32>()?.to_bits() as u128; + let apfloat = num.parse::<Single>().unwrap_or_else(|e| { + panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e); + }); + let apfloat_bits = apfloat.to_bits(); + assert!(rust_bits == apfloat_bits, + "apfloat::ieee::Single gave different result for `{}`: \ + {}({:#x}) vs Rust's {}({:#x})", + num, apfloat, apfloat_bits, + Single::from_bits(rust_bits), rust_bits); + apfloat_bits } - (F32(a), F32(b)) => { - unsafe{transmute::<_,u32>(a) == transmute::<_,u32>(b)} + ast::FloatTy::F64 => { + let rust_bits = num.parse::<f64>()?.to_bits() as u128; + let apfloat = num.parse::<Double>().unwrap_or_else(|e| { + panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e); + }); + let apfloat_bits = apfloat.to_bits(); + assert!(rust_bits == apfloat_bits, + "apfloat::ieee::Double gave different result for `{}`: \ + {}({:#x}) vs Rust's {}({:#x})", + num, apfloat, apfloat_bits, + Double::from_bits(rust_bits), rust_bits); + apfloat_bits } - _ => false + }; + Ok(ConstFloat { bits, ty }) + } + + pub fn to_i128(self, width: usize) -> Option<i128> { + assert!(width <= 128); + let r = match self.ty { + 
ast::FloatTy::F32 => Single::from_bits(self.bits).to_i128(width), + ast::FloatTy::F64 => Double::from_bits(self.bits).to_i128(width) + }; + if r.status.intersects(Status::INVALID_OP) { + None + } else { + Some(r.value) } } -} -impl Eq for ConstFloat {} + pub fn to_u128(self, width: usize) -> Option<u128> { + assert!(width <= 128); + let r = match self.ty { + ast::FloatTy::F32 => Single::from_bits(self.bits).to_u128(width), + ast::FloatTy::F64 => Double::from_bits(self.bits).to_u128(width) + }; + if r.status.intersects(Status::INVALID_OP) { + None + } else { + Some(r.value) + } + } -impl hash::Hash for ConstFloat { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - match *self { - F64(a) => { - unsafe { transmute::<_,u64>(a) }.hash(state) + pub fn convert(self, to: ast::FloatTy) -> Self { + let bits = match (self.ty, to) { + (ast::FloatTy::F32, ast::FloatTy::F32) | + (ast::FloatTy::F64, ast::FloatTy::F64) => return self, + + (ast::FloatTy::F32, ast::FloatTy::F64) => { + Double::to_bits(Single::from_bits(self.bits).convert(&mut false).value) } - F32(a) => { - unsafe { transmute::<_,u32>(a) }.hash(state) + (ast::FloatTy::F64, ast::FloatTy::F32) => { + Single::to_bits(Double::from_bits(self.bits).convert(&mut false).value) } - } + }; + ConstFloat { bits, ty: to } } } impl ::std::fmt::Display for ConstFloat { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - match *self { - F32(f) => write!(fmt, "{}f32", f), - F64(f) => write!(fmt, "{}f64", f), + match self.ty { + ast::FloatTy::F32 => write!(fmt, "{:#}", Single::from_bits(self.bits))?, + ast::FloatTy::F64 => write!(fmt, "{:#}", Double::from_bits(self.bits))?, } + write!(fmt, "{}", self.ty) + } +} + +impl ::std::fmt::Debug for ConstFloat { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + ::std::fmt::Display::fmt(self, fmt) } } @@ -113,11 +175,20 @@ macro_rules! 
derive_binop { impl ::std::ops::$op for ConstFloat { type Output = Result<Self, ConstMathErr>; fn $func(self, rhs: Self) -> Result<Self, ConstMathErr> { - match (self, rhs) { - (F32(a), F32(b)) => Ok(F32(a.$func(b))), - (F64(a), F64(b)) => Ok(F64(a.$func(b))), - _ => Err(UnequalTypes(Op::$op)), - } + let bits = match (self.ty, rhs.ty) { + (ast::FloatTy::F32, ast::FloatTy::F32) =>{ + let a = Single::from_bits(self.bits); + let b = Single::from_bits(rhs.bits); + a.$func(b).value.to_bits() + } + (ast::FloatTy::F64, ast::FloatTy::F64) => { + let a = Double::from_bits(self.bits); + let b = Double::from_bits(rhs.bits); + a.$func(b).value.to_bits() + } + _ => return Err(UnequalTypes(Op::$op)), + }; + Ok(ConstFloat { bits, ty: self.ty }) } } } @@ -132,9 +203,10 @@ derive_binop!(Rem, rem); impl ::std::ops::Neg for ConstFloat { type Output = Self; fn neg(self) -> Self { - match self { - F32(f) => F32(-f), - F64(f) => F64(-f), - } + let bits = match self.ty { + ast::FloatTy::F32 => (-Single::from_bits(self.bits)).to_bits(), + ast::FloatTy::F64 => (-Double::from_bits(self.bits)).to_bits(), + }; + ConstFloat { bits, ty: self.ty } } } diff --git a/src/librustc_const_math/int.rs b/src/librustc_const_math/int.rs index d97276da9bf..65471416e80 100644 --- a/src/librustc_const_math/int.rs +++ b/src/librustc_const_math/int.rs @@ -211,48 +211,6 @@ impl ConstInt { } } - pub fn to_f32(self) -> f32 { - match self { - I8(i) => i as f32, - I16(i) => i as f32, - I32(i) => i as f32, - I64(i) => i as f32, - I128(i) => i as f32, - Isize(Is16(i)) => i as f32, - Isize(Is32(i)) => i as f32, - Isize(Is64(i)) => i as f32, - U8(i) => i as f32, - U16(i) => i as f32, - U32(i) => i as f32, - U64(i) => i as f32, - U128(i) => i as f32, - Usize(Us16(i)) => i as f32, - Usize(Us32(i)) => i as f32, - Usize(Us64(i)) => i as f32, - } - } - - pub fn to_f64(self) -> f64 { - match self { - I8(i) => i as f64, - I16(i) => i as f64, - I32(i) => i as f64, - I64(i) => i as f64, - I128(i) => i as f64, - Isize(Is16(i)) 
=> i as f64, - Isize(Is32(i)) => i as f64, - Isize(Is64(i)) => i as f64, - U8(i) => i as f64, - U16(i) => i as f64, - U32(i) => i as f64, - U64(i) => i as f64, - U128(i) => i as f64, - Usize(Us16(i)) => i as f64, - Usize(Us32(i)) => i as f64, - Usize(Us64(i)) => i as f64, - } - } - pub fn is_negative(&self) -> bool { match *self { I8(v) => v < 0, diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs index 0dce0e1fb02..3947edecb5a 100644 --- a/src/librustc_const_math/lib.rs +++ b/src/librustc_const_math/lib.rs @@ -26,6 +26,8 @@ #![feature(i128)] #![feature(i128_type)] +extern crate rustc_apfloat; + extern crate syntax; extern crate serialize as rustc_serialize; // used by deriving diff --git a/src/librustc_data_structures/bitslice.rs b/src/librustc_data_structures/bitslice.rs index ba53578e579..f74af6ee163 100644 --- a/src/librustc_data_structures/bitslice.rs +++ b/src/librustc_data_structures/bitslice.rs @@ -134,9 +134,11 @@ pub trait BitwiseOperator { pub struct Union; impl BitwiseOperator for Union { + #[inline] fn join(&self, a: usize, b: usize) -> usize { a | b } } pub struct Subtract; impl BitwiseOperator for Subtract { + #[inline] fn join(&self, a: usize, b: usize) -> usize { a & !b } } diff --git a/src/librustc_data_structures/fnv.rs b/src/librustc_data_structures/fnv.rs deleted file mode 100644 index 5bd57236e7c..00000000000 --- a/src/librustc_data_structures/fnv.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::collections::{HashMap, HashSet}; -use std::default::Default; -use std::hash::{Hasher, Hash, BuildHasherDefault}; - -pub type FnvHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FnvHasher>>; -pub type FnvHashSet<V> = HashSet<V, BuildHasherDefault<FnvHasher>>; - -#[allow(non_snake_case)] -pub fn FnvHashMap<K: Hash + Eq, V>() -> FnvHashMap<K, V> { - HashMap::default() -} - -#[allow(non_snake_case)] -pub fn FnvHashSet<V: Hash + Eq>() -> FnvHashSet<V> { - HashSet::default() -} - -/// A speedy hash algorithm for node ids and def ids. The hashmap in -/// liballoc by default uses SipHash which isn't quite as speedy as we -/// want. In the compiler we're not really worried about DOS attempts, so we -/// just default to a non-cryptographic hash. -/// -/// This uses FNV hashing, as described here: -/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function -pub struct FnvHasher(u64); - -impl Default for FnvHasher { - /// Creates a `FnvHasher`, with a 64-bit hex initial value. 
- #[inline] - fn default() -> FnvHasher { - FnvHasher(0xcbf29ce484222325) - } -} - -impl Hasher for FnvHasher { - #[inline] - fn write(&mut self, bytes: &[u8]) { - let FnvHasher(mut hash) = *self; - for byte in bytes { - hash = hash ^ (*byte as u64); - hash = hash.wrapping_mul(0x100000001b3); - } - *self = FnvHasher(hash); - } - - #[inline] - fn finish(&self) -> u64 { - self.0 - } -} - -pub fn hash<T: Hash>(v: &T) -> u64 { - let mut state = FnvHasher::default(); - v.hash(&mut state); - state.finish() -} diff --git a/src/librustc_data_structures/graph/mod.rs b/src/librustc_data_structures/graph/mod.rs index f94ed6b7209..f562ae0e3b8 100644 --- a/src/librustc_data_structures/graph/mod.rs +++ b/src/librustc_data_structures/graph/mod.rs @@ -308,6 +308,42 @@ impl<N: Debug, E: Debug> Graph<N, E> { DepthFirstTraversal::with_start_node(self, start, direction) } + pub fn nodes_in_postorder<'a>(&'a self, + direction: Direction, + entry_node: NodeIndex) + -> Vec<NodeIndex> + { + let mut visited = BitVector::new(self.len_nodes()); + let mut stack = vec![]; + let mut result = Vec::with_capacity(self.len_nodes()); + let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| { + if visited.insert(node.0) { + stack.push((node, self.adjacent_edges(node, direction))); + } + }; + + for node in Some(entry_node).into_iter() + .chain(self.enumerated_nodes().map(|(node, _)| node)) + { + push_node(&mut stack, node); + while let Some((node, mut iter)) = stack.pop() { + if let Some((_, child)) = iter.next() { + let target = child.source_or_target(direction); + // the current node needs more processing, so + // add it back to the stack + stack.push((node, iter)); + // and then push the new node + push_node(&mut stack, target); + } else { + result.push(node); + } + } + } + + assert_eq!(result.len(), self.len_nodes()); + result + } + /// Whether or not a node can be reached from itself. 
pub fn is_node_cyclic(&self, starting_node_index: NodeIndex) -> bool { // This is similar to depth traversal below, but we diff --git a/src/librustc_data_structures/graph/tests.rs b/src/librustc_data_structures/graph/tests.rs index bdefc39a61a..b6a0d4cff5a 100644 --- a/src/librustc_data_structures/graph/tests.rs +++ b/src/librustc_data_structures/graph/tests.rs @@ -175,3 +175,46 @@ fn is_node_cyclic_b() { let graph = create_graph_with_cycle(); assert!(graph.is_node_cyclic(NodeIndex(1))); } + +#[test] +fn nodes_in_postorder() { + let expected = vec![ + ("A", vec!["C", "E", "D", "B", "A", "F"]), + ("B", vec!["C", "E", "D", "B", "A", "F"]), + ("C", vec!["C", "E", "D", "B", "A", "F"]), + ("D", vec!["C", "E", "D", "B", "A", "F"]), + ("E", vec!["C", "E", "D", "B", "A", "F"]), + ("F", vec!["C", "E", "D", "B", "F", "A"]) + ]; + + let graph = create_graph(); + + for ((idx, node), &(node_name, ref expected)) + in graph.enumerated_nodes().zip(&expected) + { + assert_eq!(node.data, node_name); + assert_eq!(expected, + &graph.nodes_in_postorder(OUTGOING, idx) + .into_iter().map(|idx| *graph.node_data(idx)) + .collect::<Vec<&str>>()); + } + + let expected = vec![ + ("A", vec!["D", "C", "B", "A"]), + ("B", vec!["D", "C", "B", "A"]), + ("C", vec!["B", "D", "C", "A"]), + ("D", vec!["C", "B", "D", "A"]), + ]; + + let graph = create_graph_with_cycle(); + + for ((idx, node), &(node_name, ref expected)) + in graph.enumerated_nodes().zip(&expected) + { + assert_eq!(node.data, node_name); + assert_eq!(expected, + &graph.nodes_in_postorder(OUTGOING, idx) + .into_iter().map(|idx| *graph.node_data(idx)) + .collect::<Vec<&str>>()); + } +} diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index bb27d479a41..3cb3e088364 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -65,7 +65,6 @@ pub mod snapshot_vec; pub mod stable_hasher; pub mod transitive_relation; pub mod unify; -pub mod fnv; pub mod fx; pub mod 
tuple_slice; pub mod veccell; diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 704d3568ca3..6e8d3494ba5 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -15,8 +15,7 @@ use rustc_data_structures::stable_hasher::StableHasher; use rustc_mir as mir; use rustc::session::{Session, CompileResult}; use rustc::session::CompileIncomplete; -use rustc::session::config::{self, Input, OutputFilenames, OutputType, - OutputTypes}; +use rustc::session::config::{self, Input, OutputFilenames, OutputType}; use rustc::session::search_paths::PathKind; use rustc::lint; use rustc::middle::{self, dependency_format, stability, reachable}; @@ -26,7 +25,6 @@ use rustc::ty::{self, TyCtxt, Resolutions, GlobalArenas}; use rustc::traits; use rustc::util::common::{ErrorReported, time}; use rustc::util::nodemap::NodeSet; -use rustc::util::fs::rename_or_copy_remove; use rustc_allocator as allocator; use rustc_borrowck as borrowck; use rustc_incremental::{self, IncrementalHashesMap}; @@ -208,7 +206,7 @@ pub fn compile_input(sess: &Session, println!("Pre-trans"); tcx.print_debug_stats(); } - let trans = phase_4_translate_to_llvm(tcx, analysis, &incremental_hashes_map, + let trans = phase_4_translate_to_llvm(tcx, analysis, incremental_hashes_map, &outputs); if log_enabled!(::log::LogLevel::Info) { @@ -231,7 +229,7 @@ pub fn compile_input(sess: &Session, sess.code_stats.borrow().print_type_sizes(); } - let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs); + let (phase5_result, trans) = phase_5_run_llvm_passes(sess, trans); controller_entry_point!(after_llvm, sess, @@ -239,8 +237,6 @@ pub fn compile_input(sess: &Session, phase5_result); phase5_result?; - write::cleanup_llvm(&trans); - phase_6_link_output(sess, &trans, &outputs); // Now that we won't touch anything in the incremental compilation directory @@ -933,6 +929,8 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, passes.push_pass(MIR_CONST, 
mir::transform::type_check::TypeckMir); passes.push_pass(MIR_CONST, mir::transform::rustc_peek::SanityCheck); + // We compute "constant qualifications" betwen MIR_CONST and MIR_VALIDATED. + // What we need to run borrowck etc. passes.push_pass(MIR_VALIDATED, mir::transform::qualify_consts::QualifyAndPromoteConstants); passes.push_pass(MIR_VALIDATED, @@ -940,18 +938,23 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, passes.push_pass(MIR_VALIDATED, mir::transform::simplify::SimplifyCfg::new("qualify-consts")); passes.push_pass(MIR_VALIDATED, mir::transform::nll::NLL); - // Optimizations begin. - passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); - passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyCfg::new("no-landing-pads")); + // borrowck runs between MIR_VALIDATED and MIR_OPTIMIZED. - // From here on out, regions are gone. - passes.push_pass(MIR_OPTIMIZED, mir::transform::erase_regions::EraseRegions); + // These next passes must be executed together + passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(MIR_OPTIMIZED, mir::transform::add_call_guards::AddCallGuards); passes.push_pass(MIR_OPTIMIZED, mir::transform::elaborate_drops::ElaborateDrops); passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyCfg::new("elaborate-drops")); - // No lifetime analysis based on borrowing can be done from here on out. + + // AddValidation needs to run after ElaborateDrops and before EraseRegions. + passes.push_pass(MIR_OPTIMIZED, mir::transform::add_validation::AddValidation); + + // From here on out, regions are gone. + passes.push_pass(MIR_OPTIMIZED, mir::transform::erase_regions::EraseRegions); + + // Optimizations begin. 
passes.push_pass(MIR_OPTIMIZED, mir::transform::inline::Inline); passes.push_pass(MIR_OPTIMIZED, mir::transform::instcombine::InstCombine); passes.push_pass(MIR_OPTIMIZED, mir::transform::deaggregator::Deaggregator); @@ -1059,9 +1062,9 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, /// be discarded. pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, - incremental_hashes_map: &IncrementalHashesMap, + incremental_hashes_map: IncrementalHashesMap, output_filenames: &OutputFilenames) - -> trans::CrateTranslation { + -> write::OngoingCrateTranslation { let time_passes = tcx.sess.time_passes(); time(time_passes, @@ -1071,63 +1074,27 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let translation = time(time_passes, "translation", - move || trans::trans_crate(tcx, analysis, &incremental_hashes_map, output_filenames)); + move || trans::trans_crate(tcx, analysis, incremental_hashes_map, output_filenames)); - time(time_passes, - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(time_passes, - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx, - &incremental_hashes_map, - &translation.metadata.hashes, - translation.link.crate_hash)); translation } /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. 
pub fn phase_5_run_llvm_passes(sess: &Session, - trans: &trans::CrateTranslation, - outputs: &OutputFilenames) -> CompileResult { - if sess.opts.cg.no_integrated_as || - (sess.target.target.options.no_integrated_as && - (outputs.outputs.contains_key(&OutputType::Object) || - outputs.outputs.contains_key(&OutputType::Exe))) - { - let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, trans, &output_types, outputs)); - - write::run_assembler(sess, outputs); - - // HACK the linker expects the object file to be named foo.0.o but - // `run_assembler` produces an object named just foo.o. Rename it if we - // are going to build an executable - if sess.opts.output_types.contains_key(&OutputType::Exe) { - let f = outputs.path(OutputType::Object); - rename_or_copy_remove(&f, - f.with_file_name(format!("{}.0.o", - f.file_stem().unwrap().to_string_lossy()))).unwrap(); - } + trans: write::OngoingCrateTranslation) + -> (CompileResult, trans::CrateTranslation) { + let trans = trans.join(sess); - // Remove assembly source, unless --save-temps was specified - if !sess.opts.cg.save_temps { - fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); - } - } else { - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, trans, &sess.opts.output_types, outputs)); + if sess.opts.debugging_opts.incremental_info { + write::dump_incremental_data(&trans); } time(sess.time_passes(), "serialize work products", move || rustc_incremental::save_work_products(sess)); - sess.compile_status() + (sess.compile_status(), trans) } /// Run the linker on any artifacts that resulted from the LLVM run. 
diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index e139f81416e..4c337993468 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -795,7 +795,12 @@ fn usage(verbose: bool, include_unstable_options: bool) { (option.apply)(&mut options); } let message = format!("Usage: rustc [OPTIONS] INPUT"); - let extra_help = if verbose { + let nightly_help = if nightly_options::is_nightly_build() { + "\n -Z help Print internal options for debugging rustc" + } else { + "" + }; + let verbose_help = if verbose { "" } else { "\n --help -v Print the full set of options rustc accepts" @@ -803,11 +808,10 @@ fn usage(verbose: bool, include_unstable_options: bool) { println!("{}\nAdditional help: -C help Print codegen options -W help \ - Print 'lint' options and default settings - -Z help Print internal \ - options for debugging rustc{}\n", + Print 'lint' options and default settings{}{}\n", options.usage(&message), - extra_help); + nightly_help, + verbose_help); } fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) { @@ -1203,6 +1207,10 @@ pub fn diagnostics_registry() -> errors::registry::Registry { all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_mir::DIAGNOSTICS); + all_errors.extend_from_slice(&syntax::DIAGNOSTICS); Registry::new(&all_errors) } diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index 6f6470089d7..5d7c5e2829a 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -82,26 +82,27 @@ impl<'a> DiagnosticBuilder<'a> { return; } - match self.level { + let is_error = match self.level { Level::Bug | Level::Fatal | Level::PhaseFatal | 
Level::Error => { - self.handler.bump_err_count(); + true } Level::Warning | Level::Note | Level::Help | Level::Cancelled => { + false } - } + }; self.handler.emitter.borrow_mut().emit(&self); self.cancel(); - if self.level == Level::Error { - self.handler.panic_if_treat_err_as_bug(); + if is_error { + self.handler.bump_err_count(); } // if self.is_fatal() { @@ -210,4 +211,3 @@ impl<'a> Drop for DiagnosticBuilder<'a> { } } } - diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index e873137444d..159d2c7a2df 100644 --- a/src/librustc_errors/lib.rs +++ b/src/librustc_errors/lib.rs @@ -399,7 +399,6 @@ impl Handler { pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> FatalError { self.emit(&sp.into(), msg, Fatal); - self.panic_if_treat_err_as_bug(); FatalError } pub fn span_fatal_with_code<S: Into<MultiSpan>>(&self, @@ -408,12 +407,10 @@ impl Handler { code: &str) -> FatalError { self.emit_with_code(&sp.into(), msg, code, Fatal); - self.panic_if_treat_err_as_bug(); FatalError } pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) { self.emit(&sp.into(), msg, Error); - self.panic_if_treat_err_as_bug(); } pub fn mut_span_err<'a, S: Into<MultiSpan>>(&'a self, sp: S, @@ -425,7 +422,6 @@ impl Handler { } pub fn span_err_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: &str) { self.emit_with_code(&sp.into(), msg, code, Error); - self.panic_if_treat_err_as_bug(); } pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) { self.emit(&sp.into(), msg, Warning); @@ -494,6 +490,7 @@ impl Handler { } pub fn bump_err_count(&self) { + self.panic_if_treat_err_as_bug(); self.err_count.set(self.err_count.get() + 1); } diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index 1bdd4f851fb..339e2bdc157 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -34,7 +34,7 @@ use super::file_format; use super::work_product; pub fn 
save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap, + incremental_hashes_map: IncrementalHashesMap, metadata_hashes: &EncodedMetadataHashes, svh: Svh) { debug!("save_dep_graph()"); @@ -51,7 +51,7 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges()); } - let mut hcx = HashContext::new(tcx, incremental_hashes_map); + let mut hcx = HashContext::new(tcx, &incremental_hashes_map); let preds = Predecessors::new(&query, &mut hcx); let mut current_metadata_hashes = FxHashMap(); diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 02d68a41b4c..ca30ed4a536 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -723,6 +723,46 @@ impl EarlyLintPass for IllegalFloatLiteralPattern { } declare_lint! { + pub UNUSED_DOC_COMMENT, + Warn, + "detects doc comments that aren't used by rustdoc" +} + +#[derive(Copy, Clone)] +pub struct UnusedDocComment; + +impl LintPass for UnusedDocComment { + fn get_lints(&self) -> LintArray { + lint_array![UNUSED_DOC_COMMENT] + } +} + +impl UnusedDocComment { + fn warn_if_doc<'a, 'tcx, + I: Iterator<Item=&'a ast::Attribute>, + C: LintContext<'tcx>>(&self, mut attrs: I, cx: &C) { + if let Some(attr) = attrs.find(|a| a.is_value_str() && a.check_name("doc")) { + cx.struct_span_lint(UNUSED_DOC_COMMENT, attr.span, "doc comment not used by rustdoc") + .emit(); + } + } +} + +impl EarlyLintPass for UnusedDocComment { + fn check_local(&mut self, cx: &EarlyContext, decl: &ast::Local) { + self.warn_if_doc(decl.attrs.iter(), cx); + } + + fn check_arm(&mut self, cx: &EarlyContext, arm: &ast::Arm) { + self.warn_if_doc(arm.attrs.iter(), cx); + } + + fn check_expr(&mut self, cx: &EarlyContext, expr: &ast::Expr) { + self.warn_if_doc(expr.attrs.iter(), cx); + } +} + +declare_lint! 
{ pub UNCONDITIONAL_RECURSION, Warn, "functions that cannot return without calling themselves" diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 21dca7f6c61..83c00c178a0 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -111,6 +111,7 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { UnusedImportBraces, AnonymousParameters, IllegalFloatLiteralPattern, + UnusedDocComment, ); add_early_builtin_with_new!(sess, diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index 473c0f3ffda..d7d0dc7cb35 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -44,9 +44,13 @@ impl UnusedMut { let mut mutables = FxHashMap(); for p in pats { - p.each_binding(|mode, id, _, path1| { + p.each_binding(|_, id, span, path1| { + let bm = match cx.tables.pat_binding_modes.get(&id) { + Some(&bm) => bm, + None => span_bug!(span, "missing binding mode"), + }; let name = path1.node; - if let hir::BindByValue(hir::MutMutable) = mode { + if let ty::BindByValue(hir::MutMutable) = bm { if !name.as_str().starts_with("_") { match mutables.entry(name) { Vacant(entry) => { diff --git a/src/librustc_llvm/archive_ro.rs b/src/librustc_llvm/archive_ro.rs index b3f5f8e5360..0b24e55541b 100644 --- a/src/librustc_llvm/archive_ro.rs +++ b/src/librustc_llvm/archive_ro.rs @@ -39,14 +39,14 @@ impl ArchiveRO { /// /// If this archive is used with a mutable method, then an error will be /// raised. 
- pub fn open(dst: &Path) -> Option<ArchiveRO> { + pub fn open(dst: &Path) -> Result<ArchiveRO, String> { return unsafe { let s = path2cstr(dst); let ar = ::LLVMRustOpenArchive(s.as_ptr()); if ar.is_null() { - None + Err(::last_error().unwrap_or("failed to open archive".to_string())) } else { - Some(ArchiveRO { ptr: ar }) + Ok(ArchiveRO { ptr: ar }) } }; diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 24d4040ccb0..20735af69e3 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -598,7 +598,6 @@ extern "C" { // Operations on scalar constants pub fn LLVMConstInt(IntTy: TypeRef, N: c_ulonglong, SignExtend: Bool) -> ValueRef; pub fn LLVMConstIntOfArbitraryPrecision(IntTy: TypeRef, Wn: c_uint, Ws: *const u64) -> ValueRef; - pub fn LLVMConstReal(RealTy: TypeRef, N: f64) -> ValueRef; pub fn LLVMConstIntGetZExtValue(ConstantVal: ValueRef) -> c_ulonglong; pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong; pub fn LLVMRustConstInt128Get(ConstantVal: ValueRef, SExt: bool, diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index 4c883f3e014..64cbe638e8d 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -15,7 +15,8 @@ use schema; use rustc::ty::maps::QueryConfig; use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind, NativeLibrary, MetadataLoader, LinkMeta, - LinkagePreference, LoadedMacro, EncodedMetadata}; + LinkagePreference, LoadedMacro, EncodedMetadata, + EncodedMetadataHashes}; use rustc::hir::def; use rustc::middle::lang_items; use rustc::session::Session; @@ -390,6 +391,7 @@ impl CrateStore for cstore::CStore { legacy: def.legacy, }), vis: ast::Visibility::Inherited, + tokens: None, }) } @@ -443,7 +445,7 @@ impl CrateStore for cstore::CStore { tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, reachable: &NodeSet) - -> EncodedMetadata + -> (EncodedMetadata, EncodedMetadataHashes) { encoder::encode_metadata(tcx, 
link_meta, reachable) } diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index d29c2746092..1d8feb6b636 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -1648,7 +1648,7 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ImplVisitor<'a, 'tcx> { pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, exported_symbols: &NodeSet) - -> EncodedMetadata + -> (EncodedMetadata, EncodedMetadataHashes) { let mut cursor = Cursor::new(vec![]); cursor.write_all(METADATA_HEADER).unwrap(); @@ -1691,10 +1691,7 @@ pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, result[header + 2] = (pos >> 8) as u8; result[header + 3] = (pos >> 0) as u8; - EncodedMetadata { - raw_data: result, - hashes: metadata_hashes, - } + (EncodedMetadata { raw_data: result }, metadata_hashes) } pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions { diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index 865174aa272..4583d80b83d 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -86,7 +86,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let tcx = this.hir.tcx(); // Enter the remainder scope, i.e. the bindings' destruction scope. - this.push_scope(remainder_scope); + this.push_scope((remainder_scope, source_info)); let_extent_stack.push(remainder_scope); // Declare the bindings, which may create a visibility scope. 
diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index c1209f4fbe2..d585672d6da 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -242,7 +242,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ExprKind::Yield { value } => { let value = unpack!(block = this.as_operand(block, scope, value)); let resume = this.cfg.start_new_block(); - let cleanup = this.generator_drop_cleanup(expr_span); + let cleanup = this.generator_drop_cleanup(); this.cfg.terminate(block, source_info, TerminatorKind::Yield { value: value, resume: resume, diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index e8fb918f76c..96df4037d05 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -237,7 +237,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .collect(); let success = this.cfg.start_new_block(); - let cleanup = this.diverge_cleanup(expr_span); + let cleanup = this.diverge_cleanup(); this.cfg.terminate(block, source_info, TerminatorKind::Call { func: fun, args: args, diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index f4d43e041ae..28386fa598c 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -306,7 +306,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let bool_ty = self.hir.bool_ty(); let eq_result = self.temp(bool_ty, test.span); let eq_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(test.span); + let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::Call { func: Operand::Constant(box Constant { span: test.span, diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 32b429be4a6..2033131c065 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -107,6 +107,9 @@ pub struct Scope<'tcx> { /// the 
extent of this scope within source code. extent: CodeExtent, + /// the span of that extent + extent_span: Span, + /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. This includes destructors, /// but not StorageDead statements, which don't get emitted at all @@ -116,7 +119,7 @@ pub struct Scope<'tcx> { /// * pollutting the cleanup MIR with StorageDead creates /// landing pads even though there's no actual destructors /// * freeing up stack space has no effect during unwinding - pub(super) needs_cleanup: bool, + needs_cleanup: bool, /// set of lvalues to drop when exiting this scope. This starts /// out empty but grows as variables are declared during the @@ -231,6 +234,15 @@ impl CachedBlock { } } +impl DropKind { + fn may_panic(&self) -> bool { + match *self { + DropKind::Value { .. } => true, + DropKind::Storage => false + } + } +} + impl<'tcx> Scope<'tcx> { /// Invalidate all the cached blocks in the scope. /// @@ -318,7 +330,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R> { debug!("in_opt_scope(opt_extent={:?}, block={:?})", opt_extent, block); - if let Some(extent) = opt_extent { self.push_scope(extent.0); } + if let Some(extent) = opt_extent { self.push_scope(extent); } let rv = unpack!(block = f(self)); if let Some(extent) = opt_extent { unpack!(block = self.pop_scope(extent, block)); @@ -337,7 +349,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R> { debug!("in_scope(extent={:?}, block={:?})", extent, block); - self.push_scope(extent.0); + self.push_scope(extent); let rv = unpack!(block = f(self)); unpack!(block = self.pop_scope(extent, block)); debug!("in_scope: exiting extent={:?} block={:?}", extent, block); @@ -348,12 +360,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// scope and call `pop_scope` afterwards. 
Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. - pub fn push_scope(&mut self, extent: CodeExtent) { + pub fn push_scope(&mut self, extent: (CodeExtent, SourceInfo)) { debug!("push_scope({:?})", extent); let vis_scope = self.visibility_scope; self.scopes.push(Scope { visibility_scope: vis_scope, - extent: extent, + extent: extent.0, + extent_span: extent.1.span, needs_cleanup: false, drops: vec![], free: None, @@ -370,9 +383,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { mut block: BasicBlock) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); - // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup - // to make sure all the `cached_block`s are filled in. - self.diverge_cleanup(extent.1.span); + // If we are emitting a `drop` statement, we need to have the cached + // diverge cleanup pads ready in case that drop panics. + let may_panic = + self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic()); + if may_panic { + self.diverge_cleanup(); + } let scope = self.scopes.pop().unwrap(); assert_eq!(scope.extent, extent.0); unpack!(block = build_scope_drops(&mut self.cfg, @@ -404,6 +421,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let len = self.scopes.len(); assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes"); let tmp = self.get_unit_temp(); + + // If we are emitting a `drop` statement, we need to have the cached + // diverge cleanup pads ready in case that drop panics. + let may_panic = self.scopes[(len - scope_count)..].iter() + .any(|s| s.drops.iter().any(|s| s.kind.may_panic())); + if may_panic { + self.diverge_cleanup(); + } + { let mut rest = &mut self.scopes[(len - scope_count)..]; while let Some((scope, rest_)) = {rest}.split_last_mut() { @@ -446,13 +472,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// /// This path terminates in GeneratorDrop. Returns the start of the path. 
/// None indicates there’s no cleanup to do at this point. - pub fn generator_drop_cleanup(&mut self, span: Span) -> Option<BasicBlock> { + pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> { if !self.scopes.iter().any(|scope| scope.needs_cleanup) { return None; } // Fill in the cache - self.diverge_cleanup_gen(span, true); + self.diverge_cleanup_gen(true); let src_info = self.scopes[0].source_info(self.fn_span); let tmp = self.get_unit_temp(); @@ -715,11 +741,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// This path terminates in Resume. Returns the start of the path. /// See module comment for more details. None indicates there’s no /// cleanup to do at this point. - pub fn diverge_cleanup(&mut self, span: Span) -> Option<BasicBlock> { - self.diverge_cleanup_gen(span, false) + pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> { + self.diverge_cleanup_gen(false) } - fn diverge_cleanup_gen(&mut self, span: Span, generator_drop: bool) -> Option<BasicBlock> { + fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> { if !self.scopes.iter().any(|scope| scope.needs_cleanup) { return None; } @@ -756,7 +782,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, - span, + scope.extent_span, scope, target, generator_drop); @@ -775,7 +801,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } let source_info = self.source_info(span); let next_target = self.cfg.start_new_block(); - let diverge_target = self.diverge_cleanup(span); + let diverge_target = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::Drop { location: location, @@ -793,7 +819,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { value: Operand<'tcx>) -> BlockAnd<()> { let source_info = self.source_info(span); let next_target = self.cfg.start_new_block(); - let diverge_target = self.diverge_cleanup(span); + let diverge_target = self.diverge_cleanup(); self.cfg.terminate(block, 
source_info, TerminatorKind::DropAndReplace { location: location, @@ -816,7 +842,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source_info = self.source_info(span); let success_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(span); + let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::Assert { @@ -839,51 +865,54 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, arg_count: usize, generator_drop: bool) -> BlockAnd<()> { - + debug!("build_scope_drops({:?} -> {:?})", block, scope); let mut iter = scope.drops.iter().rev().peekable(); while let Some(drop_data) = iter.next() { let source_info = scope.source_info(drop_data.span); - if let DropKind::Value { .. } = drop_data.kind { - // Try to find the next block with its cached block - // for us to diverge into in case the drop panics. - let on_diverge = iter.peek().iter().filter_map(|dd| { - match dd.kind { - DropKind::Value { cached_block } => cached_block.get(generator_drop), - DropKind::Storage => None - } - }).next(); - // If there’s no `cached_block`s within current scope, - // we must look for one in the enclosing scope. - let on_diverge = on_diverge.or_else(||{ - earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next() - }); - let next = cfg.start_new_block(); - cfg.terminate(block, source_info, TerminatorKind::Drop { - location: drop_data.location.clone(), - target: next, - unwind: on_diverge - }); - block = next; - } match drop_data.kind { - DropKind::Value { .. } | - DropKind::Storage => { - // We do not need to emit these for generator drops - if generator_drop { - continue - } - - // Only temps and vars need their storage dead. - match drop_data.location { - Lvalue::Local(index) if index.index() > arg_count => {} - _ => continue - } + DropKind::Value { .. } => { + // Try to find the next block with its cached block + // for us to diverge into in case the drop panics. 
+ let on_diverge = iter.peek().iter().filter_map(|dd| { + match dd.kind { + DropKind::Value { + cached_block: CachedBlock { + unwind: None, + generator_drop: None, + } + } => { + span_bug!(drop_data.span, "cached block not present?") + } + DropKind::Value { cached_block } => cached_block.get(generator_drop), + DropKind::Storage => None + } + }).next(); + // If there’s no `cached_block`s within current scope, + // we must look for one in the enclosing scope. + let on_diverge = on_diverge.or_else(|| { + earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next() + }); + let next = cfg.start_new_block(); + cfg.terminate(block, source_info, TerminatorKind::Drop { + location: drop_data.location.clone(), + target: next, + unwind: on_diverge + }); + block = next; + } + DropKind::Storage => {} + } + // Drop the storage for both value and storage drops. + // Only temps and vars need their storage dead. + match drop_data.location { + Lvalue::Local(index) if index.index() > arg_count => { cfg.push(block, Statement { source_info: source_info, kind: StatementKind::StorageDead(drop_data.location.clone()) }); } + _ => continue } } block.unit() diff --git a/src/librustc_mir/dataflow/drop_flag_effects.rs b/src/librustc_mir/dataflow/drop_flag_effects.rs index daafbecc5df..24d5aa9e46b 100644 --- a/src/librustc_mir/dataflow/drop_flag_effects.rs +++ b/src/librustc_mir/dataflow/drop_flag_effects.rs @@ -289,6 +289,7 @@ pub(crate) fn drop_flag_effects_for_location<'a, 'tcx, F>( mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) 
| mir::StatementKind::Nop => {} }, None => { diff --git a/src/librustc_mir/dataflow/impls/mod.rs b/src/librustc_mir/dataflow/impls/mod.rs index 97c996dea68..d5bdc71a705 100644 --- a/src/librustc_mir/dataflow/impls/mod.rs +++ b/src/librustc_mir/dataflow/impls/mod.rs @@ -486,6 +486,7 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) | mir::StatementKind::Nop => {} } } diff --git a/src/librustc_mir/dataflow/move_paths/abs_domain.rs b/src/librustc_mir/dataflow/move_paths/abs_domain.rs index 5e61c2ec7a2..1255209322b 100644 --- a/src/librustc_mir/dataflow/move_paths/abs_domain.rs +++ b/src/librustc_mir/dataflow/move_paths/abs_domain.rs @@ -23,11 +23,14 @@ use rustc::mir::LvalueElem; use rustc::mir::{Operand, ProjectionElem}; +use rustc::ty::Ty; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct AbstractOperand; +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct AbstractType; pub type AbstractElem<'tcx> = - ProjectionElem<'tcx, AbstractOperand>; + ProjectionElem<'tcx, AbstractOperand, AbstractType>; pub trait Lift { type Abstract; @@ -37,6 +40,10 @@ impl<'tcx> Lift for Operand<'tcx> { type Abstract = AbstractOperand; fn lift(&self) -> Self::Abstract { AbstractOperand } } +impl<'tcx> Lift for Ty<'tcx> { + type Abstract = AbstractType; + fn lift(&self) -> Self::Abstract { AbstractType } +} impl<'tcx> Lift for LvalueElem<'tcx> { type Abstract = AbstractElem<'tcx>; fn lift(&self) -> Self::Abstract { @@ -44,7 +51,7 @@ impl<'tcx> Lift for LvalueElem<'tcx> { ProjectionElem::Deref => ProjectionElem::Deref, ProjectionElem::Field(ref f, ty) => - ProjectionElem::Field(f.clone(), ty.clone()), + ProjectionElem::Field(f.clone(), ty.lift()), ProjectionElem::Index(ref i) => ProjectionElem::Index(i.lift()), ProjectionElem::Subslice {from, to} => diff --git a/src/librustc_mir/dataflow/move_paths/mod.rs 
b/src/librustc_mir/dataflow/move_paths/mod.rs index 4d4161d4c80..f67891d54fd 100644 --- a/src/librustc_mir/dataflow/move_paths/mod.rs +++ b/src/librustc_mir/dataflow/move_paths/mod.rs @@ -416,6 +416,7 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } StatementKind::InlineAsm { .. } | StatementKind::EndRegion(_) | + StatementKind::Validate(..) | StatementKind::Nop => {} } } diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs index 6f3db0b388d..6530b356e33 100644 --- a/src/librustc_mir/diagnostics.rs +++ b/src/librustc_mir/diagnostics.rs @@ -122,10 +122,8 @@ On the other hand, static and constant pointers can point either to a known numeric address or to the address of a symbol. ``` +static MY_STATIC: u32 = 42; static MY_STATIC_ADDR: &'static u32 = &MY_STATIC; -// ... and also -static MY_STATIC_ADDR2: *const u32 = &MY_STATIC; - const CONST_ADDR: *const u8 = 0x5f3759df as *const u8; ``` @@ -160,6 +158,16 @@ Remember: you can't use a function call inside a const's initialization expression! However, you can totally use it anywhere else: ``` +enum Test { + V1 +} + +impl Test { + fn func(&self) -> i32 { + 12 + } +} + fn main() { const FOO: Test = Test::V1; diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 912c2043390..ea8624930e5 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -57,3 +57,5 @@ pub fn provide(providers: &mut Providers) { shim::provide(providers); transform::provide(providers); } + +__build_diagnostic_array! { librustc_mir, DIAGNOSTICS } diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs new file mode 100644 index 00000000000..52c2eaa7cb6 --- /dev/null +++ b/src/librustc_mir/transform/add_validation.rs @@ -0,0 +1,390 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass adds validation calls (AcquireValid, ReleaseValid) where appropriate. +//! It has to be run really early, before transformations like inlining, because +//! introducing these calls *adds* UB -- so, conceptually, this pass is actually part +//! of MIR building, and only after this pass we think of the program has having the +//! normal MIR semantics. + +use rustc::ty::{self, TyCtxt, RegionKind}; +use rustc::hir; +use rustc::mir::*; +use rustc::mir::transform::{MirPass, MirSource}; +use rustc::middle::region::CodeExtent; + +pub struct AddValidation; + +/// Determine the "context" of the lval: Mutability and region. +fn lval_context<'a, 'tcx, D>( + lval: &Lvalue<'tcx>, + local_decls: &D, + tcx: TyCtxt<'a, 'tcx, 'tcx> +) -> (Option<CodeExtent>, hir::Mutability) + where D: HasLocalDecls<'tcx> +{ + use rustc::mir::Lvalue::*; + + match *lval { + Local { .. } => (None, hir::MutMutable), + Static(_) => (None, hir::MutImmutable), + Projection(ref proj) => { + match proj.elem { + ProjectionElem::Deref => { + // Computing the inside the recursion makes this quadratic. + // We don't expect deep paths though. + let ty = proj.base.ty(local_decls, tcx).to_ty(tcx); + // A Deref projection may restrict the context, this depends on the type + // being deref'd. 
+ let context = match ty.sty { + ty::TyRef(re, tam) => { + let re = match re { + &RegionKind::ReScope(ce) => Some(ce), + &RegionKind::ReErased => + bug!("AddValidation pass must be run before erasing lifetimes"), + _ => None + }; + (re, tam.mutbl) + } + ty::TyRawPtr(_) => + // There is no guarantee behind even a mutable raw pointer, + // no write locks are acquired there, so we also don't want to + // release any. + (None, hir::MutImmutable), + ty::TyAdt(adt, _) if adt.is_box() => (None, hir::MutMutable), + _ => bug!("Deref on a non-pointer type {:?}", ty), + }; + // "Intersect" this restriction with proj.base. + if let (Some(_), hir::MutImmutable) = context { + // This is already as restricted as it gets, no need to even recurse + context + } else { + let base_context = lval_context(&proj.base, local_decls, tcx); + // The region of the outermost Deref is always most restrictive. + let re = context.0.or(base_context.0); + let mutbl = context.1.and(base_context.1); + (re, mutbl) + } + + } + _ => lval_context(&proj.base, local_decls, tcx), + } + } + } +} + +/// Check if this function contains an unsafe block or is an unsafe function. +fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> bool { + use rustc::hir::intravisit::{self, Visitor, FnKind}; + use rustc::hir::map::blocks::FnLikeNode; + use rustc::hir::map::Node; + + /// Decide if this is an unsafe block + fn block_is_unsafe(block: &hir::Block) -> bool { + use rustc::hir::BlockCheckMode::*; + + match block.rules { + UnsafeBlock(_) | PushUnsafeBlock(_) => true, + // For PopUnsafeBlock, we don't actually know -- but we will always also check all + // parent blocks, so we can safely declare the PopUnsafeBlock to not be unsafe. + DefaultBlock | PopUnsafeBlock(_) => false, + } + } + + /// Decide if this FnLike is a closure + fn fn_is_closure<'a>(fn_like: FnLikeNode<'a>) -> bool { + match fn_like.kind() { + FnKind::Closure(_) => true, + FnKind::Method(..) | FnKind::ItemFn(..) 
=> false, + } + } + + let fn_like = match src { + MirSource::Fn(node_id) => { + match FnLikeNode::from_node(tcx.hir.get(node_id)) { + Some(fn_like) => fn_like, + None => return false, // e.g. struct ctor shims -- such auto-generated code cannot + // contain unsafe. + } + }, + _ => return false, // only functions can have unsafe + }; + + // Test if the function is marked unsafe. + if fn_like.unsafety() == hir::Unsafety::Unsafe { + return true; + } + + // For closures, we need to walk up the parents and see if we are inside an unsafe fn or + // unsafe block. + if fn_is_closure(fn_like) { + let mut cur = fn_like.id(); + loop { + // Go further upwards. + cur = tcx.hir.get_parent_node(cur); + let node = tcx.hir.get(cur); + // Check if this is an unsafe function + if let Some(fn_like) = FnLikeNode::from_node(node) { + if !fn_is_closure(fn_like) { + if fn_like.unsafety() == hir::Unsafety::Unsafe { + return true; + } + } + } + // Check if this is an unsafe block, or an item + match node { + Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..}) => { + if block_is_unsafe(&*block) { + // Found an unsafe block, we can bail out here. + return true; + } + } + Node::NodeItem(..) => { + // No walking up beyond items. This makes sure the loop always terminates. + break; + } + _ => {}, + } + } + } + + // Visit the entire body of the function and check for unsafe blocks in there + struct FindUnsafe { + found_unsafe: bool, + } + let mut finder = FindUnsafe { found_unsafe: false }; + // Run the visitor on the NodeId we got. Seems like there is no uniform way to do that. + finder.visit_body(tcx.hir.body(fn_like.body())); + + impl<'tcx> Visitor<'tcx> for FindUnsafe { + fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> { + intravisit::NestedVisitorMap::None + } + + fn visit_block(&mut self, b: &'tcx hir::Block) { + if self.found_unsafe { return; } // short-circuit + + if block_is_unsafe(b) { + // We found an unsafe block. 
We can stop searching. + self.found_unsafe = true; + } else { + // No unsafe block here, go on searching. + intravisit::walk_block(self, b); + } + } + } + + finder.found_unsafe +} + +impl MirPass for AddValidation { + fn run_pass<'a, 'tcx>(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, + mir: &mut Mir<'tcx>) + { + let emit_validate = tcx.sess.opts.debugging_opts.mir_emit_validate; + if emit_validate == 0 { + return; + } + let restricted_validation = emit_validate == 1 && fn_contains_unsafe(tcx, src); + let local_decls = mir.local_decls.clone(); // FIXME: Find a way to get rid of this clone. + + // Convert an lvalue to a validation operand. + let lval_to_operand = |lval: Lvalue<'tcx>| -> ValidationOperand<'tcx, Lvalue<'tcx>> { + let (re, mutbl) = lval_context(&lval, &local_decls, tcx); + let ty = lval.ty(&local_decls, tcx).to_ty(tcx); + ValidationOperand { lval, ty, re, mutbl } + }; + + // Emit an Acquire at the beginning of the given block. If we are in restricted emission + // mode (mir_emit_validate=1), also emit a Release immediately after the Acquire. + let emit_acquire = |block: &mut BasicBlockData<'tcx>, source_info, operands: Vec<_>| { + if operands.len() == 0 { + return; // Nothing to do + } + // Emit the release first, to avoid cloning if we do not emit it + if restricted_validation { + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, operands.clone()), + }; + block.statements.insert(0, release_stmt); + } + // Now, the acquire + let acquire_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, operands), + }; + block.statements.insert(0, acquire_stmt); + }; + + // PART 1 + // Add an AcquireValid at the beginning of the start block. + { + let source_info = SourceInfo { + scope: ARGUMENT_VISIBILITY_SCOPE, + span: mir.span, // FIXME: Consider using just the span covering the function + // argument declaration. + }; + // Gather all arguments, skip return value. 
+ let operands = mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count) + .map(|(local, _)| lval_to_operand(Lvalue::Local(local))).collect(); + emit_acquire(&mut mir.basic_blocks_mut()[START_BLOCK], source_info, operands); + } + + // PART 2 + // Add ReleaseValid/AcquireValid around function call terminators. We don't use a visitor + // because we need to access the block that a Call jumps to. + let mut returns : Vec<(SourceInfo, Lvalue<'tcx>, BasicBlock)> = Vec::new(); + for block_data in mir.basic_blocks_mut() { + match block_data.terminator { + Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, + source_info }) => { + // Before the call: Release all arguments *and* the return value. + // The callee may write into the return value! Note that this relies + // on "release of uninitialized" to be a NOP. + if !restricted_validation { + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, + destination.iter().map(|dest| lval_to_operand(dest.0.clone())) + .chain( + args.iter().filter_map(|op| { + match op { + &Operand::Consume(ref lval) => + Some(lval_to_operand(lval.clone())), + &Operand::Constant(..) => { None }, + } + }) + ).collect()) + }; + block_data.statements.push(release_stmt); + } + // Remember the return destination for later + if let &Some(ref destination) = destination { + returns.push((source_info, destination.0.clone(), destination.1)); + } + } + Some(Terminator { kind: TerminatorKind::Drop { location: ref lval, .. }, + source_info }) | + Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. }, + source_info }) => { + // Before the call: Release all arguments + if !restricted_validation { + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, + vec![lval_to_operand(lval.clone())]), + }; + block_data.statements.push(release_stmt); + } + // drop doesn't return anything, so we need no acquire. 
+ } + _ => { + // Not a block ending in a Call -> ignore. + } + } + } + // Now we go over the returns we collected to acquire the return values. + for (source_info, dest_lval, dest_block) in returns { + emit_acquire( + &mut mir.basic_blocks_mut()[dest_block], + source_info, + vec![lval_to_operand(dest_lval)] + ); + } + + if restricted_validation { + // No part 3 for us. + return; + } + + // PART 3 + // Add ReleaseValid/AcquireValid around Ref and Cast. Again an iterator does not seem very + // suited as we need to add new statements before and after each Ref. + for block_data in mir.basic_blocks_mut() { + // We want to insert statements around Ref commands as we iterate. To this end, we + // iterate backwards using indices. + for i in (0..block_data.statements.len()).rev() { + match block_data.statements[i].kind { + // When the borrow of this ref expires, we need to recover validation. + StatementKind::Assign(_, Rvalue::Ref(_, _, _)) => { + // Due to a lack of NLL; we can't capture anything directly here. + // Instead, we have to re-match and clone there. + let (dest_lval, re, src_lval) = match block_data.statements[i].kind { + StatementKind::Assign(ref dest_lval, + Rvalue::Ref(re, _, ref src_lval)) => { + (dest_lval.clone(), re, src_lval.clone()) + }, + _ => bug!("We already matched this."), + }; + // So this is a ref, and we got all the data we wanted. + // Do an acquire of the result -- but only what it points to, so add a Deref + // projection. + let dest_lval = Projection { base: dest_lval, elem: ProjectionElem::Deref }; + let dest_lval = Lvalue::Projection(Box::new(dest_lval)); + let acquire_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, + vec![lval_to_operand(dest_lval)]), + }; + block_data.statements.insert(i+1, acquire_stmt); + + // The source is released until the region of the borrow ends. 
+ let op = match re { + &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), + &RegionKind::ReErased => + bug!("AddValidation pass must be run before erasing lifetimes"), + _ => ValidationOp::Release, + }; + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(op, vec![lval_to_operand(src_lval)]), + }; + block_data.statements.insert(i, release_stmt); + } + // Casts can change what validation does (e.g. unsizing) + StatementKind::Assign(_, Rvalue::Cast(kind, Operand::Consume(_), _)) + if kind != CastKind::Misc => + { + // Due to a lack of NLL; we can't capture anything directly here. + // Instead, we have to re-match and clone there. + let (dest_lval, src_lval) = match block_data.statements[i].kind { + StatementKind::Assign(ref dest_lval, + Rvalue::Cast(_, Operand::Consume(ref src_lval), _)) => + { + (dest_lval.clone(), src_lval.clone()) + }, + _ => bug!("We already matched this."), + }; + + // Acquire of the result + let acquire_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, + vec![lval_to_operand(dest_lval)]), + }; + block_data.statements.insert(i+1, acquire_stmt); + + // Release of the input + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Release, + vec![lval_to_operand(src_lval)]), + }; + block_data.statements.insert(i, release_stmt); + } + _ => {}, + } + } + } + } +} diff --git a/src/librustc_mir/transform/clean_end_regions.rs b/src/librustc_mir/transform/clean_end_regions.rs index 36125f94543..f06b88551d1 100644 --- a/src/librustc_mir/transform/clean_end_regions.rs +++ b/src/librustc_mir/transform/clean_end_regions.rs @@ -24,8 +24,8 @@ use rustc_data_structures::fx::FxHashSet; use rustc::middle::region::CodeExtent; use rustc::mir::transform::{MirPass, MirSource}; use rustc::mir::{BasicBlock, Location, Mir, Rvalue, Statement, 
StatementKind}; -use rustc::mir::visit::{MutVisitor, Visitor}; -use rustc::ty::{RegionKind, TyCtxt}; +use rustc::mir::visit::{MutVisitor, Visitor, Lookup}; +use rustc::ty::{Ty, RegionKind, TyCtxt}; pub struct CleanEndRegions; @@ -42,7 +42,9 @@ impl MirPass for CleanEndRegions { _tcx: TyCtxt<'a, 'tcx, 'tcx>, _source: MirSource, mir: &mut Mir<'tcx>) { - let mut gather = GatherBorrowedRegions { seen_regions: FxHashSet() }; + let mut gather = GatherBorrowedRegions { + seen_regions: FxHashSet() + }; gather.visit_mir(mir); let mut delete = DeleteTrivialEndRegions { seen_regions: &mut gather.seen_regions }; @@ -54,6 +56,7 @@ impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + // Gather regions that are used for borrows if let Rvalue::Ref(r, _, _) = *rvalue { if let RegionKind::ReScope(ce) = *r { self.seen_regions.insert(ce); @@ -61,6 +64,17 @@ impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { } self.super_rvalue(rvalue, location); } + + fn visit_ty(&mut self, ty: &Ty<'tcx>, _: Lookup) { + // Gather regions that occur in types + for re in ty.walk().flat_map(|t| t.regions()) { + match *re { + RegionKind::ReScope(ce) => { self.seen_regions.insert(ce); } + _ => {}, + } + } + self.super_ty(ty); + } } impl<'a, 'tcx> MutVisitor<'tcx> for DeleteTrivialEndRegions<'a> { diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index da9032685e0..baf0522896c 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -11,6 +11,8 @@ //! This pass erases all early-bound regions from the types occuring in the MIR. //! We want to do this once just before trans, so trans does not have to take //! care erasing regions all over the place. +//! NOTE: We do NOT erase regions of statements that are relevant for +//! "types-as-contracts"-validation, namely, AcquireValid, ReleaseValid, and EndRegion. 
use rustc::ty::subst::Substs; use rustc::ty::{Ty, TyCtxt, ClosureSubsts}; @@ -20,20 +22,24 @@ use rustc::mir::transform::{MirPass, MirSource}; struct EraseRegionsVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, + in_validation_statement: bool, } impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { EraseRegionsVisitor { - tcx: tcx + tcx: tcx, + in_validation_statement: false, } } } impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { fn visit_ty(&mut self, ty: &mut Ty<'tcx>, _: Lookup) { - let old_ty = *ty; - *ty = self.tcx.erase_regions(&old_ty); + if !self.in_validation_statement { + *ty = self.tcx.erase_regions(&{*ty}); + } + self.super_ty(ty); } fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { @@ -71,10 +77,20 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { block: BasicBlock, statement: &mut Statement<'tcx>, location: Location) { - if let StatementKind::EndRegion(_) = statement.kind { - statement.kind = StatementKind::Nop; + // Do NOT delete EndRegion if validation statements are emitted. + // Validation needs EndRegion. + if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 { + if let StatementKind::EndRegion(_) = statement.kind { + statement.kind = StatementKind::Nop; + } } + + self.in_validation_statement = match statement.kind { + StatementKind::Validate(..) 
=> true, + _ => false, + }; self.super_statement(block, statement, location); + self.in_validation_statement = false; } } diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index 159d3a1f8eb..555bea63c27 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -24,6 +24,7 @@ use syntax::ast; use syntax_pos::{DUMMY_SP, Span}; use transform; +pub mod add_validation; pub mod clean_end_regions; pub mod simplify_branches; pub mod simplify; diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 3ae0f8f2e82..4f0d6a51bfd 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -910,6 +910,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { StatementKind::StorageDead(_) | StatementKind::InlineAsm {..} | StatementKind::EndRegion(_) | + StatementKind::Validate(..) | StatementKind::Nop => {} } }); diff --git a/src/librustc_mir/transform/rustc_peek.rs b/src/librustc_mir/transform/rustc_peek.rs index 5918de0c688..268e7a4c185 100644 --- a/src/librustc_mir/transform/rustc_peek.rs +++ b/src/librustc_mir/transform/rustc_peek.rs @@ -161,6 +161,7 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) | mir::StatementKind::Nop => continue, mir::StatementKind::SetDiscriminant{ .. 
} => span_bug!(stmt.source_info.span, diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs index d5b79c0d1c3..a1d56ccd874 100644 --- a/src/librustc_mir/transform/simplify.rs +++ b/src/librustc_mir/transform/simplify.rs @@ -105,6 +105,8 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { } pub fn simplify(mut self) { + self.strip_nops(); + loop { let mut changed = false; @@ -141,8 +143,6 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { if !changed { break } } - - self.strip_nops() } // Collapse a goto chain starting from `start` diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index fc923f1831c..888e5be10dd 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -432,6 +432,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } StatementKind::InlineAsm { .. } | StatementKind::EndRegion(_) | + StatementKind::Validate(..) | StatementKind::Nop => {} } } diff --git a/src/librustc_mir/util/liveness.rs b/src/librustc_mir/util/liveness.rs index 27997b3abc3..946e9ff4747 100644 --- a/src/librustc_mir/util/liveness.rs +++ b/src/librustc_mir/util/liveness.rs @@ -65,6 +65,7 @@ impl<'tcx> Visitor<'tcx> for BlockInfoVisitor { LvalueContext::Inspect | LvalueContext::Consume | + LvalueContext::Validate | // We consider drops to always be uses of locals. 
// Drop eloboration should be run before this analysis otherwise diff --git a/src/librustc_passes/diagnostics.rs b/src/librustc_passes/diagnostics.rs index 464dd72e569..907a258a12d 100644 --- a/src/librustc_passes/diagnostics.rs +++ b/src/librustc_passes/diagnostics.rs @@ -221,7 +221,7 @@ while break {} To fix this, add a label specifying which loop is being broken out of: ``` -`foo: while break `foo {} +'foo: while break 'foo {} ``` "## } diff --git a/src/librustc_passes/lib.rs b/src/librustc_passes/lib.rs index 3949152e848..ed5ea69d04e 100644 --- a/src/librustc_passes/lib.rs +++ b/src/librustc_passes/lib.rs @@ -45,3 +45,5 @@ pub mod loops; pub mod mir_stats; pub mod no_asm; pub mod static_recursion; + +__build_diagnostic_array! { librustc_passes, DIAGNOSTICS } diff --git a/src/librustc_passes/mir_stats.rs b/src/librustc_passes/mir_stats.rs index 45be49dc197..aff792f10ce 100644 --- a/src/librustc_passes/mir_stats.rs +++ b/src/librustc_passes/mir_stats.rs @@ -126,6 +126,7 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { self.record(match statement.kind { StatementKind::Assign(..) => "StatementKind::Assign", StatementKind::EndRegion(..) => "StatementKind::EndRegion", + StatementKind::Validate(..) => "StatementKind::Validate", StatementKind::SetDiscriminant { .. } => "StatementKind::SetDiscriminant", StatementKind::StorageLive(..) => "StatementKind::StorageLive", StatementKind::StorageDead(..) 
=> "StatementKind::StorageDead", diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 31b642b4055..60074cce2b9 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -27,6 +27,121 @@ pub fn find(name: &str) -> Option<Intrinsic> { output: &::I32x4, definition: Named("llvm.ppc.altivec.vperm") }, + "_vec_mradds" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vmhraddshs") + }, + "_vec_cmpb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpbfp") + }, + "_vec_cmpeqb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vcmpequb") + }, + "_vec_cmpeqh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vcmpequh") + }, + "_vec_cmpeqw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpequw") + }, + "_vec_cmpgtub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vcmpgtub") + }, + "_vec_cmpgtuh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vcmpgtuh") + }, + "_vec_cmpgtuw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpgtuw") + }, + "_vec_cmpgtsb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, 
&::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vcmpgtsb") + }, + "_vec_cmpgtsh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vcmpgtsh") + }, + "_vec_cmpgtsw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpgtsw") + }, + "_vec_maxsb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vmaxsb") + }, + "_vec_maxub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.ppc.altivec.vmaxub") + }, + "_vec_maxsh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vmaxsh") + }, + "_vec_maxuh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.ppc.altivec.vmaxuh") + }, + "_vec_maxsw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vmaxsw") + }, + "_vec_maxuw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vmaxuw") + }, + "_vec_minsb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vminsb") + }, + "_vec_minub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.ppc.altivec.vminub") + }, + "_vec_minsh" => Intrinsic { + inputs: { static INPUTS: 
[&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vminsh") + }, + "_vec_minuh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.ppc.altivec.vminuh") + }, + "_vec_minsw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vminsw") + }, + "_vec_minuw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vminuw") + }, _ => return None, }) } diff --git a/src/librustc_plugin/lib.rs b/src/librustc_plugin/lib.rs index 1de31c5d791..e17a3c82b50 100644 --- a/src/librustc_plugin/lib.rs +++ b/src/librustc_plugin/lib.rs @@ -84,3 +84,5 @@ pub mod diagnostics; pub mod registry; pub mod load; pub mod build; + +__build_diagnostic_array! { librustc_plugin, DIAGNOSTICS } diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 88013b45a05..2317e36a0ab 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -546,7 +546,7 @@ impl<'a> PathSource<'a> { } } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Namespace { TypeNS, ValueNS, @@ -898,6 +898,19 @@ impl<'a> ModuleData<'a> { } } + fn for_each_child_stable<F: FnMut(Ident, Namespace, &'a NameBinding<'a>)>(&self, mut f: F) { + let resolutions = self.resolutions.borrow(); + let mut resolutions = resolutions.iter().map(|(&(ident, ns), &resolution)| { + // Pre-compute keys for sorting + (ident.name.as_str(), ns, ident, resolution) + }) + .collect::<Vec<_>>(); + resolutions.sort_unstable_by_key(|&(str, ns, ..)| (str, ns)); + for &(_, ns, ident, resolution) in resolutions.iter() { + resolution.borrow().binding.map(|binding| f(ident, ns, binding)); + } + } + fn 
def(&self) -> Option<Def> { match self.kind { ModuleKind::Def(def, _) => Some(def), @@ -2277,8 +2290,9 @@ impl<'a> Resolver<'a> { false, pat.span) .and_then(LexicalScopeBinding::item); let resolution = binding.map(NameBinding::def).and_then(|def| { + let ivmode = BindingMode::ByValue(Mutability::Immutable); let always_binding = !pat_src.is_refutable() || opt_pat.is_some() || - bmode != BindingMode::ByValue(Mutability::Immutable); + bmode != ivmode; match def { Def::StructCtor(_, CtorKind::Const) | Def::VariantCtor(_, CtorKind::Const) | @@ -3351,8 +3365,9 @@ impl<'a> Resolver<'a> { in_module_is_extern)) = worklist.pop() { self.populate_module_if_necessary(in_module); - in_module.for_each_child(|ident, ns, name_binding| { - + // We have to visit module children in deterministic order to avoid + // instabilities in reported imports (#43552). + in_module.for_each_child_stable(|ident, ns, name_binding| { // avoid imports entirely if name_binding.is_import() && !name_binding.is_extern_crate() { return; } // avoid non-importable candidates as well diff --git a/src/librustc_save_analysis/Cargo.toml b/src/librustc_save_analysis/Cargo.toml index 2a51bf9430e..aa249af363f 100644 --- a/src/librustc_save_analysis/Cargo.toml +++ b/src/librustc_save_analysis/Cargo.toml @@ -11,10 +11,11 @@ crate-type = ["dylib"] [dependencies] log = "0.3" rustc = { path = "../librustc" } +rustc_data_structures = { path = "../librustc_data_structures" } rustc_typeck = { path = "../librustc_typeck" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } -rls-data = "0.9" +rls-data = "0.10" rls-span = "0.4" # FIXME(#40527) should move rustc serialize out of tree rustc-serialize = "0.3" diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index ebdd99dc802..4740f9a0d5a 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -29,6 +29,7 @@ use rustc::hir::def_id::DefId; use 
rustc::hir::map::Node; use rustc::session::Session; use rustc::ty::{self, TyCtxt}; +use rustc_data_structures::fx::FxHashSet; use std::path::Path; @@ -74,6 +75,7 @@ pub struct DumpVisitor<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> { // we only write one macro def per unique macro definition, and // one macro use per unique callsite span. // mac_defs: HashSet<Span>, + macro_calls: FxHashSet<Span>, } impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { @@ -89,6 +91,7 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { span: span_utils.clone(), cur_scope: CRATE_NODE_ID, // mac_defs: HashSet::new(), + macro_calls: FxHashSet(), } } @@ -557,14 +560,21 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { let (value, fields) = if let ast::ItemKind::Struct(ast::VariantData::Struct(ref fields, _), _) = item.node { - let fields_str = fields.iter() - .enumerate() - .map(|(i, f)| f.ident.map(|i| i.to_string()) - .unwrap_or(i.to_string())) - .collect::<Vec<_>>() - .join(", "); - (format!("{} {{ {} }}", name, fields_str), - fields.iter().map(|f| ::id_from_node_id(f.id, &self.save_ctxt)).collect()) + let include_priv_fields = !self.save_ctxt.config.pub_only; + let fields_str = fields + .iter() + .enumerate() + .filter_map(|(i, f)| { + if include_priv_fields || f.vis == ast::Visibility::Public { + f.ident.map(|i| i.to_string()).or_else(|| Some(i.to_string())) + } else { + None + } + }) + .collect::<Vec<_>>() + .join(", "); + let value = format!("{} {{ {} }}", name, fields_str); + (value, fields.iter().map(|f| ::id_from_node_id(f.id, &self.save_ctxt)).collect()) } else { (String::new(), vec![]) }; @@ -972,11 +982,19 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { /// callsite spans to record macro definition and use data, using the /// mac_uses and mac_defs sets to prevent multiples. 
fn process_macro_use(&mut self, span: Span) { + let source_span = span.source_callsite(); + if self.macro_calls.contains(&source_span) { + return; + } + self.macro_calls.insert(source_span); + let data = match self.save_ctxt.get_macro_use_data(span) { None => return, Some(data) => data, }; + self.dumper.macro_use(data); + // FIXME write the macro def // let mut hasher = DefaultHasher::new(); // data.callee_span.hash(&mut hasher); @@ -996,7 +1014,6 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { // }.lower(self.tcx)); // } // } - self.dumper.macro_use(data); } fn process_trait_item(&mut self, trait_item: &'l ast::TraitItem, trait_id: DefId) { diff --git a/src/librustc_save_analysis/json_dumper.rs b/src/librustc_save_analysis/json_dumper.rs index 60cec4c5e46..30a698e6351 100644 --- a/src/librustc_save_analysis/json_dumper.rs +++ b/src/librustc_save_analysis/json_dumper.rs @@ -51,7 +51,11 @@ impl<'b> DumpOutput for CallbackOutput<'b> { impl<'b, W: Write> JsonDumper<WriteOutput<'b, W>> { pub fn new(writer: &'b mut W, config: Config) -> JsonDumper<WriteOutput<'b, W>> { - JsonDumper { output: WriteOutput { output: writer }, config, result: Analysis::new() } + JsonDumper { + output: WriteOutput { output: writer }, + config: config.clone(), + result: Analysis::new(config) + } } } @@ -61,8 +65,8 @@ impl<'b> JsonDumper<CallbackOutput<'b>> { -> JsonDumper<CallbackOutput<'b>> { JsonDumper { output: CallbackOutput { callback: callback }, - config, - result: Analysis::new(), + config: config.clone(), + result: Analysis::new(config), } } } diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs index c9489aac981..1dd0df4108f 100644 --- a/src/librustc_save_analysis/lib.rs +++ b/src/librustc_save_analysis/lib.rs @@ -23,6 +23,7 @@ #[macro_use] extern crate log; #[macro_use] extern crate syntax; +extern crate rustc_data_structures; extern crate rustc_serialize; extern crate rustc_typeck; extern crate syntax_pos; diff --git 
a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs index 77cde33e962..e771da2ed4c 100644 --- a/src/librustc_save_analysis/span_utils.rs +++ b/src/librustc_save_analysis/span_utils.rs @@ -398,9 +398,10 @@ impl<'a> SpanUtils<'a> { return false; } // If sub_span is none, filter out generated code. - if sub_span.is_none() { - return true; - } + let sub_span = match sub_span { + Some(ss) => ss, + None => return true, + }; //If the span comes from a fake filemap, filter it. if !self.sess.codemap().lookup_char_pos(parent.lo).file.is_real_file() { @@ -409,7 +410,7 @@ impl<'a> SpanUtils<'a> { // Otherwise, a generated span is deemed invalid if it is not a sub-span of the root // callsite. This filters out macro internal variables and most malformed spans. - !parent.source_callsite().contains(parent) + !parent.source_callsite().contains(sub_span) } } diff --git a/src/librustc_trans/Cargo.toml b/src/librustc_trans/Cargo.toml index c7db2a9a8ae..ed9321cc3f3 100644 --- a/src/librustc_trans/Cargo.toml +++ b/src/librustc_trans/Cargo.toml @@ -10,7 +10,7 @@ crate-type = ["dylib"] test = false [dependencies] -crossbeam = "0.2" +num_cpus = "1.0" flate2 = "0.2" jobserver = "0.1.5" log = "0.3" diff --git a/src/librustc_trans/assert_module_sources.rs b/src/librustc_trans/assert_module_sources.rs index b5ef4aac34c..6e661a5a8c6 100644 --- a/src/librustc_trans/assert_module_sources.rs +++ b/src/librustc_trans/assert_module_sources.rs @@ -37,11 +37,22 @@ use rustc::ich::{ATTR_PARTITION_REUSED, ATTR_PARTITION_TRANSLATED}; const MODULE: &'static str = "module"; const CFG: &'static str = "cfg"; -#[derive(Debug, PartialEq)] -enum Disposition { Reused, Translated } +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum Disposition { Reused, Translated } + +impl ModuleTranslation { + pub fn disposition(&self) -> (String, Disposition) { + let disposition = match self.source { + ModuleSource::Preexisting(_) => Disposition::Reused, + ModuleSource::Translated(_) => 
Disposition::Translated, + }; + + (self.name.clone(), disposition) + } +} pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - modules: &[ModuleTranslation]) { + modules: &[(String, Disposition)]) { let _ignore = tcx.dep_graph.in_ignore(); if tcx.sess.opts.incremental.is_none() { @@ -56,7 +67,7 @@ pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, struct AssertModuleSource<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - modules: &'a [ModuleTranslation], + modules: &'a [(String, Disposition)], } impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { @@ -75,15 +86,15 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } let mname = self.field(attr, MODULE); - let mtrans = self.modules.iter().find(|mtrans| *mtrans.name == *mname.as_str()); + let mtrans = self.modules.iter().find(|&&(ref name, _)| name == mname.as_str()); let mtrans = match mtrans { Some(m) => m, None => { debug!("module name `{}` not found amongst:", mname); - for mtrans in self.modules { + for &(ref name, ref disposition) in self.modules { debug!("module named `{}` with disposition {:?}", - mtrans.name, - self.disposition(mtrans)); + name, + disposition); } self.tcx.sess.span_err( @@ -93,7 +104,7 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } }; - let mtrans_disposition = self.disposition(mtrans); + let mtrans_disposition = mtrans.1; if disposition != mtrans_disposition { self.tcx.sess.span_err( attr.span, @@ -104,13 +115,6 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } } - fn disposition(&self, mtrans: &ModuleTranslation) -> Disposition { - match mtrans.source { - ModuleSource::Preexisting(_) => Disposition::Reused, - ModuleSource::Translated(_) => Disposition::Translated, - } - } - fn field(&self, attr: &ast::Attribute, name: &str) -> ast::Name { for item in attr.meta_item_list().unwrap_or_else(Vec::new) { if item.check_name(name) { diff --git a/src/librustc_trans/back/archive.rs b/src/librustc_trans/back/archive.rs index 902065c8688..6ec40bd689c 100644 
--- a/src/librustc_trans/back/archive.rs +++ b/src/librustc_trans/back/archive.rs @@ -126,7 +126,7 @@ impl<'a> ArchiveBuilder<'a> { Some(ref src) => src, None => return None, }; - self.src_archive = Some(ArchiveRO::open(src)); + self.src_archive = Some(ArchiveRO::open(src).ok()); self.src_archive.as_ref().unwrap().as_ref() } @@ -186,9 +186,8 @@ impl<'a> ArchiveBuilder<'a> { where F: FnMut(&str) -> bool + 'static { let archive = match ArchiveRO::open(archive) { - Some(ar) => ar, - None => return Err(io::Error::new(io::ErrorKind::Other, - "failed to open archive")), + Ok(ar) => ar, + Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), }; self.additions.push(Addition::Archive { archive: archive, diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index feed127b0b6..3e2d9f5c32e 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -12,7 +12,7 @@ use back::link; use back::write; use back::symbol_export; use rustc::session::config; -use errors::FatalError; +use errors::{FatalError, Handler}; use llvm; use llvm::archive_ro::ArchiveRO; use llvm::{ModuleRef, TargetMachineRef, True, False}; @@ -27,6 +27,7 @@ use flate2::read::DeflateDecoder; use std::io::Read; use std::ffi::CString; use std::path::Path; +use std::ptr::read_unaligned; pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { match crate_type { @@ -41,24 +42,24 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } pub fn run(cgcx: &CodegenContext, + diag_handler: &Handler, llmod: ModuleRef, tm: TargetMachineRef, config: &ModuleConfig, temp_no_opt_bc_filename: &Path) -> Result<(), FatalError> { - let handler = cgcx.handler; if cgcx.opts.cg.prefer_dynamic { - handler.struct_err("cannot prefer dynamic linking when performing LTO") - .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ - supported with LTO") - .emit(); + diag_handler.struct_err("cannot prefer dynamic linking when performing LTO") + .note("only 
'staticlib', 'bin', and 'cdylib' outputs are \ + supported with LTO") + .emit(); return Err(FatalError) } // Make sure we actually can run LTO for crate_type in cgcx.crate_types.iter() { if !crate_type_allows_lto(*crate_type) { - let e = handler.fatal("lto can only be run for executables, cdylibs and \ - static library outputs"); + let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \ + static library outputs"); return Err(e) } } @@ -116,13 +117,13 @@ pub fn run(cgcx: &CodegenContext, if res.is_err() { let msg = format!("failed to decompress bc of `{}`", name); - Err(handler.fatal(&msg)) + Err(diag_handler.fatal(&msg)) } else { Ok(inflated) } } else { - Err(handler.fatal(&format!("Unsupported bytecode format version {}", - version))) + Err(diag_handler.fatal(&format!("Unsupported bytecode format version {}", + version))) } })? } else { @@ -136,7 +137,7 @@ pub fn run(cgcx: &CodegenContext, if res.is_err() { let msg = format!("failed to decompress bc of `{}`", name); - Err(handler.fatal(&msg)) + Err(diag_handler.fatal(&msg)) } else { Ok(inflated) } @@ -152,7 +153,7 @@ pub fn run(cgcx: &CodegenContext, Ok(()) } else { let msg = format!("failed to load bc of `{}`", name); - Err(write::llvm_err(handler, msg)) + Err(write::llvm_err(&diag_handler, msg)) } })?; } @@ -223,13 +224,13 @@ fn is_versioned_bytecode_format(bc: &[u8]) -> bool { fn extract_bytecode_format_version(bc: &[u8]) -> u32 { let pos = link::RLIB_BYTECODE_OBJECT_VERSION_OFFSET; let byte_data = &bc[pos..pos + 4]; - let data = unsafe { *(byte_data.as_ptr() as *const u32) }; + let data = unsafe { read_unaligned(byte_data.as_ptr() as *const u32) }; u32::from_le(data) } fn extract_compressed_bytecode_size_v1(bc: &[u8]) -> u64 { let pos = link::RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET; let byte_data = &bc[pos..pos + 8]; - let data = unsafe { *(byte_data.as_ptr() as *const u64) }; + let data = unsafe { read_unaligned(byte_data.as_ptr() as *const u64) }; u64::from_le(data) } diff --git 
a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 26553c85023..0d5fe6c0ae9 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -10,36 +10,42 @@ use back::lto; use back::link::{self, get_linker, remove}; +use back::linker::LinkerInfo; use back::symbol_export::ExportedSymbols; use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; +use rustc::middle::cstore::{LinkMeta, EncodedMetadata}; use rustc::session::config::{self, OutputFilenames, OutputType, OutputTypes, Passes, SomePasses, AllPasses, Sanitizer}; use rustc::session::Session; +use time_graph::{self, TimeGraph}; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; use llvm::SMDiagnosticRef; -use {CrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation}; +use {CrateTranslation, ModuleSource, ModuleTranslation, CompiledModule, ModuleKind}; use rustc::hir::def_id::CrateNum; -use rustc::util::common::{time, time_depth, set_time_depth, path2cstr}; -use rustc::util::fs::link_or_copy; +use rustc::util::common::{time, time_depth, set_time_depth, path2cstr, print_time_passes_entry}; +use rustc::util::fs::{link_or_copy, rename_or_copy_remove}; use errors::{self, Handler, Level, DiagnosticBuilder, FatalError}; -use errors::emitter::Emitter; +use errors::emitter::{Emitter}; use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; use context::{is_pie_binary, get_reloc_model}; use jobserver::{Client, Acquired}; -use crossbeam::{scope, Scope}; use rustc_demangle; -use std::cmp; use std::ffi::CString; +use std::fmt; use std::fs; use std::io; use std::io::Write; use std::path::{Path, PathBuf}; use std::str; -use std::sync::mpsc::{channel, Sender}; +use std::sync::Arc; +use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; +use std::time::Instant; +use std::thread; use libc::{c_uint, c_void, c_char, size_t}; pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] 
= [ @@ -190,7 +196,6 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef { /// Module-specific configuration for `optimize_and_codegen`. -#[derive(Clone)] pub struct ModuleConfig { /// LLVM TargetMachine to use for codegen. tm: TargetMachineRef, @@ -229,9 +234,9 @@ pub struct ModuleConfig { unsafe impl Send for ModuleConfig { } impl ModuleConfig { - fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig { + fn new(sess: &Session, passes: Vec<String>) -> ModuleConfig { ModuleConfig { - tm: tm, + tm: create_target_machine(sess), passes: passes, opt_level: None, opt_size: None, @@ -255,10 +260,10 @@ impl ModuleConfig { } } - fn set_flags(&mut self, sess: &Session, trans: &CrateTranslation) { + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { self.no_verify = sess.no_verify(); self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; - self.no_builtins = trans.no_builtins; + self.no_builtins = no_builtins; self.time_passes = sess.time_passes(); self.inline_threshold = sess.opts.cg.inline_threshold; self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode; @@ -279,20 +284,55 @@ impl ModuleConfig { self.merge_functions = sess.opts.optimize == config::OptLevel::Default || sess.opts.optimize == config::OptLevel::Aggressive; } + + fn clone(&self, sess: &Session) -> ModuleConfig { + ModuleConfig { + tm: create_target_machine(sess), + passes: self.passes.clone(), + opt_level: self.opt_level, + opt_size: self.opt_size, + + emit_no_opt_bc: self.emit_no_opt_bc, + emit_bc: self.emit_bc, + emit_lto_bc: self.emit_lto_bc, + emit_ir: self.emit_ir, + emit_asm: self.emit_asm, + emit_obj: self.emit_obj, + obj_is_bitcode: self.obj_is_bitcode, + + no_verify: self.no_verify, + no_prepopulate_passes: self.no_prepopulate_passes, + no_builtins: self.no_builtins, + time_passes: self.time_passes, + vectorize_loop: self.vectorize_loop, + vectorize_slp: self.vectorize_slp, + merge_functions: self.merge_functions, + inline_threshold: 
self.inline_threshold, + } + } +} + +impl Drop for ModuleConfig { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustDisposeTargetMachine(self.tm); + } + } } /// Additional resources used by optimize_and_codegen (not module specific) -pub struct CodegenContext<'a> { +#[derive(Clone)] +pub struct CodegenContext { // Resouces needed when running LTO pub time_passes: bool, pub lto: bool, pub no_landing_pads: bool, - pub exported_symbols: &'a ExportedSymbols, - pub opts: &'a config::Options, + pub exported_symbols: Arc<ExportedSymbols>, + pub opts: Arc<config::Options>, pub crate_types: Vec<config::CrateType>, pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, // Handler to use for diagnostics produced during codegen. - pub handler: &'a Handler, + pub diag_emitter: SharedEmitter, // LLVM passes added by plugins. pub plugin_passes: Vec<String>, // LLVM optimizations for which we want to print remarks. @@ -303,17 +343,27 @@ pub struct CodegenContext<'a> { // compiling incrementally pub incr_comp_session_dir: Option<PathBuf>, // Channel back to the main control thread to send messages to - pub tx: Sender<Message>, + coordinator_send: Sender<Message>, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. 
+ time_graph: Option<TimeGraph>, +} + +impl CodegenContext { + fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } } struct HandlerFreeVars<'a> { - cgcx: &'a CodegenContext<'a>, + cgcx: &'a CodegenContext, + diag_handler: &'a Handler, } -unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<'a>, +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, msg: &'b str, cookie: c_uint) { - drop(cgcx.tx.send(Message::InlineAsmError(cookie as u32, msg.to_string()))); + cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); } unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, @@ -328,7 +378,7 @@ unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, } unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_void) { - let HandlerFreeVars { cgcx, .. } = *(user as *const HandlerFreeVars); + let HandlerFreeVars { cgcx, diag_handler, .. } = *(user as *const HandlerFreeVars); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -344,7 +394,7 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo }; if enabled { - cgcx.handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}", + diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}", opt.kind.describe(), opt.pass_name, opt.filename, @@ -360,25 +410,32 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo // Unsafe due to LLVM calls. 
unsafe fn optimize_and_codegen(cgcx: &CodegenContext, + diag_handler: &Handler, mtrans: ModuleTranslation, - mllvm: ModuleLlvm, config: ModuleConfig, output_names: OutputFilenames) - -> Result<(), FatalError> + -> Result<CompiledModule, FatalError> { - let llmod = mllvm.llmod; - let llcx = mllvm.llcx; + let (llmod, llcx) = match mtrans.source { + ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx), + ModuleSource::Preexisting(_) => { + bug!("optimize_and_codegen: called with ModuleSource::Preexisting") + } + }; + let tm = config.tm; let fv = HandlerFreeVars { cgcx: cgcx, + diag_handler: diag_handler, }; let fv = &fv as *const HandlerFreeVars as *mut c_void; llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, fv); llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, fv); - let module_name = Some(&mtrans.name[..]); + let module_name = mtrans.name.clone(); + let module_name = Some(&module_name[..]); if config.emit_no_opt_bc { let out = output_names.temp_path_ext("no-opt.bc", module_name); @@ -406,7 +463,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llvm::PassKind::Function => fpm, llvm::PassKind::Module => mpm, llvm::PassKind::Other => { - cgcx.handler.err("Encountered LLVM pass kind we can't handle"); + diag_handler.err("Encountered LLVM pass kind we can't handle"); return true }, }; @@ -426,25 +483,25 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, for pass in &config.passes { if !addpass(pass) { - cgcx.handler.warn(&format!("unknown pass `{}`, ignoring", + diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass)); } } for pass in &cgcx.plugin_passes { if !addpass(pass) { - cgcx.handler.err(&format!("a plugin asked for LLVM pass \ + diag_handler.err(&format!("a plugin asked for LLVM pass \ `{}` but LLVM does not \ recognize it", pass)); } } - cgcx.handler.abort_if_errors(); + diag_handler.abort_if_errors(); // Finally, run the actual optimization passes - time(config.time_passes, &format!("llvm 
function passes [{}]", cgcx.worker), || + time(config.time_passes, &format!("llvm function passes [{}]", module_name.unwrap()), || llvm::LLVMRustRunFunctionPassManager(fpm, llmod)); - time(config.time_passes, &format!("llvm module passes [{}]", cgcx.worker), || + time(config.time_passes, &format!("llvm module passes [{}]", module_name.unwrap()), || llvm::LLVMRunPassManager(mpm, llmod)); // Deallocate managers that we're now done with @@ -456,6 +513,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, let temp_no_opt_bc_filename = output_names.temp_path_ext("no-opt.lto.bc", module_name); lto::run(cgcx, + diag_handler, llmod, tm, &config, @@ -506,7 +564,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llvm::LLVMWriteBitcodeToFile(llmod, bc_out_c.as_ptr()); } - time(config.time_passes, &format!("codegen passes [{}]", cgcx.worker), + time(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()), || -> Result<(), FatalError> { if config.emit_ir { let out = output_names.temp_path(OutputType::LlvmAssembly, module_name); @@ -561,7 +619,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llmod }; with_codegen(tm, llmod, config.no_builtins, |cpm| { - write_output_file(cgcx.handler, tm, cpm, llmod, &path, + write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile) })?; if config.emit_obj { @@ -571,7 +629,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, if write_obj { with_codegen(tm, llmod, config.no_builtins, |cpm| { - write_output_file(cgcx.handler, tm, cpm, llmod, &obj_out, + write_output_file(diag_handler, tm, cpm, llmod, &obj_out, llvm::FileType::ObjectFile) })?; } @@ -582,67 +640,53 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, if copy_bc_to_obj { debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); if let Err(e) = link_or_copy(&bc_out, &obj_out) { - cgcx.handler.err(&format!("failed to copy bitcode to object file: {}", e)); + diag_handler.err(&format!("failed to copy bitcode 
to object file: {}", e)); } } if rm_bc { debug!("removing_bitcode {:?}", bc_out); if let Err(e) = fs::remove_file(&bc_out) { - cgcx.handler.err(&format!("failed to remove bitcode: {}", e)); + diag_handler.err(&format!("failed to remove bitcode: {}", e)); } } - llvm::LLVMRustDisposeTargetMachine(tm); - Ok(()) + Ok(mtrans.into_compiled_module(config.emit_obj, config.emit_bc)) } - -pub fn cleanup_llvm(trans: &CrateTranslation) { - for module in trans.modules.iter() { - unsafe { - match module.source { - ModuleSource::Translated(llvm) => { - llvm::LLVMDisposeModule(llvm.llmod); - llvm::LLVMContextDispose(llvm.llcx); - } - ModuleSource::Preexisting(_) => { - } - } - } - } +pub struct CompiledModules { + pub modules: Vec<CompiledModule>, + pub metadata_module: CompiledModule, + pub allocator_module: Option<CompiledModule>, } -pub fn run_passes(sess: &Session, - trans: &CrateTranslation, - output_types: &OutputTypes, - crate_output: &OutputFilenames) { - // It's possible that we have `codegen_units > 1` but only one item in - // `trans.modules`. We could theoretically proceed and do LTO in that - // case, but it would be confusing to have the validity of - // `-Z lto -C codegen-units=2` depend on details of the crate being - // compiled, so we complain regardless. - if sess.lto() && sess.opts.cg.codegen_units > 1 { - // This case is impossible to handle because LTO expects to be able - // to combine the entire crate and all its dependencies into a - // single compilation unit, but each codegen unit is in a separate - // LLVM context, so they can't easily be combined. 
- sess.fatal("can't perform LTO when using multiple codegen units"); - } - - // Sanity check - assert!(trans.modules.len() == sess.opts.cg.codegen_units || - sess.opts.debugging_opts.incremental.is_some() || - !sess.opts.output_types.should_trans() || - sess.opts.debugging_opts.no_trans); +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateTypeRlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} - let tm = create_target_machine(sess); +pub fn start_async_translation(sess: &Session, + crate_output: &OutputFilenames, + time_graph: Option<TimeGraph>, + crate_name: Symbol, + link: LinkMeta, + metadata: EncodedMetadata, + exported_symbols: Arc<ExportedSymbols>, + no_builtins: bool, + windows_subsystem: Option<String>, + linker_info: LinkerInfo, + no_integrated_as: bool) + -> OngoingCrateTranslation { + let output_types_override = if no_integrated_as { + OutputTypes::new(&[(OutputType::Assembly, None)]) + } else { + sess.opts.output_types.clone() + }; // Figure out what we actually need to build. - - let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(tm, vec![]); - let mut allocator_config = ModuleConfig::new(tm, vec![]); + let mut modules_config = ModuleConfig::new(sess, sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(sess, vec![]); + let mut allocator_config = ModuleConfig::new(sess, vec![]); if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { match *sanitizer { @@ -679,16 +723,11 @@ pub fn run_passes(sess: &Session, // Emit bitcode files for the crate if we're emitting an rlib. // Whenever an rlib is created, the bitcode is inserted into the // archive in order to allow LTO against it. 
- let needs_crate_bitcode = - sess.crate_types.borrow().contains(&config::CrateTypeRlib) && - sess.opts.output_types.contains_key(&OutputType::Exe); - let needs_crate_object = - sess.opts.output_types.contains_key(&OutputType::Exe); - if needs_crate_bitcode { + if need_crate_bitcode_for_rlib(sess) { modules_config.emit_bc = true; } - for output_type in output_types.keys() { + for output_type in output_types_override.keys() { match *output_type { OutputType::Bitcode => { modules_config.emit_bc = true; } OutputType::LlvmAssembly => { modules_config.emit_ir = true; } @@ -714,76 +753,86 @@ pub fn run_passes(sess: &Session, } } - modules_config.set_flags(sess, trans); - metadata_config.set_flags(sess, trans); - allocator_config.set_flags(sess, trans); - + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); - // Populate a buffer with a list of codegen threads. Items are processed in - // LIFO order, just because it's a tiny bit simpler that way. (The order - // doesn't actually matter.) - let mut work_items = Vec::with_capacity(1 + trans.modules.len()); - - { - let work = build_work_item(sess, - trans.metadata_module.clone(), - metadata_config.clone(), - crate_output.clone()); - work_items.push(work); - } - - if let Some(allocator) = trans.allocator_module.clone() { - let work = build_work_item(sess, - allocator, - allocator_config.clone(), - crate_output.clone()); - work_items.push(work); - } - - for mtrans in trans.modules.iter() { - let work = build_work_item(sess, - mtrans.clone(), - modules_config.clone(), - crate_output.clone()); - work_items.push(work); - } - - if sess.opts.debugging_opts.incremental_info { - dump_incremental_data(&trans); - } + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. 
+ metadata_config.time_passes = false; + allocator_config.time_passes = false; let client = sess.jobserver_from_env.clone().unwrap_or_else(|| { // Pick a "reasonable maximum" if we don't otherwise have a jobserver in // our environment, capping out at 32 so we don't take everything down // by hogging the process run queue. - let num_workers = cmp::min(work_items.len() - 1, 32); - Client::new(num_workers).expect("failed to create jobserver") - }); - scope(|scope| { - execute_work(sess, work_items, client, &trans.exported_symbols, scope); + Client::new(32).expect("failed to create jobserver") }); - // If in incr. comp. mode, preserve the `.o` files for potential re-use - for mtrans in trans.modules.iter() { + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (trans_worker_send, trans_worker_receive) = channel(); + let (coordinator_send, coordinator_receive) = channel(); + + let coordinator_thread = start_executing_work(sess, + shared_emitter, + trans_worker_send, + coordinator_send.clone(), + coordinator_receive, + client, + time_graph.clone(), + exported_symbols.clone()); + OngoingCrateTranslation { + crate_name, + link, + metadata, + exported_symbols, + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as, + + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + + time_graph, + output_filenames: crate_output.clone(), + coordinator_send, + trans_worker_receive, + shared_emitter_main, + future: coordinator_thread + } +} + +fn copy_module_artifacts_into_incr_comp_cache(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + if sess.opts.incremental.is_none() { + return; + } + + for module in compiled_modules.modules.iter() { let mut files = vec![]; - if modules_config.emit_obj { - let path = crate_output.temp_path(OutputType::Object, Some(&mtrans.name)); + if module.emit_obj { + let path = 
crate_output.temp_path(OutputType::Object, Some(&module.name)); files.push((OutputType::Object, path)); } - if modules_config.emit_bc { - let path = crate_output.temp_path(OutputType::Bitcode, Some(&mtrans.name)); + if module.emit_bc { + let path = crate_output.temp_path(OutputType::Bitcode, Some(&module.name)); files.push((OutputType::Bitcode, path)); } - save_trans_partition(sess, &mtrans.name, mtrans.symbol_name_hash, &files); + save_trans_partition(sess, &module.name, module.symbol_name_hash, &files); } +} - // All codegen is finished. - unsafe { - llvm::LLVMRustDisposeTargetMachine(tm); - } +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + let mut user_wants_bitcode = false; + let mut user_wants_objects = false; // Produce final compile outputs. let copy_gracefully = |from: &Path, to: &Path| { @@ -794,10 +843,10 @@ pub fn run_passes(sess: &Session, let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| { - if trans.modules.len() == 1 { + if compiled_modules.modules.len() == 1 { // 1) Only one codegen unit. In this case it's no difficulty // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&trans.modules[0].name[..]); + let module_name = Some(&compiled_modules.modules[0].name[..]); let path = crate_output.temp_path(output_type, module_name); copy_gracefully(&path, &crate_output.path(output_type)); @@ -834,9 +883,7 @@ pub fn run_passes(sess: &Session, // Flag to indicate whether the user explicitly requested bitcode. // Otherwise, we produced it only as a temporary output, and will need // to get rid of it. 
- let mut user_wants_bitcode = false; - let mut user_wants_objects = false; - for output_type in output_types.keys() { + for output_type in crate_output.outputs.keys() { match *output_type { OutputType::Bitcode => { user_wants_bitcode = true; @@ -861,7 +908,6 @@ pub fn run_passes(sess: &Session, OutputType::DepInfo => {} } } - let user_wants_bitcode = user_wants_bitcode; // Clean up unwanted temporary files. @@ -893,33 +939,39 @@ pub fn run_passes(sess: &Session, // If you change how this works, also update back::link::link_rlib, // where .#module-name#.bc files are (maybe) deleted after making an // rlib. + let needs_crate_bitcode = need_crate_bitcode_for_rlib(sess); + let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + let keep_numbered_bitcode = needs_crate_bitcode || (user_wants_bitcode && sess.opts.cg.codegen_units > 1); let keep_numbered_objects = needs_crate_object || (user_wants_objects && sess.opts.cg.codegen_units > 1); - for module_name in trans.modules.iter().map(|m| Some(&m.name[..])) { - if modules_config.emit_obj && !keep_numbered_objects { + for module in compiled_modules.modules.iter() { + let module_name = Some(&module.name[..]); + + if module.emit_obj && !keep_numbered_objects { let path = crate_output.temp_path(OutputType::Object, module_name); remove(sess, &path); } - if modules_config.emit_bc && !keep_numbered_bitcode { + if module.emit_bc && !keep_numbered_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, module_name); remove(sess, &path); } } - if metadata_config.emit_bc && !user_wants_bitcode { + if compiled_modules.metadata_module.emit_bc && !user_wants_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, - Some(&trans.metadata_module.name)); + Some(&compiled_modules.metadata_module.name)); remove(sess, &path); } - if allocator_config.emit_bc && !user_wants_bitcode { - if let Some(ref module) = trans.allocator_module { + + if let Some(ref allocator_module) = 
compiled_modules.allocator_module { + if allocator_module.emit_bc && !user_wants_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, - Some(&module.name)); + Some(&allocator_module.name)); remove(sess, &path); } } @@ -930,20 +982,13 @@ pub fn run_passes(sess: &Session, // - #crate#.crate.metadata.o // - #crate#.bc // These are used in linking steps and will be cleaned up afterward. - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } } -fn dump_incremental_data(trans: &CrateTranslation) { +pub fn dump_incremental_data(trans: &CrateTranslation) { let mut reuse = 0; for mtrans in trans.modules.iter() { - match mtrans.source { - ModuleSource::Preexisting(..) => reuse += 1, - ModuleSource::Translated(..) => (), + if mtrans.pre_existing { + reuse += 1; } } eprintln!("incremental: re-using {} out of {} modules", reuse, trans.modules.len()); @@ -955,14 +1000,17 @@ struct WorkItem { output_names: OutputFilenames } -fn build_work_item(sess: &Session, - mtrans: ModuleTranslation, +impl fmt::Debug for WorkItem { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "WorkItem({})", self.mtrans.name) + } +} + +fn build_work_item(mtrans: ModuleTranslation, config: ModuleConfig, output_names: OutputFilenames) -> WorkItem { - let mut config = config; - config.tm = create_target_machine(sess); WorkItem { mtrans: mtrans, config: config, @@ -971,70 +1019,98 @@ fn build_work_item(sess: &Session, } fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) - -> Result<(), FatalError> + -> Result<CompiledModule, FatalError> { - unsafe { - match work_item.mtrans.source { - ModuleSource::Translated(mllvm) => { - debug!("llvm-optimizing {:?}", work_item.mtrans.name); - optimize_and_codegen(cgcx, - work_item.mtrans, - mllvm, - work_item.config, - work_item.output_names)?; - } - 
ModuleSource::Preexisting(wp) => { - let incr_comp_session_dir = cgcx.incr_comp_session_dir - .as_ref() - .unwrap(); - let name = &work_item.mtrans.name; - for (kind, saved_file) in wp.saved_files { - let obj_out = work_item.output_names.temp_path(kind, Some(name)); - let source_file = in_incr_comp_dir(&incr_comp_session_dir, - &saved_file); - debug!("copying pre-existing module `{}` from {:?} to {}", - work_item.mtrans.name, - source_file, - obj_out.display()); - match link_or_copy(&source_file, &obj_out) { - Ok(_) => { } - Err(err) => { - cgcx.handler.err(&format!("unable to copy {} to {}: {}", - source_file.display(), - obj_out.display(), - err)); - } - } + let diag_handler = cgcx.create_diag_handler(); + let module_name = work_item.mtrans.name.clone(); + + let pre_existing = match work_item.mtrans.source { + ModuleSource::Translated(_) => None, + ModuleSource::Preexisting(ref wp) => Some(wp.clone()), + }; + + if let Some(wp) = pre_existing { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let name = &work_item.mtrans.name; + for (kind, saved_file) in wp.saved_files { + let obj_out = work_item.output_names.temp_path(kind, Some(name)); + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + work_item.mtrans.name, + source_file, + obj_out.display()); + match link_or_copy(&source_file, &obj_out) { + Ok(_) => { } + Err(err) => { + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); } } } - } - Ok(()) + Ok(CompiledModule { + name: module_name, + kind: ModuleKind::Regular, + pre_existing: true, + symbol_name_hash: work_item.mtrans.symbol_name_hash, + emit_bc: work_item.config.emit_bc, + emit_obj: work_item.config.emit_obj, + }) + } else { + debug!("llvm-optimizing {:?}", module_name); + + unsafe { + optimize_and_codegen(cgcx, + &diag_handler, + work_item.mtrans, + work_item.config, + 
work_item.output_names) + } + } } -pub enum Message { +#[derive(Debug)] +enum Message { Token(io::Result<Acquired>), - Diagnostic(Diagnostic), - Done { success: bool }, - InlineAsmError(u32, String), - AbortIfErrors, + Done { + result: Result<CompiledModule, ()>, + worker_id: usize, + }, + TranslationDone { + llvm_work_item: WorkItem, + cost: u64, + is_last: bool, + }, + TranslateItem, } -pub struct Diagnostic { +struct Diagnostic { msg: String, code: Option<String>, lvl: Level, } -fn execute_work<'a>(sess: &'a Session, - mut work_items: Vec<WorkItem>, - jobserver: Client, - exported_symbols: &'a ExportedSymbols, - scope: &Scope<'a>) { - let (tx, rx) = channel(); - let tx2 = tx.clone(); +#[derive(PartialEq, Clone, Copy, Debug)] +enum MainThreadWorkerState { + Idle, + Translating, + LLVMing, +} +fn start_executing_work(sess: &Session, + shared_emitter: SharedEmitter, + trans_worker_send: Sender<Message>, + coordinator_send: Sender<Message>, + coordinator_receive: Receiver<Message>, + jobserver: Client, + time_graph: Option<TimeGraph>, + exported_symbols: Arc<ExportedSymbols>) + -> thread::JoinHandle<CompiledModules> { // First up, convert our jobserver into a helper thread so we can use normal // mpsc channels to manage our messages and such. Once we've got the helper // thread then request `n-1` tokens because all of our work items are ready @@ -1045,27 +1121,144 @@ fn execute_work<'a>(sess: &'a Session, // // After we've requested all these tokens then we'll, when we can, get // tokens on `rx` above which will get managed in the main loop below. 
+ let coordinator_send2 = coordinator_send.clone(); let helper = jobserver.into_helper_thread(move |token| { - drop(tx2.send(Message::Token(token))); + drop(coordinator_send2.send(Message::Token(token))); }).expect("failed to spawn helper thread"); - for _ in 0..work_items.len() - 1 { - helper.request_token(); - } + + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, &mut |cnum, path| { + if link::ignored_for_lto(sess, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let cgcx = CodegenContext { + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto: each_linked_rlib_for_lto, + lto: sess.lto(), + no_landing_pads: sess.no_landing_pads(), + opts: Arc::new(sess.opts.clone()), + time_passes: sess.time_passes(), + exported_symbols: exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + coordinator_send: coordinator_send, + diag_emitter: shared_emitter.clone(), + time_graph, + }; // This is the "main loop" of parallel work happening for parallel codegen. // It's here that we manage parallelism, schedule work, and work with // messages coming from clients. // - // Our channel `rx` created above is a channel of messages coming from our - // various worker threads. This includes the jobserver helper thread above - // as well as the work we'll spawn off here. Each turn of this loop starts - // off by trying to spawn as much work as possible. After we've done that we - // then wait for an event and dispatch accordingly once the event is - // received. We're only done once all our work items have been drained and - // nothing is running, at which point we return back up the stack. 
+ // There are a few environmental pre-conditions that shape how the system + // is set up: + // + // - Error reporting only can happen on the main thread because that's the + // only place where we have access to the compiler `Session`. + // - LLVM work can be done on any thread. + // - Translation can only happen on the main thread. + // - Each thread doing substantial work most be in possession of a `Token` + // from the `Jobserver`. + // - The compiler process always holds one `Token`. Any additional `Tokens` + // have to be requested from the `Jobserver`. + // + // Error Reporting + // =============== + // The error reporting restriction is handled separately from the rest: We + // set up a `SharedEmitter` the holds an open channel to the main thread. + // When an error occurs on any thread, the shared emitter will send the + // error message to the receiver main thread (`SharedEmitterMain`). The + // main thread will periodically query this error message queue and emit + // any error messages it has received. It might even abort compilation if + // has received a fatal error. In this case we rely on all other threads + // being torn down automatically with the main thread. + // Since the main thread will often be busy doing translation work, error + // reporting will be somewhat delayed, since the message queue can only be + // checked in between to work packages. + // + // Work Processing Infrastructure + // ============================== + // The work processing infrastructure knows three major actors: + // + // - the coordinator thread, + // - the main thread, and + // - LLVM worker threads + // + // The coordinator thread is running a message loop. It instructs the main + // thread about what work to do when, and it will spawn off LLVM worker + // threads as open LLVM WorkItems become available. + // + // The job of the main thread is to translate CGUs into LLVM work package + // (since the main thread is the only thread that can do this). 
The main + // thread will block until it receives a message from the coordinator, upon + // which it will translate one CGU, send it to the coordinator and block + // again. This way the coordinator can control what the main thread is + // doing. + // + // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is + // available, it will spawn off a new LLVM worker thread and let it process + // that a WorkItem. When a LLVM worker thread is done with its WorkItem, + // it will just shut down, which also frees all resources associated with + // the given LLVM module, and sends a message to the coordinator that the + // has been completed. + // + // Work Scheduling + // =============== + // The scheduler's goal is to minimize the time it takes to complete all + // work there is, however, we also want to keep memory consumption low + // if possible. These two goals are at odds with each other: If memory + // consumption were not an issue, we could just let the main thread produce + // LLVM WorkItems at full speed, assuring maximal utilization of + // Tokens/LLVM worker threads. However, since translation usual is faster + // than LLVM processing, the queue of LLVM WorkItems would fill up and each + // WorkItem potentially holds on to a substantial amount of memory. + // + // So the actual goal is to always produce just enough LLVM WorkItems as + // not to starve our LLVM worker threads. That means, once we have enough + // WorkItems in our queue, we can block the main thread, so it does not + // produce more until we need them. // - // ## Parallelism management + // Doing LLVM Work on the Main Thread + // ---------------------------------- + // Since the main thread owns the compiler processes implicit `Token`, it is + // wasteful to keep it blocked without doing any work. Therefore, what we do + // in this case is: We spawn off an additional LLVM worker thread that helps + // reduce the queue. The work it is doing corresponds to the implicit + // `Token`. 
The coordinator will mark the main thread as being busy with + // LLVM work. (The actual work happens on another OS thread but we just care + // about `Tokens`, not actual threads). // + // When any LLVM worker thread finishes while the main thread is marked as + // "busy with LLVM work", we can do a little switcheroo: We give the Token + // of the just finished thread to the LLVM worker thread that is working on + // behalf of the main thread's implicit Token, thus freeing up the main + // thread again. The coordinator can then again decide what the main thread + // should do. This allows the coordinator to make decisions at more points + // in time. + // + // Striking a Balance between Throughput and Memory Consumption + // ------------------------------------------------------------ + // Since our two goals, (1) use as many Tokens as possible and (2) keep + // memory consumption as low as possible, are in conflict with each other, + // we have to find a trade off between them. Right now, the goal is to keep + // all workers busy, which means that no worker should find the queue empty + // when it is ready to start. + // How do we do achieve this? Good question :) We actually never know how + // many `Tokens` are potentially available so it's hard to say how much to + // fill up the queue before switching the main thread to LLVM work. Also we + // currently don't have a means to estimate how long a running LLVM worker + // will still be busy with it's current WorkItem. However, we know the + // maximal count of available Tokens that makes sense (=the number of CPU + // cores), so we can take a conservative guess. The heuristic we use here + // is implemented in the `queue_full_enough()` function. + // + // Some Background on Jobservers + // ----------------------------- // It's worth also touching on the management of parallelism here. 
We don't // want to just spawn a thread per work item because while that's optimal // parallelism it may overload a system with too many threads or violate our @@ -1078,193 +1271,302 @@ fn execute_work<'a>(sess: &'a Session, // and whenever we're done with that work we release the semaphore. In this // manner we can ensure that the maximum number of parallel workers is // capped at any one point in time. - // - // The jobserver protocol is a little unique, however. We, as a running - // process, already have an ephemeral token assigned to us. We're not going - // to be doing any productive work in this thread though so we're going to - // give this token to a worker thread (there's no actual token to give, this - // is just conceptually). As a result you'll see a few `+1` and `-1` - // instances below, and it's about working with this ephemeral token. - // - // To acquire tokens we have our `helper` thread above which is just in a - // loop acquiring tokens and sending them to us. We then store all tokens - // locally in a `tokens` vector once they're acquired. Currently we don't - // literally send a token to a worker thread to assist with management of - // our "ephemeral token". - // - // As a result, our "spawn as much work as possible" basically means that we - // fill up the `running` counter up to the limit of the `tokens` list. - // Whenever we get a new token this'll mean a new unit of work is spawned, - // and then whenever a unit of work finishes we relinquish a token, if we - // had one, to maybe get re-acquired later. - // - // Note that there's a race which may mean that we acquire more tokens than - // we originally anticipated. For example let's say we have 2 units of work. - // First we request one token from the helper thread and then we - // immediately spawn one unit of work with our ephemeral token after. 
We may - // then finish the first piece of work before the token is acquired, but we - // can continue to spawn the second piece of work with our ephemeral token. - // Before that work finishes, however, we may acquire a token. In that case - // we actually wastefully acquired the token, so we relinquish it back to - // the jobserver. - let mut tokens = Vec::new(); - let mut running = 0; - while work_items.len() > 0 || running > 0 { - - // Spin up what work we can, only doing this while we've got available - // parallelism slots and work left to spawn. - while work_items.len() > 0 && running < tokens.len() + 1 { - let item = work_items.pop().unwrap(); - let index = work_items.len(); - spawn_work(sess, exported_symbols, scope, tx.clone(), item, index); - running += 1; - } - - // Relinquish accidentally acquired extra tokens - tokens.truncate(running.saturating_sub(1)); - - match rx.recv().unwrap() { - // Save the token locally and the next turn of the loop will use - // this to spawn a new unit of work, or it may get dropped - // immediately if we have no more work to spawn. - Message::Token(token) => { - tokens.push(token.expect("failed to acquire jobserver token")); + return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + + let max_workers = ::num_cpus::get(); + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id } + }; - // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. 
We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" - // - // Note that if the thread failed that means it panicked, so we - // abort immediately. - Message::Done { success: true } => { - drop(tokens.pop()); - running -= 1; + // This is where we collect codegen units that have gone all the way + // through translation and LLVM. + let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + + // This flag tracks whether all items have gone through translations + let mut translation_done = false; + + // This is the queue of LLVM work items that still need processing. + let mut work_items = Vec::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. + let mut tokens = Vec::new(); + + let mut main_thread_worker_state = MainThreadWorkerState::Idle; + let mut running = 0; + + let mut llvm_start_time = None; + + // Run the message loop while there's still anything that needs message + // processing: + while !translation_done || + work_items.len() > 0 || + running > 0 || + main_thread_worker_state != MainThreadWorkerState::Idle { + + // While there are still CGUs to be translated, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For translating more CGU or for running them through LLVM. + if !translation_done { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, translate more items: + if let Err(_) = trans_worker_send.send(Message::TranslateItem) { + panic!("Could not send Message::TranslateItem to main thread") + } + main_thread_worker_state = MainThreadWorkerState::Translating; + } else { + // The queue is full enough to not let the worker + // threads starve. Use the implicit Token to do some + // LLVM work too. 
+ let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(&item, &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else { + // In this branch, we know that everything has been translated, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. + match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(&item, &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + MainThreadWorkerState::Translating => { + bug!("trans worker should not be translating after \ + translation was already completed") + } + MainThreadWorkerState::LLVMing => { + // Already making good use of that token + } + } } - Message::Done { success: false } => { - sess.fatal("aborting due to worker thread panic"); + + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while work_items.len() > 0 && running < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + + maybe_start_llvm_timer(&item, &mut llvm_start_time); + + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. 
cgcx.clone() + }; + + spawn_work(cgcx, item); + running += 1; } - // Our worker wants us to emit an error message, so get ahold of our - // `sess` and print it out - Message::Diagnostic(diag) => { - let handler = sess.diagnostic(); - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - &code, - diag.lvl); + // Relinquish accidentally acquired extra tokens + tokens.truncate(running); + + match coordinator_receive.recv().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. + Message::Token(token) => { + match token { + Ok(token) => { + tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to translation demand. + main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!("{}", msg) + } } - None => { - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); + } + + Message::TranslationDone { llvm_work_item, cost, is_last } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. 
+ let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); + + if is_last { + // If this is the last, don't request a token because + // the trans worker thread will be free to handle this + // immediately. + translation_done = true; + } else { + helper.request_token(); } + + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Translating); + main_thread_worker_state = MainThreadWorkerState::Idle; } - } - Message::InlineAsmError(cookie, msg) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, &msg), - None => sess.err(&msg), + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. 
+ Message::Done { result: Ok(compiled_module), worker_id } => { + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } + } + Message::Done { result: Err(()), worker_id: _ } => { + shared_emitter.fatal("aborting due to worker thread panic"); + // Exit the coordinator thread + panic!("aborting due to worker thread panic") + } + Message::TranslateItem => { + bug!("the coordinator should not receive translation requests") } } + } - // Sent to us after a worker sends us a batch of error messages, and - // it's the point at which we check for errors. - Message::AbortIfErrors => sess.diagnostic().abort_if_errors(), + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. + set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); } - } - // Just in case, check this on the way out. 
- sess.diagnostic().abort_if_errors(); -} + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); -struct SharedEmitter { - tx: Sender<Message>, -} + CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module, + allocator_module: compiled_allocator_module, + } + }); -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - drop(self.tx.send(Message::Diagnostic(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }))); - for child in &db.children { - drop(self.tx.send(Message::Diagnostic(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }))); + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of translation + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. + items_in_queue > 0 && + items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } + + fn maybe_start_llvm_timer(work_item: &WorkItem, + llvm_start_time: &mut Option<Instant>) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. 
+ if work_item.config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } } - drop(self.tx.send(Message::AbortIfErrors)); } } -fn spawn_work<'a>(sess: &'a Session, - exported_symbols: &'a ExportedSymbols, - scope: &Scope<'a>, - tx: Sender<Message>, - work: WorkItem, - idx: usize) { - let plugin_passes = sess.plugin_llvm_passes.borrow().clone(); - let remark = sess.opts.cg.remark.clone(); - let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); +pub const TRANS_WORKER_ID: usize = ::std::usize::MAX; +pub const TRANS_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(TRANS_WORKER_ID); +pub const TRANS_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + +fn spawn_work(cgcx: CodegenContext, work: WorkItem) { let depth = time_depth(); - let lto = sess.lto(); - let crate_types = sess.crate_types.borrow().clone(); - let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, &mut |cnum, path| { - if link::ignored_for_lto(sess, cnum) { - return - } - each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); - })); - let time_passes = sess.time_passes(); - let no_landing_pads = sess.no_landing_pads(); - let opts = &sess.opts; - scope.spawn(move || { + thread::spawn(move || { set_time_depth(depth); // Set up a destructor which will fire off a message that we're done as // we exit. 
struct Bomb { - tx: Sender<Message>, - success: bool, + coordinator_send: Sender<Message>, + result: Option<CompiledModule>, + worker_id: usize, } impl Drop for Bomb { fn drop(&mut self) { - drop(self.tx.send(Message::Done { success: self.success })); + let result = match self.result.take() { + Some(compiled_module) => Ok(compiled_module), + None => Err(()) + }; + + drop(self.coordinator_send.send(Message::Done { + result, + worker_id: self.worker_id, + })); } } - let mut bomb = Bomb { - tx: tx.clone(), - success: false, - }; - // Set up our non-`Send` `CodegenContext` now that we're in a helper - // thread and have all our info available to us. - let emitter = SharedEmitter { tx: tx.clone() }; - let diag_handler = Handler::with_emitter(true, false, Box::new(emitter)); - - let cgcx = CodegenContext { - crate_types: crate_types, - each_linked_rlib_for_lto: each_linked_rlib_for_lto, - lto: lto, - no_landing_pads: no_landing_pads, - opts: opts, - time_passes: time_passes, - exported_symbols: exported_symbols, - handler: &diag_handler, - plugin_passes: plugin_passes, - remark: remark, - worker: idx, - incr_comp_session_dir: incr_comp_session_dir, - tx: tx.clone(), + let mut bomb = Bomb { + coordinator_send: cgcx.coordinator_send.clone(), + result: None, + worker_id: cgcx.worker, }; // Execute the work itself, and if it finishes successfully then flag @@ -1280,8 +1582,13 @@ fn spawn_work<'a>(sess: &'a Session, // we just ignore the result and then send off our message saying that // we're done, which if `execute_work_item` failed is unlikely to be // seen by the main thread, but hey we might as well try anyway. 
- drop(execute_work_item(&cgcx, work).is_err()); - bomb.success = true; + bomb.result = { + let _timing_guard = cgcx.time_graph + .as_ref() + .map(|tg| tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND)); + Some(execute_work_item(&cgcx, work).unwrap()) + }; }); } @@ -1375,3 +1682,249 @@ pub unsafe fn with_llvm_pmb(llmod: ModuleRef, f(builder); llvm::LLVMPassManagerBuilderDispose(builder); } + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender<SharedEmitterMessage>, +} + +pub struct SharedEmitterMain { + receiver: Receiver<SharedEmitterMessage>, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + fn fatal(&self, msg: &str) { + drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: &Session, blocking: bool) { + loop { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { 
+ let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + &code, + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} + +pub struct OngoingCrateTranslation { + crate_name: Symbol, + link: LinkMeta, + metadata: EncodedMetadata, + exported_symbols: Arc<ExportedSymbols>, + no_builtins: bool, + windows_subsystem: Option<String>, + linker_info: LinkerInfo, + no_integrated_as: bool, + + output_filenames: OutputFilenames, + regular_module_config: ModuleConfig, + metadata_module_config: ModuleConfig, + allocator_module_config: ModuleConfig, + + time_graph: Option<TimeGraph>, + coordinator_send: Sender<Message>, + trans_worker_receive: Receiver<Message>, + shared_emitter_main: SharedEmitterMain, + future: thread::JoinHandle<CompiledModules>, +} + +impl OngoingCrateTranslation { + pub fn join(self, sess: &Session) -> CrateTranslation { + self.shared_emitter_main.check(sess, true); + let compiled_modules = match self.future.join() { + Ok(compiled_modules) => compiled_modules, + Err(_) => { + sess.fatal("Error during translation/LLVM phase."); + } + }; + + sess.abort_if_errors(); + + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + + copy_module_artifacts_into_incr_comp_cache(sess, + &compiled_modules, + &self.output_filenames); + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? 
+ if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + + let trans = CrateTranslation { + crate_name: self.crate_name, + link: self.link, + metadata: self.metadata, + exported_symbols: self.exported_symbols, + no_builtins: self.no_builtins, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + + modules: compiled_modules.modules, + metadata_module: compiled_modules.metadata_module, + allocator_module: compiled_modules.allocator_module, + }; + + if self.no_integrated_as { + run_assembler(sess, &self.output_filenames); + + // HACK the linker expects the object file to be named foo.0.o but + // `run_assembler` produces an object named just foo.o. Rename it if we + // are going to build an executable + if sess.opts.output_types.contains_key(&OutputType::Exe) { + let f = self.output_filenames.path(OutputType::Object); + rename_or_copy_remove(&f, + f.with_file_name(format!("{}.0.o", + f.file_stem().unwrap().to_string_lossy()))).unwrap(); + } + + // Remove assembly source, unless --save-temps was specified + if !sess.opts.cg.save_temps { + fs::remove_file(&self.output_filenames + .temp_path(OutputType::Assembly, None)).unwrap(); + } + } + + trans + } + + pub fn submit_translated_module_to_llvm(&self, + sess: &Session, + mtrans: ModuleTranslation, + cost: u64, + is_last: bool) { + let module_config = match mtrans.kind { + ModuleKind::Regular => self.regular_module_config.clone(sess), + ModuleKind::Metadata => self.metadata_module_config.clone(sess), + ModuleKind::Allocator => self.allocator_module_config.clone(sess), + }; + + let llvm_work_item = build_work_item(mtrans, + module_config, + self.output_filenames.clone()); + + drop(self.coordinator_send.send(Message::TranslationDone { + llvm_work_item, + cost, + is_last + })); + } + + pub fn submit_pre_translated_module_to_llvm(&self, + sess: &Session, + mtrans: ModuleTranslation, + is_last: bool) { + 
self.wait_for_signal_to_translate_item(); + self.check_for_errors(sess); + + // These are generally cheap and won't through off scheduling. + let cost = 0; + self.submit_translated_module_to_llvm(sess, mtrans, cost, is_last); + } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } + + pub fn wait_for_signal_to_translate_item(&self) { + match self.trans_worker_receive.recv() { + Ok(Message::TranslateItem) => { + // Nothing to do + } + Ok(message) => { + panic!("unexpected message: {:?}", message) + } + Err(_) => { + // One of the LLVM threads must have panicked, fall through so + // error handling can be reached. + } + } + } +} diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 7b836399f9c..e8032529b1f 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -23,29 +23,30 @@ //! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, //! int) and rec(x=int, y=int, z=int) will have the same TypeRef. 
-use super::CrateTranslation; use super::ModuleLlvm; use super::ModuleSource; use super::ModuleTranslation; +use super::ModuleKind; use assert_module_sources; use back::link; use back::linker::LinkerInfo; use back::symbol_export::{self, ExportedSymbols}; +use back::write::{self, OngoingCrateTranslation}; use llvm::{ContextRef, Linkage, ModuleRef, ValueRef, Vector, get_param}; use llvm; use metadata; use rustc::hir::def_id::LOCAL_CRATE; use rustc::middle::lang_items::StartFnLangItem; -use rustc::middle::cstore::EncodedMetadata; +use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::dep_graph::AssertDepGraphSafe; use rustc::middle::cstore::LinkMeta; use rustc::hir::map as hir_map; -use rustc::util::common::time; -use rustc::session::config::{self, NoDebugInfo, OutputFilenames}; +use rustc::util::common::{time, print_time_passes_entry}; +use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType}; use rustc::session::Session; -use rustc_incremental::IncrementalHashesMap; +use rustc_incremental::{self, IncrementalHashesMap}; use abi; use allocator; use mir::lvalue::LvalueRef; @@ -68,6 +69,7 @@ use mir; use monomorphize::{self, Instance}; use partitioning::{self, PartitioningStrategy, CodegenUnit}; use symbol_names_test; +use time_graph; use trans_item::{TransItem, DefPathBasedNames}; use type_::Type; use type_of; @@ -78,6 +80,7 @@ use libc::c_uint; use std::ffi::{CStr, CString}; use std::str; use std::sync::Arc; +use std::time::{Instant, Duration}; use std::i32; use syntax_pos::Span; use syntax::attr; @@ -647,9 +650,23 @@ pub fn set_link_section(ccx: &CrateContext, } } +// check for the #[rustc_error] annotation, which forces an +// error in trans. This is used to write compile-fail tests +// that actually test that compilation succeeds without +// reporting an error. 
+fn check_for_rustc_errors_attr(tcx: TyCtxt) { + if let Some((id, span)) = *tcx.sess.entry_fn.borrow() { + let main_def_id = tcx.hir.local_def_id(id); + + if tcx.has_attr(main_def_id, "rustc_error") { + tcx.sess.span_fatal(span, "compilation successful"); + } + } +} + /// Create the `main` function which will initialise the rust runtime and call /// users main function. -pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { +fn maybe_create_entry_wrapper(ccx: &CrateContext) { let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() { Some((id, span)) => { (ccx.tcx().hir.local_def_id(id), span) @@ -657,14 +674,6 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { None => return, }; - // check for the #[rustc_error] annotation, which forces an - // error in trans. This is used to write compile-fail tests - // that actually test that compilation succeeds without - // reporting an error. - if ccx.tcx().has_attr(main_def_id, "rustc_error") { - ccx.tcx().sess.span_fatal(span, "compilation successful"); - } - let instance = Instance::mono(ccx.tcx(), main_def_id); if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) { @@ -728,7 +737,8 @@ fn contains_null(s: &str) -> bool { fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, link_meta: &LinkMeta, exported_symbols: &NodeSet) - -> (ContextRef, ModuleRef, EncodedMetadata) { + -> (ContextRef, ModuleRef, + EncodedMetadata, EncodedMetadataHashes) { use std::io::Write; use flate2::Compression; use flate2::write::DeflateEncoder; @@ -758,15 +768,18 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, }).max().unwrap(); if kind == MetadataKind::None { - return (metadata_llcx, metadata_llmod, EncodedMetadata::new()); + return (metadata_llcx, + metadata_llmod, + EncodedMetadata::new(), + EncodedMetadataHashes::new()); } let cstore = &tcx.sess.cstore; - let metadata = cstore.encode_metadata(tcx, - &link_meta, - exported_symbols); + let (metadata, hashes) = cstore.encode_metadata(tcx, + &link_meta, + 
exported_symbols); if kind == MetadataKind::Uncompressed { - return (metadata_llcx, metadata_llmod, metadata); + return (metadata_llcx, metadata_llmod, metadata, hashes); } assert!(kind == MetadataKind::Compressed); @@ -794,7 +807,7 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, let directive = CString::new(directive).unwrap(); llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr()) } - return (metadata_llcx, metadata_llmod, metadata); + return (metadata_llcx, metadata_llmod, metadata, hashes); } // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`. @@ -803,7 +816,7 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, // code references on its own. // See #26591, #27438 fn create_imps(sess: &Session, - llvm_modules: &[ModuleLlvm]) { + llvm_module: &ModuleLlvm) { // The x86 ABI seems to require that leading underscores are added to symbol // names, so we need an extra underscore on 32-bit. There's also a leading // '\x01' here which disables LLVM's symbol mangling (e.g. 
no extra @@ -814,28 +827,25 @@ fn create_imps(sess: &Session, "\x01__imp_" }; unsafe { - for ll in llvm_modules { - let exported: Vec<_> = iter_globals(ll.llmod) - .filter(|&val| { - llvm::LLVMRustGetLinkage(val) == - llvm::Linkage::ExternalLinkage && - llvm::LLVMIsDeclaration(val) == 0 - }) - .collect(); - - let i8p_ty = Type::i8p_llcx(ll.llcx); - for val in exported { - let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); - let mut imp_name = prefix.as_bytes().to_vec(); - imp_name.extend(name.to_bytes()); - let imp_name = CString::new(imp_name).unwrap(); - let imp = llvm::LLVMAddGlobal(ll.llmod, - i8p_ty.to_ref(), - imp_name.as_ptr() as *const _); - let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); - llvm::LLVMSetInitializer(imp, init); - llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); - } + let exported: Vec<_> = iter_globals(llvm_module.llmod) + .filter(|&val| { + llvm::LLVMRustGetLinkage(val) == + llvm::Linkage::ExternalLinkage && + llvm::LLVMIsDeclaration(val) == 0 + }) + .collect(); + + let i8p_ty = Type::i8p_llcx(llvm_module.llcx); + for val in exported { + let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); + let mut imp_name = prefix.as_bytes().to_vec(); + imp_name.extend(name.to_bytes()); + let imp_name = CString::new(imp_name).unwrap(); + let imp = llvm::LLVMAddGlobal(llvm_module.llmod, + i8p_ty.to_ref(), + imp_name.as_ptr() as *const _); + llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty)); + llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } } @@ -920,27 +930,26 @@ pub fn find_exported_symbols(tcx: TyCtxt, reachable: &NodeSet) -> NodeSet { pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, - incremental_hashes_map: &IncrementalHashesMap, + incremental_hashes_map: IncrementalHashesMap, output_filenames: &OutputFilenames) - -> CrateTranslation { + -> OngoingCrateTranslation { + check_for_rustc_errors_attr(tcx); + // Be careful with this krate: obviously it gives 
access to the // entire contents of the krate. So if you push any subtasks of // `TransCrate`, you need to be careful to register "reads" of the // particular items that will be processed. let krate = tcx.hir.krate(); - let ty::CrateAnalysis { reachable, .. } = analysis; - let check_overflow = tcx.sess.overflow_checks(); - - let link_meta = link::build_link_meta(incremental_hashes_map); - + let link_meta = link::build_link_meta(&incremental_hashes_map); let exported_symbol_node_ids = find_exported_symbols(tcx, &reachable); + let shared_ccx = SharedCrateContext::new(tcx, check_overflow, output_filenames); // Translate the metadata. - let (metadata_llcx, metadata_llmod, metadata) = + let (metadata_llcx, metadata_llmod, metadata, metadata_incr_hashes) = time(tcx.sess.time_passes(), "write metadata", || { write_metadata(tcx, &link_meta, &exported_symbol_node_ids) }); @@ -952,27 +961,44 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, llcx: metadata_llcx, llmod: metadata_llmod, }), + kind: ModuleKind::Metadata, }; let no_builtins = attr::contains_name(&krate.attrs, "no_builtins"); + let time_graph = if tcx.sess.opts.debugging_opts.trans_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; // Skip crate items and just output metadata in -Z no-trans mode. 
if tcx.sess.opts.debugging_opts.no_trans || !tcx.sess.opts.output_types.should_trans() { let empty_exported_symbols = ExportedSymbols::empty(); let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); - return CrateTranslation { - crate_name: tcx.crate_name(LOCAL_CRATE), - modules: vec![], - metadata_module: metadata_module, - allocator_module: None, - link: link_meta, - metadata: metadata, - exported_symbols: empty_exported_symbols, - no_builtins: no_builtins, - linker_info: linker_info, - windows_subsystem: None, - }; + let ongoing_translation = write::start_async_translation( + tcx.sess, + output_filenames, + time_graph.clone(), + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + Arc::new(empty_exported_symbols), + no_builtins, + None, + linker_info, + false); + + ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, metadata_module, true); + + assert_and_save_dep_graph(tcx, + incremental_hashes_map, + metadata_incr_hashes, + link_meta); + + ongoing_translation.check_for_errors(tcx.sess); + + return ongoing_translation; } let exported_symbols = Arc::new(ExportedSymbols::compute(tcx, @@ -983,12 +1009,110 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let (translation_items, codegen_units) = collect_and_partition_translation_items(&shared_ccx, &exported_symbols); + assert!(codegen_units.len() <= 1 || !tcx.sess.lto()); + + let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols); + let subsystem = attr::first_attr_value_str_by_name(&krate.attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + (tcx.sess.target.target.options.no_integrated_as && + (output_filenames.outputs.contains_key(&OutputType::Object) || 
+ output_filenames.outputs.contains_key(&OutputType::Exe))); + + let ongoing_translation = write::start_async_translation( + tcx.sess, + output_filenames, + time_graph.clone(), + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + exported_symbols.clone(), + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as); + + // Translate an allocator shim, if any + // + // If LTO is enabled and we've got some previous LLVM module we translated + // above, then we can just translate directly into that LLVM module. If not, + // however, we need to create a separate module and trans into that. Note + // that the separate translation is critical for the standard library where + // the rlib's object file doesn't have allocator functions but the dylib + // links in an object file that has allocator functions. When we're + // compiling a final LTO artifact, though, there's no need to worry about + // this as we're not working with this dual "rlib/dylib" functionality. + let allocator_module = if tcx.sess.lto() { + None + } else if let Some(kind) = tcx.sess.allocator_kind.get() { + unsafe { + let (llcx, llmod) = + context::create_context_and_module(tcx.sess, "allocator"); + let modules = ModuleLlvm { + llmod: llmod, + llcx: llcx, + }; + time(tcx.sess.time_passes(), "write allocator module", || { + allocator::trans(tcx, &modules, kind) + }); + + Some(ModuleTranslation { + name: link::ALLOCATOR_MODULE_NAME.to_string(), + symbol_name_hash: 0, // we always rebuild allocator shims + source: ModuleSource::Translated(modules), + kind: ModuleKind::Allocator, + }) + } + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, allocator_module, false); + } + + let codegen_unit_count = codegen_units.len(); + ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, + metadata_module, + codegen_unit_count == 0); + let translation_items = Arc::new(translation_items); let mut 
all_stats = Stats::default(); - let modules: Vec<ModuleTranslation> = codegen_units - .into_iter() - .map(|cgu| { + let mut module_dispositions = tcx.sess.opts.incremental.as_ref().map(|_| Vec::new()); + + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. Note that "size" is defined rather crudely at the + // moment as it is just the number of TransItems in the CGU, not taking into + // account the size of each TransItem. + let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_key(|cgu| -(cgu.items().len() as isize)); + codegen_units + }; + + let mut total_trans_time = Duration::new(0, 0); + + for (cgu_index, cgu) in codegen_units.into_iter().enumerate() { + ongoing_translation.wait_for_signal_to_translate_item(); + ongoing_translation.check_for_errors(tcx.sess); + + let start_time = Instant::now(); + + let module = { + let _timing_guard = time_graph + .as_ref() + .map(|time_graph| time_graph.start(write::TRANS_WORKER_TIMELINE, + write::TRANS_WORK_PACKAGE_KIND)); let dep_node = cgu.work_product_dep_node(); let ((stats, module), _) = tcx.dep_graph.with_task(dep_node, @@ -998,9 +1122,41 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, exported_symbols.clone())), module_translation); all_stats.extend(stats); + + if let Some(ref mut module_dispositions) = module_dispositions { + module_dispositions.push(module.disposition()); + } + module - }) - .collect(); + }; + + let time_to_translate = Instant::now().duration_since(start_time); + + // We assume that the cost to run LLVM on a CGU is proportional to + // the time we needed for translating it. 
+ let cost = time_to_translate.as_secs() * 1_000_000_000 + + time_to_translate.subsec_nanos() as u64; + + total_trans_time += time_to_translate; + + let is_last_cgu = (cgu_index + 1) == codegen_unit_count; + + ongoing_translation.submit_translated_module_to_llvm(tcx.sess, + module, + cost, + is_last_cgu); + ongoing_translation.check_for_errors(tcx.sess); + } + + // Since the main thread is sometimes blocked during trans, we keep track + // -Ztime-passes output manually. + print_time_passes_entry(tcx.sess.time_passes(), + "translate to LLVM IR", + total_trans_time); + + if let Some(module_dispositions) = module_dispositions { + assert_module_sources::assert_module_sources(tcx, &module_dispositions); + } fn module_translation<'a, 'tcx>( scx: AssertDepGraphSafe<&SharedCrateContext<'a, 'tcx>>, @@ -1015,7 +1171,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let cgu_name = String::from(cgu.name()); let cgu_id = cgu.work_product_id(); - let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &exported_symbols); + let symbol_name_hash = cgu.compute_symbol_name_hash(scx); // Check whether there is a previous work-product we can // re-use. Not only must the file exist, and the inputs not @@ -1044,7 +1200,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let module = ModuleTranslation { name: cgu_name, symbol_name_hash, - source: ModuleSource::Preexisting(buf.clone()) + source: ModuleSource::Preexisting(buf.clone()), + kind: ModuleKind::Regular, }; return (Stats::default(), module); } @@ -1099,21 +1256,40 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debuginfo::finalize(&ccx); } + let llvm_module = ModuleLlvm { + llcx: ccx.llcx(), + llmod: ccx.llmod(), + }; + + // In LTO mode we inject the allocator shim into the existing + // module. 
+ if ccx.sess().lto() { + if let Some(kind) = ccx.sess().allocator_kind.get() { + time(ccx.sess().time_passes(), "write allocator module", || { + unsafe { + allocator::trans(ccx.tcx(), &llvm_module, kind); + } + }); + } + } + + // Adjust exported symbols for MSVC dllimport + if ccx.sess().target.target.options.is_like_msvc && + ccx.sess().crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { + create_imps(ccx.sess(), &llvm_module); + } + ModuleTranslation { name: cgu_name, symbol_name_hash, - source: ModuleSource::Translated(ModuleLlvm { - llcx: ccx.llcx(), - llmod: ccx.llmod(), - }) + source: ModuleSource::Translated(llvm_module), + kind: ModuleKind::Regular, } }; (lcx.into_stats(), module) } - assert_module_sources::assert_module_sources(tcx, &modules); - symbol_names_test::report_symbol_names(tcx); if shared_ccx.sess().trans_stats() { @@ -1144,85 +1320,29 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - let sess = shared_ccx.sess(); - - // Get the list of llvm modules we created. We'll do a few wacky - // transforms on them now. - - let llvm_modules: Vec<_> = - modules.iter() - .filter_map(|module| match module.source { - ModuleSource::Translated(llvm) => Some(llvm), - _ => None, - }) - .collect(); - - if sess.target.target.options.is_like_msvc && - sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { - create_imps(sess, &llvm_modules); - } - - // Translate an allocator shim, if any - // - // If LTO is enabled and we've got some previous LLVM module we translated - // above, then we can just translate directly into that LLVM module. If not, - // however, we need to create a separate module and trans into that. Note - // that the separate translation is critical for the standard library where - // the rlib's object file doesn't have allocator functions but the dylib - // links in an object file that has allocator functions. 
When we're - // compiling a final LTO artifact, though, there's no need to worry about - // this as we're not working with this dual "rlib/dylib" functionality. - let allocator_module = tcx.sess.allocator_kind.get().and_then(|kind| unsafe { - if sess.lto() && llvm_modules.len() > 0 { - time(tcx.sess.time_passes(), "write allocator module", || { - allocator::trans(tcx, &llvm_modules[0], kind) - }); - None - } else { - let (llcx, llmod) = - context::create_context_and_module(tcx.sess, "allocator"); - let modules = ModuleLlvm { - llmod: llmod, - llcx: llcx, - }; - time(tcx.sess.time_passes(), "write allocator module", || { - allocator::trans(tcx, &modules, kind) - }); - - Some(ModuleTranslation { - name: link::ALLOCATOR_MODULE_NAME.to_string(), - symbol_name_hash: 0, // we always rebuild allocator shims - source: ModuleSource::Translated(modules), - }) - } - }); - - let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols); + ongoing_translation.check_for_errors(tcx.sess); - let subsystem = attr::first_attr_value_str_by_name(&krate.attrs, - "windows_subsystem"); - let windows_subsystem = subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); - } - subsystem.to_string() - }); + assert_and_save_dep_graph(tcx, + incremental_hashes_map, + metadata_incr_hashes, + link_meta); + ongoing_translation +} - CrateTranslation { - crate_name: tcx.crate_name(LOCAL_CRATE), - modules: modules, - metadata_module: metadata_module, - allocator_module: allocator_module, - link: link_meta, - metadata: metadata, - exported_symbols: Arc::try_unwrap(exported_symbols) - .expect("There's still a reference to exported_symbols?"), - no_builtins: no_builtins, - linker_info: linker_info, - windows_subsystem: windows_subsystem, - } +fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: 
IncrementalHashesMap, + metadata_incr_hashes: EncodedMetadataHashes, + link_meta: LinkMeta) { + time(tcx.sess.time_passes(), + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess.time_passes(), + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx, + incremental_hashes_map, + &metadata_incr_hashes, + link_meta.crate_hash)); } #[inline(never)] // give this a place in the profiler diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 7a91cad511d..635741b4d1a 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -8,14 +8,50 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, ArgType, LayoutExt, Reg, Uniform}; +use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform}; use context::CrateContext; +use llvm::CallConv; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) + -> Option<Uniform> { + arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { + let size = arg.layout.size(ccx); + + // Ensure we have at most four uniquely addressable members. 
+ if size > unit.size.checked_mul(4, ccx).unwrap() { + return None; + } + + let valid_unit = match unit.kind { + RegKind::Integer => false, + RegKind::Float => true, + RegKind::Vector => size.bits() == 64 || size.bits() == 128 + }; + + if valid_unit { + Some(Uniform { + unit, + total: size + }) + } else { + None + } + }) +} + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>, vfp: bool) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); return; } + + if vfp { + if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { + ret.cast_to(ccx, uniform); + return; + } + } + let size = ret.layout.size(ccx); let bits = size.bits(); if bits <= 32 { @@ -35,11 +71,19 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc ret.make_indirect(ccx); } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) { if !arg.layout.is_aggregate() { arg.extend_integer_width_to(32); return; } + + if vfp { + if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { + arg.cast_to(ccx, uniform); + return; + } + } + let align = arg.layout.align(ccx).abi(); let total = arg.layout.size(ccx); arg.cast_to(ccx, Uniform { @@ -49,12 +93,18 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + // If this is a target with a hard-float ABI, and the function is not explicitly + // `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates. 
+ let vfp = ccx.sess().target.target.llvm_target.ends_with("hf") + && fty.cconv != CallConv::ArmAapcsCallConv + && !fty.variadic; + if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, vfp); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(ccx, arg, vfp); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 7bbaf50d21b..184c6f83579 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -234,12 +234,6 @@ pub fn C_big_integral(t: Type, u: u128) -> ValueRef { } } -pub fn C_floating_f64(f: f64, t: Type) -> ValueRef { - unsafe { - llvm::LLVMConstReal(t.to_ref(), f) - } -} - pub fn C_nil(ccx: &CrateContext) -> ValueRef { C_struct(ccx, &[], false) } @@ -383,7 +377,7 @@ pub fn const_to_uint(v: ValueRef) -> u64 { } } -fn is_const_integral(v: ValueRef) -> bool { +pub fn is_const_integral(v: ValueRef) -> bool { unsafe { !llvm::LLVMIsAConstantInt(v).is_null() } diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index da2a5839863..310cd6fe955 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -36,6 +36,12 @@ pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef { } } +pub fn bitcast(val: ValueRef, ty: Type) -> ValueRef { + unsafe { + llvm::LLVMConstBitCast(val, ty.to_ref()) + } +} + pub fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, align: machine::llalign, diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 70337a91731..5a4a5b95cf9 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -36,9 +36,9 @@ use rustc::dep_graph::WorkProduct; use syntax_pos::symbol::Symbol; +use std::sync::Arc; extern crate flate2; -extern crate crossbeam; extern crate libc; extern crate owning_ref; #[macro_use] extern crate rustc; @@ -54,6 +54,7 @@ extern crate rustc_const_math; extern crate rustc_bitflags; extern crate rustc_demangle; extern crate 
jobserver; +extern crate num_cpus; #[macro_use] extern crate log; #[macro_use] extern crate syntax; @@ -124,13 +125,13 @@ mod mir; mod monomorphize; mod partitioning; mod symbol_names_test; +mod time_graph; mod trans_item; mod tvec; mod type_; mod type_of; mod value; -#[derive(Clone)] pub struct ModuleTranslation { /// The name of the module. When the crate may be saved between /// compilations, incremental compilation requires that name be @@ -140,6 +141,58 @@ pub struct ModuleTranslation { pub name: String, pub symbol_name_hash: u64, pub source: ModuleSource, + pub kind: ModuleKind, +} + +#[derive(Copy, Clone, Debug)] +pub enum ModuleKind { + Regular, + Metadata, + Allocator, +} + +impl ModuleTranslation { + pub fn into_compiled_module(self, emit_obj: bool, emit_bc: bool) -> CompiledModule { + let pre_existing = match self.source { + ModuleSource::Preexisting(_) => true, + ModuleSource::Translated(_) => false, + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + symbol_name_hash: self.symbol_name_hash, + pre_existing, + emit_obj, + emit_bc, + } + } +} + +impl Drop for ModuleTranslation { + fn drop(&mut self) { + match self.source { + ModuleSource::Preexisting(_) => { + // Nothing to dispose. 
+ }, + ModuleSource::Translated(llvm) => { + unsafe { + llvm::LLVMDisposeModule(llvm.llmod); + llvm::LLVMContextDispose(llvm.llcx); + } + }, + } + } +} + +#[derive(Debug)] +pub struct CompiledModule { + pub name: String, + pub kind: ModuleKind, + pub symbol_name_hash: u64, + pub pre_existing: bool, + pub emit_obj: bool, + pub emit_bc: bool, } #[derive(Clone)] @@ -151,7 +204,7 @@ pub enum ModuleSource { Translated(ModuleLlvm), } -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Debug)] pub struct ModuleLlvm { pub llcx: llvm::ContextRef, pub llmod: llvm::ModuleRef, @@ -162,12 +215,12 @@ unsafe impl Sync for ModuleTranslation { } pub struct CrateTranslation { pub crate_name: Symbol, - pub modules: Vec<ModuleTranslation>, - pub metadata_module: ModuleTranslation, - pub allocator_module: Option<ModuleTranslation>, + pub modules: Vec<CompiledModule>, + pub metadata_module: CompiledModule, + pub allocator_module: Option<CompiledModule>, pub link: rustc::middle::cstore::LinkMeta, pub metadata: rustc::middle::cstore::EncodedMetadata, - pub exported_symbols: back::symbol_export::ExportedSymbols, + pub exported_symbols: Arc<back::symbol_export::ExportedSymbols>, pub no_builtins: bool, pub windows_subsystem: Option<String>, pub linker_info: back::linker::LinkerInfo diff --git a/src/librustc_trans/llvm_util.rs b/src/librustc_trans/llvm_util.rs index 99ab1c47bed..448feb5259d 100644 --- a/src/librustc_trans/llvm_util.rs +++ b/src/librustc_trans/llvm_util.rs @@ -80,7 +80,10 @@ const X86_WHITELIST: &'static [&'static str] = &["avx\0", "avx2\0", "bmi\0", "bm const HEXAGON_WHITELIST: &'static [&'static str] = &["hvx\0", "hvx-double\0"]; -const POWERPC_WHITELIST: &'static [&'static str] = &["altivec\0", "vsx\0"]; +const POWERPC_WHITELIST: &'static [&'static str] = &["altivec\0", + "power8-altivec\0", "power9-altivec\0", + "power8-vector\0", "power9-vector\0", + "vsx\0"]; pub fn target_features(sess: &Session) -> Vec<Symbol> { let target_machine = create_target_machine(sess); diff --git 
a/src/librustc_trans/metadata.rs b/src/librustc_trans/metadata.rs index 2c0148dfbb3..883808c5909 100644 --- a/src/librustc_trans/metadata.rs +++ b/src/librustc_trans/metadata.rs @@ -31,10 +31,10 @@ impl MetadataLoader for LlvmMetadataLoader { // just keeping the archive along while the metadata is in use. let archive = ArchiveRO::open(filename) .map(|ar| OwningRef::new(box ar)) - .ok_or_else(|| { - debug!("llvm didn't like `{}`", filename.display()); - format!("failed to read rlib metadata: '{}'", filename.display()) - })?; + .map_err(|e| { + debug!("llvm didn't like `{}`: {}", filename.display(), e); + format!("failed to read rlib metadata in '{}': {}", filename.display(), e) + })?; let buf: OwningRef<_, [u8]> = archive .try_map(|ar| { ar.iter() @@ -42,10 +42,10 @@ impl MetadataLoader for LlvmMetadataLoader { .find(|sect| sect.name() == Some(METADATA_FILENAME)) .map(|s| s.data()) .ok_or_else(|| { - debug!("didn't find '{}' in the archive", METADATA_FILENAME); - format!("failed to read rlib metadata: '{}'", - filename.display()) - }) + debug!("didn't find '{}' in the archive", METADATA_FILENAME); + format!("failed to read rlib metadata: '{}'", + filename.display()) + }) })?; Ok(buf.erase_owner()) } diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 9ff32bb7088..a17ddabb1a7 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -158,6 +158,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { LvalueContext::StorageLive | LvalueContext::StorageDead | + LvalueContext::Validate | LvalueContext::Inspect | LvalueContext::Consume => {} diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index b938dc66e3c..b43e76df04c 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -11,7 +11,6 @@ use llvm::{self, ValueRef}; use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind}; use 
rustc_const_math::ConstInt::*; -use rustc_const_math::ConstFloat::*; use rustc_const_math::{ConstInt, ConstMathErr}; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; @@ -27,7 +26,7 @@ use abi::{self, Abi}; use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; -use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; +use common::{C_array, C_bool, C_bytes, C_integral, C_big_integral, C_u32, C_u64}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef}; use common::const_to_opt_u128; use consts; @@ -37,6 +36,7 @@ use type_::Type; use value::Value; use syntax_pos::Span; +use syntax::ast; use std::fmt; use std::ptr; @@ -95,8 +95,13 @@ impl<'tcx> Const<'tcx> { -> Const<'tcx> { let llty = type_of::type_of(ccx, ty); let val = match cv { - ConstVal::Float(F32(v)) => C_floating_f64(v as f64, llty), - ConstVal::Float(F64(v)) => C_floating_f64(v, llty), + ConstVal::Float(v) => { + let bits = match v.ty { + ast::FloatTy::F32 => C_u32(ccx, v.bits as u32), + ast::FloatTy::F64 => C_u64(ccx, v.bits as u64) + }; + consts::bitcast(bits, llty) + } ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), @@ -222,15 +227,24 @@ struct MirConstContext<'a, 'tcx: 'a> { substs: &'tcx Substs<'tcx>, /// Values of locals in a constant or const fn. 
- locals: IndexVec<mir::Local, Option<Const<'tcx>>> + locals: IndexVec<mir::Local, Option<Result<Const<'tcx>, ConstEvalErr<'tcx>>>> } +fn add_err<'tcx, U, V>(failure: &mut Result<U, ConstEvalErr<'tcx>>, + value: &Result<V, ConstEvalErr<'tcx>>) +{ + if let &Err(ref err) = value { + if failure.is_ok() { + *failure = Err(err.clone()); + } + } +} impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn new(ccx: &'a CrateContext<'a, 'tcx>, mir: &'a mir::Mir<'tcx>, substs: &'tcx Substs<'tcx>, - args: IndexVec<mir::Local, Const<'tcx>>) + args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>) -> MirConstContext<'a, 'tcx> { let mut context = MirConstContext { ccx: ccx, @@ -249,7 +263,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn trans_def(ccx: &'a CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, - args: IndexVec<mir::Local, Const<'tcx>>) + args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> { let instance = monomorphize::resolve(ccx.shared(), def_id, substs); let mir = ccx.tcx().instance_mir(instance.def); @@ -278,13 +292,13 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::StatementKind::Assign(ref dest, ref rvalue) => { let ty = dest.ty(self.mir, tcx); let ty = self.monomorphize(&ty).to_ty(tcx); - match self.const_rvalue(rvalue, ty, span) { - Ok(value) => self.store(dest, value, span), - Err(err) => if failure.is_ok() { failure = Err(err); } - } + let value = self.const_rvalue(rvalue, ty, span); + add_err(&mut failure, &value); + self.store(dest, value, span); } mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | + mir::StatementKind::Validate(..) | mir::StatementKind::EndRegion(_) | mir::StatementKind::Nop => {} mir::StatementKind::InlineAsm { .. 
} | @@ -301,9 +315,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::TerminatorKind::Goto { target } => target, mir::TerminatorKind::Return => { failure?; - return Ok(self.locals[mir::RETURN_POINTER].unwrap_or_else(|| { + return self.locals[mir::RETURN_POINTER].clone().unwrap_or_else(|| { span_bug!(span, "no returned value in constant"); - })); + }); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => { @@ -345,33 +359,30 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let mut arg_vals = IndexVec::with_capacity(args.len()); for arg in args { - match self.const_operand(arg, span) { - Ok(arg) => { arg_vals.push(arg); }, - Err(err) => if failure.is_ok() { failure = Err(err); } - } + let arg_val = self.const_operand(arg, span); + add_err(&mut failure, &arg_val); + arg_vals.push(arg_val); } if let Some((ref dest, target)) = *destination { - if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic { - let value = match &tcx.item_name(def_id).as_str()[..] { + let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic { + match &tcx.item_name(def_id).as_str()[..] 
{ "size_of" => { let llval = C_uint(self.ccx, self.ccx.size_of(substs.type_at(0))); - Const::new(llval, tcx.types.usize) + Ok(Const::new(llval, tcx.types.usize)) } "min_align_of" => { let llval = C_uint(self.ccx, self.ccx.align_of(substs.type_at(0))); - Const::new(llval, tcx.types.usize) + Ok(Const::new(llval, tcx.types.usize)) } _ => span_bug!(span, "{:?} in constant", terminator.kind) - }; - self.store(dest, value, span); - } else { - match MirConstContext::trans_def(self.ccx, def_id, substs, arg_vals) { - Ok(value) => self.store(dest, value, span), - Err(err) => if failure.is_ok() { failure = Err(err); } } - } + } else { + MirConstContext::trans_def(self.ccx, def_id, substs, arg_vals) + }; + add_err(&mut failure, &result); + self.store(dest, result, span); target } else { span_bug!(span, "diverging {:?} in constant", terminator.kind); @@ -382,7 +393,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } - fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) { + fn store(&mut self, + dest: &mir::Lvalue<'tcx>, + value: Result<Const<'tcx>, ConstEvalErr<'tcx>>, + span: Span) { if let mir::Lvalue::Local(index) = *dest { self.locals[index] = Some(value); } else { @@ -395,9 +409,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let tcx = self.ccx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { - return Ok(self.locals[index].unwrap_or_else(|| { + return self.locals[index].clone().unwrap_or_else(|| { span_bug!(span, "{:?} not initialized", lvalue) - }).as_lvalue()); + }).map(|v| v.as_lvalue()); } let lvalue = match *lvalue { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 4bd5091a4f3..a23e1a0684b 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -20,7 +20,7 @@ use base; use builder::Builder; use callee; use common::{self, val_ty, C_bool, C_null, C_uint}; -use common::{C_integral}; +use common::{C_integral, C_i32}; use adt; use machine; use monomorphize; @@ -93,12 +93,42 
@@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::Rvalue::Repeat(ref elem, ref count) => { + let dest_ty = dest.ty.to_ty(bcx.tcx()); + + // No need to inizialize memory of a zero-sized slice + if common::type_is_zero_size(bcx.ccx, dest_ty) { + return bcx; + } + let tr_elem = self.trans_operand(&bcx, elem); let size = count.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); + let align = dest.alignment.to_align(); + + if let OperandValue::Immediate(v) = tr_elem.val { + // Use llvm.memset.p0i8.* to initialize all zero arrays + if common::is_const_integral(v) && common::const_to_uint(v) == 0 { + let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = C_i32(bcx.ccx, align as i32); + let ty = type_of::type_of(bcx.ccx, dest_ty); + let size = machine::llsize_of(bcx.ccx, ty); + let fill = C_integral(Type::i8(bcx.ccx), 0, false); + base::call_memset(&bcx, base, fill, size, align, false); + return bcx; + } + + // Use llvm.memset.p0i8.* to initialize byte arrays + if common::val_ty(v) == Type::i8(bcx.ccx) { + let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = C_i32(bcx.ccx, align as i32); + base::call_memset(&bcx, base, v, size, align, false); + return bcx; + } + } + tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { - self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem); + self.store_operand(bcx, llslot, align, tr_elem); bcx.br(loop_bb); }) } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 170a76a4949..52dfc8dc4de 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -87,6 +87,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx } mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) 
| mir::StatementKind::Nop => bcx, } } diff --git a/src/librustc_trans/partitioning.rs b/src/librustc_trans/partitioning.rs index 904cfb2acd7..cff0eca02c6 100644 --- a/src/librustc_trans/partitioning.rs +++ b/src/librustc_trans/partitioning.rs @@ -174,29 +174,16 @@ impl<'tcx> CodegenUnit<'tcx> { } pub fn compute_symbol_name_hash<'a>(&self, - scx: &SharedCrateContext<'a, 'tcx>, - exported_symbols: &ExportedSymbols) + scx: &SharedCrateContext<'a, 'tcx>) -> u64 { let mut state = IchHasher::new(); - let exported_symbols = exported_symbols.local_exports(); let all_items = self.items_in_deterministic_order(scx.tcx()); - for (item, _) in all_items { + for (item, (linkage, visibility)) in all_items { let symbol_name = item.symbol_name(scx.tcx()); symbol_name.len().hash(&mut state); symbol_name.hash(&mut state); - let exported = match item { - TransItem::Fn(ref instance) => { - let node_id = - scx.tcx().hir.as_local_node_id(instance.def_id()); - node_id.map(|node_id| exported_symbols.contains(&node_id)) - .unwrap_or(false) - } - TransItem::Static(node_id) => { - exported_symbols.contains(&node_id) - } - TransItem::GlobalAsm(..) => true, - }; - exported.hash(&mut state); + linkage.hash(&mut state); + visibility.hash(&mut state); } state.finish().to_smaller_hash() } diff --git a/src/librustc_trans/time_graph.rs b/src/librustc_trans/time_graph.rs new file mode 100644 index 00000000000..e0ebe8a0933 --- /dev/null +++ b/src/librustc_trans/time_graph.rs @@ -0,0 +1,181 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::collections::HashMap; +use std::marker::PhantomData; +use std::sync::{Arc, Mutex}; +use std::time::Instant; +use std::io::prelude::*; +use std::fs::File; + +const OUTPUT_WIDTH_IN_PX: u64 = 1000; +const TIME_LINE_HEIGHT_IN_PX: u64 = 7; +const TIME_LINE_HEIGHT_STRIDE_IN_PX: usize = 10; + +#[derive(Clone)] +struct Timing { + start: Instant, + end: Instant, + work_package_kind: WorkPackageKind, +} + +#[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)] +pub struct TimelineId(pub usize); + +#[derive(Clone)] +struct PerThread { + timings: Vec<Timing>, + open_work_package: Option<(Instant, WorkPackageKind)>, +} + +#[derive(Clone)] +pub struct TimeGraph { + data: Arc<Mutex<HashMap<TimelineId, PerThread>>>, +} + +#[derive(Clone, Copy)] +pub struct WorkPackageKind(pub &'static [&'static str]); + +pub struct RaiiToken { + graph: TimeGraph, + timeline: TimelineId, + // The token must not be Send: + _marker: PhantomData<*const ()> +} + + +impl Drop for RaiiToken { + fn drop(&mut self) { + self.graph.end(self.timeline); + } +} + +impl TimeGraph { + pub fn new() -> TimeGraph { + TimeGraph { + data: Arc::new(Mutex::new(HashMap::new())) + } + } + + pub fn start(&self, + timeline: TimelineId, + work_package_kind: WorkPackageKind) -> RaiiToken { + { + let mut table = self.data.lock().unwrap(); + + let mut data = table.entry(timeline).or_insert(PerThread { + timings: Vec::new(), + open_work_package: None, + }); + + assert!(data.open_work_package.is_none()); + data.open_work_package = Some((Instant::now(), work_package_kind)); + } + + RaiiToken { + graph: self.clone(), + timeline, + _marker: PhantomData, + } + } + + fn end(&self, timeline: TimelineId) { + let end = Instant::now(); + + let mut table = self.data.lock().unwrap(); + let mut data = table.get_mut(&timeline).unwrap(); + + if let Some((start, work_package_kind)) = data.open_work_package { + data.timings.push(Timing { + start, + end, + work_package_kind, + }); + } else { + bug!("end timing without start?") + } + + 
data.open_work_package = None; + } + + pub fn dump(&self, output_filename: &str) { + let table = self.data.lock().unwrap(); + + for data in table.values() { + assert!(data.open_work_package.is_none()); + } + + let mut timelines: Vec<PerThread> = + table.values().map(|data| data.clone()).collect(); + + timelines.sort_by_key(|timeline| timeline.timings[0].start); + + let earliest_instant = timelines[0].timings[0].start; + let latest_instant = timelines.iter() + .map(|timeline| timeline.timings + .last() + .unwrap() + .end) + .max() + .unwrap(); + let max_distance = distance(earliest_instant, latest_instant); + + let mut file = File::create(format!("{}.html", output_filename)).unwrap(); + + writeln!(file, "<html>").unwrap(); + writeln!(file, "<head></head>").unwrap(); + writeln!(file, "<body>").unwrap(); + + let mut color = 0; + + for (line_index, timeline) in timelines.iter().enumerate() { + let line_top = line_index * TIME_LINE_HEIGHT_STRIDE_IN_PX; + + for span in &timeline.timings { + let start = distance(earliest_instant, span.start); + let end = distance(earliest_instant, span.end); + + let start = normalize(start, max_distance, OUTPUT_WIDTH_IN_PX); + let end = normalize(end, max_distance, OUTPUT_WIDTH_IN_PX); + + let colors = span.work_package_kind.0; + + writeln!(file, "<div style='position:absolute; \ + top:{}px; \ + left:{}px; \ + width:{}px; \ + height:{}px; \ + background:{};'></div>", + line_top, + start, + end - start, + TIME_LINE_HEIGHT_IN_PX, + colors[color % colors.len()] + ).unwrap(); + + color += 1; + } + } + + writeln!(file, "</body>").unwrap(); + writeln!(file, "</html>").unwrap(); + } +} + +fn distance(zero: Instant, x: Instant) -> u64 { + + let duration = x.duration_since(zero); + (duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64) // / div +} + +fn normalize(distance: u64, max: u64, max_pixels: u64) -> u64 { + (max_pixels * distance) / max +} + diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 
4216a73a8dd..de4d217c735 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -30,8 +30,8 @@ pub fn slice_for_each<'a, 'tcx, F>( }; let body_bcx = bcx.build_sibling_block("slice_loop_body"); - let next_bcx = bcx.build_sibling_block("slice_loop_next"); let header_bcx = bcx.build_sibling_block("slice_loop_header"); + let next_bcx = bcx.build_sibling_block("slice_loop_next"); let start = if zst { C_uint(bcx.ccx, 0usize) diff --git a/src/librustc_typeck/astconv.rs b/src/librustc_typeck/astconv.rs index bb6e478738a..1ec850ad7f3 100644 --- a/src/librustc_typeck/astconv.rs +++ b/src/librustc_typeck/astconv.rs @@ -1110,46 +1110,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } hir::TyBareFn(ref bf) => { require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); - let bare_fn_ty = self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl); - - // Find any late-bound regions declared in return type that do - // not appear in the arguments. These are not wellformed. - // - // Example: - // - // for<'a> fn() -> &'a str <-- 'a is bad - // for<'a> fn(&'a String) -> &'a str <-- 'a is ok - // - // Note that we do this check **here** and not in - // `ty_of_bare_fn` because the latter is also used to make - // the types for fn items, and we do not want to issue a - // warning then. (Once we fix #32330, the regions we are - // checking for here would be considered early bound - // anyway.) 
- let inputs = bare_fn_ty.inputs(); - let late_bound_in_args = tcx.collect_constrained_late_bound_regions( - &inputs.map_bound(|i| i.to_owned())); - let output = bare_fn_ty.output(); - let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); - for br in late_bound_in_ret.difference(&late_bound_in_args) { - let br_name = match *br { - ty::BrNamed(_, name) => name, - _ => { - span_bug!( - bf.decl.output.span(), - "anonymous bound region {:?} in return but not args", - br); - } - }; - struct_span_err!(tcx.sess, - ast_ty.span, - E0581, - "return type references lifetime `{}`, \ - which does not appear in the fn input types", - br_name) - .emit(); - } - tcx.mk_fn_ptr(bare_fn_ty) + tcx.mk_fn_ptr(self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl)) } hir::TyTraitObject(ref bounds, ref lifetime) => { self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime) @@ -1269,23 +1230,56 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> ty::PolyFnSig<'tcx> { debug!("ty_of_fn"); + let tcx = self.tcx(); let input_tys: Vec<Ty> = decl.inputs.iter().map(|a| self.ty_of_arg(a, None)).collect(); let output_ty = match decl.output { hir::Return(ref output) => self.ast_ty_to_ty(output), - hir::DefaultReturn(..) => self.tcx().mk_nil(), + hir::DefaultReturn(..) => tcx.mk_nil(), }; debug!("ty_of_fn: output_ty={:?}", output_ty); - ty::Binder(self.tcx().mk_fn_sig( + let bare_fn_ty = ty::Binder(tcx.mk_fn_sig( input_tys.into_iter(), output_ty, decl.variadic, unsafety, abi - )) + )); + + // Find any late-bound regions declared in return type that do + // not appear in the arguments. These are not wellformed. 
+ // + // Example: + // for<'a> fn() -> &'a str <-- 'a is bad + // for<'a> fn(&'a String) -> &'a str <-- 'a is ok + let inputs = bare_fn_ty.inputs(); + let late_bound_in_args = tcx.collect_constrained_late_bound_regions( + &inputs.map_bound(|i| i.to_owned())); + let output = bare_fn_ty.output(); + let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); + for br in late_bound_in_ret.difference(&late_bound_in_args) { + let br_name = match *br { + ty::BrNamed(_, name) => name, + _ => { + span_bug!( + decl.output.span(), + "anonymous bound region {:?} in return but not args", + br); + } + }; + struct_span_err!(tcx.sess, + decl.output.span(), + E0581, + "return type references lifetime `{}`, \ + which does not appear in the fn input types", + br_name) + .emit(); + } + + bare_fn_ty } pub fn ty_of_closure(&self, diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index 68726a7b1c4..eaff8e7b8ac 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -113,10 +113,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_eqtype(pat.span, expected, rhs_ty); common_type } - PatKind::Binding(bm, def_id, _, ref sub) => { + PatKind::Binding(ba, def_id, _, ref sub) => { + // Note the binding mode in the typeck tables. For now, what we store is always + // identical to what could be scraped from the HIR, but this will change with + // default binding modes (#42640). 
+ let bm = ty::BindingMode::convert(ba); + self.inh.tables.borrow_mut().pat_binding_modes.insert(pat.id, bm); + let typ = self.local_ty(pat.span, pat.id); match bm { - hir::BindByRef(mutbl) => { + ty::BindByReference(mutbl) => { // if the binding is like // ref x | ref const x | ref mut x // then `x` is assigned a value of type `&M T` where M is the mutability @@ -131,7 +137,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_eqtype(pat.span, region_ty, typ); } // otherwise the type of x is the expected type T - hir::BindByValue(_) => { + ty::BindByValue(_) => { // As above, `T <: typeof(x)` is required but we // use equality, see (*) below. self.demand_eqtype(pat.span, expected, typ); @@ -396,11 +402,59 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match_src: hir::MatchSource) -> Ty<'tcx> { let tcx = self.tcx; - // Not entirely obvious: if matches may create ref bindings, we - // want to use the *precise* type of the discriminant, *not* some - // supertype, as the "discriminant type" (issue #23116). + // Not entirely obvious: if matches may create ref bindings, we want to + // use the *precise* type of the discriminant, *not* some supertype, as + // the "discriminant type" (issue #23116). + // + // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which + // is problematic as the HIR is being scraped, but ref bindings may be + // implicit after #42640. We need to make sure that pat_adjustments + // (once introduced) is populated by the time we get here. + // + // arielb1 [writes here in this comment thread][c] that there + // is certainly *some* potential danger, e.g. for an example + // like: + // + // [c]: https://github.com/rust-lang/rust/pull/43399#discussion_r130223956 + // + // ``` + // let Foo(x) = f()[0]; + // ``` + // + // Then if the pattern matches by reference, we want to match + // `f()[0]` as a lexpr, so we can't allow it to be + // coerced. 
But if the pattern matches by value, `f()[0]` is + // still syntactically a lexpr, but we *do* want to allow + // coercions. + // + // However, *likely* we are ok with allowing coercions to + // happen if there are no explicit ref mut patterns - all + // implicit ref mut patterns must occur behind a reference, so + // they will have the "correct" variance and lifetime. + // + // This does mean that the following pattern would be legal: + // + // ``` + // struct Foo(Bar); + // struct Bar(u32); + // impl Deref for Foo { + // type Target = Bar; + // fn deref(&self) -> &Bar { &self.0 } + // } + // impl DerefMut for Foo { + // fn deref_mut(&mut self) -> &mut Bar { &mut self.0 } + // } + // fn foo(x: &mut Foo) { + // { + // let Bar(z): &mut Bar = x; + // *z = 42; + // } + // assert_eq!(foo.0.0, 42); + // } + // ``` + let contains_ref_bindings = arms.iter() - .filter_map(|a| a.contains_ref_binding()) + .filter_map(|a| a.contains_explicit_ref_binding()) .max_by_key(|m| match *m { hir::MutMutable => 1, hir::MutImmutable => 0, diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index 968e893b9a0..e494bc15222 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -1046,7 +1046,7 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> } /// Indicates that one of the inputs is a "forced unit". This - /// occurs in a case like `if foo { ... };`, where the issing else + /// occurs in a case like `if foo { ... };`, where the missing else /// generates a "forced unit". Another example is a `loop { break; /// }`, where the `break` has no argument expression. 
We treat /// these cases slightly differently for error-reporting diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index ad4ee5a9d6d..b6a5ce0a6ce 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -38,6 +38,11 @@ impl<'a, 'gcx, 'tcx> Deref for ConfirmContext<'a, 'gcx, 'tcx> { } } +pub struct ConfirmResult<'tcx> { + pub callee: MethodCallee<'tcx>, + pub illegal_sized_bound: bool, +} + impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn confirm_method(&self, span: Span, @@ -46,7 +51,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { unadjusted_self_ty: Ty<'tcx>, pick: probe::Pick<'tcx>, segment: &hir::PathSegment) - -> MethodCallee<'tcx> { + -> ConfirmResult<'tcx> { debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, generic_args={:?})", unadjusted_self_ty, pick, @@ -75,7 +80,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { unadjusted_self_ty: Ty<'tcx>, pick: probe::Pick<'tcx>, segment: &hir::PathSegment) - -> MethodCallee<'tcx> { + -> ConfirmResult<'tcx> { // Adjust the self expression the user provided and obtain the adjusted type. let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick); @@ -91,12 +96,26 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // Create the final signature for the method, replacing late-bound regions. let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs); + // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that + // something which derefs to `Self` actually implements the trait and the caller + // wanted to make a static dispatch on it but forgot to import the trait. + // See test `src/test/ui/issue-35976.rs`. + // + // In that case, we'll error anyway, but we'll also re-run the search with all traits + // in scope, and if we find another method which can be used, we'll output an + // appropriate hint suggesting to import the trait. 
+ let illegal_sized_bound = self.predicates_require_illegal_sized_bound(&method_predicates); + // Unify the (adjusted) self type with what the method expects. self.unify_receivers(self_ty, method_sig.inputs()[0]); // Add any trait/regions obligations specified on the method's type parameters. - let method_ty = self.tcx.mk_fn_ptr(ty::Binder(method_sig)); - self.add_obligations(method_ty, all_substs, &method_predicates); + // We won't add these if we encountered an illegal sized bound, so that we can use + // a custom error in that case. + if !illegal_sized_bound { + let method_ty = self.tcx.mk_fn_ptr(ty::Binder(method_sig)); + self.add_obligations(method_ty, all_substs, &method_predicates); + } // Create the final `MethodCallee`. let callee = MethodCallee { @@ -109,7 +128,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { self.convert_lvalue_derefs_to_mutable(); } - callee + ConfirmResult { callee, illegal_sized_bound } } /////////////////////////////////////////////////////////////////////////// @@ -533,6 +552,30 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { /////////////////////////////////////////////////////////////////////////// // MISCELLANY + fn predicates_require_illegal_sized_bound(&self, + predicates: &ty::InstantiatedPredicates<'tcx>) + -> bool { + let sized_def_id = match self.tcx.lang_items.sized_trait() { + Some(def_id) => def_id, + None => return false, + }; + + traits::elaborate_predicates(self.tcx, predicates.predicates.clone()) + .filter_map(|predicate| { + match predicate { + ty::Predicate::Trait(trait_pred) if trait_pred.def_id() == sized_def_id => + Some(trait_pred), + _ => None, + } + }) + .any(|trait_pred| { + match trait_pred.0.self_ty().sty { + ty::TyDynamic(..) => true, + _ => false, + } + }) + } + fn enforce_illegal_method_limitations(&self, pick: &probe::Pick) { // Disallow calls to the method `drop` defined in the `Drop` trait. 
match pick.item.container { diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index c842e47aaf5..dd5b0cdda42 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -33,7 +33,7 @@ mod confirm; pub mod probe; mod suggest; -use self::probe::IsSuggestion; +use self::probe::{IsSuggestion, ProbeScope}; #[derive(Clone, Copy, Debug)] pub struct MethodCallee<'tcx> { @@ -60,6 +60,10 @@ pub enum MethodError<'tcx> { // Found an applicable method, but it is not visible. PrivateMatch(Def), + + // Found a `Self: Sized` bound where `Self` is a trait object, also the caller may have + // forgotten to import a trait. + IllegalSizedBound(Vec<DefId>), } // Contains a list of static methods that may apply, a list of unsatisfied trait predicates which @@ -106,12 +110,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { -> bool { let mode = probe::Mode::MethodCall; match self.probe_for_name(span, mode, method_name, IsSuggestion(false), - self_ty, call_expr_id) { + self_ty, call_expr_id, ProbeScope::TraitsInScope) { Ok(..) 
=> true, Err(NoMatch(..)) => false, Err(Ambiguity(..)) => true, Err(ClosureAmbiguity(..)) => true, Err(PrivateMatch(..)) => allow_private, + Err(IllegalSizedBound(..)) => true, } } @@ -142,10 +147,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { call_expr, self_expr); - let mode = probe::Mode::MethodCall; - let self_ty = self.resolve_type_vars_if_possible(&self_ty); - let pick = self.probe_for_name(span, mode, segment.name, IsSuggestion(false), - self_ty, call_expr.id)?; + let pick = self.lookup_probe( + span, + segment.name, + self_ty, + call_expr, + ProbeScope::TraitsInScope + )?; if let Some(import_id) = pick.import_id { let import_def_id = self.tcx.hir.local_def_id(import_id); @@ -155,12 +163,56 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.check_stability(pick.item.def_id, call_expr.id, span); - Ok(self.confirm_method(span, - self_expr, - call_expr, - self_ty, - pick, - segment)) + let result = self.confirm_method(span, + self_expr, + call_expr, + self_ty, + pick.clone(), + segment); + + if result.illegal_sized_bound { + // We probe again, taking all traits into account (not only those in scope). + let candidates = + match self.lookup_probe(span, + segment.name, + self_ty, + call_expr, + ProbeScope::AllTraits) { + + // If we find a different result the caller probably forgot to import a trait. + Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container.id()], + Err(Ambiguity(ref sources)) => { + sources.iter() + .filter_map(|source| { + match *source { + // Note: this cannot come from an inherent impl, + // because the first probing succeeded. 
+ ImplSource(def) => self.tcx.trait_id_of_impl(def), + TraitSource(_) => None, + } + }) + .collect() + } + _ => Vec::new(), + }; + + return Err(IllegalSizedBound(candidates)); + } + + Ok(result.callee) + } + + fn lookup_probe(&self, + span: Span, + method_name: ast::Name, + self_ty: ty::Ty<'tcx>, + call_expr: &'gcx hir::Expr, + scope: ProbeScope) + -> probe::PickResult<'tcx> { + let mode = probe::Mode::MethodCall; + let self_ty = self.resolve_type_vars_if_possible(&self_ty); + self.probe_for_name(span, mode, method_name, IsSuggestion(false), + self_ty, call_expr.id, scope) } /// `lookup_method_in_trait` is used for overloaded operators. @@ -299,7 +351,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { -> Result<Def, MethodError<'tcx>> { let mode = probe::Mode::Path; let pick = self.probe_for_name(span, mode, method_name, IsSuggestion(false), - self_ty, expr_id)?; + self_ty, expr_id, ProbeScope::TraitsInScope)?; if let Some(import_id) = pick.import_id { let import_def_id = self.tcx.hir.local_def_id(import_id); diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index ae1724549d9..587e583cabd 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -106,7 +106,7 @@ enum CandidateKind<'tcx> { ty::PolyTraitRef<'tcx>), } -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct Pick<'tcx> { pub item: ty::AssociatedItem, pub kind: PickKind<'tcx>, @@ -130,7 +130,7 @@ pub struct Pick<'tcx> { pub unsize: Option<Ty<'tcx>>, } -#[derive(Clone,Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum PickKind<'tcx> { InherentImplPick, ExtensionImplPick(// Impl @@ -155,6 +155,15 @@ pub enum Mode { Path, } +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum ProbeScope { + // Assemble candidates coming only from traits in scope. + TraitsInScope, + + // Assemble candidates coming from all traits. 
+ AllTraits, +} + impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// This is used to offer suggestions to users. It returns methods /// that could have been called which have the desired return @@ -175,14 +184,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { scope_expr_id); let method_names = self.probe_op(span, mode, LookingFor::ReturnType(return_type), IsSuggestion(true), - self_ty, scope_expr_id, + self_ty, scope_expr_id, ProbeScope::TraitsInScope, |probe_cx| Ok(probe_cx.candidate_method_names())) .unwrap_or(vec![]); method_names .iter() .flat_map(|&method_name| { match self.probe_for_name(span, mode, method_name, IsSuggestion(true), self_ty, - scope_expr_id) { + scope_expr_id, ProbeScope::TraitsInScope) { Ok(pick) => Some(pick.item), Err(_) => None, } @@ -196,7 +205,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { item_name: ast::Name, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, - scope_expr_id: ast::NodeId) + scope_expr_id: ast::NodeId, + scope: ProbeScope) -> PickResult<'tcx> { debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})", self_ty, @@ -208,6 +218,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_suggestion, self_ty, scope_expr_id, + scope, |probe_cx| probe_cx.pick()) } @@ -218,6 +229,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, scope_expr_id: ast::NodeId, + scope: ProbeScope, op: OP) -> Result<R, MethodError<'tcx>> where OP: FnOnce(ProbeContext<'a, 'gcx, 'tcx>) -> Result<R, MethodError<'tcx>> @@ -275,8 +287,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mut probe_cx = ProbeContext::new(self, span, mode, looking_for, steps, opt_simplified_steps); + probe_cx.assemble_inherent_candidates(); - probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?; + match scope { + ProbeScope::TraitsInScope => + probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?, + ProbeScope::AllTraits => + 
probe_cx.assemble_extension_candidates_for_all_traits()?, + }; op(probe_cx) }) } diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 4faf71e0cc9..53da9e19ee0 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -312,10 +312,45 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } MethodError::PrivateMatch(def) => { - let msg = format!("{} `{}` is private", def.kind_name(), item_name); - self.tcx.sess.span_err(span, &msg); + struct_span_err!(self.tcx.sess, span, E0624, + "{} `{}` is private", def.kind_name(), item_name).emit(); } + + MethodError::IllegalSizedBound(candidates) => { + let msg = format!("the `{}` method cannot be invoked on a trait object", item_name); + let mut err = self.sess().struct_span_err(span, &msg); + if !candidates.is_empty() { + let help = format!("{an}other candidate{s} {were} found in the following \ + trait{s}, perhaps add a `use` for {one_of_them}:", + an = if candidates.len() == 1 {"an" } else { "" }, + s = if candidates.len() == 1 { "" } else { "s" }, + were = if candidates.len() == 1 { "was" } else { "were" }, + one_of_them = if candidates.len() == 1 { + "it" + } else { + "one_of_them" + }); + self.suggest_use_candidates(&mut err, help, candidates); + } + err.emit(); + } + } + } + + fn suggest_use_candidates(&self, + err: &mut DiagnosticBuilder, + mut msg: String, + candidates: Vec<DefId>) { + let limit = if candidates.len() == 5 { 5 } else { 4 }; + for (i, trait_did) in candidates.iter().take(limit).enumerate() { + msg.push_str(&format!("\ncandidate #{}: `use {};`", + i + 1, + self.tcx.item_path_str(*trait_did))); + } + if candidates.len() > limit { + msg.push_str(&format!("\nand {} others", candidates.len() - limit)); } + err.note(&msg[..]); } fn suggest_traits_to_import(&self, @@ -330,30 +365,20 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { candidates.sort(); candidates.dedup(); err.help("items from traits can only be 
used if the trait is in scope"); - let mut msg = format!("the following {traits_are} implemented but not in scope, \ - perhaps add a `use` for {one_of_them}:", - traits_are = if candidates.len() == 1 { - "trait is" - } else { - "traits are" - }, - one_of_them = if candidates.len() == 1 { - "it" - } else { - "one of them" - }); - - let limit = if candidates.len() == 5 { 5 } else { 4 }; - for (i, trait_did) in candidates.iter().take(limit).enumerate() { - msg.push_str(&format!("\ncandidate #{}: `use {};`", - i + 1, - self.tcx.item_path_str(*trait_did))); - } - if candidates.len() > limit { - msg.push_str(&format!("\nand {} others", candidates.len() - limit)); - } - err.note(&msg[..]); - + let msg = format!("the following {traits_are} implemented but not in scope, \ + perhaps add a `use` for {one_of_them}:", + traits_are = if candidates.len() == 1 { + "trait is" + } else { + "traits are" + }, + one_of_them = if candidates.len() == 1 { + "it" + } else { + "one of them" + }); + + self.suggest_use_candidates(err, msg, candidates); return; } diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 67ffca69efa..ed6d0c035de 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -1647,7 +1647,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { fn re_infer(&self, span: Span, def: Option<&ty::RegionParameterDef>) -> Option<ty::Region<'tcx>> { let v = match def { - Some(def) => infer::EarlyBoundRegion(span, def.name, def.issue_32330), + Some(def) => infer::EarlyBoundRegion(span, def.name), None => infer::MiscVariable(span) }; Some(self.next_region_var(v)) @@ -3008,6 +3008,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { format!("did you mean `{}`?", suggested_field_name)); } else { err.span_label(field.span, "unknown field"); + let struct_variant_def = def.struct_variant(); + let field_names = self.available_field_names(struct_variant_def); + if !field_names.is_empty() { + 
err.note(&format!("available fields are: {}", + self.name_series_display(field_names))); + } }; } ty::TyRawPtr(..) => { @@ -3031,7 +3037,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Return an hint about the closest match in field names fn suggest_field_name(variant: &'tcx ty::VariantDef, field: &Spanned<ast::Name>, - skip : Vec<InternedString>) + skip: Vec<InternedString>) -> Option<Symbol> { let name = field.node.as_str(); let names = variant.fields.iter().filter_map(|field| { @@ -3044,8 +3050,29 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }); - // only find fits with at least one matching letter - find_best_match_for_name(names, &name, Some(name.len())) + find_best_match_for_name(names, &name, None) + } + + fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec<ast::Name> { + let mut available = Vec::new(); + for field in variant.fields.iter() { + let (_, def_scope) = self.tcx.adjust(field.name, variant.did, self.body_id); + if field.vis.is_accessible_from(def_scope, self.tcx) { + available.push(field.name); + } + } + available + } + + fn name_series_display(&self, names: Vec<ast::Name>) -> String { + // dynamic limit, to never omit just one field + let limit = if names.len() == 6 { 6 } else { 5 }; + let mut display = names.iter().take(limit) + .map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", "); + if names.len() > limit { + display = format!("{} ... and {} others", display, names.len() - limit); + } + display } // Check tuple index expressions @@ -3159,13 +3186,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { format!("field does not exist - did you mean `{}`?", field_name)); } else { match ty.sty { - ty::TyAdt(adt, ..) if adt.is_enum() => { - err.span_label(field.name.span, format!("`{}::{}` does not have this field", - ty, variant.name)); - } - _ => { - err.span_label(field.name.span, format!("`{}` does not have this field", ty)); + ty::TyAdt(adt, ..) 
=> { + if adt.is_enum() { + err.span_label(field.name.span, + format!("`{}::{}` does not have this field", + ty, variant.name)); + } else { + err.span_label(field.name.span, + format!("`{}` does not have this field", ty)); + } + let available_field_names = self.available_field_names(variant); + if !available_field_names.is_empty() { + err.note(&format!("available fields are: {}", + self.name_series_display(available_field_names))); + } } + _ => bug!("non-ADT passed to report_unknown_field") } }; err.emit(); @@ -3975,7 +4011,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.check_expr_coercable_to_type(&value, ty); } None => { - struct_span_err!(self.tcx.sess, expr.span, E0624, + struct_span_err!(self.tcx.sess, expr.span, E0627, "yield statement outside of generator literal").emit(); } } @@ -4059,7 +4095,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { local: &'gcx hir::Local, init: &'gcx hir::Expr) -> Ty<'tcx> { - let ref_bindings = local.pat.contains_ref_binding(); + // FIXME(tschottdorf): contains_explicit_ref_binding() must be removed + // for #42640. + let ref_bindings = local.pat.contains_explicit_ref_binding(); let local_ty = self.local_ty(init.span, local.id); if let Some(m) = ref_bindings { diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index 3413144b4fe..3a39800ea91 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -1196,9 +1196,13 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { mc.cat_pattern(discr_cmt, root_pat, |sub_cmt, sub_pat| { match sub_pat.node { // `ref x` pattern - PatKind::Binding(hir::BindByRef(mutbl), ..) => { - self.link_region_from_node_type(sub_pat.span, sub_pat.id, - mutbl, sub_cmt); + PatKind::Binding(..) 
=> { + let bm = *mc.tables.pat_binding_modes.get(&sub_pat.id) + .expect("missing binding mode"); + if let ty::BindByReference(mutbl) = bm { + self.link_region_from_node_type(sub_pat.span, sub_pat.id, + mutbl, sub_cmt); + } } _ => {} } diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 69cd1414628..cf5882bb9bd 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -89,23 +89,23 @@ impl<'a, 'gcx> CheckTypeWellFormedVisitor<'a, 'gcx> { tcx.item_path_str(tcx.hir.local_def_id(item.id))); match item.node { - /// Right now we check that every default trait implementation - /// has an implementation of itself. Basically, a case like: - /// - /// `impl Trait for T {}` - /// - /// has a requirement of `T: Trait` which was required for default - /// method implementations. Although this could be improved now that - /// there's a better infrastructure in place for this, it's being left - /// for a follow-up work. - /// - /// Since there's such a requirement, we need to check *just* positive - /// implementations, otherwise things like: - /// - /// impl !Send for T {} - /// - /// won't be allowed unless there's an *explicit* implementation of `Send` - /// for `T` + // Right now we check that every default trait implementation + // has an implementation of itself. Basically, a case like: + // + // `impl Trait for T {}` + // + // has a requirement of `T: Trait` which was required for default + // method implementations. Although this could be improved now that + // there's a better infrastructure in place for this, it's being left + // for a follow-up work. 
+ // + // Since there's such a requirement, we need to check *just* positive + // implementations, otherwise things like: + // + // impl !Send for T {} + // + // won't be allowed unless there's an *explicit* implementation of `Send` + // for `T` hir::ItemImpl(_, hir::ImplPolarity::Positive, _, _, ref trait_ref, ref self_ty, _) => { self.check_impl(item, self_ty, trait_ref); diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index 2f4a9ce4013..8101e87880c 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -181,6 +181,15 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { } fn visit_pat(&mut self, p: &'gcx hir::Pat) { + match p.node { + hir::PatKind::Binding(..) => { + let bm = *self.fcx.tables.borrow().pat_binding_modes.get(&p.id) + .expect("missing binding mode"); + self.tables.pat_binding_modes.insert(p.id, bm); + } + _ => {} + }; + self.visit_node_id(p.span, p.id); intravisit::walk_pat(self, p); } diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index d1ba9fd0918..7c2adce66fb 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -979,13 +979,11 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics); let regions = early_lifetimes.enumerate().map(|(i, l)| { - let issue_32330 = tcx.named_region_map.issue_32330.get(&l.lifetime.id).cloned(); ty::RegionParameterDef { name: l.lifetime.name, index: own_start + i as u32, def_id: tcx.hir.local_def_id(l.lifetime.id), pure_wrt_drop: l.pure_wrt_drop, - issue_32330: issue_32330, } }).collect::<Vec<_>>(); diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 7c410b111c2..4c608249ee9 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -2631,26 +2631,6 @@ struct Bar<S, T> { x: Foo<S, T> } ``` "##, -E0569: 
r##" -If an impl has a generic parameter with the `#[may_dangle]` attribute, then -that impl must be declared as an `unsafe impl. For example: - -```compile_fail,E0569 -#![feature(generic_param_attrs)] -#![feature(dropck_eyepatch)] - -struct Foo<X>(X); -impl<#[may_dangle] X> Drop for Foo<X> { - fn drop(&mut self) { } -} -``` - -In this example, we are asserting that the destructor for `Foo` will not -access any data of type `X`, and require this assertion to be true for -overall safety in our program. The compiler does not currently attempt to -verify this assertion; therefore we must tag this `impl` as unsafe. -"##, - E0318: r##" Default impls for a trait must be located in the same crate where the trait was defined. For more information see the [opt-in builtin traits RFC][RFC 19]. @@ -3457,6 +3437,56 @@ impl Foo for i32 { ``` "##, +E0436: r##" +The functional record update syntax is only allowed for structs. (Struct-like +enum variants don't qualify, for example.) + +Erroneous code example: + +```compile_fail,E0436 +enum PublicationFrequency { + Weekly, + SemiMonthly { days: (u8, u8), annual_special: bool }, +} + +fn one_up_competitor(competitor_frequency: PublicationFrequency) + -> PublicationFrequency { + match competitor_frequency { + PublicationFrequency::Weekly => PublicationFrequency::SemiMonthly { + days: (1, 15), annual_special: false + }, + c @ PublicationFrequency::SemiMonthly{ .. 
} => + PublicationFrequency::SemiMonthly { + annual_special: true, ..c // error: functional record update + // syntax requires a struct + } + } +} +``` + +Rewrite the expression without functional record update syntax: + +``` +enum PublicationFrequency { + Weekly, + SemiMonthly { days: (u8, u8), annual_special: bool }, +} + +fn one_up_competitor(competitor_frequency: PublicationFrequency) + -> PublicationFrequency { + match competitor_frequency { + PublicationFrequency::Weekly => PublicationFrequency::SemiMonthly { + days: (1, 15), annual_special: false + }, + PublicationFrequency::SemiMonthly{ days, .. } => + PublicationFrequency::SemiMonthly { + days, annual_special: true // ok! + } + } +} +``` +"##, + E0439: r##" The length of the platform-intrinsic function `simd_shuffle` wasn't specified. Erroneous code example: @@ -3926,6 +3956,28 @@ See [RFC 1522] for more details. [RFC 1522]: https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md "##, +E0569: r##" +If an impl has a generic parameter with the `#[may_dangle]` attribute, then +that impl must be declared as an `unsafe impl. + +Erroneous code example: + +```compile_fail,E0569 +#![feature(generic_param_attrs)] +#![feature(dropck_eyepatch)] + +struct Foo<X>(X); +impl<#[may_dangle] X> Drop for Foo<X> { + fn drop(&mut self) { } +} +``` + +In this example, we are asserting that the destructor for `Foo` will not +access any data of type `X`, and require this assertion to be true for +overall safety in our program. The compiler does not currently attempt to +verify this assertion; therefore we must tag this `impl` as unsafe. +"##, + E0570: r##" The requested ABI is unsupported by the current target. @@ -4592,6 +4644,62 @@ whose implementation is handled specially by the compiler. In order to fix this error, just declare a function. "##, +E0624: r##" +A private item was used outside of its scope. 
+ +Erroneous code example: + +```compile_fail,E0627 +mod inner { + pub struct Foo; + + impl Foo { + fn method(&self) {} + } +} + +let foo = inner::Foo; +foo.method(); // error: method `method` is private +``` + +Two possibilities are available to solve this issue: + +1. Only use the item in the scope it has been defined: + +``` +mod inner { + pub struct Foo; + + impl Foo { + fn method(&self) {} + } + + pub fn call_method(foo: &Foo) { // We create a public function. + foo.method(); // Which calls the item. + } +} + +let foo = inner::Foo; +inner::call_method(&foo); // And since the function is public, we can call the + // method through it. +``` + +2. Make the item public: + +``` +mod inner { + pub struct Foo; + + impl Foo { + pub fn method(&self) {} // It's now public. + } +} + +let foo = inner::Foo; +foo.method(); // Ok! +``` +"##, + } register_diagnostics! { @@ -4655,10 +4763,9 @@ register_diagnostics! { // E0372, // coherence not object safe E0377, // the trait `CoerceUnsized` may only be implemented for a coercion // between structures with the same definition - E0436, // functional record update requires a struct E0521, // redundant default implementations of trait E0533, // `{}` does not name a unit variant, unit struct or a constant - E0563, // cannot determine a type for this `impl Trait`: {} +// E0563, // cannot determine a type for this `impl Trait`: {} // removed in 6383de15 E0564, // only named lifetimes are allowed in `impl Trait`, // but `{}` was found in the type `{}` E0567, // auto traits can not have type parameters @@ -4667,5 +4774,5 @@ register_diagnostics! 
{ E0588, // packed struct cannot transitively contain a `[repr(align)]` struct E0592, // duplicate definitions with name `{}` // E0613, // Removed (merged with E0609) - E0624, // yield statement outside of generator literal + E0627, // yield statement outside of generator literal } diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index d8364a24dec..75ae2f9fe9a 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -1547,6 +1547,8 @@ pub enum PrimitiveType { Array, Tuple, RawPointer, + Reference, + Fn, } #[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)] @@ -1581,6 +1583,8 @@ impl Type { Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array), Tuple(..) => Some(PrimitiveType::Tuple), RawPointer(..) => Some(PrimitiveType::RawPointer), + BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference), + BareFunction(..) => Some(PrimitiveType::Fn), _ => None, } } @@ -1633,6 +1637,8 @@ impl PrimitiveType { "slice" => Some(PrimitiveType::Slice), "tuple" => Some(PrimitiveType::Tuple), "pointer" => Some(PrimitiveType::RawPointer), + "reference" => Some(PrimitiveType::Reference), + "fn" => Some(PrimitiveType::Fn), _ => None, } } @@ -1661,6 +1667,8 @@ impl PrimitiveType { Slice => "slice", Tuple => "tuple", RawPointer => "pointer", + Reference => "reference", + Fn => "fn", } } @@ -2556,6 +2564,8 @@ fn build_deref_target_impls(cx: &DocContext, Array => tcx.lang_items.slice_impl(), Tuple => None, RawPointer => tcx.lang_items.const_ptr_impl(), + Reference => None, + Fn => None, }; if let Some(did) = did { if !did.is_local() { @@ -2777,6 +2787,9 @@ fn resolve_type(cx: &DocContext, Def::SelfTy(..) if path.segments.len() == 1 => { return Generic(keywords::SelfType.name().to_string()); } + Def::TyParam(..) if path.segments.len() == 1 => { + return Generic(format!("{:#}", path)); + } Def::SelfTy(..) | Def::TyParam(..) | Def::AssociatedTy(..) 
=> true, _ => false, }; diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 766e76137ca..988890ffedc 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -607,11 +607,9 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: decl.generics, decl.decl) } else { - write!(f, "{}{}fn{}{}", - UnsafetySpace(decl.unsafety), - AbiSpace(decl.abi), - decl.generics, - decl.decl) + write!(f, "{}{}", UnsafetySpace(decl.unsafety), AbiSpace(decl.abi))?; + primitive_link(f, PrimitiveType::Fn, "fn")?; + write!(f, "{}{}", decl.generics, decl.decl) } } clean::Tuple(ref typs) => { @@ -665,26 +663,29 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: _ => "".to_string(), }; let m = MutableSpace(mutability); + let amp = if f.alternate() { + "&".to_string() + } else { + "&".to_string() + }; match **ty { clean::Slice(ref bt) => { // BorrowedRef{ ... Slice(T) } is &[T] match **bt { clean::Generic(_) => { if f.alternate() { primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[{:#}]", lt, m, **bt)) + &format!("{}{}{}[{:#}]", amp, lt, m, **bt)) } else { primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[{}]", lt, m, **bt)) + &format!("{}{}{}[{}]", amp, lt, m, **bt)) } } _ => { + primitive_link(f, PrimitiveType::Slice, + &format!("{}{}{}[", amp, lt, m))?; if f.alternate() { - primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[", lt, m))?; write!(f, "{:#}", **bt)?; } else { - primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[", lt, m))?; write!(f, "{}", **bt)?; } primitive_link(f, PrimitiveType::Slice, "]") @@ -692,23 +693,18 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: } } clean::ResolvedPath { typarams: Some(ref v), .. 
} if !v.is_empty() => { - if f.alternate() { - write!(f, "&{}{}", lt, m)?; - } else { - write!(f, "&{}{}", lt, m)?; - } - write!(f, "(")?; + write!(f, "{}{}{}(", amp, lt, m)?; fmt_type(&ty, f, use_absolute)?; write!(f, ")") } + clean::Generic(..) => { + primitive_link(f, PrimitiveType::Reference, + &format!("{}{}{}", amp, lt, m))?; + fmt_type(&ty, f, use_absolute) + } _ => { - if f.alternate() { - write!(f, "&{}{}", lt, m)?; - fmt_type(&ty, f, use_absolute) - } else { - write!(f, "&{}{}", lt, m)?; - fmt_type(&ty, f, use_absolute) - } + write!(f, "{}{}{}", amp, lt, m)?; + fmt_type(&ty, f, use_absolute) } } } diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index e89bd7aae9b..fc0adef70ba 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -2235,6 +2235,13 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, _ => false, }; fmt_impl_for_trait_page(&implementor.impl_, w, use_absolute)?; + for it in &implementor.impl_.items { + if let clean::TypedefItem(ref tydef, _) = it.inner { + write!(w, "<span class=\"where fmt-newline\"> ")?; + assoc_type(w, it, &vec![], Some(&tydef.type_), AssocItemLink::Anchor(None))?; + write!(w, ";</span>")?; + } + } writeln!(w, "</code></li>")?; } } @@ -2962,7 +2969,15 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi write!(w, "<code>")?; render_assoc_item(w, item, link.anchor(&id), ItemType::Impl)?; write!(w, "</code>")?; - render_stability_since_raw(w, item.stable_since(), outer_version)?; + if let Some(l) = (Item { cx, item }).src_href() { + write!(w, "</span><span class='out-of-band'>")?; + write!(w, "<div class='ghost'></div>")?; + render_stability_since_raw(w, item.stable_since(), outer_version)?; + write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", + l, "goto source code")?; + } else { + render_stability_since_raw(w, item.stable_since(), outer_version)?; + } write!(w, "</span></h4>\n")?; } } diff --git 
a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 9314f57359a..51465bafc42 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -274,9 +274,13 @@ nav.sub { border-bottom: 1px solid; } -.docblock h1 { font-size: 1.3em; } -.docblock h2 { font-size: 1.15em; } -.docblock h3, .docblock h4, .docblock h5 { font-size: 1em; } +#main > .docblock h1 { font-size: 1.3em; } +#main > .docblock h2 { font-size: 1.15em; } +#main > .docblock h3, #main > .docblock h4, #main > .docblock h5 { font-size: 1em; } + +.docblock h1 { font-size: 1em; } +.docblock h2 { font-size: 0.95em; } +.docblock h3, .docblock h4, .docblock h5 { font-size: 0.9em; } .docblock { margin-left: 24px; @@ -297,6 +301,10 @@ h3.impl > .out-of-band { font-size: 21px; } +h4.method > .out-of-band { + font-size: 19px; +} + h4 > code, h3 > code, .invisible > code { position: inherit; } @@ -434,10 +442,6 @@ a { text-decoration: underline; } -.content span.enum, .content a.enum, .block a.current.enum { color: #5e9766; } -.content span.struct, .content a.struct, .block a.current.struct { color: #df3600; } -.content span.type, .content a.type, .block a.current.type { color: #e57300; } -.content span.macro, .content a.macro, .block a.current.macro { color: #068000; } .block a.current.crate { font-weight: 500; } .search-input { diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index c0310199088..034c5307fc0 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -64,21 +64,6 @@ pre { background-color: #f6fdb0 !important; } -:target { background: #FDFFD3; } -.content .highlighted { - color: #000 !important; - background-color: #ccc; -} -.content .highlighted a, .content .highlighted span { color: #000 !important; } -.content .highlighted.trait { background-color: #fece7e; } -.content .highlighted.mod { background-color: #afc6e4; } -.content 
.highlighted.enum { background-color: #b4d1b9; } -.content .highlighted.struct { background-color: #e7b1a0; } -.content .highlighted.fn { background-color: #c6afb3; } -.content .highlighted.method { background-color: #c6afb3; } -.content .highlighted.tymethod { background-color: #c6afb3; } -.content .highlighted.type { background-color: #c6afb3; } - .docblock h1, .docblock h2, .docblock h3, .docblock h4, .docblock h5 { border-bottom-color: #DDD; } @@ -97,13 +82,42 @@ pre { border-bottom-color: #ddd; } -.content span.primitive, .content a.primitive, .block a.current.primitive { color: #39a7bf; } +:target { background: #FDFFD3; } +.content .highlighted { + color: #000 !important; + background-color: #ccc; +} +.content .highlighted a, .content .highlighted span { color: #000 !important; } +.content .highlighted.trait { background-color: #c7b6ff; } +.content .highlighted.mod, +.content .highlighted.externcrate { background-color: #afc6e4; } +.content .highlighted.enum { background-color: #b4d1b9; } +.content .highlighted.struct { background-color: #e7b1a0; } +.content .highlighted.union { background-color: #b7bd49; } +.content .highlighted.fn, +.content .highlighted.method, +.content .highlighted.tymethod { background-color: #c6afb3; } +.content .highlighted.type { background-color: #ffc891; } +.content .highlighted.macro { background-color: #8ce488; } +.content .highlighted.constant, +.content .highlighted.static { background-color: #c3e0ff; } +.content .highlighted.primitive { background-color: #9aecff; } + +.content span.enum, .content a.enum, .block a.current.enum { color: #508157; } +.content span.struct, .content a.struct, .block a.current.struct { color: #df3600; } +.content span.type, .content a.type, .block a.current.type { color: #ba5d00; } +.content span.macro, .content a.macro, .block a.current.macro { color: #068000; } +.content span.union, .content a.union, .block a.current.union { color: #767b27; } +.content span.constant, .content a.constant, .block 
a.current.constant, +.content span.static, .content a.static, .block a.current.static { color: #546e8a; } +.content span.primitive, .content a.primitive, .block a.current.primitive { color: #2c8093; } .content span.externcrate, .content span.mod, .content a.mod, .block a.current.mod { color: #4d76ae; } +.content span.trait, .content a.trait, .block a.current.trait { color: #7c5af3; } .content span.fn, .content a.fn, .block a.current.fn, .content span.method, .content a.method, .block a.current.method, .content span.tymethod, .content a.tymethod, .block a.current.tymethod, -.content .fnname { color: #8c6067; } +.content .fnname { color: #9a6e31; } pre.rust .comment { color: #8E908C; } pre.rust .doccomment { color: #4D4D4C; } @@ -130,8 +144,6 @@ a.test-arrow { color: #f5f5f5; } -.content span.trait, .content a.trait, .block a.current.trait { color: #7c5af3; } - .search-input { color: #555; box-shadow: 0 0 0 1px #e0e0e0, 0 0 0 2px transparent; diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index d80df5f18b6..80a223c7d74 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -123,13 +123,13 @@ pub struct HashSet<T, S = RandomState> { } impl<T: Hash + Eq> HashSet<T, RandomState> { - /// Creates an empty HashSet. + /// Creates an empty `HashSet`. /// /// # Examples /// /// ``` /// use std::collections::HashSet; - /// let mut set: HashSet<i32> = HashSet::new(); + /// let set: HashSet<i32> = HashSet::new(); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -146,7 +146,8 @@ impl<T: Hash + Eq> HashSet<T, RandomState> { /// /// ``` /// use std::collections::HashSet; - /// let mut set: HashSet<i32> = HashSet::with_capacity(10); + /// let set: HashSet<i32> = HashSet::with_capacity(10); + /// assert!(set.capacity() >= 10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -215,6 +216,17 @@ impl<T, S> HashSet<T, S> /// Returns a reference to the set's [`BuildHasher`]. 
/// /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use std::collections::HashSet; + /// use std::collections::hash_map::RandomState; + /// + /// let hasher = RandomState::new(); + /// let set: HashSet<i32> = HashSet::with_hasher(hasher); + /// let hasher: &RandomState = set.hasher(); + /// ``` #[stable(feature = "hashmap_public_hasher", since = "1.9.0")] pub fn hasher(&self) -> &S { self.map.hasher() @@ -249,6 +261,7 @@ impl<T, S> HashSet<T, S> /// use std::collections::HashSet; /// let mut set: HashSet<i32> = HashSet::new(); /// set.reserve(10); + /// assert!(set.capacity() >= 10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { @@ -312,13 +325,13 @@ impl<T, S> HashSet<T, S> /// println!("{}", x); // Print 1 /// } /// - /// let diff: HashSet<_> = a.difference(&b).cloned().collect(); - /// assert_eq!(diff, [1].iter().cloned().collect()); + /// let diff: HashSet<_> = a.difference(&b).collect(); + /// assert_eq!(diff, [1].iter().collect()); /// /// // Note that difference is not symmetric, /// // and `b - a` means something else: - /// let diff: HashSet<_> = b.difference(&a).cloned().collect(); - /// assert_eq!(diff, [4].iter().cloned().collect()); + /// let diff: HashSet<_> = b.difference(&a).collect(); + /// assert_eq!(diff, [4].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn difference<'a>(&'a self, other: &'a HashSet<T, S>) -> Difference<'a, T, S> { @@ -343,11 +356,11 @@ impl<T, S> HashSet<T, S> /// println!("{}", x); /// } /// - /// let diff1: HashSet<_> = a.symmetric_difference(&b).cloned().collect(); - /// let diff2: HashSet<_> = b.symmetric_difference(&a).cloned().collect(); + /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect(); + /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect(); /// /// assert_eq!(diff1, diff2); - /// assert_eq!(diff1, [1, 4].iter().cloned().collect()); + /// 
assert_eq!(diff1, [1, 4].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn symmetric_difference<'a>(&'a self, @@ -371,8 +384,8 @@ impl<T, S> HashSet<T, S> /// println!("{}", x); /// } /// - /// let intersection: HashSet<_> = a.intersection(&b).cloned().collect(); - /// assert_eq!(intersection, [2, 3].iter().cloned().collect()); + /// let intersection: HashSet<_> = a.intersection(&b).collect(); + /// assert_eq!(intersection, [2, 3].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn intersection<'a>(&'a self, other: &'a HashSet<T, S>) -> Intersection<'a, T, S> { @@ -397,8 +410,8 @@ impl<T, S> HashSet<T, S> /// println!("{}", x); /// } /// - /// let union: HashSet<_> = a.union(&b).cloned().collect(); - /// assert_eq!(union, [1, 2, 3, 4].iter().cloned().collect()); + /// let union: HashSet<_> = a.union(&b).collect(); + /// assert_eq!(union, [1, 2, 3, 4].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn union<'a>(&'a self, other: &'a HashSet<T, S>) -> Union<'a, T, S> { @@ -440,6 +453,22 @@ impl<T, S> HashSet<T, S> } /// Clears the set, returning all elements in an iterator. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert!(!set.is_empty()); + /// + /// // print 1, 2, 3 in an arbitrary order + /// for i in set.drain() { + /// println!("{}", i); + /// } + /// + /// assert!(set.is_empty()); + /// ``` #[inline] #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self) -> Drain<T> { diff --git a/src/libstd/os/solaris/raw.rs b/src/libstd/os/solaris/raw.rs index b84fdba9ca2..5a813c5c76b 100644 --- a/src/libstd/os/solaris/raw.rs +++ b/src/libstd/os/solaris/raw.rs @@ -32,7 +32,7 @@ use os::unix::raw::{uid_t, gid_t}; #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64; #[stable(feature = "pthread_t", since = "1.8.0")] -pub type pthread_t = usize; +pub type pthread_t = u32; #[repr(C)] #[derive(Clone)] diff --git a/src/libstd/path.rs b/src/libstd/path.rs index e083ab0ef97..c90a0c78527 100644 --- a/src/libstd/path.rs +++ b/src/libstd/path.rs @@ -2237,7 +2237,7 @@ impl Path { fs::metadata(self).is_ok() } - /// Returns whether the path is pointing at a regular file. + /// Returns whether the path exists on disk and is pointing at a regular file. /// /// This function will traverse symbolic links to query information about the /// destination file. In case of broken symbolic links this will return `false`. @@ -2266,7 +2266,7 @@ impl Path { fs::metadata(self).map(|m| m.is_file()).unwrap_or(false) } - /// Returns whether the path is pointing at a directory. + /// Returns whether the path exists on disk and is pointing at a directory. /// /// This function will traverse symbolic links to query information about the /// destination file. In case of broken symbolic links this will return `false`. 
diff --git a/src/libstd/primitive_docs.rs b/src/libstd/primitive_docs.rs index 869299e2144..7be319d1954 100644 --- a/src/libstd/primitive_docs.rs +++ b/src/libstd/primitive_docs.rs @@ -722,3 +722,221 @@ mod prim_isize { } /// #[stable(feature = "rust1", since = "1.0.0")] mod prim_usize { } + +#[doc(primitive = "reference")] +// +/// References, both shared and mutable. +/// +/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut` +/// operators on a value, or by using a `ref` or `ref mut` pattern. +/// +/// For those familiar with pointers, a reference is just a pointer that is assumed to not be null. +/// In fact, `Option<&T>` has the same memory representation as a nullable pointer, and can be +/// passed across FFI boundaries as such. +/// +/// In most cases, references can be used much like the original value. Field access, method +/// calling, and indexing work the same (save for mutability rules, of course). In addition, the +/// comparison operators transparently defer to the referent's implementation, allowing references +/// to be compared the same as owned values. +/// +/// References have a lifetime attached to them, which represents the scope for which the borrow is +/// valid. A lifetime is said to "outlive" another one if its representative scope is as long or +/// longer than the other. The `'static` lifetime is the longest lifetime, which represents the +/// total life of the program. For example, string literals have a `'static` lifetime because the +/// text data is embedded into the binary of the program, rather than in an allocation that needs +/// to be dynamically managed. +/// +/// `&mut T` references can be freely coerced into `&T` references with the same referent type, and +/// references with longer lifetimes can be freely coerced into references with shorter ones. +/// +/// For more information on how to use references, see [the book's section on "References and +/// Borrowing"][book-refs]. 
+/// +/// [book-refs]: ../book/second-edition/ch04-02-references-and-borrowing.html +/// +/// The following traits are implemented for all `&T`, regardless of the type of its referent: +/// +/// * [`Copy`] +/// * [`Clone`] \(Note that this will not defer to `T`'s `Clone` implementation if it exists!) +/// * [`Deref`] +/// * [`Borrow`] +/// * [`Pointer`] +/// +/// [`Copy`]: marker/trait.Copy.html +/// [`Clone`]: clone/trait.Clone.html +/// [`Deref`]: ops/trait.Deref.html +/// [`Borrow`]: borrow/trait.Borrow.html +/// [`Pointer`]: fmt/trait.Pointer.html +/// +/// `&mut T` references get all of the above except `Copy` and `Clone` (to prevent creating +/// multiple simultaneous mutable borrows), plus the following, regardless of the type of its +/// referent: +/// +/// * [`DerefMut`] +/// * [`BorrowMut`] +/// +/// [`DerefMut`]: ops/trait.DerefMut.html +/// [`BorrowMut`]: borrow/trait.BorrowMut.html +/// +/// The following traits are implemented on `&T` references if the underlying `T` also implements +/// that trait: +/// +/// * All the traits in [`std::fmt`] except [`Pointer`] and [`fmt::Write`] +/// * [`PartialOrd`] +/// * [`Ord`] +/// * [`PartialEq`] +/// * [`Eq`] +/// * [`AsRef`] +/// * [`Fn`] \(in addition, `&T` references get [`FnMut`] and [`FnOnce`] if `T: Fn`) +/// * [`Hash`] +/// * [`ToSocketAddrs`] +/// +/// [`std::fmt`]: fmt/index.html +/// [`fmt::Write`]: fmt/trait.Write.html +/// [`PartialOrd`]: cmp/trait.PartialOrd.html +/// [`Ord`]: cmp/trait.Ord.html +/// [`PartialEq`]: cmp/trait.PartialEq.html +/// [`Eq`]: cmp/trait.Eq.html +/// [`AsRef`]: convert/trait.AsRef.html +/// [`Fn`]: ops/trait.Fn.html +/// [`FnMut`]: ops/trait.FnMut.html +/// [`FnOnce`]: ops/trait.FnOnce.html +/// [`Hash`]: hash/trait.Hash.html +/// [`ToSocketAddrs`]: net/trait.ToSocketAddrs.html +/// +/// `&mut T` references get all of the above except `ToSocketAddrs`, plus the following, if `T` +/// implements that trait: +/// +/// * [`AsMut`] +/// * [`FnMut`] \(in addition, `&mut T` 
references get [`FnOnce`] if `T: FnMut`) +/// * [`fmt::Write`] +/// * [`Iterator`] +/// * [`DoubleEndedIterator`] +/// * [`ExactSizeIterator`] +/// * [`FusedIterator`] +/// * [`TrustedLen`] +/// * [`Send`] \(note that `&T` references only get `Send` if `T: Sync`) +/// * [`io::Write`] +/// * [`Read`] +/// * [`Seek`] +/// * [`BufRead`] +/// +/// [`AsMut`]: convert/trait.AsMut.html +/// [`Iterator`]: iter/trait.Iterator.html +/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html +/// [`ExactSizeIterator`]: iter/trait.ExactSizeIterator.html +/// [`FusedIterator`]: iter/trait.FusedIterator.html +/// [`TrustedLen`]: iter/trait.TrustedLen.html +/// [`Send`]: marker/trait.Send.html +/// [`io::Write`]: io/trait.Write.html +/// [`Read`]: io/trait.Read.html +/// [`Seek`]: io/trait.Seek.html +/// [`BufRead`]: io/trait.BufRead.html +/// +/// Note that due to method call deref coercion, simply calling a trait method will act like they +/// work on references as well as they do on owned values! The implementations described here are +/// meant for generic contexts, where the final type `T` is a type parameter or otherwise not +/// locally known. +#[stable(feature = "rust1", since = "1.0.0")] +mod prim_ref { } + +#[doc(primitive = "fn")] +// +/// Function pointers, like `fn(usize) -> bool`. +/// +/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].* +/// +/// [`Fn`]: ops/trait.Fn.html +/// [`FnMut`]: ops/trait.FnMut.html +/// [`FnOnce`]: ops/trait.FnOnce.html +/// +/// Plain function pointers are obtained by casting either plain functions, or closures that don't +/// capture an environment: +/// +/// ``` +/// fn add_one(x: usize) -> usize { +/// x + 1 +/// } +/// +/// let ptr: fn(usize) -> usize = add_one; +/// assert_eq!(ptr(5), 6); +/// +/// let clos: fn(usize) -> usize = |x| x + 5; +/// assert_eq!(clos(5), 10); +/// ``` +/// +/// In addition to varying based on their signature, function pointers come in two flavors: safe +/// and unsafe. 
Plain `fn()` function pointers can only point to safe functions, +/// while `unsafe fn()` function pointers can point to safe or unsafe functions. +/// +/// ``` +/// fn add_one(x: usize) -> usize { +/// x + 1 +/// } +/// +/// unsafe fn add_one_unsafely(x: usize) -> usize { +/// x + 1 +/// } +/// +/// let safe_ptr: fn(usize) -> usize = add_one; +/// +/// //ERROR: mismatched types: expected normal fn, found unsafe fn +/// //let bad_ptr: fn(usize) -> usize = add_one_unsafely; +/// +/// let unsafe_ptr: unsafe fn(usize) -> usize = add_one_unsafely; +/// let really_safe_ptr: unsafe fn(usize) -> usize = add_one; +/// ``` +/// +/// On top of that, function pointers can vary based on what ABI they use. This is achieved by +/// adding the `extern` keyword to the type name, followed by the ABI in question. For example, +/// `fn()` is different from `extern "C" fn()`, which itself is different from `extern "stdcall" +/// fn()`, and so on for the various ABIs that Rust supports. Non-`extern` functions have an ABI +/// of `"Rust"`, and `extern` functions without an explicit ABI have an ABI of `"C"`. For more +/// information, see [the nomicon's section on foreign calling conventions][nomicon-abi]. +/// +/// [nomicon-abi]: ../nomicon/ffi.html#foreign-calling-conventions +/// +/// Extern function declarations with the "C" or "cdecl" ABIs can also be *variadic*, allowing them +/// to be called with a variable number of arguments. Normal rust functions, even those with an +/// `extern "ABI"`, cannot be variadic. For more information, see [the nomicon's section on +/// variadic functions][nomicon-variadic]. +/// +/// [nomicon-variadic]: ../nomicon/ffi.html#variadic-functions +/// +/// These markers can be combined, so `unsafe extern "stdcall" fn()` is a valid type. 
+/// +/// Like references in Rust, function pointers are assumed to not be null, so if you want to pass a +/// function pointer over FFI and be able to accommodate null pointers, make your type +/// `Option<fn()>` with your required signature. +/// +/// Function pointers implement the following traits: +/// +/// * [`Clone`] +/// * [`PartialEq`] +/// * [`Eq`] +/// * [`PartialOrd`] +/// * [`Ord`] +/// * [`Hash`] +/// * [`Pointer`] +/// * [`Debug`] +/// +/// [`Clone`]: clone/trait.Clone.html +/// [`PartialEq`]: cmp/trait.PartialEq.html +/// [`Eq`]: cmp/trait.Eq.html +/// [`PartialOrd`]: cmp/trait.PartialOrd.html +/// [`Ord`]: cmp/trait.Ord.html +/// [`Hash`]: hash/trait.Hash.html +/// [`Pointer`]: fmt/trait.Pointer.html +/// [`Debug`]: fmt/trait.Debug.html +/// +/// Due to a temporary restriction in Rust's type system, these traits are only implemented on +/// functions that take 12 arguments or less, with the `"Rust"` and `"C"` ABIs. In the future, this +/// may change. +/// +/// In addition, function pointers of *any* signature, ABI, or safety are [`Copy`], and all *safe* +/// function pointers implement [`Fn`], [`FnMut`], and [`FnOnce`]. This works because these traits +/// are specially known to the compiler. +/// +/// [`Copy`]: marker/trait.Copy.html +#[stable(feature = "rust1", since = "1.0.0")] +mod prim_fn { } diff --git a/src/libstd/process.rs b/src/libstd/process.rs index 31809e38239..a872e7eee06 100644 --- a/src/libstd/process.rs +++ b/src/libstd/process.rs @@ -1417,8 +1417,19 @@ mod tests { let output = String::from_utf8(result.stdout).unwrap(); for (ref k, ref v) in env::vars() { - // don't check android RANDOM variables - if cfg!(target_os = "android") && *k == "RANDOM" { + // Don't check android RANDOM variable which seems to change + // whenever the shell runs, and our `env_cmd` is indeed running a + // shell which means it'll get a different RANDOM than we probably + // have.
+ // + // Also skip env vars with `-` in the name on android because, well, + // I'm not sure. It appears though that the `set` command above does + // not print env vars with `-` in the name, so we just skip them + // here as we won't find them in the output. Note that most env vars + // use `_` instead of `-`, but our build system sets a few env vars + // with `-` in the name. + if cfg!(target_os = "android") && + (*k == "RANDOM" || k.contains("-")) { continue } diff --git a/src/libstd/sys/redox/ext/io.rs b/src/libstd/sys/redox/ext/io.rs index 8e7cc593dbd..c4d99568c55 100644 --- a/src/libstd/sys/redox/ext/io.rs +++ b/src/libstd/sys/redox/ext/io.rs @@ -15,6 +15,7 @@ use fs; use net; use sys; +use io; use sys_common::{self, AsInner, FromInner, IntoInner}; /// Raw file descriptors. @@ -109,6 +110,21 @@ impl AsRawFd for net::UdpSocket { } } +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { 0 } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { 1 } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { 2 } +} + #[stable(feature = "from_raw_os", since = "1.1.0")] impl FromRawFd for net::TcpStream { unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { diff --git a/src/libstd/sys/redox/ext/mod.rs b/src/libstd/sys/redox/ext/mod.rs index 513ef272e97..0c1bf9e9557 100644 --- a/src/libstd/sys/redox/ext/mod.rs +++ b/src/libstd/sys/redox/ext/mod.rs @@ -33,6 +33,7 @@ pub mod ffi; pub mod fs; pub mod io; pub mod process; +pub mod thread; /// A prelude for conveniently writing platform-specific code. 
/// @@ -46,5 +47,7 @@ pub mod prelude { #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")] pub use super::fs::{FileTypeExt, PermissionsExt, OpenOptionsExt, MetadataExt}; #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")] + pub use super::thread::JoinHandleExt; + #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")] pub use super::process::{CommandExt, ExitStatusExt}; } diff --git a/src/libstd/sys/redox/ext/thread.rs b/src/libstd/sys/redox/ext/thread.rs new file mode 100644 index 00000000000..52be2ccd9f9 --- /dev/null +++ b/src/libstd/sys/redox/ext/thread.rs @@ -0,0 +1,47 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Unix-specific extensions to primitives in the `std::thread` module. + +#![stable(feature = "thread_extensions", since = "1.9.0")] + +use sys_common::{AsInner, IntoInner}; +use thread::JoinHandle; + +#[stable(feature = "thread_extensions", since = "1.9.0")] +#[allow(deprecated)] +pub type RawPthread = usize; + +/// Unix-specific extensions to `std::thread::JoinHandle` +#[stable(feature = "thread_extensions", since = "1.9.0")] +pub trait JoinHandleExt { + /// Extracts the raw pthread_t without taking ownership + #[stable(feature = "thread_extensions", since = "1.9.0")] + fn as_pthread_t(&self) -> RawPthread; + + /// Consumes the thread, returning the raw pthread_t + /// + /// This function **transfers ownership** of the underlying pthread_t to + /// the caller. Callers are then the unique owners of the pthread_t and + /// must either detach or join the pthread_t once it's no longer needed. 
+ #[stable(feature = "thread_extensions", since = "1.9.0")] + fn into_pthread_t(self) -> RawPthread; +} + +#[stable(feature = "thread_extensions", since = "1.9.0")] +impl<T> JoinHandleExt for JoinHandle<T> { + fn as_pthread_t(&self) -> RawPthread { + self.as_inner().id() as RawPthread + } + + fn into_pthread_t(self) -> RawPthread { + self.into_inner().into_id() as RawPthread + } +} diff --git a/src/libstd/sys/redox/fd.rs b/src/libstd/sys/redox/fd.rs index 1b37aafef56..ba7bbdc657f 100644 --- a/src/libstd/sys/redox/fd.rs +++ b/src/libstd/sys/redox/fd.rs @@ -57,9 +57,9 @@ impl FileDesc { } pub fn set_cloexec(&self) -> io::Result<()> { - let mut flags = cvt(syscall::fcntl(self.fd, syscall::F_GETFL, 0))?; + let mut flags = cvt(syscall::fcntl(self.fd, syscall::F_GETFD, 0))?; flags |= syscall::O_CLOEXEC; - cvt(syscall::fcntl(self.fd, syscall::F_SETFL, flags)).and(Ok(())) + cvt(syscall::fcntl(self.fd, syscall::F_SETFD, flags)).and(Ok(())) } pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { diff --git a/src/libstd/sys/redox/process.rs b/src/libstd/sys/redox/process.rs index ff1626d9b31..17fa07b99ae 100644 --- a/src/libstd/sys/redox/process.rs +++ b/src/libstd/sys/redox/process.rs @@ -272,21 +272,21 @@ impl Command { if let Some(fd) = stdio.stderr.fd() { t!(cvt(syscall::dup2(fd, 2, &[]))); - let mut flags = t!(cvt(syscall::fcntl(2, syscall::F_GETFL, 0))); + let mut flags = t!(cvt(syscall::fcntl(2, syscall::F_GETFD, 0))); flags &= ! syscall::O_CLOEXEC; - t!(cvt(syscall::fcntl(2, syscall::F_SETFL, flags))); + t!(cvt(syscall::fcntl(2, syscall::F_SETFD, flags))); } if let Some(fd) = stdio.stdout.fd() { t!(cvt(syscall::dup2(fd, 1, &[]))); - let mut flags = t!(cvt(syscall::fcntl(1, syscall::F_GETFL, 0))); + let mut flags = t!(cvt(syscall::fcntl(1, syscall::F_GETFD, 0))); flags &= ! 
syscall::O_CLOEXEC; - t!(cvt(syscall::fcntl(1, syscall::F_SETFL, flags))); + t!(cvt(syscall::fcntl(1, syscall::F_SETFD, flags))); } if let Some(fd) = stdio.stdin.fd() { t!(cvt(syscall::dup2(fd, 0, &[]))); - let mut flags = t!(cvt(syscall::fcntl(0, syscall::F_GETFL, 0))); + let mut flags = t!(cvt(syscall::fcntl(0, syscall::F_GETFD, 0))); flags &= ! syscall::O_CLOEXEC; - t!(cvt(syscall::fcntl(0, syscall::F_SETFL, flags))); + t!(cvt(syscall::fcntl(0, syscall::F_SETFD, flags))); } if let Some(g) = self.gid { diff --git a/src/libstd/sys/redox/syscall/flag.rs b/src/libstd/sys/redox/syscall/flag.rs index 65ad9842d69..892007df2b7 100644 --- a/src/libstd/sys/redox/syscall/flag.rs +++ b/src/libstd/sys/redox/syscall/flag.rs @@ -20,8 +20,10 @@ pub const EVENT_NONE: usize = 0; pub const EVENT_READ: usize = 1; pub const EVENT_WRITE: usize = 2; -pub const F_GETFL: usize = 1; -pub const F_SETFL: usize = 2; +pub const F_GETFD: usize = 1; +pub const F_SETFD: usize = 2; +pub const F_GETFL: usize = 3; +pub const F_SETFL: usize = 4; pub const FUTEX_WAIT: usize = 0; pub const FUTEX_WAKE: usize = 1; diff --git a/src/libstd/sys/unix/ext/io.rs b/src/libstd/sys/unix/ext/io.rs index 296235e173d..a0323d933d6 100644 --- a/src/libstd/sys/unix/ext/io.rs +++ b/src/libstd/sys/unix/ext/io.rs @@ -16,7 +16,9 @@ use fs; use net; use os::raw; use sys; +use io; use sys_common::{self, AsInner, FromInner, IntoInner}; +use libc; /// Raw file descriptors. 
#[stable(feature = "rust1", since = "1.0.0")] @@ -104,6 +106,21 @@ impl AsRawFd for net::UdpSocket { fn as_raw_fd(&self) -> RawFd { *self.as_inner().socket().as_inner() } } +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { libc::STDIN_FILENO } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { libc::STDOUT_FILENO } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { libc::STDERR_FILENO } +} + #[stable(feature = "from_raw_os", since = "1.1.0")] impl FromRawFd for net::TcpStream { unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs index 59dceba8953..4e6fde5c29d 100644 --- a/src/libstd/sys/unix/fs.rs +++ b/src/libstd/sys/unix/fs.rs @@ -284,12 +284,7 @@ impl DirEntry { lstat(&self.path()) } - #[cfg(target_os = "solaris")] - pub fn file_type(&self) -> io::Result<FileType> { - stat(&self.path()).map(|m| m.file_type()) - } - - #[cfg(target_os = "haiku")] + #[cfg(any(target_os = "solaris", target_os = "haiku"))] pub fn file_type(&self) -> io::Result<FileType> { lstat(&self.path()).map(|m| m.file_type()) } diff --git a/src/libstd/sys/windows/ext/io.rs b/src/libstd/sys/windows/ext/io.rs index 2ddb6c65fd3..90128dda088 100644 --- a/src/libstd/sys/windows/ext/io.rs +++ b/src/libstd/sys/windows/ext/io.rs @@ -15,6 +15,7 @@ use os::windows::raw; use net; use sys_common::{self, AsInner, FromInner, IntoInner}; use sys; +use io; use sys::c; /// Raw HANDLEs. 
@@ -71,6 +72,27 @@ impl AsRawHandle for fs::File { } } +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stdin { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stdout { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stderr { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle } + } +} + #[stable(feature = "from_raw_os", since = "1.1.0")] impl FromRawHandle for fs::File { unsafe fn from_raw_handle(handle: RawHandle) -> fs::File { diff --git a/src/libstd/sys_common/net.rs b/src/libstd/sys_common/net.rs index 5775dd4f1fc..1ca39ff9d4a 100644 --- a/src/libstd/sys_common/net.rs +++ b/src/libstd/sys_common/net.rs @@ -165,16 +165,8 @@ pub fn lookup_host(host: &str) -> io::Result<LookupHost> { init(); let c_host = CString::new(host)?; - let hints = c::addrinfo { - ai_flags: 0, - ai_family: 0, - ai_socktype: c::SOCK_STREAM, - ai_protocol: 0, - ai_addrlen: 0, - ai_addr: ptr::null_mut(), - ai_canonname: ptr::null_mut(), - ai_next: ptr::null_mut() - }; + let mut hints: c::addrinfo = unsafe { mem::zeroed() }; + hints.ai_socktype = c::SOCK_STREAM; let mut res = ptr::null_mut(); unsafe { match cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res)) { diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs index 2ae62f8c3e0..cbd019c2c0e 100644 --- a/src/libstd/thread/mod.rs +++ b/src/libstd/thread/mod.rs @@ -820,7 +820,8 @@ pub fn park_timeout(dur: Duration) { /// /// A `ThreadId` is an opaque object that has a unique value for each thread /// that creates one. `ThreadId`s are not guaranteed to correspond to a thread's -/// system-designated identifier. +/// system-designated identifier. 
A `ThreadId` can be retrieved from the [`id`] +/// method on a [`Thread`]. /// /// # Examples /// @@ -834,6 +835,9 @@ pub fn park_timeout(dur: Duration) { /// let other_thread_id = other_thread.join().unwrap(); /// assert!(thread::current().id() != other_thread_id); /// ``` +/// +/// [`id`]: ../../std/thread/struct.Thread.html#method.id +/// [`Thread`]: ../../std/thread/struct.Thread.html #[stable(feature = "thread_id", since = "1.19.0")] #[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] pub struct ThreadId(u64); @@ -896,6 +900,9 @@ struct Inner { /// docs of [`Builder`] and [`spawn`] for more details. /// /// [`Builder`]: ../../std/thread/struct.Builder.html +/// [`JoinHandle::thread`]: ../../std/thread/struct.JoinHandle.html#method.thread +/// [`JoinHandle`]: ../../std/thread/struct.JoinHandle.html +/// [`thread::current`]: ../../std/thread/fn.current.html /// [`spawn`]: ../../std/thread/fn.spawn.html pub struct Thread { diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index 0128bf73a35..cce428cad6d 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -336,6 +336,7 @@ impl Default for Generics { where_clause: WhereClause { id: DUMMY_NODE_ID, predicates: Vec::new(), + span: DUMMY_SP, }, span: DUMMY_SP, } @@ -347,6 +348,7 @@ impl Default for Generics { pub struct WhereClause { pub id: NodeId, pub predicates: Vec<WherePredicate>, + pub span: Span, } /// A single predicate in a `where` clause @@ -733,6 +735,13 @@ impl Stmt { }; self } + + pub fn is_item(&self) -> bool { + match self.node { + StmtKind::Local(_) => true, + _ => false, + } + } } impl fmt::Debug for Stmt { @@ -1152,6 +1161,8 @@ pub struct TraitItem { pub attrs: Vec<Attribute>, pub node: TraitItemKind, pub span: Span, + /// See `Item::tokens` for what this is + pub tokens: Option<TokenStream>, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -1171,6 +1182,8 @@ pub struct ImplItem { pub attrs: Vec<Attribute>, pub node: ImplItemKind, pub span: Span, + /// 
See `Item::tokens` for what this is + pub tokens: Option<TokenStream>, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -1815,6 +1828,15 @@ pub struct Item { pub node: ItemKind, pub vis: Visibility, pub span: Span, + + /// Original tokens this item was parsed from. This isn't necessarily + /// available for all items, although over time more and more items should + /// have this be `Some`. Right now this is primarily used for procedural + /// macros, notably custom attributes. + /// + /// Note that the tokens here do not include the outer attributes, but will + /// include inner attributes. + pub tokens: Option<TokenStream>, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index b3d9cf9da36..bfdcae7641d 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -561,8 +561,9 @@ impl CodeMapper for CodeMap { sp } fn ensure_filemap_source_present(&self, file_map: Rc<FileMap>) -> bool { - let src = self.file_loader.read_file(Path::new(&file_map.name)).ok(); - return file_map.add_external_src(src) + file_map.add_external_src( + || self.file_loader.read_file(Path::new(&file_map.name)).ok() + ) } } diff --git a/src/libsyntax/diagnostic_list.rs b/src/libsyntax/diagnostic_list.rs index 508feca9731..6598ecb9444 100644 --- a/src/libsyntax/diagnostic_list.rs +++ b/src/libsyntax/diagnostic_list.rs @@ -42,7 +42,7 @@ The `inline` attribute was malformed. Erroneous code example: -```compile_fail,E0534 +```ignore (compile_fail not working here; see Issue #43707) #[inline()] // error: expected one argument pub fn something() {} @@ -80,7 +80,7 @@ An unknown argument was given to the `inline` attribute. 
Erroneous code example: -```compile_fail,E0535 +```ignore (compile_fail not working here; see Issue #43707) #[inline(unknown)] // error: invalid argument pub fn something() {} @@ -190,7 +190,9 @@ A literal was used in an attribute that doesn't support literals. Erroneous code example: -```compile_fail,E0565 +```ignore (compile_fail not working here; see Issue #43707) +#![feature(attr_literals)] + #[inline("always")] // error: unsupported literal pub fn something() {} ``` @@ -209,7 +211,7 @@ A file wasn't found for an out-of-line module. Erroneous code example: -```compile_fail,E0583 +```ignore (compile_fail not working here; see Issue #43707) mod file_that_doesnt_exist; // error: file not found for module fn main() {} @@ -251,23 +253,33 @@ An inclusive range was used with no end. Erroneous code example: ```compile_fail,E0586 -let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; -let x = &tmp[1...]; // error: inclusive range was used with no end +#![feature(inclusive_range_syntax)] + +fn main() { + let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; + let x = &tmp[1...]; // error: inclusive range was used with no end +} ``` An inclusive range needs an end in order to *include* it. If you just need a start and no end, use a non-inclusive range (with `..`): ``` -let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; -let x = &tmp[1..]; // ok! +fn main() { + let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; + let x = &tmp[1..]; // ok! +} ``` Or put an end to your inclusive range: ``` -let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; -let x = &tmp[1...3]; // ok! +#![feature(inclusive_range_syntax)] + +fn main() { + let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; + let x = &tmp[1...3]; // ok! 
+} ``` "##, diff --git a/src/libsyntax/diagnostics/plugin.rs b/src/libsyntax/diagnostics/plugin.rs index 2a5de3c7382..855f4cd3557 100644 --- a/src/libsyntax/diagnostics/plugin.rs +++ b/src/libsyntax/diagnostics/plugin.rs @@ -236,6 +236,7 @@ pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt, ), vis: ast::Visibility::Public, span: span, + tokens: None, }) ])) } diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index e004f7354eb..de0538e38b3 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -979,7 +979,8 @@ impl<'a> AstBuilder for ExtCtxt<'a> { id: ast::DUMMY_NODE_ID, node: node, vis: ast::Visibility::Inherited, - span: span + span: span, + tokens: None, }) } @@ -1147,7 +1148,8 @@ impl<'a> AstBuilder for ExtCtxt<'a> { attrs: vec![], node: ast::ItemKind::Use(vp), vis: vis, - span: sp + span: sp, + tokens: None, }) } diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index f6d56557166..16c264e0f94 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -214,6 +214,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { ident: keywords::Invalid.ident(), id: ast::DUMMY_NODE_ID, vis: ast::Visibility::Public, + tokens: None, }))); match self.expand(krate_item).make_items().pop().map(P::unwrap) { diff --git a/src/libsyntax/ext/placeholders.rs b/src/libsyntax/ext/placeholders.rs index 4fb138d506a..e3377c1d8de 100644 --- a/src/libsyntax/ext/placeholders.rs +++ b/src/libsyntax/ext/placeholders.rs @@ -46,15 +46,18 @@ pub fn placeholder(kind: ExpansionKind, id: ast::NodeId) -> Expansion { ExpansionKind::Items => Expansion::Items(SmallVector::one(P(ast::Item { id: id, span: span, ident: ident, vis: vis, attrs: attrs, node: ast::ItemKind::Mac(mac_placeholder()), + tokens: None, }))), ExpansionKind::TraitItems => Expansion::TraitItems(SmallVector::one(ast::TraitItem { id: id, span: span, ident: ident, attrs: attrs, node: ast::TraitItemKind::Macro(mac_placeholder()), + tokens: None, })), 
ExpansionKind::ImplItems => Expansion::ImplItems(SmallVector::one(ast::ImplItem { id: id, span: span, ident: ident, vis: vis, attrs: attrs, node: ast::ImplItemKind::Macro(mac_placeholder()), defaultness: ast::Defaultness::Final, + tokens: None, })), ExpansionKind::Pat => Expansion::Pat(P(ast::Pat { id: id, span: span, node: ast::PatKind::Mac(mac_placeholder()), diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 580c2aa58a5..8f9f179f08b 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -737,14 +737,15 @@ pub fn noop_fold_generics<T: Folder>(Generics {ty_params, lifetimes, where_claus } pub fn noop_fold_where_clause<T: Folder>( - WhereClause {id, predicates}: WhereClause, + WhereClause {id, predicates, span}: WhereClause, fld: &mut T) -> WhereClause { WhereClause { id: fld.new_id(id), predicates: predicates.move_map(|predicate| { fld.fold_where_predicate(predicate) - }) + }), + span: span, } } @@ -957,7 +958,8 @@ pub fn noop_fold_trait_item<T: Folder>(i: TraitItem, folder: &mut T) TraitItemKind::Macro(folder.fold_mac(mac)) } }, - span: folder.new_span(i.span) + span: folder.new_span(i.span), + tokens: i.tokens, }) } @@ -980,7 +982,8 @@ pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T) ast::ImplItemKind::Type(ty) => ast::ImplItemKind::Type(folder.fold_ty(ty)), ast::ImplItemKind::Macro(mac) => ast::ImplItemKind::Macro(folder.fold_mac(mac)) }, - span: folder.new_span(i.span) + span: folder.new_span(i.span), + tokens: i.tokens, }) } @@ -1000,6 +1003,7 @@ pub fn noop_fold_crate<T: Folder>(Crate {module, attrs, span}: Crate, vis: ast::Visibility::Public, span: span, node: ast::ItemKind::Mod(module), + tokens: None, })).into_iter(); let (module, attrs, span) = match items.next() { @@ -1032,7 +1036,7 @@ pub fn noop_fold_item<T: Folder>(i: P<Item>, folder: &mut T) -> SmallVector<P<It } // fold one item into exactly one item -pub fn noop_fold_item_simple<T: Folder>(Item {id, ident, attrs, node, vis, span}: Item, +pub fn 
noop_fold_item_simple<T: Folder>(Item {id, ident, attrs, node, vis, span, tokens}: Item, folder: &mut T) -> Item { Item { id: folder.new_id(id), @@ -1040,7 +1044,11 @@ pub fn noop_fold_item_simple<T: Folder>(Item {id, ident, attrs, node, vis, span} ident: folder.fold_ident(ident), attrs: fold_attrs(attrs, folder), node: folder.fold_item_kind(node), - span: folder.new_span(span) + span: folder.new_span(span), + + // FIXME: if this is replaced with a call to `folder.fold_tts` it causes + // an ICE during resolve... odd! + tokens: tokens, } } diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index a8338fccb6b..43345b02bf6 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -148,4 +148,4 @@ pub mod ext { #[cfg(test)] mod test_snippet; -// __build_diagnostic_array! { libsyntax, DIAGNOSTICS } +__build_diagnostic_array! { libsyntax, DIAGNOSTICS } diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index bd9a621c00c..893bada2670 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -843,11 +843,18 @@ mod tests { // check the contents of the tt manually: #[test] fn parse_fundecl () { // this test depends on the intern order of "fn" and "i32" - assert_eq!(string_to_item("fn a (b : i32) { b; }".to_string()), + let item = string_to_item("fn a (b : i32) { b; }".to_string()).map(|m| { + m.map(|mut m| { + m.tokens = None; + m + }) + }); + assert_eq!(item, Some( P(ast::Item{ident:Ident::from_str("a"), attrs:Vec::new(), id: ast::DUMMY_NODE_ID, + tokens: None, node: ast::ItemKind::Fn(P(ast::FnDecl { inputs: vec![ast::Arg{ ty: P(ast::Ty{id: ast::DUMMY_NODE_ID, @@ -860,13 +867,14 @@ mod tests { pat: P(ast::Pat { id: ast::DUMMY_NODE_ID, node: PatKind::Ident( - ast::BindingMode::ByValue(ast::Mutability::Immutable), - Spanned{ - span: sp(6,7), - node: Ident::from_str("b")}, - None - ), - span: sp(6,7) + ast::BindingMode::ByValue( + ast::Mutability::Immutable), + Spanned{ + span: sp(6,7), + node: Ident::from_str("b")}, + None + ), 
+ span: sp(6,7) }), id: ast::DUMMY_NODE_ID }], @@ -885,6 +893,7 @@ mod tests { where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: syntax_pos::DUMMY_SP, }, diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index b7ae025db5f..5fe3cf0ddac 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -216,6 +216,30 @@ struct TokenCursorFrame { open_delim: bool, tree_cursor: tokenstream::Cursor, close_delim: bool, + last_token: LastToken, +} + +/// This is used in `TokenCursorFrame` above to track tokens that are consumed +/// by the parser, and then that's transitively used to record the tokens that +/// each parse AST item is created with. +/// +/// Right now this has two states, either collecting tokens or not collecting +/// tokens. If we're collecting tokens we just save everything off into a local +/// `Vec`. This should eventually though likely save tokens from the original +/// token stream and just use slicing of token streams to avoid creation of a +/// whole new vector. +/// +/// The second state is where we're passively not recording tokens, but the last +/// token is still tracked for when we want to start recording tokens. This +/// "last token" means that when we start recording tokens we'll want to ensure +/// that this, the first token, is included in the output. +/// +/// You can find some more example usage of this in the `collect_tokens` method +/// on the parser. 
+#[derive(Clone)] +enum LastToken { + Collecting(Vec<TokenTree>), + Was(Option<TokenTree>), } impl TokenCursorFrame { @@ -226,6 +250,7 @@ impl TokenCursorFrame { open_delim: delimited.delim == token::NoDelim, tree_cursor: delimited.stream().into_trees(), close_delim: delimited.delim == token::NoDelim, + last_token: LastToken::Was(None), } } } @@ -250,6 +275,11 @@ impl TokenCursor { return TokenAndSpan { tok: token::Eof, sp: syntax_pos::DUMMY_SP } }; + match self.frame.last_token { + LastToken::Collecting(ref mut v) => v.push(tree.clone()), + LastToken::Was(ref mut t) => *t = Some(tree.clone()), + } + match tree { TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp }, TokenTree::Delimited(sp, ref delimited) => { @@ -1209,7 +1239,20 @@ impl<'a> Parser<'a> { /// Parse the items in a trait declaration pub fn parse_trait_item(&mut self, at_end: &mut bool) -> PResult<'a, TraitItem> { maybe_whole!(self, NtTraitItem, |x| x); - let mut attrs = self.parse_outer_attributes()?; + let attrs = self.parse_outer_attributes()?; + let (mut item, tokens) = self.collect_tokens(|this| { + this.parse_trait_item_(at_end, attrs) + })?; + // See `parse_item` for why this clause is here. 
+ if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { + item.tokens = Some(tokens); + } + Ok(item) + } + + fn parse_trait_item_(&mut self, + at_end: &mut bool, + mut attrs: Vec<Attribute>) -> PResult<'a, TraitItem> { let lo = self.span; let (name, node) = if self.eat_keyword(keywords::Type) { @@ -1304,6 +1347,7 @@ impl<'a> Parser<'a> { attrs: attrs, node: node, span: lo.to(self.prev_span), + tokens: None, }) } @@ -2030,14 +2074,14 @@ impl<'a> Parser<'a> { } else { Ok(self.mk_expr(span, ExprKind::Tup(es), attrs)) } - }, + } token::OpenDelim(token::Brace) => { return self.parse_block_expr(lo, BlockCheckMode::Default, attrs); - }, - token::BinOp(token::Or) | token::OrOr => { + } + token::BinOp(token::Or) | token::OrOr => { let lo = self.span; return self.parse_lambda_expr(lo, CaptureBy::Ref, attrs); - }, + } token::OpenDelim(token::Bracket) => { self.bump(); @@ -2293,7 +2337,6 @@ impl<'a> Parser<'a> { pub fn parse_block_expr(&mut self, lo: Span, blk_mode: BlockCheckMode, outer_attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { - self.expect(&token::OpenDelim(token::Brace))?; let mut attrs = outer_attrs; @@ -4266,6 +4309,7 @@ impl<'a> Parser<'a> { where_clause: WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: span_lo.to(self.prev_span), }) @@ -4333,11 +4377,13 @@ impl<'a> Parser<'a> { let mut where_clause = WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }; if !self.eat_keyword(keywords::Where) { return Ok(where_clause); } + let lo = self.prev_span; // This is a temporary future proofing. // @@ -4415,6 +4461,7 @@ impl<'a> Parser<'a> { } } + where_clause.span = lo.to(self.prev_span); Ok(where_clause) } @@ -4661,6 +4708,7 @@ impl<'a> Parser<'a> { node: node, vis: vis, span: span, + tokens: None, }) } @@ -4716,8 +4764,21 @@ impl<'a> Parser<'a> { /// Parse an impl item. 
pub fn parse_impl_item(&mut self, at_end: &mut bool) -> PResult<'a, ImplItem> { maybe_whole!(self, NtImplItem, |x| x); + let attrs = self.parse_outer_attributes()?; + let (mut item, tokens) = self.collect_tokens(|this| { + this.parse_impl_item_(at_end, attrs) + })?; - let mut attrs = self.parse_outer_attributes()?; + // See `parse_item` for why this clause is here. + if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { + item.tokens = Some(tokens); + } + Ok(item) + } + + fn parse_impl_item_(&mut self, + at_end: &mut bool, + mut attrs: Vec<Attribute>) -> PResult<'a, ImplItem> { let lo = self.span; let vis = self.parse_visibility(false)?; let defaultness = self.parse_defaultness()?; @@ -4749,7 +4810,8 @@ impl<'a> Parser<'a> { vis: vis, defaultness: defaultness, attrs: attrs, - node: node + node: node, + tokens: None, }) } @@ -6025,9 +6087,71 @@ impl<'a> Parser<'a> { Ok(None) } + fn collect_tokens<F, R>(&mut self, f: F) -> PResult<'a, (R, TokenStream)> + where F: FnOnce(&mut Self) -> PResult<'a, R> + { + // Record all tokens we parse when parsing this item. + let mut tokens = Vec::new(); + match self.token_cursor.frame.last_token { + LastToken::Collecting(_) => { + panic!("cannot collect tokens recursively yet") + } + LastToken::Was(ref mut last) => tokens.extend(last.take()), + } + self.token_cursor.frame.last_token = LastToken::Collecting(tokens); + let prev = self.token_cursor.stack.len(); + let ret = f(self); + let last_token = if self.token_cursor.stack.len() == prev { + &mut self.token_cursor.frame.last_token + } else { + &mut self.token_cursor.stack[prev].last_token + }; + let mut tokens = match *last_token { + LastToken::Collecting(ref mut v) => mem::replace(v, Vec::new()), + LastToken::Was(_) => panic!("our vector went away?"), + }; + + // If we're not at EOF our current token wasn't actually consumed by + // `f`, but it'll still be in our list that we pulled out. In that case + // put it back. 
+ if self.token == token::Eof { + *last_token = LastToken::Was(None); + } else { + *last_token = LastToken::Was(tokens.pop()); + } + + Ok((ret?, tokens.into_iter().collect())) + } + pub fn parse_item(&mut self) -> PResult<'a, Option<P<Item>>> { let attrs = self.parse_outer_attributes()?; - self.parse_item_(attrs, true, false) + + let (ret, tokens) = self.collect_tokens(|this| { + this.parse_item_(attrs, true, false) + })?; + + // Once we've parsed an item and recorded the tokens we got while + // parsing we may want to store `tokens` into the item we're about to + // return. Note, though, that we specifically didn't capture tokens + // related to outer attributes. The `tokens` field here may later be + // used with procedural macros to convert this item back into a token + // stream, but during expansion we may be removing attributes as we go + // along. + // + // If we've got inner attributes then the `tokens` we've got above holds + // these inner attributes. If an inner attribute is expanded we won't + // actually remove it from the token stream, so we'll just keep yielding + // it (bad!). To work around this case for now we just avoid recording + // `tokens` if we detect any inner attributes. This should help keep + // expansion correct, but we should fix this bug one day! 
+ Ok(ret.map(|item| { + item.map(|mut i| { + if !i.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { + i.tokens = Some(tokens); + } + i + }) + })) } fn parse_path_list_items(&mut self) -> PResult<'a, Vec<ast::PathListItem>> { diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index 9e36fb83696..5832cfcdf36 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -1041,6 +1041,7 @@ impl<'a> State<'a> { where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: syntax_pos::DUMMY_SP, }; @@ -2993,6 +2994,7 @@ impl<'a> State<'a> { where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: syntax_pos::DUMMY_SP, }; diff --git a/src/libsyntax/std_inject.rs b/src/libsyntax/std_inject.rs index a8a9ae556f1..d9ed96f293a 100644 --- a/src/libsyntax/std_inject.rs +++ b/src/libsyntax/std_inject.rs @@ -60,6 +60,7 @@ pub fn maybe_inject_crates_ref(mut krate: ast::Crate, alt_std_name: Option<Strin ident: ast::Ident::from_str(name), id: ast::DUMMY_NODE_ID, span: DUMMY_SP, + tokens: None, })); let span = ignored_span(DUMMY_SP); @@ -82,6 +83,7 @@ pub fn maybe_inject_crates_ref(mut krate: ast::Crate, alt_std_name: Option<Strin id: ast::DUMMY_NODE_ID, ident: keywords::Invalid.ident(), span: span, + tokens: None, })); krate diff --git a/src/libsyntax/test.rs b/src/libsyntax/test.rs index bd2c386cb8a..887479a2472 100644 --- a/src/libsyntax/test.rs +++ b/src/libsyntax/test.rs @@ -192,7 +192,7 @@ impl fold::Folder for EntryPointCleaner { EntryPointType::MainNamed | EntryPointType::MainAttr | EntryPointType::Start => - folded.map(|ast::Item {id, ident, attrs, node, vis, span}| { + folded.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| { let allow_str = Symbol::intern("allow"); let dead_code_str = Symbol::intern("dead_code"); let word_vec = vec![attr::mk_list_word_item(dead_code_str)]; @@ -212,7 +212,8 @@ 
impl fold::Folder for EntryPointCleaner { .collect(), node: node, vis: vis, - span: span + span: span, + tokens: tokens, } }), EntryPointType::None | @@ -255,6 +256,7 @@ fn mk_reexport_mod(cx: &mut TestCtxt, node: ast::ItemKind::Mod(reexport_mod), vis: ast::Visibility::Public, span: DUMMY_SP, + tokens: None, })).pop().unwrap(); (it, sym) @@ -465,7 +467,8 @@ fn mk_std(cx: &TestCtxt) -> P<ast::Item> { node: vi, attrs: vec![], vis: vis, - span: sp + span: sp, + tokens: None, }) } @@ -506,7 +509,8 @@ fn mk_main(cx: &mut TestCtxt) -> P<ast::Item> { id: ast::DUMMY_NODE_ID, node: main, vis: ast::Visibility::Public, - span: sp + span: sp, + tokens: None, }) } @@ -536,6 +540,7 @@ fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<P<ast::Item>>) { node: item_, vis: ast::Visibility::Public, span: DUMMY_SP, + tokens: None, })).pop().unwrap(); let reexport = cx.reexport_test_harness_main.map(|s| { // building `use <ident> = __test::main` @@ -551,7 +556,8 @@ fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<P<ast::Item>>) { attrs: vec![], node: ast::ItemKind::Use(P(use_path)), vis: ast::Visibility::Inherited, - span: DUMMY_SP + span: DUMMY_SP, + tokens: None, })).pop().unwrap() }); diff --git a/src/libsyntax_ext/deriving/generic/mod.rs b/src/libsyntax_ext/deriving/generic/mod.rs index 4acd65bbf86..3cbc7938bde 100644 --- a/src/libsyntax_ext/deriving/generic/mod.rs +++ b/src/libsyntax_ext/deriving/generic/mod.rs @@ -504,6 +504,7 @@ impl<'a> TraitDef<'a> { defaultness: ast::Defaultness::Final, attrs: Vec::new(), node: ast::ImplItemKind::Type(type_def.to_ty(cx, self.span, type_ident, generics)), + tokens: None, } }); @@ -930,6 +931,7 @@ impl<'a> MethodDef<'a> { decl: fn_decl, }, body_block), + tokens: None, } } diff --git a/src/libsyntax_ext/deriving/generic/ty.rs b/src/libsyntax_ext/deriving/generic/ty.rs index 9c89f99cbb5..f5ac1743920 100644 --- a/src/libsyntax_ext/deriving/generic/ty.rs +++ b/src/libsyntax_ext/deriving/generic/ty.rs @@ -216,6 +216,7 @@ fn 
mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>, s where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: span, }, span: span, } diff --git a/src/libsyntax_ext/format.rs b/src/libsyntax_ext/format.rs index 7351377e771..9734bb867f1 100644 --- a/src/libsyntax_ext/format.rs +++ b/src/libsyntax_ext/format.rs @@ -20,7 +20,7 @@ use syntax::ext::build::AstBuilder; use syntax::parse::token; use syntax::ptr::P; use syntax::symbol::{Symbol, keywords}; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; use syntax::tokenstream; use std::collections::{HashMap, HashSet}; @@ -558,8 +558,10 @@ impl<'a, 'b> Context<'a, 'b> { // passed to this function. for (i, e) in self.args.into_iter().enumerate() { let name = self.ecx.ident_of(&format!("__arg{}", i)); - let span = - Span { ctxt: e.span.ctxt.apply_mark(self.ecx.current_expansion.mark), ..e.span }; + let span = Span { + ctxt: e.span.ctxt.apply_mark(self.ecx.current_expansion.mark), + ..DUMMY_SP + }; pats.push(self.ecx.pat_ident(span, name)); for ref arg_ty in self.arg_unique_types[i].iter() { locals.push(Context::format_arg(self.ecx, self.macsp, e.span, arg_ty, name)); diff --git a/src/libsyntax_ext/global_asm.rs b/src/libsyntax_ext/global_asm.rs index dc67e1c45f6..8b0bb8cb891 100644 --- a/src/libsyntax_ext/global_asm.rs +++ b/src/libsyntax_ext/global_asm.rs @@ -61,5 +61,6 @@ pub fn expand_global_asm<'cx>(cx: &'cx mut ExtCtxt, })), vis: ast::Visibility::Inherited, span: sp, + tokens: None, }))) } diff --git a/src/libsyntax_pos/lib.rs b/src/libsyntax_pos/lib.rs index 3a701f91314..7006f45455e 100644 --- a/src/libsyntax_pos/lib.rs +++ b/src/libsyntax_pos/lib.rs @@ -618,8 +618,11 @@ impl FileMap { /// If the hash of the input doesn't match or no input is supplied via None, /// it is interpreted as an error and the corresponding enum variant is set. /// The return value signifies whether some kind of source is present. 
- pub fn add_external_src(&self, src: Option<String>) -> bool { + pub fn add_external_src<F>(&self, get_src: F) -> bool + where F: FnOnce() -> Option<String> + { if *self.external_src.borrow() == ExternalSource::AbsentOk { + let src = get_src(); let mut external_src = self.external_src.borrow_mut(); if let Some(src) = src { let mut hasher: StableHasher<u128> = StableHasher::new(); diff --git a/src/libsyntax_pos/symbol.rs b/src/libsyntax_pos/symbol.rs index debac70545a..e49f1f28e5f 100644 --- a/src/libsyntax_pos/symbol.rs +++ b/src/libsyntax_pos/symbol.rs @@ -326,7 +326,7 @@ fn with_interner<T, F: FnOnce(&mut Interner) -> T>(f: F) -> T { /// destroyed. In particular, they must not access string contents. This can /// be fixed in the future by just leaking all strings until thread death /// somehow. -#[derive(Clone, Hash, PartialOrd, Eq, Ord)] +#[derive(Clone, Copy, Hash, PartialOrd, Eq, Ord)] pub struct InternedString { string: &'static str, } diff --git a/src/rustllvm/PassWrapper.cpp b/src/rustllvm/PassWrapper.cpp index 7fb1eafb30d..bca0881c08c 100644 --- a/src/rustllvm/PassWrapper.cpp +++ b/src/rustllvm/PassWrapper.cpp @@ -178,23 +178,17 @@ GEN_SUBTARGETS extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM, const char *Feature) { +#if LLVM_RUSTLLVM TargetMachine *Target = unwrap(TM); const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo(); const FeatureBitset &Bits = MCInfo->getFeatureBits(); - const llvm::SubtargetFeatureKV *FeatureEntry; - -#define SUBTARGET(x) \ - if (MCInfo->isCPUStringValid(x##SubTypeKV[0].Key)) { \ - FeatureEntry = x##FeatureKV; \ - } else - - GEN_SUBTARGETS { return false; } -#undef SUBTARGET - - while (strcmp(Feature, FeatureEntry->Key) != 0) - FeatureEntry++; + const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable(); - return (Bits & FeatureEntry->Value) == FeatureEntry->Value; + for (auto &FeatureEntry : FeatTable) + if (!strcmp(FeatureEntry.Key, Feature)) + return (Bits & FeatureEntry.Value) == 
FeatureEntry.Value; +#endif + return false; } enum class LLVMRustCodeModel { diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs new file mode 100644 index 00000000000..569d937c812 --- /dev/null +++ b/src/test/codegen/slice-init.rs @@ -0,0 +1,74 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +// CHECK-LABEL: @zero_sized_elem +#[no_mangle] +pub fn zero_sized_elem() { + // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: call void @llvm.memset.p0i8 + let x = [(); 4]; + drop(&x); +} + +// CHECK-LABEL: @zero_len_array +#[no_mangle] +pub fn zero_len_array() { + // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: call void @llvm.memset.p0i8 + let x = [4; 0]; + drop(&x); +} + +// CHECK-LABEL: @byte_array +#[no_mangle] +pub fn byte_array() { + // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4 + // CHECK-NOT: br label %slice_loop_header{{.*}} + let x = [7u8; 4]; + drop(&x); +} + +#[allow(dead_code)] +#[derive(Copy, Clone)] +enum Init { + Loop, + Memset, +} + +// CHECK-LABEL: @byte_enum_array +#[no_mangle] +pub fn byte_enum_array() { + // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4 + // CHECK-NOT: br label %slice_loop_header{{.*}} + let x = [Init::Memset; 4]; + drop(&x); +} + +// CHECK-LABEL: @zeroed_integer_array +#[no_mangle] +pub fn zeroed_integer_array() { + // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16 + 
// CHECK-NOT: br label %slice_loop_header{{.*}} + let x = [0u32; 4]; + drop(&x); +} + +// CHECK-LABEL: @nonzero_integer_array +#[no_mangle] +pub fn nonzero_integer_array() { + // CHECK: br label %slice_loop_header{{.*}} + // CHECK-NOT: call void @llvm.memset.p0i8 + let x = [0x1a_2b_3c_4d_u32; 4]; + drop(&x); +} diff --git a/src/test/codegen/vec-optimizes-away.rs b/src/test/codegen/vec-optimizes-away.rs new file mode 100644 index 00000000000..261564ed51a --- /dev/null +++ b/src/test/codegen/vec-optimizes-away.rs @@ -0,0 +1,21 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +// +// no-system-llvm +// compile-flags: -O +#![crate_type="lib"] + +#[no_mangle] +pub fn sum_me() -> i32 { + // CHECK-LABEL: @sum_me + // CHECK-NEXT: {{^.*:$}} + // CHECK-NEXT: ret i32 6 + vec![1, 2, 3].iter().sum::<i32>() +} diff --git a/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs b/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs new file mode 100644 index 00000000000..65f4b6350c4 --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs @@ -0,0 +1,51 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// aux-build:attribute-with-error.rs + +#![feature(proc_macro)] + +extern crate attribute_with_error; + +use attribute_with_error::foo; + +#[foo] +fn test1() { + let a: i32 = "foo"; + //~^ ERROR: mismatched types +} + +fn test2() { + #![foo] + + // FIXME: should have a type error here and assert it works but it doesn't +} + +trait A { + // FIXME: should have a #[foo] attribute here and assert that it works + fn foo(&self) { + let a: i32 = "foo"; + //~^ ERROR: mismatched types + } +} + +struct B; + +impl A for B { + #[foo] + fn foo(&self) { + let a: i32 = "foo"; + //~^ ERROR: mismatched types + } +} + +#[foo] +fn main() { +} diff --git a/src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs b/src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs new file mode 100644 index 00000000000..508f8dac571 --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs @@ -0,0 +1,30 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// aux-build:attributes-included.rs + +#![feature(proc_macro, rustc_attrs)] + +extern crate attributes_included; + +#[attributes_included::bar] +#[inline] +/// doc +#[attributes_included::foo] +#[inline] +/// doc +fn foo() { + let a: i32 = "foo"; //~ WARN: unused variable +} + +#[rustc_error] +fn main() { //~ ERROR: compilation successful + foo() +} diff --git a/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs new file mode 100644 index 00000000000..85a7a0bf633 --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs @@ -0,0 +1,24 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// force-host +// no-prefer-dynamic + +#![crate_type = "proc-macro"] +#![feature(proc_macro)] + +extern crate proc_macro; + +use proc_macro::TokenStream; + +#[proc_macro_attribute] +pub fn foo(_attr: TokenStream, input: TokenStream) -> TokenStream { + input.into_iter().collect() +} diff --git a/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs new file mode 100644 index 00000000000..a1efbb88a4d --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs @@ -0,0 +1,130 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// force-host +// no-prefer-dynamic + +#![feature(proc_macro)] +#![crate_type = "proc-macro"] + +extern crate proc_macro; + +use proc_macro::{TokenStream, TokenTree, TokenNode, Delimiter, Literal}; + +#[proc_macro_attribute] +pub fn foo(attr: TokenStream, input: TokenStream) -> TokenStream { + assert!(attr.is_empty()); + let input = input.into_iter().collect::<Vec<_>>(); + { + let mut cursor = &input[..]; + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_foo(&mut cursor); + assert!(cursor.is_empty()); + } + fold_stream(input.into_iter().collect()) +} + +#[proc_macro_attribute] +pub fn bar(attr: TokenStream, input: TokenStream) -> TokenStream { + assert!(attr.is_empty()); + let input = input.into_iter().collect::<Vec<_>>(); + { + let mut cursor = &input[..]; + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_invoc(&mut cursor); + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_foo(&mut cursor); + assert!(cursor.is_empty()); + } + input.into_iter().collect() +} + +fn assert_inline(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Op('#', _) => {} + _ => panic!("expected '#' char"), + } + match slice[1].kind { + TokenNode::Group(Delimiter::Bracket, _) => {} + _ => panic!("expected brackets"), + } + *slice = &slice[2..]; +} + +fn assert_doc(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Literal(_) => {} + _ => panic!("expected literal doc comment got other"), + } + *slice = &slice[1..]; +} + +fn assert_invoc(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Op('#', _) => {} + _ => panic!("expected '#' char"), + } 
+ match slice[1].kind { + TokenNode::Group(Delimiter::Bracket, _) => {} + _ => panic!("expected brackets"), + } + *slice = &slice[2..]; +} + +fn assert_foo(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Term(ref name) => assert_eq!(name.as_str(), "fn"), + _ => panic!("expected fn"), + } + match slice[1].kind { + TokenNode::Term(ref name) => assert_eq!(name.as_str(), "foo"), + _ => panic!("expected foo"), + } + match slice[2].kind { + TokenNode::Group(Delimiter::Parenthesis, ref s) => assert!(s.is_empty()), + _ => panic!("expected parens"), + } + match slice[3].kind { + TokenNode::Group(Delimiter::Brace, _) => {} + _ => panic!("expected braces"), + } + *slice = &slice[4..]; +} + +fn fold_stream(input: TokenStream) -> TokenStream { + input.into_iter().map(fold_tree).collect() +} + +fn fold_tree(input: TokenTree) -> TokenTree { + TokenTree { + span: input.span, + kind: fold_node(input.kind), + } +} + +fn fold_node(input: TokenNode) -> TokenNode { + match input { + TokenNode::Group(a, b) => TokenNode::Group(a, fold_stream(b)), + TokenNode::Op(a, b) => TokenNode::Op(a, b), + TokenNode::Term(a) => TokenNode::Term(a), + TokenNode::Literal(a) => { + if a.to_string() != "\"foo\"" { + TokenNode::Literal(a) + } else { + TokenNode::Literal(Literal::integer(3)) + } + } + } +} diff --git a/src/test/compile-fail/E0559.rs b/src/test/compile-fail/E0559.rs index fa6c885843e..e8b0915d2b5 100644 --- a/src/test/compile-fail/E0559.rs +++ b/src/test/compile-fail/E0559.rs @@ -15,5 +15,6 @@ enum Field { fn main() { let s = Field::Fool { joke: 0 }; //~^ ERROR E0559 - //~| NOTE field does not exist - did you mean `x`? 
+ //~| NOTE `Field::Fool` does not have this field + //~| NOTE available fields are: `x` } diff --git a/src/test/compile-fail/E0560.rs b/src/test/compile-fail/E0560.rs index c6326a0f977..955ef7ca99c 100644 --- a/src/test/compile-fail/E0560.rs +++ b/src/test/compile-fail/E0560.rs @@ -16,4 +16,5 @@ fn main() { let s = Simba { mother: 1, father: 0 }; //~^ ERROR E0560 //~| NOTE `Simba` does not have this field + //~| NOTE available fields are: `mother` } diff --git a/src/test/compile-fail/E0624.rs b/src/test/compile-fail/E0624.rs new file mode 100644 index 00000000000..952e0b31c4c --- /dev/null +++ b/src/test/compile-fail/E0624.rs @@ -0,0 +1,22 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +mod inner { + pub struct Foo; + + impl Foo { + fn method(&self) {} + } +} + +fn main() { + let foo = inner::Foo; + foo.method(); //~ ERROR method `method` is private [E0624] +} diff --git a/src/test/compile-fail/asm-bad-clobber.rs b/src/test/compile-fail/asm-bad-clobber.rs index 145662fd87c..b863e90a3b7 100644 --- a/src/test/compile-fail/asm-bad-clobber.rs +++ b/src/test/compile-fail/asm-bad-clobber.rs @@ -14,6 +14,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm, rustc_attrs)] diff --git a/src/test/compile-fail/asm-in-bad-modifier.rs b/src/test/compile-fail/asm-in-bad-modifier.rs index f0467e75223..cae41332795 100644 --- a/src/test/compile-fail/asm-in-bad-modifier.rs +++ b/src/test/compile-fail/asm-in-bad-modifier.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/compile-fail/asm-misplaced-option.rs b/src/test/compile-fail/asm-misplaced-option.rs index 37a26753531..e634238c6e1 100644 --- a/src/test/compile-fail/asm-misplaced-option.rs +++ b/src/test/compile-fail/asm-misplaced-option.rs @@ -14,6 +14,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm, rustc_attrs)] diff --git a/src/test/compile-fail/asm-out-assign-imm.rs b/src/test/compile-fail/asm-out-assign-imm.rs index f95e4410381..546d402252e 100644 --- a/src/test/compile-fail/asm-out-assign-imm.rs +++ b/src/test/compile-fail/asm-out-assign-imm.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/compile-fail/asm-out-no-modifier.rs b/src/test/compile-fail/asm-out-no-modifier.rs index acf575c003a..2e843ddac82 100644 --- a/src/test/compile-fail/asm-out-no-modifier.rs +++ b/src/test/compile-fail/asm-out-no-modifier.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git 
a/src/test/compile-fail/asm-out-read-uninit.rs b/src/test/compile-fail/asm-out-read-uninit.rs index bd180f6e5eb..c85a097b962 100644 --- a/src/test/compile-fail/asm-out-read-uninit.rs +++ b/src/test/compile-fail/asm-out-read-uninit.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs b/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs index c5557cee7cc..0e822aff01e 100644 --- a/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs +++ b/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs @@ -43,23 +43,19 @@ fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) { (a, b) } -// FIXME(#32330) -//#[cfg(transmute)] // one instantiations: BAD -//fn baz<'a,'b>(x: &'a u32) -> &'static u32 { -// bar(foo, x) //[transmute] ERROR E0495 -//} +#[cfg(transmute)] // one instantiations: BAD +fn baz<'a,'b>(x: &'a u32) -> &'static u32 { + bar(foo, x) //[transmute]~ ERROR E0495 +} -// FIXME(#32330) -//#[cfg(krisskross)] // two instantiations, mixing and matching: BAD -//fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) { -// let a = bar(foo, y); //[krisskross] ERROR E0495 -// let b = bar(foo, x); //[krisskross] ERROR E0495 -// (a, b) -//} +#[cfg(krisskross)] // two instantiations, mixing and matching: BAD +fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) { + let a = bar(foo, y); //[krisskross]~ ERROR E0495 + let b = bar(foo, x); //[krisskross]~ ERROR E0495 + (a, b) +} #[rustc_error] fn main() { } //[ok]~^ ERROR compilation successful //[oneuse]~^^ ERROR compilation successful -//[transmute]~^^^ ERROR compilation successful -//[krisskross]~^^^^ ERROR compilation successful diff --git a/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs b/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs 
index a15422e42d9..10fe612980d 100644 --- a/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs +++ b/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs @@ -42,35 +42,29 @@ fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { (a, b) } -// FIXME(#32330) -//#[cfg(oneuse)] // one instantiation: BAD -//fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { -// let f = foo; // <-- No consistent type can be inferred for `f` here. -// let a = bar(f, x); //[oneuse] ERROR E0495 -// let b = bar(f, y); -// (a, b) -//} +#[cfg(oneuse)] // one instantiation: BAD +fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { + let f = foo; // <-- No consistent type can be inferred for `f` here. + let a = bar(f, x); //[oneuse]~^ ERROR E0495 + let b = bar(f, y); + (a, b) +} -// FIXME(#32330) -//#[cfg(transmute)] // one instantiations: BAD -//fn baz<'a,'b>(x: Type<'a>) -> Type<'static> { -// // Cannot instantiate `foo` with any lifetime other than `'a`, -// // since it is provided as input. -// -// bar(foo, x) //[transmute] ERROR E0495 -//} +#[cfg(transmute)] // one instantiations: BAD +fn baz<'a,'b>(x: Type<'a>) -> Type<'static> { + // Cannot instantiate `foo` with any lifetime other than `'a`, + // since it is provided as input. 
-// FIXME(#32330) -//#[cfg(krisskross)] // two instantiations, mixing and matching: BAD -//fn transmute<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { -// let a = bar(foo, y); //[krisskross] ERROR E0495 -// let b = bar(foo, x); //[krisskross] ERROR E0495 -// (a, b) -//} + bar(foo, x) //[transmute]~ ERROR E0495 +} + +#[cfg(krisskross)] // two instantiations, mixing and matching: BAD +fn transmute<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { + let a = bar(foo, y); //[krisskross]~ ERROR E0495 + let b = bar(foo, x); //[krisskross]~ ERROR E0495 + (a, b) +} #[rustc_error] fn main() { } //[ok]~^ ERROR compilation successful -//[oneuse]~^^ ERROR compilation successful -//[transmute]~^^^ ERROR compilation successful -//[krisskross]~^^^^ ERROR compilation successful diff --git a/src/test/compile-fail/hr-subtype.rs b/src/test/compile-fail/hr-subtype.rs index 95e469ebcfd..c88d74d53ce 100644 --- a/src/test/compile-fail/hr-subtype.rs +++ b/src/test/compile-fail/hr-subtype.rs @@ -91,9 +91,6 @@ check! { free_inv_x_vs_free_inv_y: (fn(Inv<'x>), // - if we are covariant, then 'a and 'b can be set to the call-site // intersection; // - if we are contravariant, then 'a can be inferred to 'static. -// -// FIXME(#32330) this is true, but we are not currently impl'ing this -// full semantics check! { bound_a_b_vs_bound_a: (for<'a,'b> fn(&'a u32, &'b u32), for<'a> fn(&'a u32, &'a u32)) } check! { bound_co_a_b_vs_bound_co_a: (for<'a,'b> fn(Co<'a>, Co<'b>), diff --git a/src/test/compile-fail/issue-19922.rs b/src/test/compile-fail/issue-19922.rs index d7b2f2b3f99..938ccb343d4 100644 --- a/src/test/compile-fail/issue-19922.rs +++ b/src/test/compile-fail/issue-19922.rs @@ -15,5 +15,6 @@ enum Homura { fn main() { let homura = Homura::Akemi { kaname: () }; //~^ ERROR variant `Homura::Akemi` has no field named `kaname` - //~| NOTE field does not exist - did you mean `madoka`? 
+ //~| NOTE `Homura::Akemi` does not have this field + //~| NOTE available fields are: `madoka` } diff --git a/src/test/compile-fail/issue-35675.rs b/src/test/compile-fail/issue-35675.rs deleted file mode 100644 index c09e56cbc5b..00000000000 --- a/src/test/compile-fail/issue-35675.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// these two HELPs are actually in a new line between this line and the `enum Fruit` line -enum Fruit { //~ HELP possible candidate is found in another module, you can import it into scope - //~^ HELP possible candidate is found in another module, you can import it into scope - Apple(i64), - Orange(i64), -} - -fn should_return_fruit() -> Apple { - //~^ ERROR cannot find type `Apple` in this scope - //~| NOTE not found in this scope - //~| HELP you can try using the variant's enum - Apple(5) - //~^ ERROR cannot find function `Apple` in this scope - //~| NOTE not found in this scope -} - -fn should_return_fruit_too() -> Fruit::Apple { - //~^ ERROR expected type, found variant `Fruit::Apple` - //~| HELP you can try using the variant's enum - //~| NOTE not a type - Apple(5) - //~^ ERROR cannot find function `Apple` in this scope - //~| NOTE not found in this scope -} - -fn foo() -> Ok { - //~^ ERROR expected type, found variant `Ok` - //~| NOTE not a type - //~| HELP there is an enum variant - //~| HELP there is an enum variant - Ok(()) -} - -fn bar() -> Variant3 { - //~^ ERROR cannot find type `Variant3` in this scope - //~| HELP you can try using the variant's enum - //~| NOTE not found 
in this scope -} - -fn qux() -> Some { - //~^ ERROR expected type, found variant `Some` - //~| NOTE not a type - //~| HELP there is an enum variant - //~| HELP there is an enum variant - Some(1) -} - -fn main() {} - -mod x { - enum Enum { - Variant1, - Variant2(), - Variant3(usize), - Variant4 {}, - } -} diff --git a/src/test/compile-fail/issue-40510-1.rs b/src/test/compile-fail/issue-40510-1.rs new file mode 100644 index 00000000000..142092ff41e --- /dev/null +++ b/src/test/compile-fail/issue-40510-1.rs @@ -0,0 +1,23 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let mut x: Box<()> = Box::new(()); + + || { + &mut x + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful diff --git a/src/test/compile-fail/issue-34222.rs b/src/test/compile-fail/issue-40510-2.rs index 4609c0ccb1c..0fe56584822 100644 --- a/src/test/compile-fail/issue-34222.rs +++ b/src/test/compile-fail/issue-40510-2.rs @@ -9,10 +9,15 @@ // except according to those terms. 
#![feature(rustc_attrs)] -#![allow(warnings)] +#![allow(unused)] -#[rustc_error] -fn main() { //~ ERROR compilation successful - /// crash - let x = 0; +fn f() { + let x: Box<()> = Box::new(()); + + || { + &x + }; } + +#[rustc_error] +fn main() {} //~ ERROR compilation successful diff --git a/src/test/compile-fail/issue-40510-3.rs b/src/test/compile-fail/issue-40510-3.rs new file mode 100644 index 00000000000..afa8f15ee57 --- /dev/null +++ b/src/test/compile-fail/issue-40510-3.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let mut x: Vec<()> = Vec::new(); + + || { + || { + x.push(()) + } + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful diff --git a/src/test/compile-fail/issue-40510-4.rs b/src/test/compile-fail/issue-40510-4.rs new file mode 100644 index 00000000000..a39c500225b --- /dev/null +++ b/src/test/compile-fail/issue-40510-4.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let x: Vec<()> = Vec::new(); + + || { + || { + x.len() + } + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful diff --git a/src/test/compile-fail/numeric-fields.rs b/src/test/compile-fail/numeric-fields.rs index 00fde3025a6..d6e091a1472 100644 --- a/src/test/compile-fail/numeric-fields.rs +++ b/src/test/compile-fail/numeric-fields.rs @@ -13,7 +13,8 @@ struct S(u8, u16); fn main() { let s = S{0b1: 10, 0: 11}; //~^ ERROR struct `S` has no field named `0b1` - //~| NOTE field does not exist - did you mean `1`? + //~| NOTE `S` does not have this field + //~| NOTE available fields are: `0`, `1` match s { S{0: a, 0x1: b, ..} => {} //~^ ERROR does not have a field named `0x1` diff --git a/src/test/compile-fail/struct-fields-too-many.rs b/src/test/compile-fail/struct-fields-too-many.rs index 0848ada731a..b1af142ad0f 100644 --- a/src/test/compile-fail/struct-fields-too-many.rs +++ b/src/test/compile-fail/struct-fields-too-many.rs @@ -18,5 +18,6 @@ fn main() { bar: 0 //~^ ERROR struct `BuildData` has no field named `bar` //~| NOTE `BuildData` does not have this field + //~| NOTE available fields are: `foo` }; } diff --git a/src/test/compile-fail/suggest-private-fields.rs b/src/test/compile-fail/suggest-private-fields.rs index 3672e0e90c2..d0752b5f02f 100644 --- a/src/test/compile-fail/suggest-private-fields.rs +++ b/src/test/compile-fail/suggest-private-fields.rs @@ -27,7 +27,8 @@ fn main () { //~| NOTE field does not exist - did you mean `a`? bb: 20, //~^ ERROR struct `xc::B` has no field named `bb` - //~| NOTE field does not exist - did you mean `a`? 
+ //~| NOTE `xc::B` does not have this field + //~| NOTE available fields are: `a` }; // local crate struct let l = A { diff --git a/src/test/compile-fail/union/union-fields.rs b/src/test/compile-fail/union/union-fields.rs index b5d582a5746..124b16f99b1 100644 --- a/src/test/compile-fail/union/union-fields.rs +++ b/src/test/compile-fail/union/union-fields.rs @@ -20,6 +20,7 @@ fn main() { let u = U { a: 0, b: 1, c: 2 }; //~ ERROR union expressions should have exactly one field //~^ ERROR union `U` has no field named `c` //~| NOTE `U` does not have this field + //~| NOTE available fields are: `a`, `b` let u = U { ..u }; //~ ERROR union expressions should have exactly one field //~^ ERROR functional record update syntax requires a struct diff --git a/src/test/compile-fail/useless_comment.rs b/src/test/compile-fail/useless_comment.rs new file mode 100644 index 00000000000..a1172bb214d --- /dev/null +++ b/src/test/compile-fail/useless_comment.rs @@ -0,0 +1,30 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![deny(unused_doc_comment)] + +fn foo() { + /// a //~ ERROR doc comment not used by rustdoc + let x = 12; + + /// b //~ doc comment not used by rustdoc + match x { + /// c //~ ERROR doc comment not used by rustdoc + 1 => {}, + _ => {} + } + + /// foo //~ ERROR doc comment not used by rustdoc + unsafe {} +} + +fn main() { + foo(); +} \ No newline at end of file diff --git a/src/test/mir-opt/README.md b/src/test/mir-opt/README.md index 28a124e3c61..d999ff97551 100644 --- a/src/test/mir-opt/README.md +++ b/src/test/mir-opt/README.md @@ -57,13 +57,6 @@ the lines being too long. compiletest handles dumping the MIR before and after every pass for you. The test writer only has to specify the file names of the dumped files (not the -full path to the file) and what lines to expect. I added an option to rustc +full path to the file) and what lines to expect. There is an option to rustc that tells it to dump the mir into some directly (rather then always dumping to -the current directory). - -Lines match ignoring whitespace, and the prefix "//" is removed of course. - -It also currently strips trailing comments -- partly because the full file path -in "scope comments" is unpredictable and partly because tidy complains about -the lines being too long. - +the current directory). 
diff --git a/src/test/mir-opt/basic_assignment.rs b/src/test/mir-opt/basic_assignment.rs index ef5158a403a..d3bf7f68785 100644 --- a/src/test/mir-opt/basic_assignment.rs +++ b/src/test/mir-opt/basic_assignment.rs @@ -47,42 +47,36 @@ fn main() { // StorageDead(_3); // StorageLive(_4); // _4 = std::option::Option<std::boxed::Box<u32>>::None; +// StorageLive(_5); // StorageLive(_6); -// StorageLive(_7); -// _7 = _4; -// replace(_6 <- _7) -> [return: bb6, unwind: bb7]; +// _6 = _4; +// replace(_5 <- _6) -> [return: bb1, unwind: bb5]; // } // bb1: { -// resume; +// drop(_6) -> [return: bb6, unwind: bb4]; // } // bb2: { -// drop(_4) -> bb1; +// resume; // } // bb3: { -// goto -> bb2; +// drop(_4) -> bb2; // } // bb4: { -// drop(_6) -> bb3; +// drop(_5) -> bb3; // } // bb5: { -// goto -> bb4; +// drop(_6) -> bb4; // } // bb6: { -// drop(_7) -> [return: bb8, unwind: bb4]; +// StorageDead(_6); +// _0 = (); +// drop(_5) -> [return: bb7, unwind: bb3]; // } // bb7: { -// drop(_7) -> bb5; +// StorageDead(_5); +// drop(_4) -> bb8; // } // bb8: { -// StorageDead(_7); -// _0 = (); -// drop(_6) -> [return: bb9, unwind: bb2]; -// } -// bb9: { -// StorageDead(_6); -// drop(_4) -> bb10; -// } -// bb10: { // StorageDead(_4); // StorageDead(_2); // StorageDead(_1); diff --git a/src/test/mir-opt/deaggregator_test.rs b/src/test/mir-opt/deaggregator_test.rs index f136d74fa51..81dd1932894 100644 --- a/src/test/mir-opt/deaggregator_test.rs +++ b/src/test/mir-opt/deaggregator_test.rs @@ -25,7 +25,7 @@ fn main() {} // bb0: { // _2 = _1; // _3 = _2; -// _0 = Baz { x: _3, y: const F32(0), z: const false }; +// _0 = Baz { x: _3, y: const 0f32, z: const false }; // return; // } // END rustc.node13.Deaggregator.before.mir @@ -34,7 +34,7 @@ fn main() {} // _2 = _1; // _3 = _2; // (_0.0: usize) = _3; -// (_0.1: f32) = const F32(0); +// (_0.1: f32) = const 0f32; // (_0.2: bool) = const false; // return; // } diff --git a/src/test/mir-opt/end_region_4.rs b/src/test/mir-opt/end_region_4.rs index 
16ade9f96fd..bfb1b3b6528 100644 --- a/src/test/mir-opt/end_region_4.rs +++ b/src/test/mir-opt/end_region_4.rs @@ -32,41 +32,41 @@ fn foo(i: i32) { // START rustc.node4.SimplifyCfg-qualify-consts.after.mir // let mut _0: (); // let _1: D; -// let _3: i32; -// let _4: &'6_2rce i32; +// let _2: i32; +// let _3: &'6_2rce i32; // let _7: &'6_4rce i32; -// let mut _5: (); -// let mut _6: i32; -// +// let mut _4: (); +// let mut _5: i32; +// let mut _6: (); // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_2); +// _2 = const 0i32; // StorageLive(_3); -// _3 = const 0i32; -// StorageLive(_4); -// _4 = &'6_2rce _3; -// StorageLive(_6); -// _6 = (*_4); -// _5 = const foo(_6) -> [return: bb2, unwind: bb3]; +// _3 = &'6_2rce _2; +// StorageLive(_5); +// _5 = (*_3); +// _4 = const foo(_5) -> [return: bb1, unwind: bb3]; // } // bb1: { -// resume; -// } -// bb2: { -// StorageDead(_6); +// StorageDead(_5); // StorageLive(_7); -// _7 = &'6_4rce _3; +// _7 = &'6_4rce _2; // _0 = (); // StorageDead(_7); // EndRegion('6_4rce); -// StorageDead(_4); -// EndRegion('6_2rce); // StorageDead(_3); +// EndRegion('6_2rce); +// StorageDead(_2); // drop(_1) -> bb4; // } +// bb2: { +// resume; +// } // bb3: { // EndRegion('6_2rce); -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_1); diff --git a/src/test/mir-opt/end_region_5.rs b/src/test/mir-opt/end_region_5.rs index 513632a4cdf..773a348a939 100644 --- a/src/test/mir-opt/end_region_5.rs +++ b/src/test/mir-opt/end_region_5.rs @@ -31,32 +31,31 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 { // let mut _0: (); // let _1: D; // let mut _2: (); -// let mut _3: (); -// let mut _4: [closure@NodeId(18) d: &'19mce D]; -// let mut _5: &'19mce D; -// +// let mut _3: [closure@NodeId(18) d:&'19mce D]; +// let mut _4: &'19mce D; +// let mut _5: (); // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_3); // StorageLive(_4); -// StorageLive(_5); -// _5 = &'19mce _1; 
-// _4 = [closure@NodeId(18)] { d: _5 }; -// StorageDead(_5); -// _3 = const foo(_4) -> [return: bb2, unwind: bb3]; +// _4 = &'19mce _1; +// _3 = [closure@NodeId(18)] { d: _4 }; +// StorageDead(_4); +// _2 = const foo(_3) -> [return: bb1, unwind: bb3]; // } // bb1: { -// resume; -// } -// bb2: { -// StorageDead(_4); +// StorageDead(_3); // EndRegion('19mce); // _0 = (); // drop(_1) -> bb4; // } +// bb2: { +// resume; +// } // bb3: { // EndRegion('19mce); -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_1); diff --git a/src/test/mir-opt/end_region_6.rs b/src/test/mir-opt/end_region_6.rs index e82556f3ce4..112c93843e0 100644 --- a/src/test/mir-opt/end_region_6.rs +++ b/src/test/mir-opt/end_region_6.rs @@ -27,35 +27,35 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 { // END RUST SOURCE // START rustc.node4.SimplifyCfg-qualify-consts.after.mir +// fn main() -> () { // let mut _0: (); // let _1: D; // let mut _2: (); -// let mut _3: (); -// let mut _4: [closure@NodeId(22) d:&'23mce D]; -// let mut _5: &'23mce D; -// +// let mut _3: [closure@NodeId(22) d:&'23mce D]; +// let mut _4: &'23mce D; +// let mut _5: (); // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_3); // StorageLive(_4); -// StorageLive(_5); -// _5 = &'23mce _1; -// _4 = [closure@NodeId(22)] { d: _5 }; -// StorageDead(_5); -// _3 = const foo(_4) -> [return: bb2, unwind: bb3]; +// _4 = &'23mce _1; +// _3 = [closure@NodeId(22)] { d: _4 }; +// StorageDead(_4); +// _2 = const foo(_3) -> [return: bb1, unwind: bb3]; // } // bb1: { -// resume; -// } -// bb2: { -// StorageDead(_4); +// StorageDead(_3); // EndRegion('23mce); // _0 = (); // drop(_1) -> bb4; // } +// bb2: { +// resume; +// } // bb3: { // EndRegion('23mce); -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_1); diff --git a/src/test/mir-opt/end_region_7.rs b/src/test/mir-opt/end_region_7.rs index 3fbd3f36865..913986ae816 100644 --- a/src/test/mir-opt/end_region_7.rs +++ 
b/src/test/mir-opt/end_region_7.rs @@ -31,18 +31,18 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 { // let mut _0: (); // let _1: D; // let mut _2: (); -// let mut _3: (); -// let mut _4: [closure@NodeId(22) d:D]; -// let mut _5: D; +// let mut _3: [closure@NodeId(22) d:D]; +// let mut _4: D; +// let mut _5: (); // // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_3); // StorageLive(_4); -// StorageLive(_5); -// _5 = _1; -// _4 = [closure@NodeId(22)] { d: _5 }; -// drop(_5) -> [return: bb4, unwind: bb3]; +// _4 = _1; +// _3 = [closure@NodeId(22)] { d: _4 }; +// drop(_4) -> [return: bb4, unwind: bb3]; // } // bb1: { // resume; @@ -51,17 +51,17 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 { // drop(_1) -> bb1; // } // bb3: { -// drop(_4) -> bb2; +// drop(_3) -> bb2; // } // bb4: { -// StorageDead(_5); -// _3 = const foo(_4) -> [return: bb5, unwind: bb3]; +// StorageDead(_4); +// _2 = const foo(_3) -> [return: bb5, unwind: bb3]; // } // bb5: { -// drop(_4) -> [return: bb6, unwind: bb2]; +// drop(_3) -> [return: bb6, unwind: bb2]; // } // bb6: { -// StorageDead(_4); +// StorageDead(_3); // _0 = (); // drop(_1) -> bb7; // } @@ -76,16 +76,16 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 { // fn main::{{closure}}(_1: [closure@NodeId(22) d:D]) -> i32 { // let mut _0: i32; // let _2: &'14_0rce D; -// let mut _3: (); -// let mut _4: i32; +// let mut _3: i32; +// let mut _4: (); // // bb0: { // StorageLive(_2); // _2 = &'14_0rce (_1.0: D); -// StorageLive(_4); -// _4 = ((*_2).0: i32); -// _0 = _4; -// StorageDead(_4); +// StorageLive(_3); +// _3 = ((*_2).0: i32); +// _0 = _3; +// StorageDead(_3); // StorageDead(_2); // EndRegion('14_0rce); // drop(_1) -> bb1; diff --git a/src/test/mir-opt/end_region_8.rs b/src/test/mir-opt/end_region_8.rs index 7fb3f0b9118..dc8f8ea11f5 100644 --- a/src/test/mir-opt/end_region_8.rs +++ b/src/test/mir-opt/end_region_8.rs @@ -29,44 +29,43 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 { // END RUST SOURCE // START 
rustc.node4.SimplifyCfg-qualify-consts.after.mir // fn main() -> () { -// let mut _0: (); -// let _1: D; -// let _3: &'6_1rce D; -// let mut _2: (); -// let mut _4: (); -// let mut _5: [closure@NodeId(22) r:&'6_1rce D]; -// let mut _6: &'6_1rce D; -// -// bb0: { -// StorageLive(_1); -// _1 = D::{{constructor}}(const 0i32,); -// StorageLive(_3); -// _3 = &'6_1rce _1; -// StorageLive(_5); -// StorageLive(_6); -// _6 = _3; -// _5 = [closure@NodeId(22)] { r: _6 }; -// StorageDead(_6); -// _4 = const foo(_5) -> [return: bb2, unwind: bb3]; -// } -// bb1: { -// resume; -// } -// bb2: { -// StorageDead(_5); -// _0 = (); -// StorageDead(_3); -// EndRegion('6_1rce); -// drop(_1) -> bb4; -// } -// bb3: { -// EndRegion('6_1rce); -// drop(_1) -> bb1; -// } -// bb4: { -// StorageDead(_1); -// return; -// } +// let mut _0: (); +// let _1: D; +// let _2: &'6_1rce D; +// let mut _3: (); +// let mut _4: [closure@NodeId(22) r:&'6_1rce D]; +// let mut _5: &'6_1rce D; +// let mut _6: (); +// bb0: { +// StorageLive(_1); +// _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_2); +// _2 = &'6_1rce _1; +// StorageLive(_4); +// StorageLive(_5); +// _5 = _2; +// _4 = [closure@NodeId(22)] { r: _5 }; +// StorageDead(_5); +// _3 = const foo(_4) -> [return: bb1, unwind: bb3]; +// } +// bb1: { +// StorageDead(_4); +// _0 = (); +// StorageDead(_2); +// EndRegion('6_1rce); +// drop(_1) -> bb4; +// } +// bb2: { +// resume; +// } +// bb3: { +// EndRegion('6_1rce); +// drop(_1) -> bb2; +// } +// bb4: { +// StorageDead(_1); +// return; +// } // } // END rustc.node4.SimplifyCfg-qualify-consts.after.mir diff --git a/src/test/mir-opt/issue-41110.rs b/src/test/mir-opt/issue-41110.rs index fec635b3abf..1daa18256dc 100644 --- a/src/test/mir-opt/issue-41110.rs +++ b/src/test/mir-opt/issue-41110.rs @@ -34,18 +34,23 @@ impl S { // END RUST SOURCE // START rustc.node4.ElaborateDrops.after.mir +// let mut _0: (); +// let _1: (); // let mut _2: S; -// let mut _3: (); +// let mut _3: S; // let mut _4: S; -// 
let mut _5: S; +// let mut _5: (); // let mut _6: bool; // // bb0: { // END rustc.node4.ElaborateDrops.after.mir // START rustc.node13.ElaborateDrops.after.mir -// let mut _2: (); -// let mut _4: (); -// let mut _5: S; +// let mut _0: (); +// let _1: S; +// let mut _2: S; +// let mut _3: (); +// let mut _4: S; +// let mut _5: (); // let mut _6: S; // let mut _7: bool; // diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs new file mode 100644 index 00000000000..9ac76a5f4ea --- /dev/null +++ b/src/test/mir-opt/validate_1.rs @@ -0,0 +1,59 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=1 + +struct Test(i32); + +impl Test { + // Make sure we run the pass on a method, not just on bare functions. + fn foo(&self, _x: &mut i32) {} +} + +fn main() { + let mut x = 0; + Test(0).foo(&mut x); + + // Also test closures + let c = |x: &mut i32| { let y = &*x; *y }; + c(&mut x); +} + +// FIXME: Also test code generated inside the closure, make sure it has validation. Unfortunately, +// the interesting lines of code also contain name of the source file, so we cannot test for it. 
+ +// END RUST SOURCE +// START rustc.node12.EraseRegions.after.mir +// bb0: { +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(5) => validate_1/8cd878b::{{impl}}[0]::foo[0] }, BrAnon(0)) Test, _2: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(5) => validate_1/8cd878b::{{impl}}[0]::foo[0] }, BrAnon(1)) mut i32]); +// return; +// } +// END rustc.node12.EraseRegions.after.mir +// START rustc.node23.EraseRegions.after.mir +// fn main() -> () { +// bb0: { +// Validate(Suspend(ReScope(Misc(NodeId(34)))), [_1: i32]); +// _6 = &ReErased mut _1; +// Validate(Acquire, [(*_6): i32/ReScope(Misc(NodeId(34)))]); +// Validate(Suspend(ReScope(Misc(NodeId(34)))), [(*_6): i32/ReScope(Misc(NodeId(34)))]); +// _5 = &ReErased mut (*_6); +// Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(34)))]); +// Validate(Release, [_2: (), _3: &ReScope(Misc(NodeId(34))) Test, _5: &ReScope(Misc(NodeId(34))) mut i32]); +// _2 = const Test::foo(_3, _5) -> bb1; +// } +// +// bb1: { +// Validate(Acquire, [_2: ()]); +// EndRegion(ReScope(Misc(NodeId(34)))); +// return; +// } +// } +// END rustc.node23.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_2.rs b/src/test/mir-opt/validate_2.rs new file mode 100644 index 00000000000..37ebd720d52 --- /dev/null +++ b/src/test/mir-opt/validate_2.rs @@ -0,0 +1,27 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=1 + +fn main() { + let _x : Box<[i32]> = Box::new([1, 2, 3]); +} + +// END RUST SOURCE +// START rustc.node4.EraseRegions.after.mir +// fn main() -> () { +// bb1: { +// Validate(Release, [_2: std::boxed::Box<[i32; 3]>]); +// _1 = _2 as std::boxed::Box<[i32]> (Unsize); +// Validate(Acquire, [_1: std::boxed::Box<[i32]>]); +// } +// } +// END rustc.node4.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_3.rs b/src/test/mir-opt/validate_3.rs new file mode 100644 index 00000000000..9140cf5768f --- /dev/null +++ b/src/test/mir-opt/validate_3.rs @@ -0,0 +1,50 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=1 + +struct Test { + x: i32 +} + +fn foo(_x: &i32) {} + +fn main() { + // These internal unsafe functions should have no effect on the code generation. 
+ unsafe fn _unused1() {} + fn _unused2(x: *const i32) -> i32 { unsafe { *x }} + + let t = Test { x: 0 }; + let t = &t; + foo(&t.x); +} + +// END RUST SOURCE +// START rustc.node16.EraseRegions.after.mir +// fn main() -> () { +// let mut _5: &ReErased i32; +// bb0: { +// Validate(Suspend(ReScope(Misc(NodeId(46)))), [((*_2).0: i32): i32/ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 3 })) (imm)]); +// _5 = &ReErased ((*_2).0: i32); +// Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Suspend(ReScope(Misc(NodeId(46)))), [(*_5): i32/ReScope(Misc(NodeId(46))) (imm)]); +// _4 = &ReErased (*_5); +// Validate(Acquire, [(*_4): i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Release, [_3: (), _4: &ReScope(Misc(NodeId(46))) i32]); +// _3 = const foo(_4) -> bb1; +// } +// bb1: { +// EndRegion(ReScope(Misc(NodeId(46)))); +// EndRegion(ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 3 }))); +// return; +// } +// } +// END rustc.node16.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_4.rs b/src/test/mir-opt/validate_4.rs new file mode 100644 index 00000000000..591de975740 --- /dev/null +++ b/src/test/mir-opt/validate_4.rs @@ -0,0 +1,60 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=1 + +// Make sure unsafe fns and fns with an unsafe block only get restricted validation. 
+ +unsafe fn write_42(x: *mut i32) -> bool { + let test_closure = |x: *mut i32| *x = 23; + test_closure(x); + *x = 42; + true +} + +fn test(x: &mut i32) { + unsafe { write_42(x) }; +} + +fn main() { + test(&mut 0); + + let test_closure = unsafe { |x: &mut i32| write_42(x) }; + test_closure(&mut 0); +} + +// FIXME: Also test code generated inside the closure, make sure it only does restricted validation +// because it is entirely inside an unsafe block. Unfortunately, the interesting lines of code also +// contain name of the source file, so we cannot test for it. + +// END RUST SOURCE +// START rustc.node4.EraseRegions.after.mir +// fn write_42(_1: *mut i32) -> bool { +// bb0: { +// Validate(Acquire, [_1: *mut i32]); +// Validate(Release, [_1: *mut i32]); +// return; +// } +// } +// END rustc.node4.EraseRegions.after.mir +// START rustc.node31.EraseRegions.after.mir +// fn test(_1: &ReErased mut i32) -> () { +// bb0: { +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_4/8cd878b::test[0] }, BrAnon(0)) mut i32]); +// Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_4/8cd878b::test[0] }, BrAnon(0)) mut i32]); +// _3 = const write_42(_4) -> bb1; +// } +// bb1: { +// Validate(Acquire, [_3: bool]); +// Validate(Release, [_3: bool]); +// } +// } +// END rustc.node31.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_5.rs b/src/test/mir-opt/validate_5.rs new file mode 100644 index 00000000000..e9919af9fd3 --- /dev/null +++ b/src/test/mir-opt/validate_5.rs @@ -0,0 +1,44 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=2 + +// Make sure unsafe fns and fns with an unsafe block only get full validation. + +unsafe fn write_42(x: *mut i32) -> bool { + *x = 42; + true +} + +fn test(x: &mut i32) { + unsafe { write_42(x) }; +} + +fn main() { + test(&mut 0); + + let test_closure = unsafe { |x: &mut i32| write_42(x) }; + test_closure(&mut 0); +} + +// FIXME: Also test code generated inside the closure, make sure it has validation. Unfortunately, +// the interesting lines of code also contain name of the source file, so we cannot test for it. + +// END RUST SOURCE +// START rustc.node17.EraseRegions.after.mir +// fn test(_1: &ReErased mut i32) -> () { +// bb0: { +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_5/8cd878b::test[0] }, BrAnon(0)) mut i32]); +// Validate(Release, [_3: bool, _4: *mut i32]); +// _3 = const write_42(_4) -> bb1; +// } +// } +// END rustc.node17.EraseRegions.after.mir diff --git a/src/test/run-make/codegen-options-parsing/Makefile b/src/test/run-make/codegen-options-parsing/Makefile index 2b8b0712cc7..dc46a8a04ef 100644 --- a/src/test/run-make/codegen-options-parsing/Makefile +++ b/src/test/run-make/codegen-options-parsing/Makefile @@ -25,7 +25,7 @@ all: # Should not link dead code... $(RUSTC) -Z print-link-args dummy.rs 2>&1 | \ - grep -e '--gc-sections' -e '-dead_strip' -e '/OPT:REF' + grep -e '--gc-sections' -e '-z[^ ]* [^ ]*\<ignore\>' -e '-dead_strip' -e '/OPT:REF' # ... unless you specifically ask to keep it $(RUSTC) -Z print-link-args -C link-dead-code dummy.rs 2>&1 | \ - (! grep -e '--gc-sections' -e '-dead_strip' -e '/OPT:REF') + (! 
grep -e '--gc-sections' -e '-z[^ ]* [^ ]*\<ignore\>' -e '-dead_strip' -e '/OPT:REF') diff --git a/src/test/run-make/llvm-phase/test.rs b/src/test/run-make/llvm-phase/test.rs index a75dc7e57a9..7a63871f19e 100644 --- a/src/test/run-make/llvm-phase/test.rs +++ b/src/test/run-make/llvm-phase/test.rs @@ -54,11 +54,7 @@ impl<'a> CompilerCalls<'a> for JitCalls { state.session.abort_if_errors(); let trans = state.trans.unwrap(); assert_eq!(trans.modules.len(), 1); - let rs_llmod = match trans.modules[0].source { - ModuleSource::Preexisting(_) => unimplemented!(), - ModuleSource::Translated(llvm) => llvm.llmod, - }; - unsafe { rustc_llvm::LLVMDumpModule(rs_llmod) }; + println!("name of compiled module = {}", trans.modules[0].name); }); cc } diff --git a/src/test/run-make/print-cfg/Makefile b/src/test/run-make/print-cfg/Makefile index a820a463f4a..82fa3f6a3c5 100644 --- a/src/test/run-make/print-cfg/Makefile +++ b/src/test/run-make/print-cfg/Makefile @@ -5,7 +5,7 @@ all: default $(RUSTC) --target x86_64-pc-windows-gnu --print cfg | grep x86_64 $(RUSTC) --target i686-pc-windows-msvc --print cfg | grep msvc $(RUSTC) --target i686-apple-darwin --print cfg | grep macos - $(RUSTC) --target i686-unknown-linux-gnu --print cfg | grep sse2 + $(RUSTC) --target i686-unknown-linux-gnu --print cfg | grep gnu ifdef IS_WINDOWS default: diff --git a/src/test/run-make/tools.mk b/src/test/run-make/tools.mk index 693573d53a4..d13ba11e96a 100644 --- a/src/test/run-make/tools.mk +++ b/src/test/run-make/tools.mk @@ -82,7 +82,7 @@ ifeq ($(UNAME),Bitrig) EXTRACXXFLAGS := -lc++ -lc++abi else ifeq ($(UNAME),SunOS) - EXTRACFLAGS := -lm -lpthread -lposix4 -lsocket + EXTRACFLAGS := -lm -lpthread -lposix4 -lsocket -lresolv else ifeq ($(UNAME),OpenBSD) EXTRACFLAGS := -lm -lpthread diff --git a/src/test/run-make/treat-err-as-bug/Makefile b/src/test/run-make/treat-err-as-bug/Makefile new file mode 100644 index 00000000000..a8fa2d4e0f8 --- /dev/null +++ b/src/test/run-make/treat-err-as-bug/Makefile @@ -0,0 
+1,5 @@ +-include ../tools.mk + +all: + $(RUSTC) err.rs -Z treat-err-as-bug 2>&1 \ + | grep -q "panicked at 'encountered error with .-Z treat_err_as_bug'" diff --git a/src/test/run-make/treat-err-as-bug/err.rs b/src/test/run-make/treat-err-as-bug/err.rs new file mode 100644 index 00000000000..078495663ac --- /dev/null +++ b/src/test/run-make/treat-err-as-bug/err.rs @@ -0,0 +1,13 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![crate_type="rlib"] + +pub static C: u32 = 0-1; diff --git a/src/test/run-pass/conditional-compile-arch.rs b/src/test/run-pass/conditional-compile-arch.rs index 6e3e4be0d8e..3d8bf9333fd 100644 --- a/src/test/run-pass/conditional-compile-arch.rs +++ b/src/test/run-pass/conditional-compile-arch.rs @@ -39,3 +39,6 @@ pub fn main() { } #[cfg(target_arch = "wasm32")] pub fn main() { } + +#[cfg(target_arch = "sparc64")] +pub fn main() { } diff --git a/src/test/run-pass/core-run-destroy.rs b/src/test/run-pass/core-run-destroy.rs index c5b5b6b24ab..22fbeb2d5d0 100644 --- a/src/test/run-pass/core-run-destroy.rs +++ b/src/test/run-pass/core-run-destroy.rs @@ -15,7 +15,6 @@ // memory, which makes for some *confusing* logs. That's why these are here // instead of in std. 
-#![reexport_test_harness_main = "test_main"] #![feature(libc, std_misc, duration)] extern crate libc; diff --git a/src/test/run-pass/foreign-call-no-runtime.rs b/src/test/run-pass/foreign-call-no-runtime.rs index 697e9074c44..dd5c075c39b 100644 --- a/src/test/run-pass/foreign-call-no-runtime.rs +++ b/src/test/run-pass/foreign-call-no-runtime.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten no threads support #![feature(libc)] diff --git a/src/test/run-pass/issue-13304.rs b/src/test/run-pass/issue-13304.rs index e1c2c5684fb..5a743d7b547 100644 --- a/src/test/run-pass/issue-13304.rs +++ b/src/test/run-pass/issue-13304.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten #![feature(io, process_capture)] diff --git a/src/test/run-pass/issue-16272.rs b/src/test/run-pass/issue-16272.rs index d4f3d15b320..f86be2d7c99 100644 --- a/src/test/run-pass/issue-16272.rs +++ b/src/test/run-pass/issue-16272.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten use std::process::Command; diff --git a/src/test/run-pass/issue-20091.rs b/src/test/run-pass/issue-20091.rs index 52c7911075a..1ee47a69d0c 100644 --- a/src/test/run-pass/issue-20091.rs +++ b/src/test/run-pass/issue-20091.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten #![feature(std_misc, os)] diff --git a/src/test/run-pass/issue-43132.rs b/src/test/run-pass/issue-43132.rs new file mode 100644 index 00000000000..64b3b092b89 --- /dev/null +++ b/src/test/run-pass/issue-43132.rs @@ -0,0 +1,74 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(unused)] + +fn main() { +} + +fn foo() { + let b = mk::< + Forward<(Box<Future<Error = u32>>,)>, + >(); + b.map_err(|_| ()).join(); +} + +fn mk<T>() -> T { + loop {} +} + +impl<I: Future<Error = E>, E> Future for (I,) { + type Error = E; +} + +struct Forward<T: Future> { + _a: T, +} + +impl<T: Future> Future for Forward<T> +where + T::Error: From<u32>, +{ + type Error = T::Error; +} + +trait Future { + type Error; + + fn map_err<F, E>(self, _: F) -> (Self, F) + where + F: FnOnce(Self::Error) -> E, + Self: Sized, + { + loop {} + } + + fn join(self) -> (MaybeDone<Self>, ()) + where + Self: Sized, + { + loop {} + } +} + +impl<S: ?Sized + Future> Future for Box<S> { + type Error = S::Error; +} + +enum MaybeDone<A: Future> { + _Done(A::Error), +} + +impl<U, A: Future, F> Future for (A, F) +where + F: FnOnce(A::Error) -> U, +{ + type Error = U; +} diff --git a/src/test/run-pass/process-spawn-with-unicode-params.rs b/src/test/run-pass/process-spawn-with-unicode-params.rs index d3d847127ee..550c6d6ab67 100644 --- a/src/test/run-pass/process-spawn-with-unicode-params.rs +++ b/src/test/run-pass/process-spawn-with-unicode-params.rs @@ -16,7 +16,6 @@ // non-ASCII characters. The child process ensures all the strings are // intact. 
-// ignore-aarch64 // ignore-emscripten use std::io::prelude::*; diff --git a/src/test/run-pass/sigpipe-should-be-ignored.rs b/src/test/run-pass/sigpipe-should-be-ignored.rs index 4eb4720e8d7..5aa4faa1365 100644 --- a/src/test/run-pass/sigpipe-should-be-ignored.rs +++ b/src/test/run-pass/sigpipe-should-be-ignored.rs @@ -11,7 +11,6 @@ // Be sure that when a SIGPIPE would have been received that the entire process // doesn't die in a ball of fire, but rather it's gracefully handled. -// ignore-aarch64 // ignore-emscripten use std::env; diff --git a/src/test/run-pass/sse2.rs b/src/test/run-pass/sse2.rs index 8d88c17af79..c27f83011cb 100644 --- a/src/test/run-pass/sse2.rs +++ b/src/test/run-pass/sse2.rs @@ -7,6 +7,7 @@ // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. +// min-llvm-version 4.0 #![feature(cfg_target_feature)] diff --git a/src/test/run-pass/union/union-basic.rs b/src/test/run-pass/union/union-basic.rs index 5e5b2d4d7ce..de744520cc6 100644 --- a/src/test/run-pass/union/union-basic.rs +++ b/src/test/run-pass/union/union-basic.rs @@ -12,6 +12,7 @@ // FIXME: This test case makes little-endian assumptions. // ignore-s390x +// ignore-sparc extern crate union; use std::mem::{size_of, align_of, zeroed}; diff --git a/src/test/ui/const-eval/issue-43197.rs b/src/test/ui/const-eval/issue-43197.rs new file mode 100644 index 00000000000..1d4ded6e712 --- /dev/null +++ b/src/test/ui/const-eval/issue-43197.rs @@ -0,0 +1,21 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(const_fn)] + +const fn foo(x: u32) -> u32 { + x +} + +fn main() { + const X: u32 = 0-1; + const Y: u32 = foo(0-1); + println!("{} {}", X, Y); +} diff --git a/src/test/ui/const-eval/issue-43197.stderr b/src/test/ui/const-eval/issue-43197.stderr new file mode 100644 index 00000000000..5ff80060eac --- /dev/null +++ b/src/test/ui/const-eval/issue-43197.stderr @@ -0,0 +1,28 @@ +warning: constant evaluation error: attempt to subtract with overflow. This will become a HARD ERROR in the future + --> $DIR/issue-43197.rs:18:20 + | +18 | const X: u32 = 0-1; + | ^^^ + | + = note: #[warn(const_err)] on by default + +warning: constant evaluation error: attempt to subtract with overflow. This will become a HARD ERROR in the future + --> $DIR/issue-43197.rs:19:20 + | +19 | const Y: u32 = foo(0-1); + | ^^^^^^^^ + +error[E0080]: constant evaluation error + --> $DIR/issue-43197.rs:18:20 + | +18 | const X: u32 = 0-1; + | ^^^ attempt to subtract with overflow + +error[E0080]: constant evaluation error + --> $DIR/issue-43197.rs:19:24 + | +19 | const Y: u32 = foo(0-1); + | ^^^ attempt to subtract with overflow + +error: aborting due to 2 previous errors + diff --git a/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr b/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr index 82e3eab0836..20bb7d4c91d 100644 --- a/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr +++ b/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr @@ -3,6 +3,8 @@ error[E0609]: no field `zz` on type `Foo` | 17 | f.zz; | ^^ unknown field + | + = note: available fields are: `bar` error: aborting due to previous error diff --git a/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs b/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs new file mode 100644 index 00000000000..7fe99508012 --- /dev/null +++ b/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs @@ 
-0,0 +1,43 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod submodule { + + #[derive(Default)] + pub struct Demo { + pub favorite_integer: isize, + secret_integer: isize, + pub innocently_misspellable: (), + another_field: bool, + yet_another_field: bool, + always_more_fields: bool, + and_ever: bool, + } + + impl Demo { + fn new_with_secret_two() -> Self { + Self { secret_integer: 2, inocently_mispellable: () } + } + + fn new_with_secret_three() -> Self { + Self { secret_integer: 3, egregiously_nonexistent_field: () } + } + } + +} + +fn main() { + use submodule::Demo; + + let demo = Demo::default(); + let innocent_field_misaccess = demo.inocently_mispellable; + // note shouldn't suggest private fields + let egregious_field_misaccess = demo.egregiously_nonexistent_field; +} diff --git a/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr b/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr new file mode 100644 index 00000000000..e2bb7fbd9a8 --- /dev/null +++ b/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr @@ -0,0 +1,30 @@ +error[E0560]: struct `submodule::Demo` has no field named `inocently_mispellable` + --> $DIR/issue-42599_available_fields_note.rs:26:39 + | +26 | Self { secret_integer: 2, inocently_mispellable: () } + | ^^^^^^^^^^^^^^^^^^^^^^ field does not exist - did you mean `innocently_misspellable`? 
+ +error[E0560]: struct `submodule::Demo` has no field named `egregiously_nonexistent_field` + --> $DIR/issue-42599_available_fields_note.rs:30:39 + | +30 | Self { secret_integer: 3, egregiously_nonexistent_field: () } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `submodule::Demo` does not have this field + | + = note: available fields are: `favorite_integer`, `secret_integer`, `innocently_misspellable`, `another_field`, `yet_another_field` ... and 2 others + +error[E0609]: no field `inocently_mispellable` on type `submodule::Demo` + --> $DIR/issue-42599_available_fields_note.rs:40:41 + | +40 | let innocent_field_misaccess = demo.inocently_mispellable; + | ^^^^^^^^^^^^^^^^^^^^^ did you mean `innocently_misspellable`? + +error[E0609]: no field `egregiously_nonexistent_field` on type `submodule::Demo` + --> $DIR/issue-42599_available_fields_note.rs:42:42 + | +42 | let egregious_field_misaccess = demo.egregiously_nonexistent_field; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unknown field + | + = note: available fields are: `favorite_integer`, `innocently_misspellable` + +error: aborting due to 4 previous errors + diff --git a/src/test/ui/issue-35675.rs b/src/test/ui/issue-35675.rs index 391e1f2db5c..001c1f2eddc 100644 --- a/src/test/ui/issue-35675.rs +++ b/src/test/ui/issue-35675.rs @@ -33,11 +33,27 @@ fn should_return_fruit_too() -> Fruit::Apple { //~| NOTE not found in this scope } +fn foo() -> Ok { + //~^ ERROR expected type, found variant `Ok` + //~| NOTE not a type + //~| HELP there is an enum variant + //~| HELP there is an enum variant + Ok(()) +} + fn bar() -> Variant3 { //~^ ERROR cannot find type `Variant3` in this scope //~| NOTE not found in this scope } +fn qux() -> Some { + //~^ ERROR expected type, found variant `Some` + //~| NOTE not a type + //~| HELP there is an enum variant + //~| HELP there is an enum variant + Some(1) +} + fn main() {} mod x { diff --git a/src/test/ui/issue-35675.stderr b/src/test/ui/issue-35675.stderr index c2c10724646..ed330f47208 100644 --- 
a/src/test/ui/issue-35675.stderr +++ b/src/test/ui/issue-35675.stderr @@ -38,14 +38,32 @@ help: possible candidate is found in another module, you can import it into scop 12 | use Fruit::Apple; | -error[E0412]: cannot find type `Variant3` in this scope +error[E0573]: expected type, found variant `Ok` --> $DIR/issue-35675.rs:36:13 | -36 | fn bar() -> Variant3 { +36 | fn foo() -> Ok { + | ^^ not a type + | + = help: there is an enum variant `std::prelude::v1::Ok`, try using `std::prelude::v1`? + = help: there is an enum variant `std::result::Result::Ok`, try using `std::result::Result`? + +error[E0412]: cannot find type `Variant3` in this scope + --> $DIR/issue-35675.rs:44:13 + | +44 | fn bar() -> Variant3 { | ^^^^^^^^ | | | not found in this scope | help: you can try using the variant's enum: `x::Enum` -error: aborting due to 5 previous errors +error[E0573]: expected type, found variant `Some` + --> $DIR/issue-35675.rs:49:13 + | +49 | fn qux() -> Some { + | ^^^^ not a type + | + = help: there is an enum variant `std::prelude::v1::Option::Some`, try using `std::prelude::v1::Option`? + = help: there is an enum variant `std::prelude::v1::Some`, try using `std::prelude::v1`? + +error: aborting due to 7 previous errors diff --git a/src/test/ui/issue-35976.rs b/src/test/ui/issue-35976.rs new file mode 100644 index 00000000000..169d7b55916 --- /dev/null +++ b/src/test/ui/issue-35976.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +mod private { + pub trait Future { + fn wait(&self) where Self: Sized; + } + + impl Future for Box<Future> { + fn wait(&self) { } + } +} + +//use private::Future; + +fn bar(arg: Box<private::Future>) { + arg.wait(); + //~^ ERROR the `wait` method cannot be invoked on a trait object + //~| another candidate was found in the following trait, perhaps add a `use` for it: +} + +fn main() { + +} diff --git a/src/test/ui/issue-35976.stderr b/src/test/ui/issue-35976.stderr new file mode 100644 index 00000000000..9fb67449734 --- /dev/null +++ b/src/test/ui/issue-35976.stderr @@ -0,0 +1,11 @@ +error: the `wait` method cannot be invoked on a trait object + --> $DIR/issue-35976.rs:24:9 + | +24 | arg.wait(); + | ^^^^ + | + = note: another candidate was found in the following trait, perhaps add a `use` for it: + candidate #1: `use private::Future;` + +error: aborting due to previous error + diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr index 8dd906afdc4..4c878f3c0dc 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr @@ -2,9 +2,9 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-2.rs:12:9 | 11 | fn foo((v, w): (&u8, &u8), x: &u8) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 
12 | v = x; - | ^ data from `x` flows here + | ^ ...but data from `x` flows here error: aborting due to previous error diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr index 66c3ca45499..08506b8befa 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr @@ -2,9 +2,9 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-3.rs:12:9 | 11 | fn foo((v, w): (&u8, &u8), (x, y): (&u8, &u8)) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 12 | v = x; - | ^ data flows here + | ^ ...but data flows here error: aborting due to previous error diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr index b969797b374..9c2630fc811 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr @@ -4,17 +4,17 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-4.rs:12:13 | 11 | fn foo(z: &mut Vec<(&u8,&u8)>, (x, y): (&u8, &u8)) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 12 | z.push((x,y)); - | ^ data flows into `z` here + | ^ ...but data flows into `z` here error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-4.rs:12:15 | 11 | fn foo(z: &mut Vec<(&u8,&u8)>, (x, y): (&u8, &u8)) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 
12 | z.push((x,y)); - | ^ data flows into `z` here + | ^ ...but data flows into `z` here error: aborting due to 3 previous errors diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr index e38e2ef07ad..a183d1fffc0 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr @@ -2,9 +2,9 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions.rs:12:12 | 11 | fn foo(x: &mut Vec<&u8>, y: &u8) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 12 | x.push(y); - | ^ data from `y` flows into `x` here + | ^ ...but data from `y` flows into `x` here error: aborting due to previous error diff --git a/src/test/ui/regions-fn-subtyping-return-static.stderr b/src/test/ui/regions-fn-subtyping-return-static.stderr index 0c7b44af949..1598a8a40d2 100644 --- a/src/test/ui/regions-fn-subtyping-return-static.stderr +++ b/src/test/ui/regions-fn-subtyping-return-static.stderr @@ -6,8 +6,6 @@ error[E0308]: mismatched types | = note: expected type `fn(&'cx S) -> &'cx S` found type `fn(&'a S) -> &S {bar::<'_>}` - = note: lifetime parameter `'b` declared on fn `bar` appears only in the return type, but here is required to be higher-ranked, which means that `'b` must appear in both argument and return types - = note: this error is the result of a recent bug fix; for more information, see issue #33685 <https://github.com/rust-lang/rust/issues/33685> error: aborting due to previous error diff --git a/src/test/ui/union-fields.rs b/src/test/ui/union-fields.rs new file mode 100644 index 00000000000..021f57e3eee --- /dev/null +++ b/src/test/ui/union-fields.rs @@ -0,0 +1,42 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![deny(dead_code)] + +union U1 { + a: u8, // should not be reported + b: u8, // should not be reported + c: u8, // should be reported +} +union U2 { + a: u8, // should be reported + b: u8, // should not be reported + c: u8, // should not be reported +} +union NoDropLike { a: u8 } // should be reported as unused + +union U { + a: u8, // should not be reported + b: u8, // should not be reported + c: u8, // should be reported +} +type A = U; + +fn main() { + let u = U1 { a: 0 }; + let _a = unsafe { u.b }; + + let u = U2 { c: 0 }; + let _b = unsafe { u.b }; + + let _u = NoDropLike { a: 10 }; + let u = A { a: 0 }; + let _b = unsafe { u.b }; +} diff --git a/src/test/ui/union-fields.stderr b/src/test/ui/union-fields.stderr new file mode 100644 index 00000000000..f3a2702d5ae --- /dev/null +++ b/src/test/ui/union-fields.stderr @@ -0,0 +1,32 @@ +error: field is never used: `c` + --> $DIR/union-fields.rs:16:5 + | +16 | c: u8, // should be reported + | ^^^^^ + | +note: lint level defined here + --> $DIR/union-fields.rs:11:9 + | +11 | #![deny(dead_code)] + | ^^^^^^^^^ + +error: field is never used: `a` + --> $DIR/union-fields.rs:19:5 + | +19 | a: u8, // should be reported + | ^^^^^ + +error: field is never used: `a` + --> $DIR/union-fields.rs:23:20 + | +23 | union NoDropLike { a: u8 } // should be reported as unused + | ^^^^^ + +error: field is never used: `c` + --> $DIR/union-fields.rs:28:5 + | +28 | c: u8, // should be reported + | ^^^^^ + +error: aborting due to 4 previous errors + diff --git a/src/tools/cargo b/src/tools/cargo -Subproject 88aa6423a164774d09abc78a24e74e8e665f651 +Subproject 305bc25d5e105e84ffe261655b46cf74570f6e5 diff --git a/src/tools/rls 
b/src/tools/rls -Subproject 79d659e5699fbf7db5b4819e9a442fb3f550472 +Subproject 5d4bbd9052fe2af849a7d017b85df98ad002c20 |
