| field | value | date |
|---|---|---|
| author | bors <bors@rust-lang.org> | 2020-09-20 11:02:36 +0000 |
| committer | bors <bors@rust-lang.org> | 2020-09-20 11:02:36 +0000 |
| commit | 41507ed0d57eba71adc20a021a19b64322162f04 (patch) | |
| tree | 16057bdc84087f24f271440fe9e303afc98fe235 /compiler | |
| parent | 5e449b9adff463455743291b0c1f76feec092992 (diff) | |
| parent | e5be14c2721ed4f4bf8b4c5c01a68de18cfd31a3 (diff) | |
| download | rust-41507ed0d57eba71adc20a021a19b64322162f04.tar.gz, rust-41507ed0d57eba71adc20a021a19b64322162f04.zip | |
Auto merge of #76964 - RalfJung:rollup-ybn06fs, r=RalfJung
Rollup of 15 pull requests
Successful merges:
- #76722 (Test and fix Send and Sync traits of BTreeMap artefacts)
- #76766 (Extract some intrinsics out of rustc_codegen_llvm)
- #76800 (Don't generate bootstrap usage unless it's needed)
- #76809 (simplfy condition in ItemLowerer::with_trait_impl_ref())
- #76815 (Fix wording in mir doc)
- #76818 (Don't compile regex at every function call.)
- #76821 (Remove redundant nightly features)
- #76823 (black_box: silence unused_mut warning when building with cfg(miri))
- #76825 (use `array_windows` instead of `windows` in the compiler)
- #76827 (fix array_windows docs)
- #76828 (use strip_prefix over starts_with and manual slicing based on pattern length (clippy::manual_strip))
- #76840 (Move to intra doc links in core/src/future)
- #76845 (Use intra docs links in core::{ascii, option, str, pattern, hash::map})
- #76853 (Use intra-doc links in library/core/src/task/wake.rs)
- #76871 (support panic=abort in Miri)
Failed merges:
r? `@ghost`
Diffstat (limited to 'compiler')
36 files changed, 672 insertions, 600 deletions
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index 0d0b1efd2de..6f9cccf58dd 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -11,7 +11,6 @@ html_root_url = "https://doc.rust-lang.org/nightly/", test(no_crate_inject, attr(deny(warnings))) )] -#![feature(core_intrinsics)] #![feature(dropck_eyepatch)] #![feature(new_uninit)] #![feature(maybe_uninit_slice)] @@ -24,7 +23,6 @@ use smallvec::SmallVec; use std::alloc::Layout; use std::cell::{Cell, RefCell}; use std::cmp; -use std::intrinsics; use std::marker::{PhantomData, Send}; use std::mem::{self, MaybeUninit}; use std::ptr; @@ -122,7 +120,7 @@ impl<T> TypedArena<T> { unsafe { if mem::size_of::<T>() == 0 { - self.ptr.set(intrinsics::arith_offset(self.ptr.get() as *mut u8, 1) as *mut T); + self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T); let ptr = mem::align_of::<T>() as *mut T; // Don't drop the object. This `write` is equivalent to `forget`. ptr::write(ptr, object); diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs index b556c1a446b..76b84d9da83 100644 --- a/compiler/rustc_ast/src/lib.rs +++ b/compiler/rustc_ast/src/lib.rs @@ -5,17 +5,13 @@ //! This API is completely unstable and subject to change. #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))] -#![feature(bool_to_option)] #![feature(box_syntax)] #![feature(const_fn)] // For the `transmute` in `P::new` #![feature(const_panic)] -#![feature(const_fn_transmute)] #![feature(crate_visibility_modifier)] #![feature(label_break_value)] #![feature(nll)] #![feature(or_patterns)] -#![feature(try_trait)] -#![feature(unicode_internals)] #![recursion_limit = "256"] #[macro_use] diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index 6d41b7836b1..617cacee0e7 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -27,7 +27,7 @@ pub(super) struct ItemLowerer<'a, 'lowering, 'hir> { impl ItemLowerer<'_, '_, '_> { fn with_trait_impl_ref(&mut self, impl_ref: &Option<TraitRef>, f: impl FnOnce(&mut Self)) { let old = self.lctx.is_in_trait_impl; - self.lctx.is_in_trait_impl = if let &None = impl_ref { false } else { true }; + self.lctx.is_in_trait_impl = impl_ref.is_some(); f(self); self.lctx.is_in_trait_impl = old; } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 2208ceca00c..7f5b09eac4f 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -7,15 +7,12 @@ use crate::type_of::LayoutLlvmExt; use crate::va_arg::emit_va_arg; use crate::value::Value; -use rustc_ast as ast; use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh}; use rustc_codegen_ssa::common::span_invalid_monomorphization_error; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; -use rustc_codegen_ssa::glue; -use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; +use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::MemFlags; use rustc_hir as hir; use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt}; use rustc_middle::ty::{self, Ty}; @@ -71,8 +68,6 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Va sym::nearbyintf64 => "llvm.nearbyint.f64", sym::roundf32 => "llvm.round.f32", sym::roundf64 => "llvm.round.f64", - sym::assume => 
"llvm.assume", - sym::abort => "llvm.trap", _ => return None, }; Some(cx.get_intrinsic(&llvm_name)) @@ -112,9 +107,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None, ), - sym::unreachable => { - return; - } sym::likely => { let expect = self.get_intrinsic(&("llvm.expect.i1")); self.call(expect, &[args[0].immediate(), self.const_bool(true)], None) @@ -137,8 +129,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let llfn = self.get_intrinsic(&("llvm.debugtrap")); self.call(llfn, &[], None) } - sym::va_start => self.va_start(args[0].immediate()), - sym::va_end => self.va_end(args[0].immediate()), sym::va_copy => { let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy")); self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None) @@ -169,123 +159,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { _ => bug!("the va_arg intrinsic does not work with non-scalar types"), } } - sym::size_of_val => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta)); - llsize - } else { - self.const_usize(self.size_of(tp_ty).bytes()) - } - } - sym::min_align_of_val => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta)); - llalign - } else { - self.const_usize(self.align_of(tp_ty).bytes()) - } - } - sym::size_of - | sym::pref_align_of - | sym::min_align_of - | sym::needs_drop - | sym::type_id - | sym::type_name - | sym::variant_count => { - let value = self - .tcx - .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None) - .unwrap(); - OperandRef::from_const(self, value, ret_ty).immediate_or_packed_pair(self) - } - // Effectively no-op - sym::forget => { - return; - } - sym::offset => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - self.inbounds_gep(ptr, &[offset]) - } - sym::arith_offset => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - self.gep(ptr, &[offset]) - } - - sym::copy_nonoverlapping => { - copy_intrinsic( - self, - false, - false, - substs.type_at(0), - args[1].immediate(), - args[0].immediate(), - args[2].immediate(), - ); - return; - } - sym::copy => { - copy_intrinsic( - self, - true, - false, - substs.type_at(0), - args[1].immediate(), - args[0].immediate(), - args[2].immediate(), - ); - return; - } - sym::write_bytes => { - memset_intrinsic( - self, - false, - substs.type_at(0), - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - ); - return; - } - sym::volatile_copy_nonoverlapping_memory => { - copy_intrinsic( - self, - false, - true, - substs.type_at(0), - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - ); - return; - } - sym::volatile_copy_memory => { - copy_intrinsic( - self, - true, - true, - substs.type_at(0), - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - ); - return; - } - sym::volatile_set_memory => { - memset_intrinsic( - self, - true, - substs.type_at(0), - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - ); - return; - } sym::volatile_load | sym::unaligned_volatile_load => { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); @@ -343,20 +217,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { | sym::ctpop | sym::bswap | sym::bitreverse - | sym::add_with_overflow - | 
sym::sub_with_overflow - | sym::mul_with_overflow - | sym::wrapping_add - | sym::wrapping_sub - | sym::wrapping_mul - | sym::unchecked_div - | sym::unchecked_rem - | sym::unchecked_shl - | sym::unchecked_shr - | sym::unchecked_add - | sym::unchecked_sub - | sym::unchecked_mul - | sym::exact_div | sym::rotate_left | sym::rotate_right | sym::saturating_add @@ -396,84 +256,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { &[args[0].immediate()], None, ), - sym::add_with_overflow - | sym::sub_with_overflow - | sym::mul_with_overflow => { - let intrinsic = format!( - "llvm.{}{}.with.overflow.i{}", - if signed { 's' } else { 'u' }, - &name_str[..3], - width - ); - let llfn = self.get_intrinsic(&intrinsic); - - // Convert `i1` to a `bool`, and write it to the out parameter - let pair = - self.call(llfn, &[args[0].immediate(), args[1].immediate()], None); - let val = self.extract_value(pair, 0); - let overflow = self.extract_value(pair, 1); - let overflow = self.zext(overflow, self.type_bool()); - - let dest = result.project_field(self, 0); - self.store(val, dest.llval, dest.align); - let dest = result.project_field(self, 1); - self.store(overflow, dest.llval, dest.align); - - return; - } - sym::wrapping_add => self.add(args[0].immediate(), args[1].immediate()), - sym::wrapping_sub => self.sub(args[0].immediate(), args[1].immediate()), - sym::wrapping_mul => self.mul(args[0].immediate(), args[1].immediate()), - sym::exact_div => { - if signed { - self.exactsdiv(args[0].immediate(), args[1].immediate()) - } else { - self.exactudiv(args[0].immediate(), args[1].immediate()) - } - } - sym::unchecked_div => { - if signed { - self.sdiv(args[0].immediate(), args[1].immediate()) - } else { - self.udiv(args[0].immediate(), args[1].immediate()) - } - } - sym::unchecked_rem => { - if signed { - self.srem(args[0].immediate(), args[1].immediate()) - } else { - self.urem(args[0].immediate(), args[1].immediate()) - } - } - sym::unchecked_shl => self.shl(args[0].immediate(), args[1].immediate()), - sym::unchecked_shr => { - if signed { - self.ashr(args[0].immediate(), args[1].immediate()) - } else { - self.lshr(args[0].immediate(), args[1].immediate()) - } - } - sym::unchecked_add => { - if signed { - self.unchecked_sadd(args[0].immediate(), args[1].immediate()) - } else { - self.unchecked_uadd(args[0].immediate(), args[1].immediate()) - } - } - sym::unchecked_sub => { - if signed { - self.unchecked_ssub(args[0].immediate(), args[1].immediate()) - } else { - self.unchecked_usub(args[0].immediate(), args[1].immediate()) - } - } - sym::unchecked_mul => { - if signed { - self.unchecked_smul(args[0].immediate(), args[1].immediate()) - } else { - self.unchecked_umul(args[0].immediate(), args[1].immediate()) - } - } sym::rotate_left | sym::rotate_right => { let is_left = name == sym::rotate_left; let val = args[0].immediate(); @@ -513,75 +295,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } } - sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => { - match float_type_width(arg_tys[0]) { - Some(_width) => match name { - sym::fadd_fast => self.fadd_fast(args[0].immediate(), args[1].immediate()), - sym::fsub_fast => self.fsub_fast(args[0].immediate(), args[1].immediate()), - sym::fmul_fast => self.fmul_fast(args[0].immediate(), args[1].immediate()), - sym::fdiv_fast => self.fdiv_fast(args[0].immediate(), args[1].immediate()), - sym::frem_fast => self.frem_fast(args[0].immediate(), args[1].immediate()), - _ => bug!(), - }, - None => { - 
span_invalid_monomorphization_error( - tcx.sess, - span, - &format!( - "invalid monomorphization of `{}` intrinsic: \ - expected basic float type, found `{}`", - name, arg_tys[0] - ), - ); - return; - } - } - } - - sym::float_to_int_unchecked => { - if float_type_width(arg_tys[0]).is_none() { - span_invalid_monomorphization_error( - tcx.sess, - span, - &format!( - "invalid monomorphization of `float_to_int_unchecked` \ - intrinsic: expected basic float type, \ - found `{}`", - arg_tys[0] - ), - ); - return; - } - let (width, signed) = match int_type_width_signed(ret_ty, self.cx) { - Some(pair) => pair, - None => { - span_invalid_monomorphization_error( - tcx.sess, - span, - &format!( - "invalid monomorphization of `float_to_int_unchecked` \ - intrinsic: expected basic integer type, \ - found `{}`", - ret_ty - ), - ); - return; - } - }; - if signed { - self.fptosi(args[0].immediate(), self.cx.type_ix(width)) - } else { - self.fptoui(args[0].immediate(), self.cx.type_ix(width)) - } - } - - sym::discriminant_value => { - if ret_ty.is_integral() { - args[0].deref(self.cx()).codegen_get_discr(self, ret_ty) - } else { - span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0]) - } - } _ if name_str.starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { @@ -589,174 +302,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { Err(()) => return, } } - // This requires that atomic intrinsics follow a specific naming pattern: - // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst - name if name_str.starts_with("atomic_") => { - use rustc_codegen_ssa::common::AtomicOrdering::*; - use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope}; - - let split: Vec<&str> = name_str.split('_').collect(); - - let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; - let (order, failorder) = match split.len() { - 2 => (SequentiallyConsistent, SequentiallyConsistent), - 3 => match split[2] { - "unordered" => (Unordered, Unordered), - "relaxed" => (Monotonic, Monotonic), - "acq" => (Acquire, Acquire), - "rel" => (Release, Monotonic), - "acqrel" => (AcquireRelease, Acquire), - "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic), - "failacq" if is_cxchg => (SequentiallyConsistent, Acquire), - _ => self.sess().fatal("unknown ordering in atomic intrinsic"), - }, - 4 => match (split[2], split[3]) { - ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic), - ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic), - _ => self.sess().fatal("unknown ordering in atomic intrinsic"), - }, - _ => self.sess().fatal("Atomic intrinsic not in correct format"), - }; - - let invalid_monomorphization = |ty| { - span_invalid_monomorphization_error( - tcx.sess, - span, - &format!( - "invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", - name, ty - ), - ); - }; - - match split[1] { - "cxchg" | "cxchgweak" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, self).is_some() { - let weak = split[1] == "cxchgweak"; - let pair = self.atomic_cmpxchg( - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - order, - failorder, - weak, - ); - let val = self.extract_value(pair, 0); - let success = self.extract_value(pair, 1); - let success = self.zext(success, self.type_bool()); - - let dest = result.project_field(self, 0); - self.store(val, dest.llval, dest.align); - let dest = result.project_field(self, 1); - self.store(success, dest.llval, 
dest.align); - return; - } else { - return invalid_monomorphization(ty); - } - } - - "load" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, self).is_some() { - let size = self.size_of(ty); - self.atomic_load(args[0].immediate(), order, size) - } else { - return invalid_monomorphization(ty); - } - } - - "store" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, self).is_some() { - let size = self.size_of(ty); - self.atomic_store( - args[1].immediate(), - args[0].immediate(), - order, - size, - ); - return; - } else { - return invalid_monomorphization(ty); - } - } - - "fence" => { - self.atomic_fence(order, SynchronizationScope::CrossThread); - return; - } - - "singlethreadfence" => { - self.atomic_fence(order, SynchronizationScope::SingleThread); - return; - } - - // These are all AtomicRMW ops - op => { - let atom_op = match op { - "xchg" => AtomicRmwBinOp::AtomicXchg, - "xadd" => AtomicRmwBinOp::AtomicAdd, - "xsub" => AtomicRmwBinOp::AtomicSub, - "and" => AtomicRmwBinOp::AtomicAnd, - "nand" => AtomicRmwBinOp::AtomicNand, - "or" => AtomicRmwBinOp::AtomicOr, - "xor" => AtomicRmwBinOp::AtomicXor, - "max" => AtomicRmwBinOp::AtomicMax, - "min" => AtomicRmwBinOp::AtomicMin, - "umax" => AtomicRmwBinOp::AtomicUMax, - "umin" => AtomicRmwBinOp::AtomicUMin, - _ => self.sess().fatal("unknown atomic operation"), - }; - - let ty = substs.type_at(0); - if int_type_width_signed(ty, self).is_some() { - self.atomic_rmw( - atom_op, - args[0].immediate(), - args[1].immediate(), - order, - ) - } else { - return invalid_monomorphization(ty); - } - } - } - } - - sym::nontemporal_store => { - let dst = args[0].deref(self.cx()); - args[1].val.nontemporal_store(self, dst); - return; - } - - sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => { - let a = args[0].immediate(); - let b = args[1].immediate(); - if name == sym::ptr_guaranteed_eq { - self.icmp(IntPredicate::IntEQ, a, b) - } else { - self.icmp(IntPredicate::IntNE, a, b) - } - } - - sym::ptr_offset_from => { - let ty = substs.type_at(0); - let pointee_size = self.size_of(ty); - - // This is the same sequence that Clang emits for pointer subtraction. - // It can be neither `nsw` nor `nuw` because the input is treated as - // unsigned but then the output is treated as signed, so neither works. 
- let a = args[0].immediate(); - let b = args[1].immediate(); - let a = self.ptrtoint(a, self.type_isize()); - let b = self.ptrtoint(b, self.type_isize()); - let d = self.sub(a, b); - let pointee_size = self.const_usize(pointee_size.bytes()); - // this is where the signed magic happens (notice the `s` in `exactsdiv`) - self.exactsdiv(d, pointee_size) - } _ => bug!("unknown intrinsic '{}'", name), }; @@ -807,39 +352,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } -fn copy_intrinsic( - bx: &mut Builder<'a, 'll, 'tcx>, - allow_overlap: bool, - volatile: bool, - ty: Ty<'tcx>, - dst: &'ll Value, - src: &'ll Value, - count: &'ll Value, -) { - let (size, align) = bx.size_and_align_of(ty); - let size = bx.mul(bx.const_usize(size.bytes()), count); - let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() }; - if allow_overlap { - bx.memmove(dst, align, src, align, size, flags); - } else { - bx.memcpy(dst, align, src, align, size, flags); - } -} - -fn memset_intrinsic( - bx: &mut Builder<'a, 'll, 'tcx>, - volatile: bool, - ty: Ty<'tcx>, - dst: &'ll Value, - val: &'ll Value, - count: &'ll Value, -) { - let (size, align) = bx.size_and_align_of(ty); - let size = bx.mul(bx.const_usize(size.bytes()), count); - let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() }; - bx.memset(dst, val, size, align, flags); -} - fn try_intrinsic( bx: &mut Builder<'a, 'll, 'tcx>, try_func: &'ll Value, @@ -2205,37 +1717,12 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, // stuffs. fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> { match ty.kind() { - ty::Int(t) => Some(( - match t { - ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width), - ast::IntTy::I8 => 8, - ast::IntTy::I16 => 16, - ast::IntTy::I32 => 32, - ast::IntTy::I64 => 64, - ast::IntTy::I128 => 128, - }, - true, - )), - ty::Uint(t) => Some(( - match t { - ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width), - ast::UintTy::U8 => 8, - ast::UintTy::U16 => 16, - ast::UintTy::U32 => 32, - ast::UintTy::U64 => 64, - ast::UintTy::U128 => 128, - }, - false, - )), - _ => None, - } -} - -// Returns the width of a float Ty -// Returns None if the type is not a float -fn float_type_width(ty: Ty<'_>) -> Option<u64> { - match ty.kind() { - ty::Float(t) => Some(t.bit_width()), + ty::Int(t) => { + Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), true)) + } + ty::Uint(t) => { + Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), false)) + } _ => None, } } diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs index 73e33369175..a87ce1446ba 100644 --- a/compiler/rustc_codegen_ssa/src/lib.rs +++ b/compiler/rustc_codegen_ssa/src/lib.rs @@ -8,8 +8,6 @@ #![feature(or_patterns)] #![feature(trusted_len)] #![feature(associated_type_bounds)] -#![feature(const_fn)] // for rustc_index::newtype_index -#![feature(const_panic)] // for rustc_index::newtype_index #![recursion_limit = "256"] //! This crate contains codegen code that is used by all codegen backends (LLVM and others). 
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 23269f7245d..703a17b200a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -687,7 +687,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }) .collect(); - bx.codegen_intrinsic_call( + Self::codegen_intrinsic_call( + &mut bx, *instance.as_ref().unwrap(), &fn_abi, &args, diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs new file mode 100644 index 00000000000..14f1ed59a67 --- /dev/null +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -0,0 +1,596 @@ +use super::operand::{OperandRef, OperandValue}; +use super::place::PlaceRef; +use super::FunctionCx; +use crate::common::{span_invalid_monomorphization_error, IntPredicate}; +use crate::glue; +use crate::traits::*; +use crate::MemFlags; + +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_span::{sym, Span}; +use rustc_target::abi::call::{FnAbi, PassMode}; + +fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + allow_overlap: bool, + volatile: bool, + ty: Ty<'tcx>, + dst: Bx::Value, + src: Bx::Value, + count: Bx::Value, +) { + let layout = bx.layout_of(ty); + let size = layout.size; + let align = layout.align.abi; + let size = bx.mul(bx.const_usize(size.bytes()), count); + let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() }; + if allow_overlap { + bx.memmove(dst, align, src, align, size, flags); + } else { + bx.memcpy(dst, align, src, align, size, flags); + } +} + +fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + volatile: bool, + ty: Ty<'tcx>, + dst: Bx::Value, + val: Bx::Value, + count: Bx::Value, +) { + let layout = bx.layout_of(ty); + let size = layout.size; + let align = layout.align.abi; + let size = bx.mul(bx.const_usize(size.bytes()), count); + let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() }; + bx.memset(dst, val, size, align, flags); +} + +impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_intrinsic_call( + bx: &mut Bx, + instance: ty::Instance<'tcx>, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, Bx::Value>], + llresult: Bx::Value, + span: Span, + ) { + let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all()); + + let (def_id, substs) = match *callee_ty.kind() { + ty::FnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty), + }; + + let sig = callee_ty.fn_sig(bx.tcx()); + let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = bx.tcx().item_name(def_id); + let name_str = &*name.as_str(); + + let llret_ty = bx.backend_type(bx.layout_of(ret_ty)); + let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); + + let llval = match name { + sym::assume => { + bx.assume(args[0].immediate()); + return; + } + sym::abort => { + bx.abort(); + return; + } + + sym::unreachable => { + return; + } + sym::va_start => bx.va_start(args[0].immediate()), + sym::va_end => bx.va_end(args[0].immediate()), + sym::size_of_val => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); + llsize + } else { + bx.const_usize(bx.layout_of(tp_ty).size.bytes()) + } + } + sym::min_align_of_val => 
{ + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); + llalign + } else { + bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes()) + } + } + sym::size_of + | sym::pref_align_of + | sym::min_align_of + | sym::needs_drop + | sym::type_id + | sym::type_name + | sym::variant_count => { + let value = bx + .tcx() + .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None) + .unwrap(); + OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx) + } + // Effectively no-op + sym::forget => { + return; + } + sym::offset => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + bx.inbounds_gep(ptr, &[offset]) + } + sym::arith_offset => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + bx.gep(ptr, &[offset]) + } + + sym::copy_nonoverlapping => { + copy_intrinsic( + bx, + false, + false, + substs.type_at(0), + args[1].immediate(), + args[0].immediate(), + args[2].immediate(), + ); + return; + } + sym::copy => { + copy_intrinsic( + bx, + true, + false, + substs.type_at(0), + args[1].immediate(), + args[0].immediate(), + args[2].immediate(), + ); + return; + } + sym::write_bytes => { + memset_intrinsic( + bx, + false, + substs.type_at(0), + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + ); + return; + } + + sym::volatile_copy_nonoverlapping_memory => { + copy_intrinsic( + bx, + false, + true, + substs.type_at(0), + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + ); + return; + } + sym::volatile_copy_memory => { + copy_intrinsic( + bx, + true, + true, + substs.type_at(0), + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + ); + return; + } + sym::volatile_set_memory => { + memset_intrinsic( + bx, + true, + substs.type_at(0), + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + ); + return; + } + sym::volatile_store => { + let dst = args[0].deref(bx.cx()); + args[1].val.volatile_store(bx, dst); + return; + } + sym::unaligned_volatile_store => { + let dst = args[0].deref(bx.cx()); + args[1].val.unaligned_volatile_store(bx, dst); + return; + } + sym::add_with_overflow + | sym::sub_with_overflow + | sym::mul_with_overflow + | sym::wrapping_add + | sym::wrapping_sub + | sym::wrapping_mul + | sym::unchecked_div + | sym::unchecked_rem + | sym::unchecked_shl + | sym::unchecked_shr + | sym::unchecked_add + | sym::unchecked_sub + | sym::unchecked_mul + | sym::exact_div => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, bx.tcx()) { + Some((_width, signed)) => match name { + sym::add_with_overflow + | sym::sub_with_overflow + | sym::mul_with_overflow => { + let op = match name { + sym::add_with_overflow => OverflowOp::Add, + sym::sub_with_overflow => OverflowOp::Sub, + sym::mul_with_overflow => OverflowOp::Mul, + _ => bug!(), + }; + let (val, overflow) = + bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate()); + // Convert `i1` to a `bool`, and write it to the out parameter + let val = bx.from_immediate(val); + let overflow = bx.from_immediate(overflow); + + let dest = result.project_field(bx, 0); + bx.store(val, dest.llval, dest.align); + let dest = result.project_field(bx, 1); + bx.store(overflow, dest.llval, dest.align); + + return; + } + sym::wrapping_add => bx.add(args[0].immediate(), args[1].immediate()), + sym::wrapping_sub => bx.sub(args[0].immediate(), args[1].immediate()), + sym::wrapping_mul => bx.mul(args[0].immediate(), 
args[1].immediate()), + sym::exact_div => { + if signed { + bx.exactsdiv(args[0].immediate(), args[1].immediate()) + } else { + bx.exactudiv(args[0].immediate(), args[1].immediate()) + } + } + sym::unchecked_div => { + if signed { + bx.sdiv(args[0].immediate(), args[1].immediate()) + } else { + bx.udiv(args[0].immediate(), args[1].immediate()) + } + } + sym::unchecked_rem => { + if signed { + bx.srem(args[0].immediate(), args[1].immediate()) + } else { + bx.urem(args[0].immediate(), args[1].immediate()) + } + } + sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()), + sym::unchecked_shr => { + if signed { + bx.ashr(args[0].immediate(), args[1].immediate()) + } else { + bx.lshr(args[0].immediate(), args[1].immediate()) + } + } + sym::unchecked_add => { + if signed { + bx.unchecked_sadd(args[0].immediate(), args[1].immediate()) + } else { + bx.unchecked_uadd(args[0].immediate(), args[1].immediate()) + } + } + sym::unchecked_sub => { + if signed { + bx.unchecked_ssub(args[0].immediate(), args[1].immediate()) + } else { + bx.unchecked_usub(args[0].immediate(), args[1].immediate()) + } + } + sym::unchecked_mul => { + if signed { + bx.unchecked_smul(args[0].immediate(), args[1].immediate()) + } else { + bx.unchecked_umul(args[0].immediate(), args[1].immediate()) + } + } + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + bx.tcx().sess, + span, + &format!( + "invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", + name, ty + ), + ); + return; + } + } + } + sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => { + match float_type_width(arg_tys[0]) { + Some(_width) => match name { + sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()), + sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()), + sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()), + sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()), + sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + bx.tcx().sess, + span, + &format!( + "invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", + name, arg_tys[0] + ), + ); + return; + } + } + } + + sym::float_to_int_unchecked => { + if float_type_width(arg_tys[0]).is_none() { + span_invalid_monomorphization_error( + bx.tcx().sess, + span, + &format!( + "invalid monomorphization of `float_to_int_unchecked` \ + intrinsic: expected basic float type, \ + found `{}`", + arg_tys[0] + ), + ); + return; + } + let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) { + Some(pair) => pair, + None => { + span_invalid_monomorphization_error( + bx.tcx().sess, + span, + &format!( + "invalid monomorphization of `float_to_int_unchecked` \ + intrinsic: expected basic integer type, \ + found `{}`", + ret_ty + ), + ); + return; + } + }; + if signed { + bx.fptosi(args[0].immediate(), llret_ty) + } else { + bx.fptoui(args[0].immediate(), llret_ty) + } + } + + sym::discriminant_value => { + if ret_ty.is_integral() { + args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty) + } else { + span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0]) + } + } + + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst + name if name_str.starts_with("atomic_") => { + use crate::common::AtomicOrdering::*; + use 
crate::common::{AtomicRmwBinOp, SynchronizationScope}; + + let split: Vec<&str> = name_str.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => (SequentiallyConsistent, Acquire), + _ => bx.sess().fatal("unknown ordering in atomic intrinsic"), + }, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic), + _ => bx.sess().fatal("unknown ordering in atomic intrinsic"), + }, + _ => bx.sess().fatal("Atomic intrinsic not in correct format"), + }; + + let invalid_monomorphization = |ty| { + span_invalid_monomorphization_error( + bx.tcx().sess, + span, + &format!( + "invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", + name, ty + ), + ); + }; + + match split[1] { + "cxchg" | "cxchgweak" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, bx.tcx()).is_some() { + let weak = split[1] == "cxchgweak"; + let pair = bx.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak, + ); + let val = bx.extract_value(pair, 0); + let success = bx.extract_value(pair, 1); + let val = bx.from_immediate(val); + let success = bx.from_immediate(success); + + let dest = result.project_field(bx, 0); + bx.store(val, dest.llval, dest.align); + let dest = result.project_field(bx, 1); + bx.store(success, dest.llval, dest.align); + return; + } else { + return invalid_monomorphization(ty); + } + } + + "load" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, bx.tcx()).is_some() { + let size = bx.layout_of(ty).size; + bx.atomic_load(args[0].immediate(), order, size) + } else { + return invalid_monomorphization(ty); + } + } + + "store" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, bx.tcx()).is_some() { + let size = bx.layout_of(ty).size; + bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size); + return; + } else { + return invalid_monomorphization(ty); + } + } + + "fence" => { + bx.atomic_fence(order, SynchronizationScope::CrossThread); + return; + } + + "singlethreadfence" => { + bx.atomic_fence(order, SynchronizationScope::SingleThread); + return; + } + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => AtomicRmwBinOp::AtomicXchg, + "xadd" => AtomicRmwBinOp::AtomicAdd, + "xsub" => AtomicRmwBinOp::AtomicSub, + "and" => AtomicRmwBinOp::AtomicAnd, + "nand" => AtomicRmwBinOp::AtomicNand, + "or" => AtomicRmwBinOp::AtomicOr, + "xor" => AtomicRmwBinOp::AtomicXor, + "max" => AtomicRmwBinOp::AtomicMax, + "min" => AtomicRmwBinOp::AtomicMin, + "umax" => AtomicRmwBinOp::AtomicUMax, + "umin" => AtomicRmwBinOp::AtomicUMin, + _ => bx.sess().fatal("unknown atomic operation"), + }; + + let ty = substs.type_at(0); + if int_type_width_signed(ty, bx.tcx()).is_some() { + bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) + } else { + return invalid_monomorphization(ty); + } + } + } + } + + sym::nontemporal_store => { + let dst = args[0].deref(bx.cx()); + args[1].val.nontemporal_store(bx, 
dst); + return; + } + + sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => { + let a = args[0].immediate(); + let b = args[1].immediate(); + if name == sym::ptr_guaranteed_eq { + bx.icmp(IntPredicate::IntEQ, a, b) + } else { + bx.icmp(IntPredicate::IntNE, a, b) + } + } + + sym::ptr_offset_from => { + let ty = substs.type_at(0); + let pointee_size = bx.layout_of(ty).size; + + // This is the same sequence that Clang emits for pointer subtraction. + // It can be neither `nsw` nor `nuw` because the input is treated as + // unsigned but then the output is treated as signed, so neither works. + let a = args[0].immediate(); + let b = args[1].immediate(); + let a = bx.ptrtoint(a, bx.type_isize()); + let b = bx.ptrtoint(b, bx.type_isize()); + let d = bx.sub(a, b); + let pointee_size = bx.const_usize(pointee_size.bytes()); + // this is where the signed magic happens (notice the `s` in `exactsdiv`) + bx.exactsdiv(d, pointee_size) + } + + _ => { + // Need to use backend-specific things in the implementation. + bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span); + return; + } + }; + + if !fn_abi.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_abi.ret.mode { + let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty)); + let ptr = bx.pointercast(result.llval, ptr_llty); + bx.store(llval, ptr, result.align); + } else { + OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) + .val + .store(bx, result); + } + } + } +} + +// Returns the width of an int Ty, and if it's signed or not +// Returns None if the type is not an integer +// FIXME: there’s multiple of this functions, investigate using some of the already existing +// stuffs. +fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> { + match ty.kind() { + ty::Int(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), true)), + ty::Uint(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), false)), + _ => None, + } +} + +// Returns the width of a float Ty +// Returns None if the type is not a float +fn float_type_width(ty: Ty<'_>) -> Option<u64> { + match ty.kind() { + ty::Float(t) => Some(t.bit_width()), + _ => None, + } +} diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index c1e7cfd80ef..64d456fb7aa 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -486,6 +486,7 @@ mod block; pub mod constant; pub mod coverageinfo; pub mod debuginfo; +mod intrinsic; pub mod operand; pub mod place; mod rvalue; diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs index 06718cc9803..8dafcdf3bc6 100644 --- a/compiler/rustc_data_structures/src/lib.rs +++ b/compiler/rustc_data_structures/src/lib.rs @@ -8,10 +8,10 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![allow(incomplete_features)] +#![feature(array_windows)] #![feature(control_flow_enum)] #![feature(in_band_lifetimes)] #![feature(unboxed_closures)] -#![feature(generators)] #![feature(generator_trait)] #![feature(fn_traits)] #![feature(int_bits_const)] @@ -27,7 +27,7 @@ #![feature(thread_id_value)] #![feature(extend_one)] #![feature(const_panic)] -#![feature(const_generics)] +#![feature(min_const_generics)] #![feature(once_cell)] #![allow(rustc::default_hash_types)] diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs index 856eb73e629..4807380595d 100644 --- 
a/compiler/rustc_data_structures/src/sorted_map.rs +++ b/compiler/rustc_data_structures/src/sorted_map.rs @@ -34,7 +34,7 @@ impl<K: Ord, V> SortedMap<K, V> { /// and that there are no duplicates. #[inline] pub fn from_presorted_elements(elements: Vec<(K, V)>) -> SortedMap<K, V> { - debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0)); + debug_assert!(elements.array_windows().all(|[fst, snd]| fst.0 < snd.0)); SortedMap { data: elements } } @@ -159,7 +159,7 @@ impl<K: Ord, V> SortedMap<K, V> { return; } - debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0)); + debug_assert!(elements.array_windows().all(|[fst, snd]| fst.0 < snd.0)); let start_index = self.lookup_index_for(&elements[0].0); diff --git a/compiler/rustc_driver/src/args.rs b/compiler/rustc_driver/src/args.rs index 5686819c61b..4f2febf04b1 100644 --- a/compiler/rustc_driver/src/args.rs +++ b/compiler/rustc_driver/src/args.rs @@ -4,8 +4,7 @@ use std::fs; use std::io; pub fn arg_expand(arg: String) -> Result<Vec<String>, Error> { - if arg.starts_with('@') { - let path = &arg[1..]; + if let Some(path) = arg.strip_prefix('@') { let file = match fs::read_to_string(path) { Ok(file) => file, Err(ref err) if err.kind() == io::ErrorKind::InvalidData => { diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs index 5436b1ef737..47247294f5d 100644 --- a/compiler/rustc_expand/src/lib.rs +++ b/compiler/rustc_expand/src/lib.rs @@ -1,5 +1,4 @@ #![feature(bool_to_option)] -#![feature(cow_is_borrowed)] #![feature(crate_visibility_modifier)] #![feature(decl_macro)] #![feature(or_patterns)] diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs index e05041d8846..504b66bae73 100644 --- a/compiler/rustc_infer/src/lib.rs +++ b/compiler/rustc_infer/src/lib.rs @@ -13,7 +13,6 @@ //! This API is completely unstable and subject to change. 
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] -#![feature(bindings_after_at)] #![feature(bool_to_option)] #![feature(box_patterns)] #![feature(box_syntax)] @@ -23,7 +22,6 @@ #![feature(never_type)] #![feature(or_patterns)] #![feature(in_band_lifetimes)] -#![feature(crate_visibility_modifier)] #![recursion_limit = "512"] // For rustdoc #[macro_use] diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index 0a14b16e274..b48592c103c 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -27,6 +27,7 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(test, feature(test))] +#![feature(array_windows)] #![feature(bool_to_option)] #![feature(box_syntax)] #![feature(crate_visibility_modifier)] diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index 24467f81172..b3125f55d4d 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -70,9 +70,9 @@ fn is_camel_case(name: &str) -> bool { // ones (some scripts don't have a concept of upper/lowercase) !name.chars().next().unwrap().is_lowercase() && !name.contains("__") - && !name.chars().collect::<Vec<_>>().windows(2).any(|pair| { + && !name.chars().collect::<Vec<_>>().array_windows().any(|&[fst, snd]| { // contains a capitalisable character followed by, or preceded by, an underscore - char_has_case(pair[0]) && pair[1] == '_' || char_has_case(pair[1]) && pair[0] == '_' + char_has_case(fst) && snd == '_' || char_has_case(snd) && fst == '_' }) } diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs index a675aae5b17..74cb3c130b7 100644 --- a/compiler/rustc_middle/src/lib.rs +++ b/compiler/rustc_middle/src/lib.rs @@ -23,6 +23,7 @@ //! This API is completely unstable and subject to change. #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] +#![feature(array_windows)] #![feature(backtrace)] #![feature(bool_to_option)] #![feature(box_patterns)] @@ -30,12 +31,9 @@ #![feature(cmp_min_max_by)] #![feature(const_fn)] #![feature(const_panic)] -#![feature(const_fn_transmute)] #![feature(core_intrinsics)] #![feature(discriminant_kind)] -#![feature(drain_filter)] #![feature(never_type)] -#![feature(exhaustive_patterns)] #![feature(extern_types)] #![feature(nll)] #![feature(once_cell)] @@ -43,13 +41,11 @@ #![feature(or_patterns)] #![feature(min_specialization)] #![feature(trusted_len)] -#![feature(stmt_expr_attributes)] #![feature(test)] #![feature(in_band_lifetimes)] #![feature(crate_visibility_modifier)] #![feature(associated_type_bounds)] #![feature(rustc_attrs)] -#![feature(hash_raw_entry)] #![feature(int_error_matching)] #![recursion_limit = "512"] diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index be61b676807..12dc1c12264 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -2286,7 +2286,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { /// Constants /// /// Two constants are equal if they are the same constant. Note that -/// this does not necessarily mean that they are `==` in Rust -- in +/// this does not necessarily mean that they are `==` in Rust. In /// particular, one must be wary of `NaN`! 
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, HashStable)] diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 56746666e2f..38c0441990b 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -2419,7 +2419,7 @@ impl<'tcx> TyCtxt<'tcx> { eps: &[ExistentialPredicate<'tcx>], ) -> &'tcx List<ExistentialPredicate<'tcx>> { assert!(!eps.is_empty()); - assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater)); + assert!(eps.array_windows().all(|[a, b]| a.stable_cmp(self, b) != Ordering::Greater)); self._intern_existential_predicates(eps) } diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs index e256fb55b12..629e9be9ddd 100644 --- a/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs +++ b/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs @@ -492,8 +492,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { { if let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span) { - if pat_snippet.starts_with('&') { - let pat_snippet = pat_snippet[1..].trim_start(); + if let Some(stripped) = pat_snippet.strip_prefix('&') { + let pat_snippet = stripped.trim_start(); let (suggestion, to_remove) = if pat_snippet.starts_with("mut") && pat_snippet["mut".len()..].starts_with(rustc_lexer::is_whitespace) { diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs index 8b0121cf360..d4cdf02104a 100644 --- a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs +++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs @@ -631,9 +631,8 @@ fn suggest_ampmut<'tcx>( let lt_name = &src[1..ws_pos]; let ty = &src[ws_pos..]; return (assignment_rhs_span, format!("&{} mut {}", lt_name, ty)); - } else if src.starts_with('&') { - let borrowed_expr = &src[1..]; - return (assignment_rhs_span, format!("&mut {}", borrowed_expr)); + } else if let Some(stripped) = src.strip_prefix('&') { + return (assignment_rhs_span, format!("&mut {}", stripped)); } } } diff --git a/compiler/rustc_mir/src/dataflow/framework/graphviz.rs b/compiler/rustc_mir/src/dataflow/framework/graphviz.rs index 94151fbd090..5d4c4251961 100644 --- a/compiler/rustc_mir/src/dataflow/framework/graphviz.rs +++ b/compiler/rustc_mir/src/dataflow/framework/graphviz.rs @@ -1,6 +1,7 @@ //! A helpful diagram for debugging dataflow problems. use std::borrow::Cow; +use std::lazy::SyncOnceCell; use std::{io, ops, str}; use regex::Regex; @@ -570,6 +571,13 @@ where } } +macro_rules! regex { + ($re:literal $(,)?) => {{ + static RE: SyncOnceCell<regex::Regex> = SyncOnceCell::new(); + RE.get_or_init(|| Regex::new($re).unwrap()) + }}; +} + fn diff_pretty<T, C>(new: T, old: T, ctxt: &C) -> String where T: DebugWithContext<C>, @@ -578,7 +586,7 @@ where return String::new(); } - let re = Regex::new("\t?\u{001f}([+-])").unwrap(); + let re = regex!("\t?\u{001f}([+-])"); let raw_diff = format!("{:#?}", DebugDiffWithAdapter { new, old, ctxt }); diff --git a/compiler/rustc_mir/src/lib.rs b/compiler/rustc_mir/src/lib.rs index 03e6f8414b9..a356ae1709d 100644 --- a/compiler/rustc_mir/src/lib.rs +++ b/compiler/rustc_mir/src/lib.rs @@ -6,6 +6,7 @@ Rust MIR: a lowered representation of Rust. 
#![feature(nll)] #![feature(in_band_lifetimes)] +#![feature(array_windows)] #![feature(bindings_after_at)] #![feature(bool_to_option)] #![feature(box_patterns)] @@ -14,20 +15,18 @@ Rust MIR: a lowered representation of Rust. #![feature(const_panic)] #![feature(crate_visibility_modifier)] #![feature(decl_macro)] -#![feature(drain_filter)] #![feature(exact_size_is_empty)] #![feature(exhaustive_patterns)] -#![feature(iter_order_by)] #![feature(never_type)] #![feature(min_specialization)] #![feature(trusted_len)] #![feature(try_blocks)] -#![feature(associated_type_bounds)] #![feature(associated_type_defaults)] #![feature(stmt_expr_attributes)] #![feature(trait_alias)] #![feature(option_expect_none)] #![feature(or_patterns)] +#![feature(once_cell)] #![recursion_limit = "256"] #[macro_use] diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs b/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs index b60beca6880..db6d3b2d912 100644 --- a/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs +++ b/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs @@ -277,14 +277,8 @@ where symbols.sort_by_key(|sym| sym.1); - for pair in symbols.windows(2) { - let sym1 = &pair[0].1; - let sym2 = &pair[1].1; - + for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() { if sym1 == sym2 { - let mono_item1 = pair[0].0; - let mono_item2 = pair[1].0; - let span1 = mono_item1.local_span(tcx); let span2 = mono_item2.local_span(tcx); diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs index e55180ff4be..714041ad4e8 100644 --- a/compiler/rustc_mir_build/src/lib.rs +++ b/compiler/rustc_mir_build/src/lib.rs @@ -1,7 +1,7 @@ //! Construction of MIR from HIR. //! //! This crate also contains the match exhaustiveness and usefulness checking. - +#![feature(array_windows)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(const_fn)] diff --git a/compiler/rustc_mir_build/src/thir/pattern/_match.rs b/compiler/rustc_mir_build/src/thir/pattern/_match.rs index ad94740c160..eddd2882406 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/_match.rs @@ -2299,19 +2299,19 @@ fn split_grouped_constructors<'p, 'tcx>( // interval into a constructor. 
split_ctors.extend( borders - .windows(2) - .filter_map(|window| match (window[0], window[1]) { - (Border::JustBefore(n), Border::JustBefore(m)) => { + .array_windows() + .filter_map(|&pair| match pair { + [Border::JustBefore(n), Border::JustBefore(m)] => { if n < m { Some(IntRange { range: n..=(m - 1), ty, span }) } else { None } } - (Border::JustBefore(n), Border::AfterMax) => { + [Border::JustBefore(n), Border::AfterMax] => { Some(IntRange { range: n..=u128::MAX, ty, span }) } - (Border::AfterMax, _) => None, + [Border::AfterMax, _] => None, }) .map(IntRange), ); diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs index 0becdf24c53..72a34b86ae2 100644 --- a/compiler/rustc_parse/src/lib.rs +++ b/compiler/rustc_parse/src/lib.rs @@ -3,7 +3,6 @@ #![feature(bool_to_option)] #![feature(crate_visibility_modifier)] #![feature(bindings_after_at)] -#![feature(try_blocks)] #![feature(or_patterns)] use rustc_ast as ast; diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs index e07b8b86aef..dde162681b7 100644 --- a/compiler/rustc_parse_format/src/lib.rs +++ b/compiler/rustc_parse_format/src/lib.rs @@ -11,8 +11,6 @@ )] #![feature(nll)] #![feature(or_patterns)] -#![feature(rustc_private)] -#![feature(unicode_internals)] #![feature(bool_to_option)] pub use Alignment::*; diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index 1a95992ed83..c2de4cdbf0d 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -1,7 +1,6 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(in_band_lifetimes)] #![feature(nll)] -#![feature(or_patterns)] #![recursion_limit = "256"] use rustc_attr as attr; diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 2cc87dc6375..ced272e474d 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -1418,9 +1418,9 @@ impl<'tcx> LifetimeContext<'_, 'tcx> { if snippet.starts_with('&') && !snippet.starts_with("&'") { introduce_suggestion .push((param.span, format!("&{} {}", lt_name, &snippet[1..]))); - } else if snippet.starts_with("&'_ ") { + } else if let Some(stripped) = snippet.strip_prefix("&'_ ") { introduce_suggestion - .push((param.span, format!("&{} {}", lt_name, &snippet[4..]))); + .push((param.span, format!("&{} {}", lt_name, stripped))); } } } diff --git a/compiler/rustc_session/src/search_paths.rs b/compiler/rustc_session/src/search_paths.rs index e12364b7dac..83b737a73b1 100644 --- a/compiler/rustc_session/src/search_paths.rs +++ b/compiler/rustc_session/src/search_paths.rs @@ -56,16 +56,16 @@ impl PathKind { impl SearchPath { pub fn from_cli_opt(path: &str, output: config::ErrorOutputType) -> Self { - let (kind, path) = if path.starts_with("native=") { - (PathKind::Native, &path["native=".len()..]) - } else if path.starts_with("crate=") { - (PathKind::Crate, &path["crate=".len()..]) - } else if path.starts_with("dependency=") { - (PathKind::Dependency, &path["dependency=".len()..]) - } else if path.starts_with("framework=") { - (PathKind::Framework, &path["framework=".len()..]) - } else if path.starts_with("all=") { - (PathKind::All, &path["all=".len()..]) + let (kind, path) = if let Some(stripped) = path.strip_prefix("native=") { + (PathKind::Native, stripped) + } else if let Some(stripped) = path.strip_prefix("crate=") { + (PathKind::Crate, stripped) + } else if let Some(stripped) = 
path.strip_prefix("dependency=") { + (PathKind::Dependency, stripped) + } else if let Some(stripped) = path.strip_prefix("framework=") { + (PathKind::Framework, stripped) + } else if let Some(stripped) = path.strip_prefix("all=") { + (PathKind::All, stripped) } else { (PathKind::All, path) }; diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index e38cd516b91..a730c303788 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -5,15 +5,14 @@ //! This API is completely unstable and subject to change. #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] +#![feature(array_windows)] #![feature(crate_visibility_modifier)] #![feature(const_fn)] #![feature(const_panic)] #![feature(negative_impls)] #![feature(nll)] -#![feature(optin_builtin_traits)] #![feature(min_specialization)] #![feature(option_expect_none)] -#![feature(refcell_take)] #[macro_use] extern crate rustc_macros; @@ -1158,7 +1157,12 @@ impl<S: Encoder> Encodable<S> for SourceFile { let max_line_length = if lines.len() == 1 { 0 } else { - lines.windows(2).map(|w| w[1] - w[0]).map(|bp| bp.to_usize()).max().unwrap() + lines + .array_windows() + .map(|&[fst, snd]| snd - fst) + .map(|bp| bp.to_usize()) + .max() + .unwrap() }; let bytes_per_diff: u8 = match max_line_length { @@ -1173,7 +1177,7 @@ impl<S: Encoder> Encodable<S> for SourceFile { // Encode the first element. lines[0].encode(s)?; - let diff_iter = (&lines[..]).windows(2).map(|w| (w[1] - w[0])); + let diff_iter = lines[..].array_windows().map(|&[fst, snd]| snd - fst); match bytes_per_diff { 1 => { diff --git a/compiler/rustc_traits/src/lib.rs b/compiler/rustc_traits/src/lib.rs index 6fea4732dda..d0b05beb4e6 100644 --- a/compiler/rustc_traits/src/lib.rs +++ b/compiler/rustc_traits/src/lib.rs @@ -4,7 +4,6 @@ #![feature(crate_visibility_modifier)] #![feature(in_band_lifetimes)] #![feature(nll)] -#![feature(or_patterns)] #![recursion_limit = "256"] #[macro_use] diff --git a/compiler/rustc_ty/src/lib.rs b/compiler/rustc_ty/src/lib.rs index 6e9042d1ba7..8dd6aa3c7fc 100644 --- a/compiler/rustc_ty/src/lib.rs +++ b/compiler/rustc_ty/src/lib.rs @@ -5,7 +5,6 @@ //! This API is completely unstable and subject to change. 
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] -#![feature(bool_to_option)] #![feature(nll)] #![recursion_limit = "256"] diff --git a/compiler/rustc_typeck/src/check/demand.rs b/compiler/rustc_typeck/src/check/demand.rs index f6b768bb122..c7ce5008c33 100644 --- a/compiler/rustc_typeck/src/check/demand.rs +++ b/compiler/rustc_typeck/src/check/demand.rs @@ -370,7 +370,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { { let s = s.as_ref(); let old = old.as_ref(); - if s.starts_with(old) { Some(new.as_ref().to_owned() + &s[old.len()..]) } else { None } + if let Some(stripped) = s.strip_prefix(old) { + Some(new.as_ref().to_owned() + stripped) + } else { + None + } } /// This function is used to determine potential "simple" improvements or users' errors and diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs index 529b8525a4a..66975f32a1f 100644 --- a/compiler/rustc_typeck/src/check/op.rs +++ b/compiler/rustc_typeck/src/check/op.rs @@ -589,10 +589,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } else { msg }, - if lstring.starts_with('&') { + if let Some(stripped) = lstring.strip_prefix('&') { // let a = String::new(); // let _ = &a + "bar"; - lstring[1..].to_string() + stripped.to_string() } else { format!("{}.to_owned()", lstring) }, @@ -617,10 +617,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { is_assign, ) { (Ok(l), Ok(r), IsAssign::No) => { - let to_string = if l.starts_with('&') { + let to_string = if let Some(stripped) = l.strip_prefix('&') { // let a = String::new(); let b = String::new(); // let _ = &a + b; - l[1..].to_string() + stripped.to_string() } else { format!("{}.to_owned()", l) }; diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs index 731ccfad2b4..f5418c9e01e 100644 --- a/compiler/rustc_typeck/src/collect.rs +++ b/compiler/rustc_typeck/src/collect.rs @@ -2343,8 +2343,8 @@ fn from_target_feature( item.span(), format!("`{}` is not valid for this target", feature), ); - if feature.starts_with('+') { - let valid = supported_target_features.contains_key(&feature[1..]); + if let Some(stripped) = feature.strip_prefix('+') { + let valid = supported_target_features.contains_key(stripped); if valid { err.help("consider removing the leading `+` in the feature name"); } |
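A few of the patterns in this rollup, sketched below as stand-alone illustrations rather than the compiler's actual code.

The bulk of the diff (#76766) moves backend-independent intrinsic lowering out of `rustc_codegen_llvm` into a new `rustc_codegen_ssa::mir::intrinsic` module: the shared layer handles the generic cases itself and falls back to the backend's `codegen_intrinsic_call` only for LLVM-specific intrinsics. A toy sketch of that "shared front end, backend fallback" shape, with illustrative names (`Backend`, `lower_intrinsic`, `DummyBackend` are not the compiler's types):

```rust
// Backend-specific hooks, analogous to the compiler's BuilderMethods traits.
trait Backend {
    fn emit_add(&mut self, a: i64, b: i64) -> i64;
    // Anything the shared layer does not know how to lower.
    fn codegen_intrinsic_call(&mut self, name: &str, args: &[i64]) -> i64;
}

// Shared, backend-agnostic lowering: handle what is generic here,
// delegate the rest to the backend.
fn lower_intrinsic<B: Backend>(bx: &mut B, name: &str, args: &[i64]) -> i64 {
    match name {
        "wrapping_add" => bx.emit_add(args[0], args[1]),
        _ => bx.codegen_intrinsic_call(name, args),
    }
}

struct DummyBackend;

impl Backend for DummyBackend {
    fn emit_add(&mut self, a: i64, b: i64) -> i64 {
        a.wrapping_add(b)
    }
    fn codegen_intrinsic_call(&mut self, name: &str, _args: &[i64]) -> i64 {
        panic!("backend-specific intrinsic `{}` not handled in this sketch", name)
    }
}

fn main() {
    let mut bx = DummyBackend;
    assert_eq!(lower_intrinsic(&mut bx, "wrapping_add", &[2, 3]), 5);
}
```

In the real diff the shared layer is `FunctionCx::codegen_intrinsic_call` and the fallback is the final `_ =>` arm that calls back into `bx.codegen_intrinsic_call(...)`.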
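The `add_with_overflow`/`sub_with_overflow`/`mul_with_overflow` intrinsics now go through the backend-agnostic `checked_binop` helper, which produces the same value-plus-flag pair that the stable `overflowing_*` integer methods expose at the source level. A quick illustration of that pair:

```rust
fn main() {
    // 250 + 10 wraps around the u8 range: value 4, overflow flag set.
    let (val, overflowed) = u8::overflowing_add(250, 10);
    assert_eq!((val, overflowed), (4, true));

    // No overflow: value 3, flag clear.
    let (val, overflowed) = u8::overflowing_add(1, 2);
    assert_eq!((val, overflowed), (3, false));
}
```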
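The `rustc_arena` change drops the `core_intrinsics` feature by swapping `intrinsics::arith_offset` for the stable `<*mut T>::wrapping_offset`, which performs the same non-`inbounds` pointer arithmetic. The arena only uses it to bump a pointer as a counter for zero-sized types; a stand-alone sketch of that trick (the `ZstCounter` type is illustrative, not the arena's real layout):

```rust
use std::cell::Cell;

/// Count zero-sized "allocations" by bumping a pointer value.
/// `wrapping_offset` never requires the result to stay in bounds,
/// unlike `offset`, so the pointer is effectively used as an integer.
struct ZstCounter {
    ptr: Cell<*mut u8>,
}

impl ZstCounter {
    fn new() -> Self {
        ZstCounter { ptr: Cell::new(std::ptr::null_mut()) }
    }

    fn bump(&self) {
        self.ptr.set(self.ptr.get().wrapping_offset(1));
    }

    fn count(&self) -> usize {
        self.ptr.get() as usize
    }
}

fn main() {
    let counter = ZstCounter::new();
    for _ in 0..3 {
        counter.bump();
    }
    assert_eq!(counter.count(), 3);
}
```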
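#76828 (and the smaller cleanups in `rustc_driver`, `rustc_session`, and `rustc_typeck` above) replaces `starts_with` plus manual slicing with `str::strip_prefix`, which returns an `Option` and avoids hard-coding the pattern length. A minimal before/after sketch of the pattern (the argument-file handling here is only an illustration):

```rust
// Before: test the prefix, then slice by its length.
fn arg_path_old(arg: &str) -> Option<&str> {
    if arg.starts_with('@') { Some(&arg[1..]) } else { None }
}

// After: strip_prefix does the test and the slicing in one step.
fn arg_path_new(arg: &str) -> Option<&str> {
    arg.strip_prefix('@')
}

fn main() {
    assert_eq!(arg_path_old("@args.txt"), arg_path_new("@args.txt"));
    assert_eq!(arg_path_new("plain"), None);
}
```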
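#76825 switches `slice::windows(2)` call sites to the unstable `slice::array_windows`, which yields fixed-size `&[T; 2]` windows so the closure can destructure instead of indexing. A sketch of the pattern, assuming a nightly toolchain with the `array_windows` feature enabled (as the crates above now do):

```rust
#![feature(array_windows)]

// Check that a slice is strictly sorted, the same shape as the
// `SortedMap` and `intern_existential_predicates` assertions in the diff.
fn strictly_sorted(xs: &[u32]) -> bool {
    xs.array_windows().all(|&[a, b]| a < b)
}

fn main() {
    assert!(strictly_sorted(&[1, 2, 3]));
    assert!(!strictly_sorted(&[1, 3, 3]));
}
```

As in the compiler, the window length is inferred from the `&[a, b]` pattern.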
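#76818 stops recompiling a `Regex` on every call to `diff_pretty` by caching it in a `SyncOnceCell` behind a small `regex!` macro. The same macro extracted into a stand-alone sketch; it assumes a nightly toolchain of that era (`once_cell` feature and `std::lazy::SyncOnceCell`, which is `std::sync::OnceLock` on current Rust) plus the `regex` crate:

```rust
#![feature(once_cell)]

use std::lazy::SyncOnceCell;
use regex::Regex;

/// Compile the pattern on first use and reuse it afterwards.
macro_rules! regex {
    ($re:literal $(,)?) => {{
        static RE: SyncOnceCell<Regex> = SyncOnceCell::new();
        RE.get_or_init(|| Regex::new($re).unwrap())
    }};
}

fn count_signed_numbers(s: &str) -> usize {
    // The pattern is compiled exactly once, no matter how often this runs.
    regex!(r"[+-]\d+").find_iter(s).count()
}

fn main() {
    assert_eq!(count_signed_numbers("+1 foo -22 bar 3"), 2);
}
```

Each expansion of the macro gets its own `static`, so every call site pays the compilation cost exactly once.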
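The new `copy_intrinsic` helper in `rustc_codegen_ssa` emits `memmove` when overlap is allowed and `memcpy` otherwise, the same distinction `std::ptr::copy` and `std::ptr::copy_nonoverlapping` make at the source level. A small sketch of that distinction (not the codegen code itself):

```rust
fn main() {
    let mut buf = [1u8, 2, 3, 4, 5];

    // Overlapping ranges: use `copy` (memmove semantics).
    unsafe {
        let p = buf.as_mut_ptr();
        // Shift elements 0..4 one slot to the right; source and
        // destination overlap, which `copy` handles correctly.
        std::ptr::copy(p, p.add(1), 4);
    }
    assert_eq!(buf, [1, 1, 2, 3, 4]);

    // Disjoint buffers: `copy_nonoverlapping` (memcpy semantics) suffices.
    let src = [9u8, 8, 7];
    let mut dst = [0u8; 3];
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 3);
    }
    assert_eq!(dst, [9, 8, 7]);
}
```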
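#76840, #76845, and #76853 move library documentation to intra-doc links, which rustdoc resolves as Rust paths instead of hand-written relative URLs. A minimal sketch of the syntax (the `Queue` type is only an example):

```rust
/// A first-in, first-out queue.
///
/// Compare with [`Vec::pop`], which removes from the back; see also
/// [`Self::push`] for the enqueue operation.
pub struct Queue<T> {
    items: Vec<T>,
}

impl<T> Queue<T> {
    /// Adds an element to the back of the queue.
    pub fn push(&mut self, item: T) {
        self.items.push(item);
    }

    /// Removes the element at the front, if any.
    pub fn pop(&mut self) -> Option<T> {
        if self.items.is_empty() { None } else { Some(self.items.remove(0)) }
    }
}
```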
