Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--   compiler/rustc_codegen_llvm/src/abi.rs                          3
-rw-r--r--   compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs    4
-rw-r--r--   compiler/rustc_codegen_llvm/src/back/write.rs                   4
-rw-r--r--   compiler/rustc_codegen_llvm/src/common.rs                       4
-rw-r--r--   compiler/rustc_codegen_llvm/src/context.rs                      3
-rw-r--r--   compiler/rustc_codegen_llvm/src/intrinsic.rs                   16
-rw-r--r--   compiler/rustc_codegen_llvm/src/llvm/ffi.rs                     2
7 files changed, 19 insertions, 17 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index a6a3f0f9646..d034f9b5256 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -226,7 +226,8 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             //   when passed by value, making it smaller.
             // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
             //   when passed by value, making it larger.
-            let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
+            let copy_bytes =
+                cmp::min(cast.unaligned_size(bx).bytes(), self.layout.size.bytes());
             // Allocate some scratch space...
             let llscratch = bx.alloca(scratch_size, scratch_align);
             bx.lifetime_start(llscratch, scratch_size);
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
index 28a88dd2efe..b72636a6224 100644
--- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -32,7 +32,7 @@ impl OwnedTargetMachine {
         unique_section_names: bool,
         trap_unreachable: bool,
         singletree: bool,
-        asm_comments: bool,
+        verbose_asm: bool,
         emit_stack_size_section: bool,
         relax_elf_relocations: bool,
         use_init_array: bool,
@@ -64,7 +64,7 @@ impl OwnedTargetMachine {
             unique_section_names,
             trap_unreachable,
             singletree,
-            asm_comments,
+            verbose_asm,
             emit_stack_size_section,
             relax_elf_relocations,
             use_init_array,
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 5e481eb98f5..2fda19bf0c9 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -214,7 +214,7 @@ pub fn target_machine_factory(
         sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
     let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
 
-    let asm_comments = sess.opts.unstable_opts.asm_comments;
+    let verbose_asm = sess.opts.unstable_opts.verbose_asm;
 
     let relax_elf_relocations =
         sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
@@ -289,7 +289,7 @@ pub fn target_machine_factory(
             funique_section_names,
             trap_unreachable,
             singlethread,
-            asm_comments,
+            verbose_asm,
             emit_stack_size_section,
             relax_elf_relocations,
             use_init_array,
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index d42c6ed827a..fe64649cf70 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -289,8 +289,8 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     (value, AddressSpace::DATA)
                 }
             }
-            GlobalAlloc::Function(fn_instance) => (
-                self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
+            GlobalAlloc::Function { instance, .. } => (
+                self.get_fn_addr(instance.polymorphize(self.tcx)),
                 self.data_layout().instruction_address_space,
             ),
             GlobalAlloc::VTable(ty, trait_ref) => {
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 1a8e8efdae5..77beb9a6bb3 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -28,7 +28,7 @@ use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
 use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
 use rustc_session::Session;
 use rustc_span::source_map::Spanned;
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
 use rustc_target::abi::{call::FnAbi, HasDataLayout, TargetDataLayout, VariantIdx};
 use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
 use smallvec::SmallVec;
@@ -580,6 +580,7 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 ty::ParamEnv::reveal_all(),
                 def_id,
                 ty::List::empty(),
+                DUMMY_SP,
             )),
             _ => {
                 let name = name.unwrap_or("rust_eh_personality");
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index b5b0086f740..e02c61cd296 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1121,8 +1121,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_select_bitmask {
         let (len, _) = require_simd!(arg_tys[1], SimdArgument);
 
-        let expected_int_bits = (len.max(8) - 1).next_power_of_two();
-        let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
+        let expected_int_bits = len.max(8).next_power_of_two();
+        let expected_bytes = len.div_ceil(8);
 
         let mask_ty = arg_tys[0];
         let mask = match mask_ty.kind() {
@@ -1379,17 +1379,16 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     if name == sym::simd_bitmask {
-        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
-        // vector mask and returns the most significant bit (MSB) of each lane in the form
-        // of either:
+        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
+        // returns one bit for each lane (which must all be `0` or `!0`) in the form of either:
         // * an unsigned integer
         // * an array of `u8`
         // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
         //
         // The bit order of the result depends on the byte endianness, LSB-first for little
        // endian and MSB-first for big endian.
-        let expected_int_bits = in_len.max(8);
-        let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+        let expected_int_bits = in_len.max(8).next_power_of_two();
+        let expected_bytes = in_len.div_ceil(8);
 
         // Integer vector <i{in_bitwidth} x in_len>:
         let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
@@ -1409,7 +1408,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }),
         };
 
-        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
+        // LLVM doesn't always know the inputs are `0` or `!0`, so we shift here so it optimizes to
+        // `pmovmskb` and similar on x86.
         let shift_indices = vec
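
Note on the abi.rs hunk: the number of bytes copied through the scratch slot is now bounded by the cast type's unaligned size instead of the scratch allocation's size, so the copy can exceed neither the cast representation nor the Rust layout. A tiny sketch of that bound, with plain integers standing in for rustc's `Size` values (an assumption made purely for illustration):

    use std::cmp;

    /// Bytes to copy between the Rust value and the ABI "cast" scratch slot:
    /// never more than the cast type occupies, and never more than the Rust
    /// layout holds.
    fn copy_bytes(cast_unaligned_size: u64, rust_layout_size: u64) -> u64 {
        cmp::min(cast_unaligned_size, rust_layout_size)
    }

    fn main() {
        // A 6-byte Rust layout ({ u16, u16, u16 }) padded to an 8-byte cast type:
        // only the 6 bytes that exist on the Rust side are copied.
        assert_eq!(copy_bytes(8, 6), 6);
        // A cast type smaller than the Rust layout: copy only what the cast holds.
        assert_eq!(copy_bytes(12, 16), 12);
    }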
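
Note on the intrinsic.rs hunks: the returned-integer width for a mask is the lane count clamped to at least 8 and rounded up to a power of two, the `[u8; N]` form needs `div_ceil(8)` bytes, and lanes are packed LSB-first on little-endian targets. A minimal standalone sketch of that sizing and packing (the helper names and the scalar loop are illustrative, not rustc's actual code):

    /// Width, in bits, of the unsigned integer returned by `simd_bitmask` for a
    /// mask with `lanes` lanes: at least 8, rounded up to a power of two.
    fn expected_int_bits(lanes: u64) -> u64 {
        lanes.max(8).next_power_of_two()
    }

    /// Length of the `[u8; N]` form of the bitmask: one bit per lane, rounded
    /// up to whole bytes.
    fn expected_bytes(lanes: u64) -> u64 {
        lanes.div_ceil(8)
    }

    /// Scalar model of the little-endian packing: lane i contributes bit i.
    /// Each lane of a mask vector must be all-zeros or all-ones, so testing the
    /// lane's most significant bit is enough.
    fn bitmask_le(lanes: &[i8]) -> u64 {
        lanes
            .iter()
            .enumerate()
            .fold(0u64, |acc, (i, &lane)| acc | ((((lane as u8) >> 7) as u64) << i))
    }

    fn main() {
        assert_eq!(expected_int_bits(4), 8);   // a 4-lane mask still comes back as a u8
        assert_eq!(expected_int_bits(12), 16); // 12 lanes round up to a u16
        assert_eq!(expected_bytes(12), 2);     // or two bytes in the array form

        // Lanes 0 and 2 set (all-ones), lanes 1 and 3 clear.
        assert_eq!(bitmask_le(&[-1, 0, -1, 0]), 0b0101);
    }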