diff options
| author | Mark Rousskov <mark.simulacrum@gmail.com> | 2018-07-24 16:43:44 -0600 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2018-07-24 16:43:44 -0600 |
| commit | 06ba69d043efd5612aad2aa4903125d34eba17d4 (patch) | |
| tree | 9ffce52d8b60a116b13a0b0c48202028f65629e5 | |
| parent | 487e961c6ae072d969e148def0b0856c2367ae00 (diff) | |
| parent | 303306cf5ede678719ec1324bb02d3d02c014183 (diff) | |
| download | rust-06ba69d043efd5612aad2aa4903125d34eba17d4.tar.gz rust-06ba69d043efd5612aad2aa4903125d34eba17d4.zip | |
Rollup merge of #52391 - Amanieu:volatile_unaligned, r=alexcrichton
Add unaligned volatile intrinsics. Surprisingly enough, it turns out that unaligned volatile loads are actually useful for certain (very niche) types of lock-free code. I included unaligned volatile stores for completeness, but I currently do not know of any use cases for them. These are only exposed as intrinsics for now. If they turn out to be useful in practice, we can work towards stabilizing them. r? @alexcrichton
| -rw-r--r-- | src/libcore/intrinsics.rs | 9 | ||||
| -rw-r--r-- | src/librustc_codegen_llvm/builder.rs | 8 | ||||
| -rw-r--r-- | src/librustc_codegen_llvm/intrinsic.rs | 14 | ||||
| -rw-r--r-- | src/librustc_codegen_llvm/mir/operand.rs | 4 | ||||
| -rw-r--r-- | src/librustc_typeck/check/intrinsic.rs | 4 | ||||
| -rw-r--r-- | src/test/run-make-fulldeps/volatile-intrinsics/main.rs | 13 |
6 files changed, 44 insertions, 8 deletions
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 89fe2d941a3..854cb5f4e3b 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -1085,6 +1085,15 @@ extern "rust-intrinsic" { /// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html). pub fn volatile_store<T>(dst: *mut T, val: T); + /// Perform a volatile load from the `src` pointer + /// The pointer is not required to be aligned. + #[cfg(not(stage0))] + pub fn unaligned_volatile_load<T>(src: *const T) -> T; + /// Perform a volatile store to the `dst` pointer. + /// The pointer is not required to be aligned. + #[cfg(not(stage0))] + pub fn unaligned_volatile_store<T>(dst: *mut T, val: T); + /// Returns the square root of an `f32` pub fn sqrtf32(x: f32) -> f32; /// Returns the square root of an `f64` diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index e4acb2ad4b1..b34d0f1cd90 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -54,6 +54,7 @@ bitflags! 
{ pub struct MemFlags: u8 { const VOLATILE = 1 << 0; const NONTEMPORAL = 1 << 1; + const UNALIGNED = 1 << 2; } } @@ -602,7 +603,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let ptr = self.check_store(val, ptr); unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); - llvm::LLVMSetAlignment(store, align.abi() as c_uint); + let align = if flags.contains(MemFlags::UNALIGNED) { + 1 + } else { + align.abi() as c_uint + }; + llvm::LLVMSetAlignment(store, align); if flags.contains(MemFlags::VOLATILE) { llvm::LLVMSetVolatile(store, llvm::True); } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 58a32ad9774..9c5c0f730c1 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -234,15 +234,20 @@ pub fn codegen_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>, memset_intrinsic(bx, true, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate()) } - "volatile_load" => { + "volatile_load" | "unaligned_volatile_load" => { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); if let PassMode::Cast(ty) = fn_ty.ret.mode { ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to()); } let load = bx.volatile_load(ptr); + let align = if name == "unaligned_volatile_load" { + 1 + } else { + cx.align_of(tp_ty).abi() as u32 + }; unsafe { - llvm::LLVMSetAlignment(load, cx.align_of(tp_ty).abi() as u32); + llvm::LLVMSetAlignment(load, align); } to_immediate(bx, load, cx.layout_of(tp_ty)) }, @@ -251,6 +256,11 @@ pub fn codegen_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>, args[1].val.volatile_store(bx, dst); return; }, + "unaligned_volatile_store" => { + let dst = args[0].deref(bx.cx); + args[1].val.unaligned_volatile_store(bx, dst); + return; + }, "prefetch_read_data" | "prefetch_write_data" | "prefetch_read_instruction" | "prefetch_write_instruction" => { let expect = cx.get_intrinsic(&("llvm.prefetch")); diff --git a/src/librustc_codegen_llvm/mir/operand.rs 
b/src/librustc_codegen_llvm/mir/operand.rs index 777054014dc..c433df51110 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -276,6 +276,10 @@ impl<'a, 'tcx> OperandValue { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } + pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) { + self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); + } + pub fn nontemporal_store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index e26bf1b4f77..af4356dc8de 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -270,9 +270,9 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "roundf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), "roundf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), - "volatile_load" => + "volatile_load" | "unaligned_volatile_load" => (1, vec![ tcx.mk_imm_ptr(param(0)) ], param(0)), - "volatile_store" => + "volatile_store" | "unaligned_volatile_store" => (1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_nil()), "ctpop" | "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | diff --git a/src/test/run-make-fulldeps/volatile-intrinsics/main.rs b/src/test/run-make-fulldeps/volatile-intrinsics/main.rs index 4d0d7672101..d214a20139c 100644 --- a/src/test/run-make-fulldeps/volatile-intrinsics/main.rs +++ b/src/test/run-make-fulldeps/volatile-intrinsics/main.rs @@ -10,17 +10,24 @@ #![feature(core_intrinsics, volatile)] -use std::intrinsics::{volatile_load, volatile_store}; +use std::intrinsics::{ + unaligned_volatile_load, unaligned_volatile_store, volatile_load, volatile_store, +}; use std::ptr::{read_volatile, write_volatile}; pub fn main() { unsafe { - let mut i : isize = 1; + let mut i: isize = 1; volatile_store(&mut i, 2); 
assert_eq!(volatile_load(&i), 2); } unsafe { - let mut i : isize = 1; + let mut i: isize = 1; + unaligned_volatile_store(&mut i, 2); + assert_eq!(unaligned_volatile_load(&i), 2); + } + unsafe { + let mut i: isize = 1; write_volatile(&mut i, 2); assert_eq!(read_volatile(&i), 2); } |
