about summary refs log tree commit diff
path: root/src/libstd
diff options
context:
space:
mode:
authorbors <bors@rust-lang.org>2014-04-23 04:36:32 -0700
committerbors <bors@rust-lang.org>2014-04-23 04:36:32 -0700
commite049a7003b686002d5c091ec0465d07e5c5ff7a6 (patch)
treeff99ce2927b9ebf1ca13d25c2fabd08dc9f1f99d /src/libstd
parent1ce0b98c7bad8c01e008ccbc790607f4bb26ec89 (diff)
parentb2724727d52904e104ccb3ce14d9dc60ffee1dec (diff)
downloadrust-e049a7003b686002d5c091ec0465d07e5c5ff7a6.tar.gz
rust-e049a7003b686002d5c091ec0465d07e5c5ff7a6.zip
auto merge of #13693 : thestinger/rust/mem, r=alexcrichton
This exposes volatile versions of the memset/memmove/memcpy intrinsics.

The volatile parameter must be constant, so this can't simply be a
parameter to our intrinsics.
Diffstat (limited to 'src/libstd')
-rw-r--r--src/libstd/intrinsics.rs31
1 files changed, 27 insertions, 4 deletions
diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs
index 43b5f42163e..09cdad94305 100644
--- a/src/libstd/intrinsics.rs
+++ b/src/libstd/intrinsics.rs
@@ -263,10 +263,6 @@ extern "rust-intrinsic" {
     /// Execute a breakpoint trap, for inspection by a debugger.
     pub fn breakpoint();
 
-    pub fn volatile_load<T>(src: *T) -> T;
-    pub fn volatile_store<T>(dst: *mut T, val: T);
-
-
     /// The size of a type in bytes.
     ///
     /// This is the exact number of bytes in memory taken up by a
@@ -340,6 +336,33 @@ extern "rust-intrinsic" {
     /// `min_align_of::<T>()`
     pub fn set_memory<T>(dst: *mut T, val: u8, count: uint);
 
+    /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *T, count: uint);
+    /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_copy_memory<T>(dst: *mut T, src: *T, count: uint);
+    /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
+    /// size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`.
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: uint);
+
+    /// Perform a volatile load from the `src` pointer.
+    pub fn volatile_load<T>(src: *T) -> T;
+    /// Perform a volatile store to the `dst` pointer.
+    pub fn volatile_store<T>(dst: *mut T, val: T);
+
     pub fn sqrtf32(x: f32) -> f32;
     pub fn sqrtf64(x: f64) -> f64;