about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--src/libcore/at_vec.rs10
-rw-r--r--src/libcore/num/f32.rs6
-rw-r--r--src/libcore/num/f64.rs6
-rw-r--r--src/libcore/pipes.rs24
-rw-r--r--src/libcore/private.rs17
-rw-r--r--src/libcore/private/at_exit.rs6
-rw-r--r--src/libcore/private/exchange_alloc.rs10
-rw-r--r--src/libcore/private/global.rs6
-rw-r--r--src/libcore/private/intrinsics.rs131
-rw-r--r--src/libcore/ptr.rs7
-rw-r--r--src/libcore/vec.rs34
11 files changed, 168 insertions, 89 deletions
diff --git a/src/libcore/at_vec.rs b/src/libcore/at_vec.rs
index 565fe11d3f4..beda48d2dfb 100644
--- a/src/libcore/at_vec.rs
+++ b/src/libcore/at_vec.rs
@@ -30,11 +30,6 @@ pub extern mod rustrt {
                                             ++n: libc::size_t);
 }
 
-#[abi = "rust-intrinsic"]
-pub extern mod rusti {
-    pub fn move_val_init<T>(dst: &mut T, -src: T);
-}
-
 /// Returns the number of elements the vector can hold without reallocating
 #[inline(always)]
 pub pure fn capacity<T>(v: @[const T]) -> uint {
@@ -185,9 +180,10 @@ pub mod traits {
 pub mod traits {}
 
 pub mod raw {
-    use at_vec::{capacity, rusti, rustrt};
+    use at_vec::{capacity, rustrt};
     use cast::transmute;
     use libc;
+    use private::intrinsics::{move_val_init};
     use ptr::addr_of;
     use ptr;
     use sys;
@@ -229,7 +225,7 @@ pub mod raw {
         (**repr).unboxed.fill += sys::size_of::<T>();
         let p = addr_of(&((**repr).unboxed.data));
         let p = ptr::offset(p, fill) as *mut T;
-        rusti::move_val_init(&mut(*p), initval);
+        move_val_init(&mut(*p), initval);
     }
 
     pub unsafe fn push_slow<T>(v: &mut @[const T], initval: T) {
diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs
index 2189f109eff..d5407daca80 100644
--- a/src/libcore/num/f32.rs
+++ b/src/libcore/num/f32.rs
@@ -18,6 +18,7 @@ use num::strconv;
 use num;
 use ops;
 use option::Option;
+use private::intrinsics::floorf32;
 use from_str;
 use to_str;
 
@@ -332,11 +333,6 @@ impl ops::Neg<f32> for f32 {
     pure fn neg(&self) -> f32 { -*self }
 }
 
-#[abi="rust-intrinsic"]
-pub extern {
-    fn floorf32(val: f32) -> f32;
-}
-
 impl num::Round for f32 {
     #[inline(always)]
     pure fn round(&self, mode: num::RoundMode) -> f32 {
diff --git a/src/libcore/num/f64.rs b/src/libcore/num/f64.rs
index df68b6153cb..4e4e7c68646 100644
--- a/src/libcore/num/f64.rs
+++ b/src/libcore/num/f64.rs
@@ -19,6 +19,7 @@ use num::strconv;
 use num;
 use ops;
 use option::Option;
+use private::intrinsics::floorf64;
 use to_str;
 use from_str;
 
@@ -357,11 +358,6 @@ impl ops::Neg<f64> for f64 {
     pure fn neg(&self) -> f64 { -*self }
 }
 
-#[abi="rust-intrinsic"]
-pub extern {
-    fn floorf64(val: f64) -> f64;
-}
-
 impl num::Round for f64 {
     #[inline(always)]
     pure fn round(&self, mode: num::RoundMode) -> f64 {
diff --git a/src/libcore/pipes.rs b/src/libcore/pipes.rs
index f0108fe85b7..f1f4319bfce 100644
--- a/src/libcore/pipes.rs
+++ b/src/libcore/pipes.rs
@@ -92,6 +92,7 @@ use libc;
 use option;
 use option::{None, Option, Some, unwrap};
 use pipes;
+use private::intrinsics;
 use ptr;
 use private;
 use task;
@@ -256,37 +257,26 @@ pub fn entangle_buffer<T: Owned, Tstart: Owned>(
     (SendPacketBuffered(p), RecvPacketBuffered(p))
 }
 
-#[abi = "rust-intrinsic"]
-#[doc(hidden)]
-extern mod rusti {
-    fn atomic_xchg(dst: &mut int, src: int) -> int;
-    fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
-    fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
-
-    fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
-    fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
-}
-
 // If I call the rusti versions directly from a polymorphic function,
 // I get link errors. This is a bug that needs investigated more.
 #[doc(hidden)]
 pub fn atomic_xchng_rel(dst: &mut int, src: int) -> int {
     unsafe {
-        rusti::atomic_xchg_rel(dst, src)
+        intrinsics::atomic_xchg_rel(dst, src)
     }
 }
 
 #[doc(hidden)]
 pub fn atomic_add_acq(dst: &mut int, src: int) -> int {
     unsafe {
-        rusti::atomic_xadd_acq(dst, src)
+        intrinsics::atomic_xadd_acq(dst, src)
     }
 }
 
 #[doc(hidden)]
 pub fn atomic_sub_rel(dst: &mut int, src: int) -> int {
     unsafe {
-        rusti::atomic_xsub_rel(dst, src)
+        intrinsics::atomic_xsub_rel(dst, src)
     }
 }
 
@@ -295,7 +285,7 @@ pub fn swap_task(dst: &mut *rust_task, src: *rust_task) -> *rust_task {
     // It might be worth making both acquire and release versions of
     // this.
     unsafe {
-        transmute(rusti::atomic_xchg(transmute(dst), src as int))
+        transmute(intrinsics::atomic_xchg(transmute(dst), src as int))
     }
 }
 
@@ -335,14 +325,14 @@ fn wait_event(this: *rust_task) -> *libc::c_void {
 #[doc(hidden)]
 fn swap_state_acq(dst: &mut State, src: State) -> State {
     unsafe {
-        transmute(rusti::atomic_xchg_acq(transmute(dst), src as int))
+        transmute(intrinsics::atomic_xchg_acq(transmute(dst), src as int))
     }
 }
 
 #[doc(hidden)]
 fn swap_state_rel(dst: &mut State, src: State) -> State {
     unsafe {
-        transmute(rusti::atomic_xchg_rel(transmute(dst), src as int))
+        transmute(intrinsics::atomic_xchg_rel(transmute(dst), src as int))
     }
 }
 
diff --git a/src/libcore/private.rs b/src/libcore/private.rs
index 5b69c348c15..935a04731d5 100644
--- a/src/libcore/private.rs
+++ b/src/libcore/private.rs
@@ -32,6 +32,8 @@ pub mod finally;
 pub mod weak_task;
 #[path = "private/exchange_alloc.rs"]
 pub mod exchange_alloc;
+#[path = "private/intrinsics.rs"]
+pub mod intrinsics;
 
 extern mod rustrt {
     pub unsafe fn rust_create_little_lock() -> rust_little_lock;
@@ -43,13 +45,6 @@ extern mod rustrt {
     pub unsafe fn rust_raw_thread_join_delete(thread: *raw_thread);
 }
 
-#[abi = "rust-intrinsic"]
-extern mod rusti {
-    fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
-    fn atomic_xadd(dst: &mut int, src: int) -> int;
-    fn atomic_xsub(dst: &mut int, src: int) -> int;
-}
-
 #[allow(non_camel_case_types)] // runtime type
 type raw_thread = libc::c_void;
 
@@ -101,7 +96,7 @@ fn test_run_in_bare_thread_exchange() {
 
 fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
     unsafe {
-        let old = rusti::atomic_cxchg(address, oldval, newval);
+        let old = intrinsics::atomic_cxchg(address, oldval, newval);
         old == oldval
     }
 }
@@ -132,7 +127,7 @@ struct ArcDestruct<T> {
             }
             do task::unkillable {
                 let data: ~ArcData<T> = cast::reinterpret_cast(&self.data);
-                let new_count = rusti::atomic_xsub(&mut data.count, 1) - 1;
+                let new_count = intrinsics::atomic_xsub(&mut data.count, 1) - 1;
                 assert new_count >= 0;
                 if new_count == 0 {
                     // Were we really last, or should we hand off to an
@@ -205,7 +200,7 @@ pub unsafe fn unwrap_shared_mutable_state<T: Owned>(rc: SharedMutableState<T>)
             // Got in. Step 0: Tell destructor not to run. We are now it.
             rc.data = ptr::null();
             // Step 1 - drop our own reference.
-            let new_count = rusti::atomic_xsub(&mut ptr.count, 1) - 1;
+            let new_count = intrinsics::atomic_xsub(&mut ptr.count, 1) - 1;
             //assert new_count >= 0;
             if new_count == 0 {
                 // We were the last owner. Can unwrap immediately.
@@ -284,7 +279,7 @@ pub unsafe fn clone_shared_mutable_state<T: Owned>(rc: &SharedMutableState<T>)
         -> SharedMutableState<T> {
     unsafe {
         let ptr: ~ArcData<T> = cast::reinterpret_cast(&(*rc).data);
-        let new_count = rusti::atomic_xadd(&mut ptr.count, 1) + 1;
+        let new_count = intrinsics::atomic_xadd(&mut ptr.count, 1) + 1;
         assert new_count >= 2;
         cast::forget(ptr);
     }
diff --git a/src/libcore/private/at_exit.rs b/src/libcore/private/at_exit.rs
index d80631a29ee..4785cb622cb 100644
--- a/src/libcore/private/at_exit.rs
+++ b/src/libcore/private/at_exit.rs
@@ -70,12 +70,6 @@ fn exit_runner(exit_fns: *ExitFunctions) {
     }
 }
 
-#[abi = "rust-intrinsic"]
-pub extern mod rusti {
-    fn move_val_init<T>(dst: &mut T, -src: T);
-    fn init<T>() -> T;
-}
-
 #[test]
 fn test_at_exit() {
     let i = 10;
diff --git a/src/libcore/private/exchange_alloc.rs b/src/libcore/private/exchange_alloc.rs
index f7fcf3bdd5e..b6af9891e11 100644
--- a/src/libcore/private/exchange_alloc.rs
+++ b/src/libcore/private/exchange_alloc.rs
@@ -14,6 +14,7 @@ use c_malloc = libc::malloc;
 use c_free = libc::free;
 use managed::raw::{BoxHeaderRepr, BoxRepr};
 use cast::transmute;
+use private::intrinsics::{atomic_xadd,atomic_xsub};
 use ptr::null;
 use intrinsic::TyDesc;
 
@@ -35,7 +36,7 @@ pub unsafe fn malloc(td: *TypeDesc, size: uint) -> *c_void {
         box.header.next = null();
 
         let exchange_count = &mut *rust_get_exchange_count_ptr();
-        rusti::atomic_xadd(exchange_count, 1);
+        atomic_xadd(exchange_count, 1);
 
         return transmute(box);
     }
@@ -43,7 +44,7 @@ pub unsafe fn malloc(td: *TypeDesc, size: uint) -> *c_void {
 
 pub unsafe fn free(ptr: *c_void) {
     let exchange_count = &mut *rust_get_exchange_count_ptr();
-    rusti::atomic_xsub(exchange_count, 1);
+    atomic_xsub(exchange_count, 1);
 
     assert ptr.is_not_null();
     c_free(ptr);
@@ -68,8 +69,3 @@ extern {
     fn rust_get_exchange_count_ptr() -> *mut int;
 }
 
-#[abi = "rust-intrinsic"]
-extern mod rusti {
-    fn atomic_xadd(dst: &mut int, src: int) -> int;
-    fn atomic_xsub(dst: &mut int, src: int) -> int;
-}
diff --git a/src/libcore/private/global.rs b/src/libcore/private/global.rs
index 621ead48abc..086fa49c7b5 100644
--- a/src/libcore/private/global.rs
+++ b/src/libcore/private/global.rs
@@ -36,6 +36,7 @@ use private::{Exclusive, exclusive};
 use private::{SharedMutableState, shared_mutable_state};
 use private::{get_shared_immutable_state};
 use private::at_exit::at_exit;
+use private::intrinsics::atomic_cxchg;
 use hashmap::linear::LinearMap;
 use sys::Closure;
 use task::spawn;
@@ -231,11 +232,6 @@ extern {
     fn rust_get_global_data_ptr() -> *mut int;
 }
 
-#[abi = "rust-intrinsic"]
-extern {
-    fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
-}
-
 #[test]
 fn test_clone_rc() {
     type MyType = SharedMutableState<int>;
diff --git a/src/libcore/private/intrinsics.rs b/src/libcore/private/intrinsics.rs
new file mode 100644
index 00000000000..952ba742aa3
--- /dev/null
+++ b/src/libcore/private/intrinsics.rs
@@ -0,0 +1,131 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+An attempt to move all intrinsic declarations to a single place,
+as mentioned in issue #3369.
+
+The intrinsics are defined in librustc/middle/trans/foreign.rs.
+*/
+
+#[abi = "rust-intrinsic"]
+pub extern {
+    pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
+    pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
+    pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
+
+    pub fn atomic_xchg(dst: &mut int, src: int) -> int;
+    pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
+    pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
+    
+    pub fn atomic_xadd(dst: &mut int, src: int) -> int;
+    pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
+    pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
+    
+    pub fn atomic_xsub(dst: &mut int, src: int) -> int;
+    pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
+    pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
+    
+    pub fn size_of<T>() -> uint;
+    
+    pub fn move_val<T>(dst: &mut T, -src: T);
+    pub fn move_val_init<T>(dst: &mut T, -src: T);
+    
+    pub fn min_align_of<T>() -> uint;
+    pub fn pref_align_of<T>() -> uint;
+    
+    pub fn get_tydesc<T>() -> *();
+    
+    pub fn init<T>() -> T;
+    
+    pub fn forget<T>(_: T) -> ();
+
+    // FIXME: intrinsic uses legacy modes
+    fn reinterpret_cast<T,U>(&&src: T) -> U;
+    // FIXME: intrinsic uses legacy modes
+    fn addr_of<T>(&&scr: T) -> *T;
+    
+    pub fn needs_drop<T>() -> bool;
+    
+    // FIXME: intrinsic uses legacy modes and references TyDesc
+    // and TyVisitor, which are in librustc
+    //fn visit_tydesc(++td: *TyDesc, &&tv: TyVisitor) -> ();
+    // FIXME: intrinsic uses legacy modes
+    //fn frame_address(f: &once fn(*u8));
+    
+    pub fn morestack_addr() -> *();
+    
+    pub fn memmove32(dst: *mut u8, src: *u8, size: u32);
+    pub fn memmove64(dst: *mut u8, src: *u8, size: u64);
+    
+    pub fn sqrtf32(x: f32) -> f32;
+    pub fn sqrtf64(x: f64) -> f64;
+    
+    pub fn powif32(a: f32, x: i32) -> f32;
+    pub fn powif64(a: f64, x: i32) -> f64;
+    
+    pub fn sinf32(x: f32) -> f32;
+    pub fn sinf64(x: f64) -> f64;
+    
+    pub fn cosf32(x: f32) -> f32;
+    pub fn cosf64(x: f64) -> f64;
+    
+    pub fn powf32(a: f32, x: f32) -> f32;
+    pub fn powf64(a: f64, x: f64) -> f64;
+    
+    pub fn expf32(x: f32) -> f32;
+    pub fn expf64(x: f64) -> f64;
+    
+    pub fn exp2f32(x: f32) -> f32;
+    pub fn exp2f64(x: f64) -> f64;
+    
+    pub fn logf32(x: f32) -> f32;
+    pub fn logf64(x: f64) -> f64;
+    
+    pub fn log10f32(x: f32) -> f32;
+    pub fn log10f64(x: f64) -> f64;
+    
+    pub fn log2f32(x: f32) -> f32;
+    pub fn log2f64(x: f64) -> f64;
+
+    pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
+    pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
+    
+    pub fn fabsf32(x: f32) -> f32;
+    pub fn fabsf64(x: f64) -> f64;
+    
+    pub fn floorf32(x: f32) -> f32;
+    pub fn floorf64(x: f64) -> f64;
+
+    pub fn ceilf32(x: f32) -> f32;
+    pub fn ceilf64(x: f64) -> f64;
+    
+    pub fn truncf32(x: f32) -> f32;
+    pub fn truncf64(x: f64) -> f64;
+    
+    pub fn ctpop8(x: i8) -> i8;
+    pub fn ctpop16(x: i16) -> i16;
+    pub fn ctpop32(x: i32) -> i32;
+    pub fn ctpop64(x: i64) -> i64;
+    
+    pub fn ctlz8(x: i8) -> i8;
+    pub fn ctlz16(x: i16) -> i16;
+    pub fn ctlz32(x: i32) -> i32;
+    pub fn ctlz64(x: i64) -> i64;
+
+    pub fn cttz8(x: i8) -> i8;
+    pub fn cttz16(x: i16) -> i16;
+    pub fn cttz32(x: i32) -> i32;
+    pub fn cttz64(x: i64) -> i64;
+    
+    pub fn bswap16(x: i16) -> i16;
+    pub fn bswap32(x: i32) -> i32;
+    pub fn bswap64(x: i64) -> i64;
+}
+
diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs
index c6617bdd516..2266c2511f8 100644
--- a/src/libcore/ptr.rs
+++ b/src/libcore/ptr.rs
@@ -14,6 +14,7 @@ use cast;
 use cmp::{Eq, Ord};
 use libc;
 use libc::{c_void, size_t};
+use private::intrinsics::{memmove32,memmove64};
 use ptr;
 use str;
 use sys;
@@ -179,12 +180,6 @@ pub trait Ptr<T> {
     pure fn offset(count: uint) -> Self;
 }
 
-#[abi="rust-intrinsic"]
-pub extern {
-    fn memmove32(dst: *mut u8, src: *u8, size: u32);
-    fn memmove64(dst: *mut u8, src: *u8, size: u64);
-}
-
 /// Extension methods for immutable pointers
 impl<T> Ptr<T> for *T {
     /// Returns true if the pointer is equal to the null pointer.
diff --git a/src/libcore/vec.rs b/src/libcore/vec.rs
index b55a3bee8b0..bbffd43d01e 100644
--- a/src/libcore/vec.rs
+++ b/src/libcore/vec.rs
@@ -22,6 +22,7 @@ use kinds::Copy;
 use libc;
 use libc::size_t;
 use option::{None, Option, Some};
+use private::intrinsics;
 use ptr;
 use ptr::addr_of;
 use sys;
@@ -35,13 +36,6 @@ pub extern mod rustrt {
                                  ++n: libc::size_t);
 }
 
-#[abi = "rust-intrinsic"]
-pub extern mod rusti {
-    fn move_val_init<T>(dst: &mut T, -src: T);
-    fn init<T>() -> T;
-}
-
-
 /// Returns true if a vector contains no elements
 pub pure fn is_empty<T>(v: &[const T]) -> bool {
     as_const_buf(v, |_p, len| len == 0u)
@@ -120,7 +114,7 @@ pub pure fn from_fn<T>(n_elts: uint, op: iter::InitOp<T>) -> ~[T] {
         do as_mut_buf(v) |p, _len| {
             let mut i: uint = 0u;
             while i < n_elts {
-                rusti::move_val_init(&mut(*ptr::mut_offset(p, i)), op(i));
+                intrinsics::move_val_init(&mut(*ptr::mut_offset(p, i)), op(i));
                 i += 1u;
             }
         }
@@ -538,7 +532,7 @@ pub fn consume<T>(mut v: ~[T], f: fn(uint, v: T)) {
                 // holes we create in the vector. That ensures that, if the
                 // iterator fails then we won't try to clean up the consumed
                 // elements during unwinding
-                let mut x = rusti::init();
+                let mut x = intrinsics::init();
                 let p = ptr::mut_offset(p, i);
                 x <-> *p;
                 f(i, x);
@@ -557,8 +551,8 @@ pub fn pop<T>(v: &mut ~[T]) -> T {
     }
     let valptr = ptr::to_mut_unsafe_ptr(&mut v[ln - 1u]);
     unsafe {
-        // FIXME #4204: Should be rusti::uninit() - we don't need this zeroed
-        let mut val = rusti::init();
+        // FIXME #4204: Should be intrinsics::uninit() - we don't need this zeroed
+        let mut val = intrinsics::init();
         val <-> *valptr;
         raw::set_len(v, ln - 1u);
         val
@@ -605,7 +599,7 @@ unsafe fn push_fast<T>(v: &mut ~[T], initval: T) {
     (**repr).unboxed.fill += sys::nonzero_size_of::<T>();
     let p = addr_of(&((**repr).unboxed.data));
     let p = ptr::offset(p, fill) as *mut T;
-    rusti::move_val_init(&mut(*p), initval);
+    intrinsics::move_val_init(&mut(*p), initval);
 }
 
 #[inline(never)]
@@ -632,8 +626,8 @@ pub fn push_all_move<T>(v: &mut ~[T], mut rhs: ~[T]) {
     unsafe {
         do as_mut_buf(rhs) |p, len| {
             for uint::range(0, len) |i| {
-                // FIXME #4204 Should be rusti::uninit() - don't need to zero
-                let mut x = rusti::init();
+                // FIXME #4204 Should be intrinsics::uninit() - don't need to zero
+                let mut x = intrinsics::init();
                 x <-> *ptr::mut_offset(p, i);
                 push(&mut *v, x);
             }
@@ -649,8 +643,8 @@ pub fn truncate<T>(v: &mut ~[T], newlen: uint) {
         unsafe {
             // This loop is optimized out for non-drop types.
             for uint::range(newlen, oldlen) |i| {
-                // FIXME #4204 Should be rusti::uninit() - don't need to zero
-                let mut dropped = rusti::init();
+                // FIXME #4204 Should be intrinsics::uninit() - don't need to zero
+                let mut dropped = intrinsics::init();
                 dropped <-> *ptr::mut_offset(p, i);
             }
         }
@@ -675,9 +669,9 @@ pub fn dedup<T: Eq>(v: &mut ~[T]) {
                 // last_written < next_to_read < ln
                 if *ptr::mut_offset(p, next_to_read) ==
                     *ptr::mut_offset(p, last_written) {
-                    // FIXME #4204 Should be rusti::uninit() - don't need to
+                    // FIXME #4204 Should be intrinsics::uninit() - don't need to
                     // zero
-                    let mut dropped = rusti::init();
+                    let mut dropped = intrinsics::init();
                     dropped <-> *ptr::mut_offset(p, next_to_read);
                 } else {
                     last_written += 1;
@@ -2009,11 +2003,11 @@ pub mod raw {
     use managed;
     use option::{None, Some};
     use option;
+    use private::intrinsics;
     use ptr::addr_of;
     use ptr;
     use sys;
     use vec::{UnboxedVecRepr, as_const_buf, as_mut_buf, len, with_capacity};
-    use vec::rusti;
 
     /// The internal representation of a (boxed) vector
     pub struct VecRepr {
@@ -2101,7 +2095,7 @@ pub mod raw {
         do as_mut_buf(v) |p, _len| {
             let mut box2 = None;
             box2 <-> box;
-            rusti::move_val_init(&mut(*ptr::mut_offset(p, i)),
+            intrinsics::move_val_init(&mut(*ptr::mut_offset(p, i)),
                                  option::unwrap(box2));
         }
     }