summary refs log tree commit diff
path: root/src/libstd/unstable/intrinsics.rs
blob: 500143fb5777341a8957112b5a3c3bea858fd4df (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*! rustc compiler intrinsics.

The corresponding definitions are in librustc/middle/trans/foreign.rs.

# Atomics

The atomic intrinsics provide common atomic operations on machine
words, with multiple possible memory orderings. They obey the same
semantics as C++11. See the LLVM documentation on [atomics].

[atomics]: http://llvm.org/docs/Atomics.html

A quick refresher on memory ordering:

* Acquire - a barrier for acquiring a lock. Subsequent reads and writes
  take place after the barrier.
* Release - a barrier for releasing a lock. Preceding reads and writes
  take place before the barrier.
* Sequentially consistent - sequentially consistent operations are
  guaranteed to happen in order. This is the standard mode for working
  with atomic types and is equivalent to Java's `volatile`.

*/

// This is needed to prevent duplicate lang item definitions.
#[cfg(test)]
pub use realstd::unstable::intrinsics::{TyDesc, Opaque, TyVisitor};

// Type of the compiler-generated "glue" routines stored in `TyDesc`
// below. Each glue function operates on a value through an opaque
// `*i8` pointer.
#[cfg(not(stage0))]
pub type GlueFn = extern "Rust" fn(*i8);

// NOTE(review): the stage0 (bootstrap-snapshot) ABI apparently passed
// an extra type-descriptor argument to glue functions — hence the
// second cfg'd signature. Confirm against the snapshot compiler.
#[cfg(stage0)]
pub type GlueFn = extern "Rust" fn(**TyDesc, *i8);

// NB: this has to be kept in sync with the Rust ABI.
/// Runtime type descriptor. The compiler emits one static `TyDesc` per
/// type; `get_tydesc` (below) returns a pointer to it and
/// `visit_tydesc` walks it with a `TyVisitor`.
#[lang="ty_desc"]
#[cfg(not(test))]
pub struct TyDesc {
    // Size of the described type, in bytes.
    size: uint,
    // Alignment of the described type, in bytes.
    align: uint,
    // Compiler-generated glue routines for the described type. From
    // the field names: `take_glue` runs when a value is copied/taken,
    // `drop_glue` when it is dropped, `free_glue` when its storage is
    // freed, and `visit_glue` drives reflection via `TyVisitor`.
    take_glue: GlueFn,
    drop_glue: GlueFn,
    free_glue: GlueFn,
    visit_glue: GlueFn,
}

/// Uninstantiable placeholder type, used behind raw pointers when the
/// pointee's real type is not expressible (e.g. the `*Opaque` argument
/// of `get_disr` in `TyVisitor`). Having no variants, no value of this
/// type can ever be constructed.
#[lang="opaque"]
#[cfg(not(test))]
pub enum Opaque { }

/// Reflection visitor: `visit_tydesc` (below) walks a `TyDesc` and
/// invokes the `visit_*` method matching each component of the
/// described type. Every method returns a `bool`; by convention for
/// visitors of this shape, returning `false` presumably aborts the
/// walk — TODO confirm against the glue generated in trans.
///
/// The `mtbl` parameters carry a mutability code, `sz`/`align` the
/// size and alignment in bytes, and `inner` the descriptor of the
/// pointed-to / element type.
#[lang="ty_visitor"]
#[cfg(not(test))]
pub trait TyVisitor {
    // Primitive types.
    fn visit_bot(&self) -> bool;
    fn visit_nil(&self) -> bool;
    fn visit_bool(&self) -> bool;

    fn visit_int(&self) -> bool;
    fn visit_i8(&self) -> bool;
    fn visit_i16(&self) -> bool;
    fn visit_i32(&self) -> bool;
    fn visit_i64(&self) -> bool;

    fn visit_uint(&self) -> bool;
    fn visit_u8(&self) -> bool;
    fn visit_u16(&self) -> bool;
    fn visit_u32(&self) -> bool;
    fn visit_u64(&self) -> bool;

    fn visit_float(&self) -> bool;
    fn visit_f32(&self) -> bool;
    fn visit_f64(&self) -> bool;

    fn visit_char(&self) -> bool;
    fn visit_str(&self) -> bool;

    // String types, one per storage kind (`@str`, `~str`, `&str`,
    // and fixed-length strings).
    fn visit_estr_box(&self) -> bool;
    fn visit_estr_uniq(&self) -> bool;
    fn visit_estr_slice(&self) -> bool;
    fn visit_estr_fixed(&self, n: uint, sz: uint, align: uint) -> bool;

    // Pointer types (`@T`, `~T`, `*T`, `&T`).
    fn visit_box(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_uniq(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_ptr(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_rptr(&self, mtbl: uint, inner: *TyDesc) -> bool;

    // Vector types, one per storage kind.
    fn visit_vec(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_unboxed_vec(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_box(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_uniq(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_slice(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_fixed(&self, n: uint, sz: uint, align: uint,
                        mtbl: uint, inner: *TyDesc) -> bool;

    // Records: enter/leave bracket the per-field callbacks.
    fn visit_enter_rec(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    fn visit_rec_field(&self, i: uint, name: &str,
                       mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_leave_rec(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;

    // Structs ("classes"), same enter/field/leave protocol.
    fn visit_enter_class(&self, n_fields: uint,
                         sz: uint, align: uint) -> bool;
    fn visit_class_field(&self, i: uint, name: &str,
                         mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_leave_class(&self, n_fields: uint,
                         sz: uint, align: uint) -> bool;

    // Tuples, same enter/field/leave protocol.
    fn visit_enter_tup(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    fn visit_tup_field(&self, i: uint, inner: *TyDesc) -> bool;
    fn visit_leave_tup(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;

    // Enums: `get_disr` reads the discriminant out of a value of the
    // enum type passed as an opaque pointer; each variant is bracketed
    // by enter/leave callbacks around its fields.
    fn visit_enter_enum(&self, n_variants: uint,
                        get_disr: extern unsafe fn(ptr: *Opaque) -> int,
                        sz: uint, align: uint) -> bool;
    fn visit_enter_enum_variant(&self, variant: uint,
                                disr_val: int,
                                n_fields: uint,
                                name: &str) -> bool;
    fn visit_enum_variant_field(&self, i: uint, offset: uint, inner: *TyDesc) -> bool;
    fn visit_leave_enum_variant(&self, variant: uint,
                                disr_val: int,
                                n_fields: uint,
                                name: &str) -> bool;
    fn visit_leave_enum(&self, n_variants: uint,
                        get_disr: extern unsafe fn(ptr: *Opaque) -> int,
                        sz: uint, align: uint) -> bool;

    // Function types: purity/proto/mode/retstyle are encoded as
    // integer codes — NOTE(review): the encodings are defined by the
    // compiler, not visible here.
    fn visit_enter_fn(&self, purity: uint, proto: uint,
                      n_inputs: uint, retstyle: uint) -> bool;
    fn visit_fn_input(&self, i: uint, mode: uint, inner: *TyDesc) -> bool;
    fn visit_fn_output(&self, retstyle: uint, inner: *TyDesc) -> bool;
    fn visit_leave_fn(&self, purity: uint, proto: uint,
                      n_inputs: uint, retstyle: uint) -> bool;

    // Remaining type forms.
    fn visit_trait(&self) -> bool;
    fn visit_var(&self) -> bool;
    fn visit_var_integral(&self) -> bool;
    fn visit_param(&self, i: uint) -> bool;
    fn visit_self(&self) -> bool;
    fn visit_type(&self) -> bool;
    fn visit_opaque_box(&self) -> bool;
    fn visit_constr(&self, inner: *TyDesc) -> bool;
    fn visit_closure_ptr(&self, ck: uint) -> bool;
}

// NB: every declaration in this block names a compiler intrinsic; the
// names and signatures must match what the compiler lowers (see the
// module docs), so entries here cannot be renamed or reordered freely.
#[abi = "rust-intrinsic"]
pub extern "rust-intrinsic" {

    /// Atomic compare and exchange, sequentially consistent.
    ///
    /// Returns the previous value at `dst`; presumably stores `src`
    /// only when that previous value equals `old` (the usual CAS
    /// contract — confirm against the generated `cmpxchg`).
    pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, acquire ordering.
    pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, release ordering.
    pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;

    /// Atomic compare and exchange, acquire-release ordering.
    pub fn atomic_cxchg_acqrel(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, relaxed ordering.
    pub fn atomic_cxchg_relaxed(dst: &mut int, old: int, src: int) -> int;


    /// Atomic load, sequentially consistent.
    pub fn atomic_load(src: &int) -> int;
    /// Atomic load, acquire ordering.
    pub fn atomic_load_acq(src: &int) -> int;

    /// Atomic load, relaxed ordering.
    pub fn atomic_load_relaxed(src: &int) -> int;

    /// Atomic store, sequentially consistent.
    pub fn atomic_store(dst: &mut int, val: int);
    /// Atomic store, release ordering.
    pub fn atomic_store_rel(dst: &mut int, val: int);

    /// Atomic store, relaxed ordering.
    pub fn atomic_store_relaxed(dst: &mut int, val: int);

    /// Atomic exchange (returns the previous value), sequentially
    /// consistent.
    pub fn atomic_xchg(dst: &mut int, src: int) -> int;
    /// Atomic exchange, acquire ordering.
    pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
    /// Atomic exchange, release ordering.
    pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
    /// Atomic exchange, acquire-release ordering.
    pub fn atomic_xchg_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic exchange, relaxed ordering.
    pub fn atomic_xchg_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic addition (returns the previous value), sequentially
    /// consistent.
    pub fn atomic_xadd(dst: &mut int, src: int) -> int;
    /// Atomic addition, acquire ordering.
    pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
    /// Atomic addition, release ordering.
    pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
    /// Atomic addition, acquire-release ordering.
    pub fn atomic_xadd_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic addition, relaxed ordering.
    pub fn atomic_xadd_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic subtraction (returns the previous value), sequentially
    /// consistent.
    pub fn atomic_xsub(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, acquire ordering.
    pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, release ordering.
    pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, acquire-release ordering.
    pub fn atomic_xsub_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, relaxed ordering.
    pub fn atomic_xsub_relaxed(dst: &mut int, src: int) -> int;

    // For each remaining read-modify-write family below, the base name
    // is sequentially consistent and the `_acq`/`_rel`/`_acqrel`/
    // `_relaxed` suffixes select the memory ordering, as above. Each
    // returns the previous value at `dst`.

    // Atomic bitwise AND.
    pub fn atomic_and(dst: &mut int, src: int) -> int;
    pub fn atomic_and_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_and_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_and_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_and_relaxed(dst: &mut int, src: int) -> int;

    // Atomic bitwise NAND.
    pub fn atomic_nand(dst: &mut int, src: int) -> int;
    pub fn atomic_nand_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_nand_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_nand_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_nand_relaxed(dst: &mut int, src: int) -> int;

    // Atomic bitwise OR.
    pub fn atomic_or(dst: &mut int, src: int) -> int;
    pub fn atomic_or_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_or_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_or_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_or_relaxed(dst: &mut int, src: int) -> int;

    // Atomic bitwise XOR.
    pub fn atomic_xor(dst: &mut int, src: int) -> int;
    pub fn atomic_xor_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_xor_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_xor_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_xor_relaxed(dst: &mut int, src: int) -> int;

    // Atomic signed maximum.
    pub fn atomic_max(dst: &mut int, src: int) -> int;
    pub fn atomic_max_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_max_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_max_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_max_relaxed(dst: &mut int, src: int) -> int;

    // Atomic signed minimum.
    pub fn atomic_min(dst: &mut int, src: int) -> int;
    pub fn atomic_min_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_min_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_min_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_min_relaxed(dst: &mut int, src: int) -> int;

    // Atomic unsigned minimum — NOTE(review): from the `u` prefix these
    // presumably compare the bits as unsigned despite the `int`
    // signature (LLVM `atomicrmw umin`); confirm in trans.
    pub fn atomic_umin(dst: &mut int, src: int) -> int;
    pub fn atomic_umin_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_umin_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_umin_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_umin_relaxed(dst: &mut int, src: int) -> int;

    // Atomic unsigned maximum (see the note on `atomic_umin`).
    pub fn atomic_umax(dst: &mut int, src: int) -> int;
    pub fn atomic_umax_acq(dst: &mut int, src: int) -> int;
    pub fn atomic_umax_rel(dst: &mut int, src: int) -> int;
    pub fn atomic_umax_acqrel(dst: &mut int, src: int) -> int;
    pub fn atomic_umax_relaxed(dst: &mut int, src: int) -> int;

    /// The size of a type in bytes.
    ///
    /// This is the exact number of bytes in memory taken up by a
    /// value of the given type. In other words, a memset of this size
    /// would *exactly* overwrite a value. When laid out in vectors
    /// and structures there may be additional padding between
    /// elements.
    pub fn size_of<T>() -> uint;

    /// Move a value to a memory location containing a value.
    ///
    /// Drop glue is run on the destination, which must contain a
    /// valid Rust value.
    pub fn move_val<T>(dst: &mut T, src: T);

    /// Move a value to an uninitialized memory location.
    ///
    /// Drop glue is not run on the destination.
    pub fn move_val_init<T>(dst: &mut T, src: T);

    /// The minimum (ABI-required) alignment of a type, in bytes.
    pub fn min_align_of<T>() -> uint;
    /// The preferred alignment of a type, in bytes.
    pub fn pref_align_of<T>() -> uint;

    /// Get a static pointer to a type descriptor.
    #[cfg(not(stage0))]
    pub fn get_tydesc<T>() -> *TyDesc;
    // Stage0 snapshot variant: returns an untyped pointer because the
    // snapshot compiler predates the current `TyDesc` layout.
    #[cfg(stage0)]
    pub fn get_tydesc<T>() -> *();

    /// Create a value initialized to zero.
    ///
    /// `init` is unsafe because it returns a zeroed-out datum,
    /// which is unsafe unless T is POD. We don't have a POD
    /// kind yet. (See #4074).
    pub unsafe fn init<T>() -> T;

    /// Create an uninitialized value.
    pub unsafe fn uninit<T>() -> T;

    /// Move a value out of scope without running drop glue.
    ///
    /// `forget` is unsafe because the caller is responsible for
    /// ensuring the argument is deallocated already.
    pub unsafe fn forget<T>(_: T) -> ();
    /// Reinterpret the bits of a value of one type as another type.
    /// NOTE(review): presumably `T` and `U` must have the same size —
    /// confirm whether this intrinsic checks that at compile time.
    pub fn transmute<T,U>(e: T) -> U;

    /// Returns `true` if a type requires drop glue.
    pub fn needs_drop<T>() -> bool;

    /// Returns `true` if a type is managed (will be allocated on the local heap)
    #[cfg(not(stage0))]
    pub fn contains_managed<T>() -> bool;

    /// Walk the type descriptor `td`, invoking the matching `visit_*`
    /// callbacks on `tv` (see `TyVisitor` above).
    #[cfg(not(stage0))]
    pub fn visit_tydesc(td: *TyDesc, tv: @TyVisitor);

    /// Invoke the once-closure `f` with the address of the caller's
    /// stack frame.
    pub fn frame_address(f: &once fn(*u8));

    /// Get the address of the `__morestack` stack growth function.
    pub fn morestack_addr() -> *();

    /// Equivalent to the `llvm.memcpy.p0i8.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    /// (per LLVM's memcpy contract, `src` and `dst` must not overlap).
    pub fn memcpy32<T>(dst: *mut T, src: *T, count: u32);
    /// Equivalent to the `llvm.memcpy.p0i8.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memcpy64<T>(dst: *mut T, src: *T, count: u64);

    /// Equivalent to the `llvm.memmove.p0i8.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    /// (memmove allows `src` and `dst` to overlap).
    pub fn memmove32<T>(dst: *mut T, src: *T, count: u32);
    /// Equivalent to the `llvm.memmove.p0i8.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memmove64<T>(dst: *mut T, src: *T, count: u64);

    /// Equivalent to the `llvm.memset.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memset32<T>(dst: *mut T, val: u8, count: u32);
    /// Equivalent to the `llvm.memset.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memset64<T>(dst: *mut T, val: u8, count: u64);

    /// Square root (`llvm.sqrt.*`).
    pub fn sqrtf32(x: f32) -> f32;
    pub fn sqrtf64(x: f64) -> f64;

    /// Raise `a` to an integer power `x` (`llvm.powi.*`).
    pub fn powif32(a: f32, x: i32) -> f32;
    pub fn powif64(a: f64, x: i32) -> f64;

    // the following kill the stack canary without
    // `fixed_stack_segment`. This possibly only affects the f64
    // variants, but it's hard to be sure since it seems to only
    // occur with fairly specific arguments.
    /// Sine (`llvm.sin.*`).
    #[fixed_stack_segment]
    pub fn sinf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn sinf64(x: f64) -> f64;

    /// Cosine (`llvm.cos.*`).
    #[fixed_stack_segment]
    pub fn cosf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn cosf64(x: f64) -> f64;

    /// Raise `a` to a floating-point power `x` (`llvm.pow.*`).
    #[fixed_stack_segment]
    pub fn powf32(a: f32, x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn powf64(a: f64, x: f64) -> f64;

    /// Base-e exponential (`llvm.exp.*`).
    #[fixed_stack_segment]
    pub fn expf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn expf64(x: f64) -> f64;

    /// Base-2 exponential (`llvm.exp2.*`).
    pub fn exp2f32(x: f32) -> f32;
    pub fn exp2f64(x: f64) -> f64;

    /// Natural logarithm (`llvm.log.*`).
    pub fn logf32(x: f32) -> f32;
    pub fn logf64(x: f64) -> f64;

    /// Base-10 logarithm (`llvm.log10.*`).
    pub fn log10f32(x: f32) -> f32;
    pub fn log10f64(x: f64) -> f64;

    /// Base-2 logarithm (`llvm.log2.*`).
    pub fn log2f32(x: f32) -> f32;
    pub fn log2f64(x: f64) -> f64;

    /// Fused multiply-add: `a * b + c` with a single rounding
    /// (`llvm.fma.*`).
    pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
    pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;

    /// Absolute value (`llvm.fabs.*`).
    pub fn fabsf32(x: f32) -> f32;
    pub fn fabsf64(x: f64) -> f64;

    /// Round toward negative infinity (`llvm.floor.*`).
    pub fn floorf32(x: f32) -> f32;
    pub fn floorf64(x: f64) -> f64;

    /// Round toward positive infinity (`llvm.ceil.*`).
    pub fn ceilf32(x: f32) -> f32;
    pub fn ceilf64(x: f64) -> f64;

    /// Round toward zero (`llvm.trunc.*`).
    pub fn truncf32(x: f32) -> f32;
    pub fn truncf64(x: f64) -> f64;

    /// Population count: number of bits set (`llvm.ctpop.*`).
    pub fn ctpop8(x: i8) -> i8;
    pub fn ctpop16(x: i16) -> i16;
    pub fn ctpop32(x: i32) -> i32;
    pub fn ctpop64(x: i64) -> i64;

    /// Count leading zero bits (`llvm.ctlz.*`).
    pub fn ctlz8(x: i8) -> i8;
    pub fn ctlz16(x: i16) -> i16;
    pub fn ctlz32(x: i32) -> i32;
    pub fn ctlz64(x: i64) -> i64;

    /// Count trailing zero bits (`llvm.cttz.*`).
    pub fn cttz8(x: i8) -> i8;
    pub fn cttz16(x: i16) -> i16;
    pub fn cttz32(x: i32) -> i32;
    pub fn cttz64(x: i64) -> i64;

    /// Reverse the byte order of the value (`llvm.bswap.*`).
    pub fn bswap16(x: i16) -> i16;
    pub fn bswap32(x: i32) -> i32;
    pub fn bswap64(x: i64) -> i64;
}