// run-pass
#![feature(llvm_asm)]
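
// Round `n` up to the next power of two with the classic bit-smearing
// trick: decrement, OR in right-shifted copies of the value until every
// bit below the leading one is set, then increment.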
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
unsafe fn next_power_of_2(n: u32) -> u32 {
    let mut tmp = n;
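    // "+rm": `tmp` is both read and written, in a register or in memory;
    // "cc" declares the condition codes clobbered.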
    llvm_asm!("dec $0" : "+rm"(tmp) :: "cc");
    let mut shift = 1_u32;
    while shift <= 16 {
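        // $0 is `tmp` ("+&rm": read-write, early-clobber), $1 is `shift`
        // pinned to %ecx, and $2 is a register holding the input value of
        // `tmp`. Each pass ORs `tmp >> shift` into `tmp` and doubles `shift`.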
        llvm_asm!(
            "shr %cl, $2
            or $2, $0
            shl $$1, $1"
            : "+&rm"(tmp), "+{ecx}"(shift) : "r"(tmp) : "cc"
        );
    }
    llvm_asm!("inc $0" : "+rm"(tmp) :: "cc");
    tmp
}

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn main() {
    unsafe {
        assert_eq!(64, next_power_of_2(37));
        assert_eq!(2147483648, next_power_of_2(2147483647));
    }
    let mut y: isize = 5;
    let x: isize;
    unsafe {
        // Treat the output as initialization.
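        // $0 = x ("=r", write-only), $1 = y ("+r", read-write),
        // $2 = 3 ("i", immediate), $3 = 7 ("ir", immediate or register).
        // AT&T operand order: y = (5 << 3) + 7 = 47, then x receives y.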
        llvm_asm!(
            "shl $2, $1
            add $3, $1
            mov $1, $0"
            : "=r"(x), "+r"(y) : "i"(3_usize), "ir"(7_usize) : "cc"
        );
    }
    assert_eq!(x, 47);
    assert_eq!(y, 47);
    let mut x = x + 1;
    assert_eq!(x, 48);
    unsafe {
        // Assignment to a mutable binding.
        // The early-clobber marker "&" forbids the output and the input
        // from sharing a single register.
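        // Computes x += x >> 2, i.e. 48 + (48 >> 2) = 60.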
        llvm_asm!("shr $$2, $1; add $1, $0" : "+&r"(x) : "r"(x) : "cc");
    }
    assert_eq!(x, 60);
}
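
// Stub entry point so the test still builds (and trivially passes) on
// non-x86 targets.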
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
pub fn main() {}