; RUN: llc < %s -march=hexagon
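; Check that atomicrmw, cmpxchg, and fence operations on 32- and 64-bit
; values compile for Hexagon.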

@si = common global i32 0, align 4
@sll = common global i64 0, align 8

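; Read-modify-write operations whose results are deliberately unused.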
define void @test_op_ignore() nounwind {
entry:
  %t00 = atomicrmw add i32* @si, i32 1 monotonic
  %t01 = atomicrmw add i64* @sll, i64 1 monotonic
  %t10 = atomicrmw sub i32* @si, i32 1 monotonic
  %t11 = atomicrmw sub i64* @sll, i64 1 monotonic
  %t20 = atomicrmw or i32* @si, i32 1 monotonic
  %t21 = atomicrmw or i64* @sll, i64 1 monotonic
  %t30 = atomicrmw xor i32* @si, i32 1 monotonic
  %t31 = atomicrmw xor i64* @sll, i64 1 monotonic
  %t40 = atomicrmw and i32* @si, i32 1 monotonic
  %t41 = atomicrmw and i64* @sll, i64 1 monotonic
  %t50 = atomicrmw nand i32* @si, i32 1 monotonic
  %t51 = atomicrmw nand i64* @sll, i64 1 monotonic
  br label %return

return:                                           ; preds = %entry
  ret void
}

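; The same read-modify-write operations, with the fetched values stored
; back to the globals.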
define void @test_fetch_and_op() nounwind {
entry:
  %t00 = atomicrmw add i32* @si, i32 11 monotonic
  store i32 %t00, i32* @si, align 4
  %t01 = atomicrmw add i64* @sll, i64 11 monotonic
  store i64 %t01, i64* @sll, align 8
  %t10 = atomicrmw sub i32* @si, i32 11 monotonic
  store i32 %t10, i32* @si, align 4
  %t11 = atomicrmw sub i64* @sll, i64 11 monotonic
  store i64 %t11, i64* @sll, align 8
  %t20 = atomicrmw or i32* @si, i32 11 monotonic
  store i32 %t20, i32* @si, align 4
  %t21 = atomicrmw or i64* @sll, i64 11 monotonic
  store i64 %t21, i64* @sll, align 8
  %t30 = atomicrmw xor i32* @si, i32 11 monotonic
  store i32 %t30, i32* @si, align 4
  %t31 = atomicrmw xor i64* @sll, i64 11 monotonic
  store i64 %t31, i64* @sll, align 8
  %t40 = atomicrmw and i32* @si, i32 11 monotonic
  store i32 %t40, i32* @si, align 4
  %t41 = atomicrmw and i64* @sll, i64 11 monotonic
  store i64 %t41, i64* @sll, align 8
  %t50 = atomicrmw nand i32* @si, i32 11 monotonic
  store i32 %t50, i32* @si, align 4
  %t51 = atomicrmw nand i64* @sll, i64 11 monotonic
  store i64 %t51, i64* @sll, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}

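; Atomic exchanges followed by a full fence and volatile clearing stores.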
define void @test_lock() nounwind {
entry:
  %t00 = atomicrmw xchg i32* @si, i32 1 monotonic
  store i32 %t00, i32* @si, align 4
  %t01 = atomicrmw xchg i64* @sll, i64 1 monotonic
  store i64 %t01, i64* @sll, align 8
  fence seq_cst
  store volatile i32 0, i32* @si, align 4
  store volatile i64 0, i64* @sll, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}

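; Compare-and-exchange on i32 and i64; the loaded values are combined and
; returned.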
define i64 @fred() nounwind {
entry:
  %s0 = cmpxchg i32* undef, i32 undef, i32 undef seq_cst seq_cst
  %s1 = extractvalue { i32, i1 } %s0, 0
  %t0 = cmpxchg i64* undef, i64 undef, i64 undef seq_cst seq_cst
  %t1 = extractvalue { i64, i1 } %t0, 0
  %u0 = zext i32 %s1 to i64
  %u1 = add i64 %u0, %t1
  ret i64 %u1
}