; RUN: opt -basic-aa -print-memoryssa -verify-memoryssa -enable-new-pm=0 -analyze < %s 2>&1 | FileCheck %s
; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
;
; Ensures that atomic loads count as MemoryDefs.
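;
; An acquire (or stronger) atomic load restricts how surrounding memory
; operations may be reordered across it, so MemorySSA conservatively treats it
; as a clobber and gives it a MemoryDef even though it writes no memory.
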
; CHECK-LABEL: define i32 @foo
define i32 @foo(i32* %a, i32* %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 4
  store i32 4, i32* %a, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, i32* %b acquire, align 4
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a, align 4
  %3 = add i32 %1, %2
  ret i32 %3
}

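; @bar walks the atomic orderings from weakest to strongest: an unordered
; load behaves like a plain load (a MemoryUse), while monotonic and stronger
; loads each get their own MemoryDef.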
; CHECK-LABEL: define void @bar
define void @bar(i32* %a) {
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
  load atomic i32, i32* %a unordered, align 4
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
  load atomic i32, i32* %a monotonic, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: load atomic i32, i32* %a acquire, align 4
  load atomic i32, i32* %a acquire, align 4
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic i32, i32* %a seq_cst, align 4
  load atomic i32, i32* %a seq_cst, align 4
  ret void
}

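; In @baz, the unordered load is a MemoryUse clobbered by the earlier acquire
; load's MemoryDef, while the monotonic load gets a fresh MemoryDef.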
; CHECK-LABEL: define void @baz
define void @baz(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, i32* %a acquire, align 4
; CHECK: MemoryUse(1)
; CHECK-NEXT: %2 = load atomic i32, i32* %a unordered, align 4
  %2 = load atomic i32, i32* %a unordered, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %3 = load atomic i32, i32* %a monotonic, align 4
  %3 = load atomic i32, i32* %a monotonic, align 4
  ret void
}

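; Fences read and write no memory themselves, but they order the accesses
; around them, so every fence is modeled as a MemoryDef and the load that
; follows each one is a use of it.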
; CHECK-LABEL: define void @fences
define void @fences(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: fence acquire
  fence acquire
; CHECK: MemoryUse(1)
; CHECK-NEXT: %1 = load i32, i32* %a
  %1 = load i32, i32* %a

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: fence release
  fence release
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32, i32* %a
  %2 = load i32, i32* %a

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: fence acq_rel
  fence acq_rel
; CHECK: MemoryUse(3)
; CHECK-NEXT: %3 = load i32, i32* %a
  %3 = load i32, i32* %a

; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: fence seq_cst
  fence seq_cst
; CHECK: MemoryUse(4)
; CHECK-NEXT: %4 = load i32, i32* %a
  %4 = load i32, i32* %a
  ret void
}

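; A seq_cst load clobbers weaker atomic loads of the same location: the
; trailing monotonic load below is defined by the seq_cst load's MemoryDef
; (2), not by the first monotonic load.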
; CHECK-LABEL: define void @seq_cst_clobber
define void @seq_cst_clobber(i32* noalias %a, i32* noalias %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = load atomic i32, i32* %a monotonic, align 4
  load atomic i32, i32* %a monotonic, align 4

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %2 = load atomic i32, i32* %a seq_cst, align 4
  load atomic i32, i32* %a seq_cst, align 4

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
  load atomic i32, i32* %a monotonic, align 4

  ret void
}

; Ensure that AA hands us MRI_Mod on unreorderable atomic ops.
;
; This test is a bit implementation-specific. In particular, it depends on us
; passing cmpxchg-load queries through to AA, rather than trying to reason
; about them on our own.
;
; If AA gets more aggressive, we can find another way to test this.
;
; CHECK-LABEL: define void @check_aa_is_sane
define void @check_aa_is_sane(i32* noalias %a, i32* noalias %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: cmpxchg i32* %a, i32 0, i32 1 acquire acquire
  cmpxchg i32* %a, i32 0, i32 1 acquire acquire
; CHECK: MemoryUse(1)
; CHECK-NEXT: load i32, i32* %b, align 4
  load i32, i32* %b, align 4

  ret void
}
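
; A minimal sketch of the same property for read-modify-write atomics; this
; function is our assumption, not part of the upstream test. atomicrmw both
; reads and writes memory, so MemorySSA models it as a MemoryDef as well.
; CHECK-LABEL: define void @rmw_sketch
define void @rmw_sketch(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %old = atomicrmw add i32* %a
  %old = atomicrmw add i32* %a, i32 1 seq_cst
  ret void
}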