; RUN: opt -tbaa -basicaa -licm -S < %s | FileCheck %s
; RUN: opt -aa-pipeline=type-based-aa,basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s

; If we can prove a local allocation is thread-local, we can insert stores
; during promotion which wouldn't be legal otherwise.
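;
; Rough sketch of the rewrite being exercised (illustrative only, not part
; of the checked output): after promotion the value lives in a register
; inside the loop, and LICM materializes a store on each exit instead:
;
;   for.header:                       ; before: load/store on every iteration
;     %old = load i32, i32* %addr
;     ...
;     store i32 %new, i32* %addr
;
;   early-exit:                       ; after: the promoted value is stored
;     store i32 %new1.lcssa, i32* %addr
;
; The early-exit store can run on iterations where the original loop never
; stored, so it is only sound when no other thread can observe %addr.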

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-linux-generic"

@p = external global i8*

declare i8* @malloc(i64)

; Exercise the TLS case
; CHECK-LABEL: @test
define i32* @test(i32 %n) {
entry:
  ;; ignore the required null check for simplicity
  %mem = call dereferenceable(16) noalias i8* @malloc(i64 16)
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, i32* %addr, align 4
  ; deliberately impossible-to-analyze branch
  %guard = load atomic i8*, i8** @p monotonic, align 8
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
; CHECK-LABEL: early-exit:
; CHECK: store i32 %new1.lcssa, i32* %addr, align 1
  ret i32* null

for.body:
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK: store i32 %new.lcssa, i32* %addr, align 1
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}

; Stack allocations can also be thread-local
; CHECK-LABEL: @test2
define i32* @test2(i32 %n) {
entry:
  %mem = alloca i8, i32 16
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, i32* %addr, align 4
  ; deliberately impossible-to-analyze branch
  %guard = load atomic i8*, i8** @p monotonic, align 8
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
; CHECK-LABEL: early-exit:
; CHECK: store i32 %new1.lcssa, i32* %addr, align 1
  ret i32* null

for.body:
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK: store i32 %new.lcssa, i32* %addr, align 1
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}

declare i8* @not_malloc(i64)

; Negative test - not TLS
; CHECK-LABEL: @test_neg
define i32* @test_neg(i32 %n) {
entry:
  ;; ignore the required null check for simplicity
  %mem = call dereferenceable(16) noalias i8* @not_malloc(i64 16)
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, i32* %addr, align 4
  ; deliberately impossible-to-analyze branch
  %guard = load volatile i8*, i8** @p
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
; CHECK-LABEL: early-exit:
; CHECK-NOT: store
  ret i32* null

for.body:
; CHECK-LABEL: for.body:
; CHECK: store i32 %new, i32* %addr, align 4
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK-NOT: store
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}

; Negative test - can't speculate the load since the branch
; may control alignment
; CHECK-LABEL: @test_neg2
define i32* @test_neg2(i32 %n) {
entry:
  ;; ignore the required null check for simplicity
  %mem = call dereferenceable(16) noalias i8* @malloc(i64 16)
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  ; deliberately impossible-to-analyze branch
  %guard = load volatile i8*, i8** @p
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
; CHECK-LABEL: early-exit:
; CHECK-NOT: store
  ret i32* null

for.body:
; CHECK-LABEL: for.body:
; CHECK: store i32 %new, i32* %addr, align 4
  %old = load i32, i32* %addr, align 4
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK-NOT: store
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}