; RUN: opt -S -codegenprepare < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"

; Can we sink single addressing mode computation to use?
define void @test1(i1 %cond, i64* %base) {
; CHECK-LABEL: @test1
; CHECK: add i64 {{.+}}, 40
entry:
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
  %v = load i32, i32* %casted, align 4
  br label %fallthrough

fallthrough:
  ret void
}

declare void @foo(i32)

; Make sure sinking two copies of addressing mode into different blocks works
define void @test2(i1 %cond, i64* %base) {
; CHECK-LABEL: @test2
entry:
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
; CHECK-LABEL: if.then:
; CHECK: add i64 {{.+}}, 40
  %v1 = load i32, i32* %casted, align 4
  call void @foo(i32 %v1)
  %cmp = icmp eq i32 %v1, 0
  br i1 %cmp, label %next, label %fallthrough

next:
; CHECK-LABEL: next:
; CHECK: add i64 {{.+}}, 40
  %v2 = load i32, i32* %casted, align 4
  call void @foo(i32 %v2)
  br label %fallthrough

fallthrough:
  ret void
}

; If we have two loads in the same block, only need one copy of addressing mode
; - instruction selection will duplicate if needed
define void @test3(i1 %cond, i64* %base) {
; CHECK-LABEL: @test3
entry:
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
; CHECK-LABEL: if.then:
; CHECK: add i64 {{.+}}, 40
  %v1 = load i32, i32* %casted, align 4
  call void @foo(i32 %v1)
; CHECK-NOT: add i64 {{.+}}, 40
  %v2 = load i32, i32* %casted, align 4
  call void @foo(i32 %v2)
  br label %fallthrough

fallthrough:
  ret void
}

; Can we still sink addressing mode if there's a cold use of the
; address itself?
define void @test4(i1 %cond, i64* %base) {
; CHECK-LABEL: @test4
entry:
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
; CHECK-LABEL: if.then:
; CHECK: add i64 {{.+}}, 40
  %v1 = load i32, i32* %casted, align 4
  call void @foo(i32 %v1)
  %cmp = icmp eq i32 %v1, 0
  br i1 %cmp, label %rare.1, label %fallthrough

fallthrough:
  ret void

rare.1:
; CHECK-LABEL: rare.1:
; CHECK: add i64 {{.+}}, 40
  call void @slowpath(i32 %v1, i32* %casted) cold
  br label %fallthrough
}

; Negative test - don't want to duplicate addressing into hot path
define void @test5(i1 %cond, i64* %base) {
; CHECK-LABEL: @test5
entry:
; CHECK: %addr = getelementptr
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
; CHECK-LABEL: if.then:
; CHECK-NOT: add i64 {{.+}}, 40
  %v1 = load i32, i32* %casted, align 4
  call void @foo(i32 %v1)
  %cmp = icmp eq i32 %v1, 0
  br i1 %cmp, label %rare.1, label %fallthrough

fallthrough:
  ret void

rare.1:
  call void @slowpath(i32 %v1, i32* %casted) ;; NOT COLD
  br label %fallthrough
}

; Negative test - opt for size
define void @test6(i1 %cond, i64* %base) minsize {
; CHECK-LABEL: @test6
entry:
; CHECK: %addr = getelementptr
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
; CHECK-LABEL: if.then:
; CHECK-NOT: add i64 {{.+}}, 40
  %v1 = load i32, i32* %casted, align 4
  call void @foo(i32 %v1)
  %cmp = icmp eq i32 %v1, 0
  br i1 %cmp, label %rare.1, label %fallthrough

fallthrough:
  ret void

rare.1:
  call void @slowpath(i32 %v1, i32* %casted) cold
  br label %fallthrough
}


; Make sure sinking two copies of addressing mode into different blocks works
; when there are cold paths for each.
define void @test7(i1 %cond, i64* %base) {
; CHECK-LABEL: @test7
entry:
  %addr = getelementptr inbounds i64, i64* %base, i64 5
  %casted = bitcast i64* %addr to i32*
  br i1 %cond, label %if.then, label %fallthrough

if.then:
; CHECK-LABEL: if.then:
; CHECK: add i64 {{.+}}, 40
  %v1 = load i32, i32* %casted, align 4
  call void @foo(i32 %v1)
  %cmp = icmp eq i32 %v1, 0
  br i1 %cmp, label %rare.1, label %next

next:
; CHECK-LABEL: next:
; CHECK: add i64 {{.+}}, 40
  %v2 = load i32, i32* %casted, align 4
  call void @foo(i32 %v2)
  %cmp2 = icmp eq i32 %v2, 0
  br i1 %cmp2, label %rare.1, label %fallthrough

fallthrough:
  ret void

rare.1:
; CHECK-LABEL: rare.1:
; CHECK: add i64 {{.+}}, 40
  call void @slowpath(i32 %v1, i32* %casted) cold
  br label %next

rare.2:
; CHECK-LABEL: rare.2:
; CHECK: add i64 {{.+}}, 40
; NOTE(review): rare.2 has no predecessors — both conditional branches above
; target rare.1. Given the function comment ("cold paths for each"), the branch
; in %next was presumably meant to be
;   br i1 %cmp2, label %rare.2, label %fallthrough
; Confirm intent before changing, since it alters the CFG this test exercises.
  call void @slowpath(i32 %v2, i32* %casted) cold
  br label %fallthrough
}


declare void @slowpath(i32, i32*)