1; RUN: opt < %s -sroa -S | FileCheck %s
2; RUN: opt < %s -passes=sroa -S | FileCheck %s
3
4target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
5
6declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
7declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
8
9define i32 @test0() {
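; Scalar i32 and float allocas wrapped in lifetime.start/end markers; both
; should still be promoted away despite the lifetime intrinsics.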
10; CHECK-LABEL: @test0(
11; CHECK-NOT: alloca
12; CHECK: ret i32
13
14entry:
15  %a1 = alloca i32
16  %a2 = alloca float
17
18  %a1.i8 = bitcast i32* %a1 to i8*
19  call void @llvm.lifetime.start.p0i8(i64 4, i8* %a1.i8)
20
21  store i32 0, i32* %a1
22  %v1 = load i32, i32* %a1
23
24  call void @llvm.lifetime.end.p0i8(i64 4, i8* %a1.i8)
25
26  %a2.i8 = bitcast float* %a2 to i8*
27  call void @llvm.lifetime.start.p0i8(i64 4, i8* %a2.i8)
28
29  store float 0.0, float* %a2
30  %v2 = load float , float * %a2
31  %v2.int = bitcast float %v2 to i32
32  %sum1 = add i32 %v1, %v2.int
33
34  call void @llvm.lifetime.end.p0i8(i64 4, i8* %a2.i8)
35
36  ret i32 %sum1
37}
38
39define i32 @test1() {
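; Store through a GEP to the first field of a struct alloca and reload it; the
; whole thing should fold down to returning the stored constant.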
40; CHECK-LABEL: @test1(
41; CHECK-NOT: alloca
42; CHECK: ret i32 0
43
44entry:
45  %X = alloca { i32, float }
46  %Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
47  store i32 0, i32* %Y
48  %Z = load i32, i32* %Y
49  ret i32 %Z
50}
51
52define i64 @test2(i64 %X) {
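; An [8 x i8] alloca accessed as an i64 via a bitcast, with the load in a
; separate block; promotion should forward %X straight to the return.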
53; CHECK-LABEL: @test2(
54; CHECK-NOT: alloca
55; CHECK: ret i64 %X
56
57entry:
58  %A = alloca [8 x i8]
59  %B = bitcast [8 x i8]* %A to i64*
60  store i64 %X, i64* %B
61  br label %L2
62
63L2:
64  %Z = load i64, i64* %B
65  ret i64 %Z
66}
67
68define i64 @test2_addrspacecast(i64 %X) {
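; Same as test2, but the i64 access goes through an addrspacecast rather than a
; bitcast.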
69; CHECK-LABEL: @test2_addrspacecast(
70; CHECK-NOT: alloca
71; CHECK: ret i64 %X
72
73entry:
74  %A = alloca [8 x i8]
75  %B = addrspacecast [8 x i8]* %A to i64 addrspace(1)*
76  store i64 %X, i64 addrspace(1)* %B
77  br label %L2
78
79L2:
80  %Z = load i64, i64 addrspace(1)* %B
81  ret i64 %Z
82}
83
84define i64 @test2_addrspacecast_gep(i64 %X, i16 %idx) {
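; Like test2_addrspacecast, but the store and load reach the same offset (byte
; 32) through different GEPs in different address spaces; %X should still be
; forwarded to the return.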
85; CHECK-LABEL: @test2_addrspacecast_gep(
86; CHECK-NOT: alloca
87; CHECK: ret i64 %X
88
89entry:
90  %A = alloca [256 x i8]
91  %B = addrspacecast [256 x i8]* %A to i64 addrspace(1)*
92  %gepA = getelementptr [256 x i8], [256 x i8]* %A, i16 0, i16 32
93  %gepB = getelementptr i64, i64 addrspace(1)* %B, i16 4
94  store i64 %X, i64 addrspace(1)* %gepB, align 1
95  br label %L2
96
97L2:
98  %gepA.bc = bitcast i8* %gepA to i64*
99  %Z = load i64, i64* %gepA.bc, align 1
100  ret i64 %Z
101}
102
103; Avoid crashing when loading/storing at different offsets.
104define i64 @test2_addrspacecast_gep_offset(i64 %X) {
105; CHECK-LABEL: @test2_addrspacecast_gep_offset(
106; CHECK: %A.sroa.0 = alloca [10 x i8]
107; CHECK: [[GEP0:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %A.sroa.0, i16 0, i16 2
108; CHECK-NEXT: [[GEP1:%.*]] = addrspacecast i8* [[GEP0]] to i64 addrspace(1)*
109; CHECK-NEXT: store i64 %X, i64 addrspace(1)* [[GEP1]], align 1
110; CHECK: br
111
112; CHECK: [[BITCAST:%.*]] = bitcast [10 x i8]* %A.sroa.0 to i64*
113; CHECK: %A.sroa.0.0.A.sroa.0.30.Z = load i64, i64* [[BITCAST]], align 1
114; CHECK-NEXT: ret
115entry:
116  %A = alloca [256 x i8]
117  %B = addrspacecast [256 x i8]* %A to i64 addrspace(1)*
118  %gepA = getelementptr [256 x i8], [256 x i8]* %A, i16 0, i16 30
119  %gepB = getelementptr i64, i64 addrspace(1)* %B, i16 4
120  store i64 %X, i64 addrspace(1)* %gepB, align 1
121  br label %L2
122
123L2:
124  %gepA.bc = bitcast i8* %gepA to i64*
125  %Z = load i64, i64* %gepA.bc, align 1
126  ret i64 %Z
127}
128
129define void @test3(i8* %dst, i8* align 8 %src) {
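; A large [300 x i8] alloca used via memcpys, memsets, and overlapping scalar
; accesses. SROA should slice it into the several smaller allocas matched below
; and rewrite each transfer to operate piecewise on those slices.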
130; CHECK-LABEL: @test3(
131
132entry:
133  %a = alloca [300 x i8]
134; CHECK-NOT:  alloca
135; CHECK:      %[[test3_a1:.*]] = alloca [42 x i8]
136; CHECK-NEXT: %[[test3_a2:.*]] = alloca [99 x i8]
137; CHECK-NEXT: %[[test3_a3:.*]] = alloca [16 x i8]
138; CHECK-NEXT: %[[test3_a4:.*]] = alloca [42 x i8]
139; CHECK-NEXT: %[[test3_a5:.*]] = alloca [7 x i8]
140; CHECK-NEXT: %[[test3_a6:.*]] = alloca [7 x i8]
141; CHECK-NEXT: %[[test3_a7:.*]] = alloca [85 x i8]
142
143  %b = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 0
144  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* align 8 %src, i32 300, i1 false), !tbaa !0
145; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
146; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 8 %src, i32 42, {{.*}}), !tbaa [[TAG_0:!.*]]
147; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
148; CHECK-NEXT: %[[test3_r1:.*]] = load i8, i8* %[[gep]], {{.*}}, !tbaa [[TAG_0]]
149; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
150; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
151; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 99, {{.*}}), !tbaa [[TAG_0:!.*]]
152; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 142
153; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
154; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 2 %[[gep_src]], i32 16, {{.*}}), !tbaa [[TAG_0:!.*]]
155; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 158
156; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
157; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 2 %[[gep_src]], i32 42, {{.*}}), !tbaa [[TAG_0:!.*]]
158; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 200
159; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
160; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 8 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_0:!.*]]
161; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 207
162; CHECK-NEXT: %[[test3_r2:.*]] = load i8, i8* %[[gep]], {{.*}}, !tbaa [[TAG_0]]
163; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 208
164; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
165; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 8 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_0:!.*]]
166; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 215
167; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
168; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 85, {{.*}}), !tbaa [[TAG_0:!.*]]
169
170  ; Clobber a single element of the array; this should be promotable and be deleted.
171  %c = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 42
172  store i8 0, i8* %c
173
174  ; Make a sequence of overlapping stores to the array. These overlap both in
175  ; forward strides and in shrinking accesses.
176  %overlap.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 142
177  %overlap.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 143
178  %overlap.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 144
179  %overlap.4.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 145
180  %overlap.5.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 146
181  %overlap.6.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 147
182  %overlap.7.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 148
183  %overlap.8.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 149
184  %overlap.9.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 150
185  %overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
186  %overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
187  %overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
188  %overlap.2.i64 = bitcast i8* %overlap.2.i8 to i64*
189  %overlap.3.i64 = bitcast i8* %overlap.3.i8 to i64*
190  %overlap.4.i64 = bitcast i8* %overlap.4.i8 to i64*
191  %overlap.5.i64 = bitcast i8* %overlap.5.i8 to i64*
192  %overlap.6.i64 = bitcast i8* %overlap.6.i8 to i64*
193  %overlap.7.i64 = bitcast i8* %overlap.7.i8 to i64*
194  %overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
195  %overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
196  store i8 1, i8* %overlap.1.i8, !tbaa !3
197; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
198; CHECK-NEXT: store i8 1, i8* %[[gep]], align 1, !tbaa [[TAG_3:!.*]]
199  store i16 1, i16* %overlap.1.i16, !tbaa !5
200; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i16*
201; CHECK-NEXT: store i16 1, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_5:!.*]]
202  store i32 1, i32* %overlap.1.i32, !tbaa !7
203; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i32*
204; CHECK-NEXT: store i32 1, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_7:!.*]]
205  store i64 1, i64* %overlap.1.i64, !tbaa !9
206; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i64*
207; CHECK-NEXT: store i64 1, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_9:!.*]]
208  store i64 2, i64* %overlap.2.i64, !tbaa !11
209; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 1
210; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
211; CHECK-NEXT: store i64 2, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_11:!.*]]
212  store i64 3, i64* %overlap.3.i64, !tbaa !13
213; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 2
214; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
215; CHECK-NEXT: store i64 3, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_13:!.*]]
216  store i64 4, i64* %overlap.4.i64, !tbaa !15
217; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 3
218; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
219; CHECK-NEXT: store i64 4, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_15:!.*]]
220  store i64 5, i64* %overlap.5.i64, !tbaa !17
221; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 4
222; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
223; CHECK-NEXT: store i64 5, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_17:!.*]]
224  store i64 6, i64* %overlap.6.i64, !tbaa !19
225; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 5
226; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
227; CHECK-NEXT: store i64 6, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_19:!.*]]
228  store i64 7, i64* %overlap.7.i64, !tbaa !21
229; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 6
230; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
231; CHECK-NEXT: store i64 7, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_21:!.*]]
232  store i64 8, i64* %overlap.8.i64, !tbaa !23
233; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 7
234; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
235; CHECK-NEXT: store i64 8, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_23:!.*]]
236  store i64 9, i64* %overlap.9.i64, !tbaa !25
237; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 8
238; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
239; CHECK-NEXT: store i64 9, i64* %[[bitcast]], {{.*}}, !tbaa [[TAG_25:!.*]]
240
241  ; Make two sequences of overlapping stores with more gaps and irregularities.
242  %overlap2.1.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 200
243  %overlap2.1.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 201
244  %overlap2.1.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 202
245  %overlap2.1.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 203
246
247  %overlap2.2.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 208
248  %overlap2.2.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 209
249  %overlap2.2.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 210
250  %overlap2.2.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 211
251
252  %overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
253  %overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
254  %overlap2.1.1.i32 = bitcast i8* %overlap2.1.1.i8 to i32*
255  %overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
256  %overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
257  store i8 1,  i8*  %overlap2.1.0.i8, !tbaa !27
258; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
259; CHECK-NEXT: store i8 1, i8* %[[gep]], align 1, !tbaa [[TAG_27:!.*]]
260  store i16 1, i16* %overlap2.1.0.i16, !tbaa !29
261; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i16*
262; CHECK-NEXT: store i16 1, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_29:!.*]]
263  store i32 1, i32* %overlap2.1.0.i32, !tbaa !31
264; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i32*
265; CHECK-NEXT: store i32 1, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_31:!.*]]
266  store i32 2, i32* %overlap2.1.1.i32, !tbaa !33
267; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 1
268; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
269; CHECK-NEXT: store i32 2, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_33:!.*]]
270  store i32 3, i32* %overlap2.1.2.i32, !tbaa !35
271; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
272; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
273; CHECK-NEXT: store i32 3, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_35:!.*]]
274  store i32 4, i32* %overlap2.1.3.i32, !tbaa !37
275; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 3
276; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
277; CHECK-NEXT: store i32 4, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_37:!.*]]
278
279  %overlap2.2.0.i32 = bitcast i8* %overlap2.2.0.i8 to i32*
280  %overlap2.2.1.i16 = bitcast i8* %overlap2.2.1.i8 to i16*
281  %overlap2.2.1.i32 = bitcast i8* %overlap2.2.1.i8 to i32*
282  %overlap2.2.2.i32 = bitcast i8* %overlap2.2.2.i8 to i32*
283  %overlap2.2.3.i32 = bitcast i8* %overlap2.2.3.i8 to i32*
284  store i32 1, i32* %overlap2.2.0.i32, !tbaa !39
285; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a6]] to i32*
286; CHECK-NEXT: store i32 1, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_39:!.*]]
287  store i8 1,  i8*  %overlap2.2.1.i8, !tbaa !41
288; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
289; CHECK-NEXT: store i8 1, i8* %[[gep]], align 1, !tbaa [[TAG_41:!.*]]
290  store i16 1, i16* %overlap2.2.1.i16, !tbaa !43
291; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
292; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
293; CHECK-NEXT: store i16 1, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_43:!.*]]
294  store i32 1, i32* %overlap2.2.1.i32, !tbaa !45
295; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
296; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
297; CHECK-NEXT: store i32 1, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_45:!.*]]
298  store i32 3, i32* %overlap2.2.2.i32, !tbaa !47
299; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
300; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
301; CHECK-NEXT: store i32 3, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_47:!.*]]
302  store i32 4, i32* %overlap2.2.3.i32, !tbaa !49
303; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 3
304; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
305; CHECK-NEXT: store i32 4, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_49:!.*]]
306
307  %overlap2.prefix = getelementptr i8, i8* %overlap2.1.1.i8, i64 -4
308  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i1 false), !tbaa !51
309; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 39
310; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %src, i32 3, {{.*}}), !tbaa [[TAG_51:!.*]]
311; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 3
312; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
313; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 5, {{.*}}), !tbaa [[TAG_51]]
314
315  ; Bridge between the overlapping areas.
316  call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i1 false), !tbaa !53
317; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
318; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 %[[gep]], i8 42, i32 5, {{.*}}), !tbaa [[TAG_53:!.*]]
319; ...promoted i8 store...
320; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
321; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 %[[gep]], i8 42, i32 2, {{.*}}), !tbaa [[TAG_53]]
322
323  ; Entirely within the second overlap.
324  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i1 false), !tbaa !55
325; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
326; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep]], i8* align 1 %src, i32 5, {{.*}}), !tbaa [[TAG_55:!.*]]
327
328  ; Trailing past the second overlap.
329  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i1 false), !tbaa !57
330; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
331; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep]], i8* align 1 %src, i32 5, {{.*}}), !tbaa [[TAG_57:!.*]]
332; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 5
333; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
334; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 3, {{.*}}), !tbaa [[TAG_57]]
335
336  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i1 false), !tbaa !59
337; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
338; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dst, i8* align 1 %[[gep]], i32 42, {{.*}}), !tbaa [[TAG_59:!.*]]
339; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
340; CHECK-NEXT: store i8 0, i8* %[[gep]], {{.*}}, !tbaa [[TAG_59]]
341; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
342; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
343; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 99, {{.*}}), !tbaa [[TAG_59]]
344; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 142
345; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
346; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 16, {{.*}}), !tbaa [[TAG_59]]
347; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 158
348; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
349; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 42, {{.*}}), !tbaa [[TAG_59]]
350; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 200
351; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
352; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_59]]
353; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 207
354; CHECK-NEXT: store i8 42, i8* %[[gep]], {{.*}}, !tbaa [[TAG_59]]
355; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 208
356; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
357; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_59]]
358; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 215
359; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
360; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 85, {{.*}}), !tbaa [[TAG_59]]
361
362  ret void
363}
364
365define void @test4(i8* %dst, i8* %src) {
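; Another slicing test in the style of test3, this time also copying between
; different regions of the same alloca with memcpy and memmove.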
366; CHECK-LABEL: @test4(
367
368entry:
369  %a = alloca [100 x i8]
370; CHECK-NOT:  alloca
371; CHECK:      %[[test4_a1:.*]] = alloca [20 x i8]
372; CHECK-NEXT: %[[test4_a2:.*]] = alloca [7 x i8]
373; CHECK-NEXT: %[[test4_a3:.*]] = alloca [10 x i8]
374; CHECK-NEXT: %[[test4_a4:.*]] = alloca [7 x i8]
375; CHECK-NEXT: %[[test4_a5:.*]] = alloca [7 x i8]
376; CHECK-NEXT: %[[test4_a6:.*]] = alloca [40 x i8]
377
378  %b = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 0
379  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i1 false), !tbaa !0
380; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
381; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep]], i8* align 1 %src, i32 20, {{.*}}), !tbaa [[TAG_0]]
382; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 20
383; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
384; CHECK-NEXT: %[[test4_r1:.*]] = load i16, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_0]]
385; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 22
386; CHECK-NEXT: %[[test4_r2:.*]] = load i8, i8* %[[gep]], {{.*}}, !tbaa [[TAG_0]]
387; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 23
388; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
389; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_0]]
390; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 30
391; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
392; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 10, {{.*}}), !tbaa [[TAG_0]]
393; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 40
394; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
395; CHECK-NEXT: %[[test4_r3:.*]] = load i16, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_0]]
396; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
397; CHECK-NEXT: %[[test4_r4:.*]] = load i8, i8* %[[gep]], {{.*}}, !tbaa [[TAG_0]]
398; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
399; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
400; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_0]]
401; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 50
402; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
403; CHECK-NEXT: %[[test4_r5:.*]] = load i16, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_0]]
404; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 52
405; CHECK-NEXT: %[[test4_r6:.*]] = load i8, i8* %[[gep]], {{.*}}, !tbaa [[TAG_0]]
406; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 53
407; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
408; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_0]]
409; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 60
410; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
411; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 40, {{.*}}), !tbaa [[TAG_0]]
412
413  %a.src.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 20
414  %a.dst.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 40
415  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i1 false), !tbaa !3
416; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
417; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
418; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_3]]
419
420  ; Clobber a single element of the array; this should be promotable and be deleted.
421  %c = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 42
422  store i8 0, i8* %c
423
424  %a.src.2 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 50
425  call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i1 false), !tbaa !5
426; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
427; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
428; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_5]]
429
430  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i1 false), !tbaa !7
431; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
432; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dst, i8* align 1 %[[gep]], i32 20, {{.*}}), !tbaa [[TAG_7]]
433; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 20
434; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
435; CHECK-NEXT: store i16 %[[test4_r1]], i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_7]]
436; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 22
437; CHECK-NEXT: store i8 %[[test4_r2]], i8* %[[gep]], {{.*}}, !tbaa [[TAG_7]]
438; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 23
439; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
440; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_7]]
441; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 30
442; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
443; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 10, {{.*}}), !tbaa [[TAG_7]]
444; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 40
445; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
446; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_7]]
447; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
448; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]], {{.*}}, !tbaa [[TAG_7]]
449; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
450; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
451; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_7]]
452; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 50
453; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
454; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_7]]
455; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 52
456; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]], {{.*}}, !tbaa [[TAG_7]]
457; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 53
458; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
459; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 7, {{.*}}), !tbaa [[TAG_7]]
460; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 60
461; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
462; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[gep_dst]], i8* align 1 %[[gep_src]], i32 40, {{.*}}), !tbaa [[TAG_7]]
463
464  ret void
465}
466
467declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
468declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i1) nounwind
469declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
470declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
471
472define i16 @test5() {
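; Store a float into a [4 x i8] alloca and read back the upper two bytes as an
; i16; this should become a bitcast/lshr/trunc of the float value with no
; alloca left.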
473; CHECK-LABEL: @test5(
474; CHECK-NOT: alloca float
475; CHECK:      %[[cast:.*]] = bitcast float 0.0{{.*}} to i32
476; CHECK-NEXT: %[[shr:.*]] = lshr i32 %[[cast]], 16
477; CHECK-NEXT: %[[trunc:.*]] = trunc i32 %[[shr]] to i16
478; CHECK-NEXT: ret i16 %[[trunc]]
479
480entry:
481  %a = alloca [4 x i8]
482  %fptr = bitcast [4 x i8]* %a to float*
483  store float 0.0, float* %fptr
484  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
485  %iptr = bitcast i8* %ptr to i16*
486  %val = load i16, i16* %iptr
487  ret i16 %val
488}
489
490define i16 @test5_multi_addrspace_access() {
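; Same as test5, except the float store goes through an addrspacecast of the
; alloca pointer.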
491; CHECK-LABEL: @test5_multi_addrspace_access(
492; CHECK-NOT: alloca float
493; CHECK:      %[[cast:.*]] = bitcast float 0.0{{.*}} to i32
494; CHECK-NEXT: %[[shr:.*]] = lshr i32 %[[cast]], 16
495; CHECK-NEXT: %[[trunc:.*]] = trunc i32 %[[shr]] to i16
496; CHECK-NEXT: ret i16 %[[trunc]]
497
498entry:
499  %a = alloca [4 x i8]
500  %fptr = bitcast [4 x i8]* %a to float*
501  %fptr.as1 = addrspacecast float* %fptr to float addrspace(1)*
502  store float 0.0, float addrspace(1)* %fptr.as1
503  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
504  %iptr = bitcast i8* %ptr to i16*
505  %val = load i16, i16* %iptr
506  ret i16 %val
507}
508
509define i32 @test6() {
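; A volatile memset cannot be folded away; the alloca survives (rewritten to an
; i32) and the memset becomes a volatile store.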
510; CHECK-LABEL: @test6(
511; CHECK: alloca i32
512; CHECK-NEXT: store volatile i32
513; CHECK-NEXT: load i32, i32*
514; CHECK-NEXT: ret i32
515
516entry:
517  %a = alloca [4 x i8]
518  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
519  call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i1 true)
520  %iptr = bitcast i8* %ptr to i32*
521  %val = load i32, i32* %iptr
522  ret i32 %val
523}
524
525define void @test7(i8* %src, i8* %dst) {
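; Volatile memcpys in and out of the alloca are rewritten as volatile i32 loads
; and stores that keep the original TBAA tags.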
526; CHECK-LABEL: @test7(
527; CHECK: alloca i32
528; CHECK-NEXT: bitcast i8* %src to i32*
529; CHECK-NEXT: load volatile i32, {{.*}}, !tbaa [[TAG_0]]
530; CHECK-NEXT: store volatile i32 {{.*}}, !tbaa [[TAG_0]]
531; CHECK-NEXT: bitcast i8* %dst to i32*
532; CHECK-NEXT: load volatile i32, {{.*}}, !tbaa [[TAG_3]]
533; CHECK-NEXT: store volatile i32 {{.*}}, !tbaa [[TAG_3]]
534; CHECK-NEXT: ret
535
536entry:
537  %a = alloca [4 x i8]
538  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
539  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 true), !tbaa !0
540  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 true), !tbaa !3
541  ret void
542}
543
544
545%S1 = type { i32, i32, [16 x i8] }
546%S2 = type { %S1*, %S2* }
547
548define %S2 @test8(%S2* %s2) {
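; Build a struct return value through a temporary %S2 alloca; the alloca should
; be promoted so the loaded pointers feed the insertvalues directly.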
549; CHECK-LABEL: @test8(
550entry:
551  %new = alloca %S2
552; CHECK-NOT: alloca
553
554  %s2.next.ptr = getelementptr %S2, %S2* %s2, i64 0, i32 1
555  %s2.next = load %S2*, %S2** %s2.next.ptr, !tbaa !0
556; CHECK:      %[[gep:.*]] = getelementptr %S2, %S2* %s2, i64 0, i32 1
557; CHECK-NEXT: %[[next:.*]] = load %S2*, %S2** %[[gep]], align 8, !tbaa [[TAG_0]]
558
559  %s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
560  %s2.next.s1 = load %S1*, %S1** %s2.next.s1.ptr, !tbaa !3
561  %new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
562  store %S1* %s2.next.s1, %S1** %new.s1.ptr, !tbaa !5
563  %s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
564  %s2.next.next = load %S2*, %S2** %s2.next.next.ptr, !tbaa !7
565  %new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
566  store %S2* %s2.next.next, %S2** %new.next.ptr, !tbaa !9
567; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 0
568; CHECK-NEXT: %[[next_s1:.*]] = load %S1*, %S1** %[[gep]], align 8, !tbaa [[TAG_3]]
569; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 1
570; CHECK-NEXT: %[[next_next:.*]] = load %S2*, %S2** %[[gep]], align 8, !tbaa [[TAG_7]]
571
572  %new.s1 = load %S1*, %S1** %new.s1.ptr
573  %result1 = insertvalue %S2 undef, %S1* %new.s1, 0
574; CHECK-NEXT: %[[result1:.*]] = insertvalue %S2 undef, %S1* %[[next_s1]], 0
575  %new.next = load %S2*, %S2** %new.next.ptr
576  %result2 = insertvalue %S2 %result1, %S2* %new.next, 1
577; CHECK-NEXT: %[[result2:.*]] = insertvalue %S2 %[[result1]], %S2* %[[next_next]], 1
578  ret %S2 %result2
579; CHECK-NEXT: ret %S2 %[[result2]]
580}
581
582define i64 @test9() {
583; Ensure we can handle loads off the end of an alloca even when wrapped in
584; weird bit casts and types. This is valid IR due to the alignment and masking
585; off the bits past the end of the alloca.
586;
587; CHECK-LABEL: @test9(
588; CHECK-NOT: alloca
589; CHECK:      %[[b2:.*]] = zext i8 26 to i64
590; CHECK-NEXT: %[[s2:.*]] = shl i64 %[[b2]], 16
591; CHECK-NEXT: %[[m2:.*]] = and i64 undef, -16711681
592; CHECK-NEXT: %[[i2:.*]] = or i64 %[[m2]], %[[s2]]
593; CHECK-NEXT: %[[b1:.*]] = zext i8 0 to i64
594; CHECK-NEXT: %[[s1:.*]] = shl i64 %[[b1]], 8
595; CHECK-NEXT: %[[m1:.*]] = and i64 %[[i2]], -65281
596; CHECK-NEXT: %[[i1:.*]] = or i64 %[[m1]], %[[s1]]
597; CHECK-NEXT: %[[b0:.*]] = zext i8 0 to i64
598; CHECK-NEXT: %[[m0:.*]] = and i64 %[[i1]], -256
599; CHECK-NEXT: %[[i0:.*]] = or i64 %[[m0]], %[[b0]]
600; CHECK-NEXT: %[[result:.*]] = and i64 %[[i0]], 16777215
601; CHECK-NEXT: ret i64 %[[result]]
602
603entry:
604  %a = alloca { [3 x i8] }, align 8
605  %gep1 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 0
606  store i8 0, i8* %gep1, align 1
607  %gep2 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 1
608  store i8 0, i8* %gep2, align 1
609  %gep3 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 2
610  store i8 26, i8* %gep3, align 1
611  %cast = bitcast { [3 x i8] }* %a to { i64 }*
612  %elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
613  %load = load i64, i64* %elt
614  %result = and i64 %load, 16777215
615  ret i64 %result
616}
617
618define %S2* @test10() {
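; A zeroing memset followed by a pointer-typed load of the same bytes; the load
; should fold to null and the alloca disappear.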
619; CHECK-LABEL: @test10(
620; CHECK-NOT: alloca %S2*
621; CHECK: ret %S2* null
622
623entry:
624  %a = alloca [8 x i8]
625  %ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
626  call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i1 false)
627  %s2ptrptr = bitcast i8* %ptr to %S2**
628  %s2ptr = load %S2*, %S2** %s2ptrptr
629  ret %S2* %s2ptr
630}
631
632define i32 @test11() {
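; Accesses at offset 0 on one path and past the end of the i32 alloca on the
; other; neither should prevent the alloca from being removed.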
633; CHECK-LABEL: @test11(
634; CHECK-NOT: alloca
635; CHECK: ret i32 0
636
637entry:
638  %X = alloca i32
639  br i1 undef, label %good, label %bad
640
641good:
642  %Y = getelementptr i32, i32* %X, i64 0
643  store i32 0, i32* %Y
644  %Z = load i32, i32* %Y
645  ret i32 %Z
646
647bad:
648  %Y2 = getelementptr i32, i32* %X, i64 1
649  store i32 0, i32* %Y2
650  %Z2 = load i32, i32* %Y2
651  ret i32 %Z2
652}
653
654define i8 @test12() {
655; We fully promote these to the i24 load or store size, resulting in just masks
656; and other operations that instcombine will fold, but no alloca.
657;
658; CHECK-LABEL: @test12(
659
660entry:
661  %a = alloca [3 x i8]
662  %b = alloca [3 x i8]
663; CHECK-NOT: alloca
664
665  %a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
666  store i8 0, i8* %a0ptr
667  %a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
668  store i8 0, i8* %a1ptr
669  %a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
670  store i8 0, i8* %a2ptr
671  %aiptr = bitcast [3 x i8]* %a to i24*
672  %ai = load i24, i24* %aiptr
673; CHECK-NOT: store
674; CHECK-NOT: load
675; CHECK:      %[[ext2:.*]] = zext i8 0 to i24
676; CHECK-NEXT: %[[shift2:.*]] = shl i24 %[[ext2]], 16
677; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, 65535
678; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[shift2]]
679; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
680; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
681; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
682; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
683; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
684; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], -256
685; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[ext0]]
686
687  %biptr = bitcast [3 x i8]* %b to i24*
688  store i24 %ai, i24* %biptr
689  %b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
690  %b0 = load i8, i8* %b0ptr
691  %b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
692  %b1 = load i8, i8* %b1ptr
693  %b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
694  %b2 = load i8, i8* %b2ptr
695; CHECK-NOT: store
696; CHECK-NOT: load
697; CHECK:      %[[trunc0:.*]] = trunc i24 %[[insert0]] to i8
698; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
699; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
700; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[insert0]], 16
701; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[shift2]] to i8
702
703  %bsum0 = add i8 %b0, %b1
704  %bsum1 = add i8 %bsum0, %b2
705  ret i8 %bsum1
706; CHECK:      %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
707; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
708; CHECK-NEXT: ret i8 %[[sum1]]
709}
710
711define i32 @test13() {
712; Ensure we don't crash, and that we handle undefined loads that straddle the
713; end of the allocation.
714; CHECK-LABEL: @test13(
715; CHECK:      %[[value:.*]] = zext i8 0 to i16
716; CHECK-NEXT: %[[ret:.*]] = zext i16 %[[value]] to i32
717; CHECK-NEXT: ret i32 %[[ret]]
718
719entry:
720  %a = alloca [3 x i8], align 2
721  %b0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
722  store i8 0, i8* %b0ptr
723  %b1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
724  store i8 0, i8* %b1ptr
725  %b2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
726  store i8 0, i8* %b2ptr
727  %iptrcast = bitcast [3 x i8]* %a to i16*
728  %iptrgep = getelementptr i16, i16* %iptrcast, i64 1
729  %i = load i16, i16* %iptrgep
730  %ret = zext i16 %i to i32
731  ret i32 %ret
732}
733
734%test14.struct = type { [3 x i32] }
735
736define void @test14(...) nounwind uwtable {
737; This is a strange case where we split allocas into promotable partitions, but
738; also gain enough data to prove they must be dead allocas due to GEPs that walk
739; across two adjacent allocas. Test that we don't try to promote or otherwise
740; do bad things to these dead allocas; they should just be removed.
741; CHECK-LABEL: @test14(
742; CHECK-NEXT: entry:
743; CHECK-NEXT: ret void
744
745entry:
746  %a = alloca %test14.struct
747  %p = alloca %test14.struct*
748  %0 = bitcast %test14.struct* %a to i8*
749  %1 = getelementptr i8, i8* %0, i64 12
750  %2 = bitcast i8* %1 to %test14.struct*
751  %3 = getelementptr inbounds %test14.struct, %test14.struct* %2, i32 0, i32 0
752  %4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
753  %5 = bitcast [3 x i32]* %3 to i32*
754  %6 = bitcast [3 x i32]* %4 to i32*
755  %7 = load i32, i32* %6, align 4
756  store i32 %7, i32* %5, align 4
757  %8 = getelementptr inbounds i32, i32* %5, i32 1
758  %9 = getelementptr inbounds i32, i32* %6, i32 1
759  %10 = load i32, i32* %9, align 4
760  store i32 %10, i32* %8, align 4
761  %11 = getelementptr inbounds i32, i32* %5, i32 2
762  %12 = getelementptr inbounds i32, i32* %6, i32 2
763  %13 = load i32, i32* %12, align 4
764  store i32 %13, i32* %11, align 4
765  ret void
766}
767
768define i32 @test15(i1 %flag) nounwind uwtable {
769; Ensure that when there are dead instructions using an alloca that are not
770; loads or stores, we still delete them during partitioning and rewriting.
771; Otherwise we'll go on to promote them while they still have unpromotable uses.
772; CHECK-LABEL: @test15(
773; CHECK-NEXT: entry:
774; CHECK-NEXT:   br label %loop
775; CHECK:      loop:
776; CHECK-NEXT:   br label %loop
777
778entry:
779  %l0 = alloca i64
780  %l1 = alloca i64
781  %l2 = alloca i64
782  %l3 = alloca i64
783  br label %loop
784
785loop:
786  %dead3 = phi i8* [ %gep3, %loop ], [ null, %entry ]
787
788  store i64 1879048192, i64* %l0, align 8
789  %bc0 = bitcast i64* %l0 to i8*
790  %gep0 = getelementptr i8, i8* %bc0, i64 3
791  %dead0 = bitcast i8* %gep0 to i64*
792
793  store i64 1879048192, i64* %l1, align 8
794  %bc1 = bitcast i64* %l1 to i8*
795  %gep1 = getelementptr i8, i8* %bc1, i64 3
796  %dead1 = getelementptr i8, i8* %gep1, i64 1
797
798  store i64 1879048192, i64* %l2, align 8
799  %bc2 = bitcast i64* %l2 to i8*
800  %gep2.1 = getelementptr i8, i8* %bc2, i64 1
801  %gep2.2 = getelementptr i8, i8* %bc2, i64 3
802  ; Note that this select should get visited multiple times due to using two
803  ; different GEPs off the same alloca. We should only delete it once.
804  %dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
805
806  store i64 1879048192, i64* %l3, align 8
807  %bc3 = bitcast i64* %l3 to i8*
808  %gep3 = getelementptr i8, i8* %bc3, i64 3
809
810  br label %loop
811}
812
813define void @test16(i8* %src, i8* %dst) {
814; Ensure that we can promote an alloca of [3 x i8] to an i24 SSA value.
815; CHECK-LABEL: @test16(
816; CHECK-NOT: alloca
817; CHECK:      %[[srccast:.*]] = bitcast i8* %src to i24*
818; CHECK-NEXT: load i24, i24* %[[srccast]], {{.*}}, !tbaa [[TAG_0]]
819; CHECK-NEXT: %[[dstcast:.*]] = bitcast i8* %dst to i24*
820; CHECK-NEXT: store i24 0, i24* %[[dstcast]], {{.*}}, !tbaa [[TAG_5]]
821; CHECK-NEXT: ret void
822
823entry:
824  %a = alloca [3 x i8]
825  %ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
826  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 false), !tbaa !0
827  %cast = bitcast i8* %ptr to i24*
828  store i24 0, i24* %cast, !tbaa !3
829  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 false), !tbaa !5
830  ret void
831}
832
833define void @test17(i8* %src, i8* %dst) {
834; Ensure that we can rewrite unpromotable memcpys which extend past the end of
835; the alloca.
836; CHECK-LABEL: @test17(
837; CHECK:      %[[a:.*]] = alloca [3 x i8]
838; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8], [3 x i8]* %[[a]], i32 0, i32 0
839; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[ptr]], i8* %src, {{.*}}), !tbaa [[TAG_0]]
840; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[ptr]], {{.*}}), !tbaa [[TAG_3]]
841; CHECK-NEXT: ret void
842
843entry:
844  %a = alloca [3 x i8]
845  %ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
846  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 true), !tbaa !0
847  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 true), !tbaa !3
848  ret void
849}
850
851define void @test18(i8* %src, i8* %dst, i32 %size) {
852; Preserve transfer intrinsics with a variable size, even if they overlap with
853; fixed-size operations. Further, continue to split and promote allocas preceding
854; the variable-sized intrinsic.
855; CHECK-LABEL: @test18(
856; CHECK:      %[[a:.*]] = alloca [34 x i8]
857; CHECK:      %[[srcgep1:.*]] = getelementptr inbounds i8, i8* %src, i64 4
858; CHECK-NEXT: %[[srccast1:.*]] = bitcast i8* %[[srcgep1]] to i32*
859; CHECK-NEXT: %[[srcload:.*]] = load i32, i32* %[[srccast1]], {{.*}}, !tbaa [[TAG_0]]
860; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
861; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %[[agep1]], i8* %src, i32 %size, {{.*}}), !tbaa [[TAG_3]]
862; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
863; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 %[[agep2]], i8 42, i32 %size, {{.*}}), !tbaa [[TAG_5]]
864; CHECK-NEXT: %[[dstcast1:.*]] = bitcast i8* %dst to i32*
865; CHECK-NEXT: store i32 42, i32* %[[dstcast1]], {{.*}}, !tbaa [[TAG_9]]
866; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8, i8* %dst, i64 4
867; CHECK-NEXT: %[[dstcast2:.*]] = bitcast i8* %[[dstgep1]] to i32*
868; CHECK-NEXT: store i32 %[[srcload]], i32* %[[dstcast2]], {{.*}}, !tbaa [[TAG_9]]
869; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
870; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* align 1 %[[agep3]], i32 %size, {{.*}}), !tbaa [[TAG_11]]
871; CHECK-NEXT: ret void
872
873entry:
874  %a = alloca [42 x i8]
875  %ptr = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 0
876  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i1 false), !tbaa !0
877  %ptr2 = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 8
878  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i1 false), !tbaa !3
879  call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i1 false), !tbaa !5
880  %cast = bitcast i8* %ptr to i32*
881  store i32 42, i32* %cast, !tbaa !7
882  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i1 false), !tbaa !9
883  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr2, i32 %size, i1 false), !tbaa !11
884  ret void
885}
886
887%opaque = type opaque
888
889define i32 @test19(%opaque* %x) {
890; This input will cause us to try to compute a natural GEP when rewriting
891; pointers in such a way that we try to GEP through the opaque type. Previously,
892; a check for an unsized type was missing and this crashed. Ensure it behaves
893; reasonably now.
894; CHECK-LABEL: @test19(
895; CHECK-NOT: alloca
896; CHECK: ret i32 undef
897
898entry:
899  %a = alloca { i64, i8* }
900  %cast1 = bitcast %opaque* %x to i8*
901  %cast2 = bitcast { i64, i8* }* %a to i8*
902  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i1 false)
903  %gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
904  %val = load i64, i64* %gep
905  ret i32 undef
906}
907
908declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i32, i1) nounwind
909
910define i32 @test19_addrspacecast(%opaque* %x) {
911; This input will cause us to try to compute a natural GEP when rewriting
912; pointers in such a way that we try to GEP through the opaque type. Previously,
913; a check for an unsized type was missing and this crashed. Ensure it behaves
914; reasonably now.
915; CHECK-LABEL: @test19_addrspacecast(
916; CHECK-NOT: alloca
917; CHECK: ret i32 undef
918
919entry:
920  %a = alloca { i64, i8* }
921  %cast1 = addrspacecast %opaque* %x to i8 addrspace(1)*
922  %cast2 = bitcast { i64, i8* }* %a to i8*
923  call void @llvm.memcpy.p0i8.p1i8.i32(i8* %cast2, i8 addrspace(1)* %cast1, i32 16, i32 1, i1 false)
924  %gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
925  %val = load i64, i64* %gep
926  ret i32 undef
927}
928
929define i32 @test20() {
930; Ensure we can track negative offsets (before the beginning of the alloca) and
931; negative relative offsets from offsets starting past the end of the alloca.
932; CHECK-LABEL: @test20(
933; CHECK-NOT: alloca
934; CHECK: %[[sum1:.*]] = add i32 1, 2
935; CHECK: %[[sum2:.*]] = add i32 %[[sum1]], 3
936; CHECK: ret i32 %[[sum2]]
937
938entry:
939  %a = alloca [3 x i32]
940  %gep1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 0
941  store i32 1, i32* %gep1
942  %gep2.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 -2
943  %gep2.2 = getelementptr i32, i32* %gep2.1, i32 3
944  store i32 2, i32* %gep2.2
945  %gep3.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 14
946  %gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
947  store i32 3, i32* %gep3.2
948
949  %load1 = load i32, i32* %gep1
950  %load2 = load i32, i32* %gep2.2
951  %load3 = load i32, i32* %gep3.2
952  %sum1 = add i32 %load1, %load2
953  %sum2 = add i32 %sum1, %load3
954  ret i32 %sum2
955}
956
957declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
958
959define i8 @test21() {
960; Test allocations and offsets which border on overflow of the int64_t used
961; internally. This is awkward to test properly, as LLVM doesn't cleanly
962; support such extreme constructs.
963; CHECK-LABEL: @test21(
964; CHECK-NOT: alloca
965; CHECK: or i8 -1, -1
966
967entry:
968  %a = alloca [2305843009213693951 x i8]
969  %gep0 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
970  store i8 255, i8* %gep0
971  %gep1 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
972  %gep2 = getelementptr i8, i8* %gep1, i64 -1
973  call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i1 false)
974  %gep3 = getelementptr i8, i8* %gep1, i64 9223372036854775807
975  %gep4 = getelementptr i8, i8* %gep3, i64 9223372036854775807
976  %gep5 = getelementptr i8, i8* %gep4, i64 -6917529027641081857
977  store i8 255, i8* %gep5
978  %cast1 = bitcast i8* %gep4 to i32*
979  store i32 0, i32* %cast1
980  %load = load i8, i8* %gep0
981  %gep6 = getelementptr i8, i8* %gep0, i32 1
982  %load2 = load i8, i8* %gep6
983  %result = or i8 %load, %load2
984  ret i8 %result
985}
986
987%PR13916.struct = type { i8 }
988
989define void @PR13916.1() {
990; Ensure that we handle overlapping memcpy intrinsics correctly, especially in
991; the case where the source and dest are exactly the same value.
992; CHECK: @PR13916.1
993; CHECK-NOT: alloca
994; CHECK: ret void
995
996entry:
997  %a = alloca i8
998  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 1, i1 false)
999  %tmp2 = load i8, i8* %a
1000  ret void
1001}
1002
1003define void @PR13916.2() {
1004; Check whether we continue to handle them correctly when they start off with
1005; different pointer value chains, but during rewriting we coalesce them into the
1006; same value.
1007; CHECK: @PR13916.2
1008; CHECK-NOT: alloca
1009; CHECK: ret void
1010
1011entry:
1012  %a = alloca %PR13916.struct, align 1
1013  br i1 undef, label %if.then, label %if.end
1014
1015if.then:
1016  %tmp0 = bitcast %PR13916.struct* %a to i8*
1017  %tmp1 = bitcast %PR13916.struct* %a to i8*
1018  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp0, i8* %tmp1, i32 1, i1 false)
1019  br label %if.end
1020
1021if.end:
1022  %gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
1023  %tmp2 = load i8, i8* %gep
1024  ret void
1025}
1026
1027define void @PR13990() {
1028; Ensure we can handle cases where processing one alloca causes the other
1029; alloca to become dead and get deleted. This might crash or fail under
1030; Valgrind if we regress.
1031; CHECK-LABEL: @PR13990(
1032; CHECK-NOT: alloca
1033; CHECK: unreachable
1034; CHECK: unreachable
1035
1036entry:
1037  %tmp1 = alloca i8*
1038  %tmp2 = alloca i8*
1039  br i1 undef, label %bb1, label %bb2
1040
1041bb1:
1042  store i8* undef, i8** %tmp2
1043  br i1 undef, label %bb2, label %bb3
1044
1045bb2:
1046  %tmp50 = select i1 undef, i8** %tmp2, i8** %tmp1
1047  br i1 undef, label %bb3, label %bb4
1048
1049bb3:
1050  unreachable
1051
1052bb4:
1053  unreachable
1054}
1055
1056define double @PR13969(double %x) {
1057; Check that we detect when promotion will un-escape an alloca and iterate to
1058; re-try running SROA over that alloca. Without that, the two allocas that are
1059; stored into a dead alloca don't get rewritten and promoted.
1060; CHECK-LABEL: @PR13969(
1061
1062entry:
1063  %a = alloca double
1064  %b = alloca double*
1065  %c = alloca double
1066; CHECK-NOT: alloca
1067
1068  store double %x, double* %a
1069  store double* %c, double** %b
1070  store double* %a, double** %b
1071  store double %x, double* %c
1072  %ret = load double, double* %a
1073; CHECK-NOT: store
1074; CHECK-NOT: load
1075
1076  ret double %ret
1077; CHECK: ret double %x
1078}
1079
1080%PR14034.struct = type { { {} }, i32, %PR14034.list }
1081%PR14034.list = type { %PR14034.list*, %PR14034.list* }
1082
1083define void @PR14034() {
1084; This test case tries to form GEPs into the empty leading struct members, and
1085; subsequently crashed (under Valgrind) before we fixed the PR. The important
1086; thing is to handle empty structs gracefully.
1087; CHECK-LABEL: @PR14034(
1088
1089entry:
1090  %a = alloca %PR14034.struct
1091  %list = getelementptr %PR14034.struct, %PR14034.struct* %a, i32 0, i32 2
1092  %prev = getelementptr %PR14034.list, %PR14034.list* %list, i32 0, i32 1
1093  store %PR14034.list* undef, %PR14034.list** %prev
1094  %cast0 = bitcast %PR14034.struct* undef to i8*
1095  %cast1 = bitcast %PR14034.struct* %a to i8*
1096  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast0, i8* %cast1, i32 12, i1 false)
1097  ret void
1098}
1099
1100define i32 @test22(i32 %x) {
1101; Test that SROA and promotion are not confused by a grab-bag mixture of pointer
1102; types involving wrapper aggregates and zero-length aggregate members.
1103; CHECK-LABEL: @test22(
1104
1105entry:
1106  %a1 = alloca { { [1 x { i32 }] } }
1107  %a2 = alloca { {}, { float }, [0 x i8] }
1108  %a3 = alloca { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }
1109; CHECK-NOT: alloca
1110
1111  %wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
1112  %gep1 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
1113  store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
1114
1115  %gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
1116  %ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
1117  %load1 = load { [1 x { float }] }, { [1 x { float }] }* %ptrcast1
1118  %unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
1119
1120  %wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
1121  store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
1122
1123  %gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
1124  %ptrcast2 = bitcast float* %gep3 to <4 x i8>*
1125  %load3 = load <4 x i8>, <4 x i8>* %ptrcast2
1126  %valcast1 = bitcast <4 x i8> %load3 to i32
1127
1128  %wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
1129  %wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
1130  %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
1131  %ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
1132  store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
1133
1134  %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
1135  %ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
1136  %load4 = load { {}, float, {} }, { {}, float, {} }* %ptrcast4
1137  %unwrap2 = extractvalue { {}, float, {} } %load4, 1
1138  %valcast2 = bitcast float %unwrap2 to i32
1139
1140  ret i32 %valcast2
1141; CHECK: ret i32
1142}
1143
1144define void @PR14059.1(double* %d) {
1145; In PR14059 a peculiar construct was identified as something that is used
1146; pervasively in ARM's ABI-calling-convention lowering: the passing of a struct
1147; of doubles via an array of i32 in order to place the data into integer
1148; registers. This in turn was missed as an optimization by SROA due to the
1149; partial loads and stores of integers to the double alloca we were trying to
1150; form and promote. The solution is to widen the integer operations to be
1151; whole-alloca operations, and perform the appropriate bitcasting on the
1152; *values* rather than the pointers. When this works, partial reads and writes
1153; via integers can be promoted away.
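;
; As a rough sketch (not part of the test; names purely illustrative), a
; partial store such as
;   %lo = bitcast double* %X to i32*
;   store i32 0, i32* %lo
; gets widened into whole-alloca integer operations along the lines of
;   %old    = load i64, i64* %X.as.i64
;   %masked = and i64 %old, -4294967296    ; clear the low 32 bits
;   %new    = or i64 %masked, 0            ; insert the (zero-extended) value
;   store i64 %new, i64* %X.as.i64
; and a later 'load double' is served by bitcasting the i64 *value* back to
; double, leaving only full-width accesses that can be promoted.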
1154; CHECK: @PR14059.1
1155; CHECK-NOT: alloca
1156; CHECK: ret void
1157
1158entry:
1159  %X.sroa.0.i = alloca double, align 8
1160  %0 = bitcast double* %X.sroa.0.i to i8*
1161  call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
1162
1163  ; Store to the low 32-bits...
1164  %X.sroa.0.0.cast2.i = bitcast double* %X.sroa.0.i to i32*
1165  store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
1166
1167  ; Also use a memset to the middle 32-bits for fun.
1168  %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8, i8* %0, i32 2
1169  call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i1 false)
1170
1171  ; Or a memset of the whole thing.
1172  call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i1 false)
1173
1174  ; Write to the high 32-bits with a memcpy.
1175  %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8, i8* %0, i32 4
1176  %d.raw = bitcast double* %d to i8*
1177  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i1 false)
1178
1179  ; Store to the high 32-bits...
1180  %X.sroa.0.4.cast5.i = bitcast i8* %X.sroa.0.4.raw_idx4.i to i32*
1181  store i32 1072693248, i32* %X.sroa.0.4.cast5.i, align 4
1182
1183  ; Do the actual math...
1184  %X.sroa.0.0.load1.i = load double, double* %X.sroa.0.i, align 8
1185  %accum.real.i = load double, double* %d, align 8
1186  %add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
1187  store double %add.r.i, double* %d, align 8
1188  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
1189  ret void
1190}
1191
1192define i64 @PR14059.2({ float, float }* %phi) {
1193; Check that SROA can split up alloca-wide integer loads and stores where the
1194; underlying alloca has smaller components that are accessed independently. This
1195; shows up particularly with ABI lowering patterns coming out of Clang that rely
1196; on the particular register placement of a single large integer return value.
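;
; For reference (not part of the test): in the rewritten sequence below,
; 4294967295 is 0xFFFFFFFF (keep only the low 32 bits) and -4294967296 is
; 0xFFFFFFFF00000000 (keep only the high 32 bits), so the imaginary part is
; shifted into the high half and the real part inserted into the low half of
; a single i64 that is returned directly, without going through memory.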
1197; CHECK: @PR14059.2
1198
1199entry:
1200  %retval = alloca { float, float }, align 4
1201  ; CHECK-NOT: alloca
1202
1203  %0 = bitcast { float, float }* %retval to i64*
1204  store i64 0, i64* %0
1205  ; CHECK-NOT: store
1206
1207  %phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
1208  %phi.real = load float, float* %phi.realp
1209  %phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
1210  %phi.imag = load float, float* %phi.imagp
1211  ; CHECK:      %[[realp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
1212  ; CHECK-NEXT: %[[real:.*]] = load float, float* %[[realp]]
1213  ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
1214  ; CHECK-NEXT: %[[imag:.*]] = load float, float* %[[imagp]]
1215
1216  %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
1217  %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
1218  store float %phi.real, float* %real
1219  store float %phi.imag, float* %imag
1220  ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
1221  ; CHECK-NEXT: %[[imag_convert:.*]] = bitcast float %[[imag]] to i32
1222  ; CHECK-NEXT: %[[imag_ext:.*]] = zext i32 %[[imag_convert]] to i64
1223  ; CHECK-NEXT: %[[imag_shift:.*]] = shl i64 %[[imag_ext]], 32
1224  ; CHECK-NEXT: %[[imag_mask:.*]] = and i64 undef, 4294967295
1225  ; CHECK-NEXT: %[[imag_insert:.*]] = or i64 %[[imag_mask]], %[[imag_shift]]
1226  ; CHECK-NEXT: %[[real_ext:.*]] = zext i32 %[[real_convert]] to i64
1227  ; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
1228  ; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
1229
1230  %1 = load i64, i64* %0, align 1
1231  ret i64 %1
1232  ; CHECK-NEXT: ret i64 %[[real_insert]]
1233}
1234
1235define void @PR14105({ [16 x i8] }* %ptr) {
1236; Ensure that when rewriting the GEP index '-1' for this alloca we preserve its
1237; sign as negative. We use a volatile memcpy to ensure promotion never actually
1238; occurs.
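;
; Note: the GEP below addresses the 16 bytes immediately *before* %ptr; if the
; '-1' were reinterpreted as an unsigned index while SROA rebuilds the GEP, the
; offset would instead become a huge positive one, hence the emphasis on
; keeping the sign.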
1239; CHECK-LABEL: @PR14105(
1240
1241entry:
1242  %a = alloca { [16 x i8] }, align 8
1243; CHECK: alloca [16 x i8], align 8
1244
1245  %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1
1246; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
1247
1248  %cast1 = bitcast { [16 x i8 ] }* %gep to i8*
1249  %cast2 = bitcast { [16 x i8 ] }* %a to i8*
1250  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
1251  ret void
1252; CHECK: ret
1253}
1254
1255define void @PR14105_as1({ [16 x i8] } addrspace(1)* %ptr) {
1256; Make sure the right address space pointer is used for the type check.
1257; CHECK-LABEL: @PR14105_as1(
1258; CHECK: alloca { [16 x i8] }, align 8
1259; CHECK-NEXT: %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
1260; CHECK-NEXT: %cast1 = bitcast { [16 x i8] } addrspace(1)* %gep to i8 addrspace(1)*
1261; CHECK-NEXT: %cast2 = bitcast { [16 x i8] }* %a to i8*
1262; CHECK-NEXT: call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
1263
1264entry:
1265  %a = alloca { [16 x i8] }, align 8
1266  %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
1267  %cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
1268  %cast2 = bitcast { [16 x i8 ] }* %a to i8*
1269  call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
1270  ret void
1271; CHECK: ret
1272}
1273
1274define void @PR14465() {
1275; Ensure that we don't crash when analyzing an alloca larger than the maximum
1276; integer type width (MAX_INT_BITS) supported by LLVM (1048576*32 > (1<<23)-1).
1277; CHECK-LABEL: @PR14465(
1278
1279  %stack = alloca [1048576 x i32], align 16
1280; CHECK: alloca [1048576 x i32]
1281  %cast = bitcast [1048576 x i32]* %stack to i8*
1282  call void @llvm.memset.p0i8.i64(i8* align 16 %cast, i8 -2, i64 4194304, i1 false)
1283  ret void
1284; CHECK: ret
1285}
1286
1287define void @PR14548(i1 %x) {
1288; Handle a mixture of i1 and i8 loads and stores to allocas. This particular
1289; pattern caused crashes and invalid output in the PR, and its nature will
1290; trigger a mixture in several permutations as we resolve each alloca
1291; iteratively.
1292; Note that we don't do a particularly good *job* of handling these mixtures,
1293; but the hope is that this is very rare.
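;
; Loosely speaking, because the i1 and i8 accesses land on the very same byte
; there is no single type that covers the slice cleanly, so the rewrite keeps
; each alloca as a plain i8 and casts back to i1* only around the i1 accesses,
; as the expected output below shows.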
1294; CHECK-LABEL: @PR14548(
1295
1296entry:
1297  %a = alloca <{ i1 }>, align 8
1298  %b = alloca <{ i1 }>, align 8
1299; CHECK:      %[[a:.*]] = alloca i8, align 8
1300; CHECK-NEXT: %[[b:.*]] = alloca i8, align 8
1301
1302  %b.i1 = bitcast <{ i1 }>* %b to i1*
1303  store i1 %x, i1* %b.i1, align 8
1304  %b.i8 = bitcast <{ i1 }>* %b to i8*
1305  %foo = load i8, i8* %b.i8, align 1
1306; CHECK-NEXT: %[[b_cast:.*]] = bitcast i8* %[[b]] to i1*
1307; CHECK-NEXT: store i1 %x, i1* %[[b_cast]], align 8
1308; CHECK-NEXT: {{.*}} = load i8, i8* %[[b]], align 8
1309
1310  %a.i8 = bitcast <{ i1 }>* %a to i8*
1311  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i1 false) nounwind
1312  %bar = load i8, i8* %a.i8, align 1
1313  %a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
1314  %baz = load i1, i1* %a.i1, align 1
1315; CHECK-NEXT: %[[copy:.*]] = load i8, i8* %[[b]], align 8
1316; CHECK-NEXT: store i8 %[[copy]], i8* %[[a]], align 8
1317; CHECK-NEXT: {{.*}} = load i8, i8* %[[a]], align 8
1318; CHECK-NEXT: %[[a_cast:.*]] = bitcast i8* %[[a]] to i1*
1319; CHECK-NEXT: {{.*}} = load i1, i1* %[[a_cast]], align 8
1320
1321  ret void
1322}
1323
1324define <3 x i8> @PR14572.1(i32 %x) {
1325; Ensure that a split integer store which is wider than the type size of the
1326; alloca (relying on the alloc size padding) doesn't trigger an assert.
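;
; For reference: the store size of <3 x i8> is 3 bytes while its alloc size is
; padded out (to 4 bytes here), so the 4-byte i32 store below stays within the
; allocation; the test only cares that this does not trip an assertion.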
1327; CHECK: @PR14572.1
1328
1329entry:
1330  %a = alloca <3 x i8>, align 4
1331; CHECK-NOT: alloca
1332
1333  %cast = bitcast <3 x i8>* %a to i32*
1334  store i32 %x, i32* %cast, align 1
1335  %y = load <3 x i8>, <3 x i8>* %a, align 4
1336  ret <3 x i8> %y
1337; CHECK: ret <3 x i8>
1338}
1339
1340define i32 @PR14572.2(<3 x i8> %x) {
1341; Ensure that a split integer load which is wider than the type size of the
1342; alloca (relying on the alloc size padding) doesn't trigger an assert.
1343; CHECK: @PR14572.2
1344
1345entry:
1346  %a = alloca <3 x i8>, align 4
1347; CHECK-NOT: alloca
1348
1349  store <3 x i8> %x, <3 x i8>* %a, align 1
1350  %cast = bitcast <3 x i8>* %a to i32*
1351  %y = load i32, i32* %cast, align 4
1352  ret i32 %y
1353; CHECK: ret i32
1354}
1355
1356define i32 @PR14601(i32 %x) {
1357; Don't try to form a promotable integer alloca when there is a variable-length
1358; memory intrinsic.
1359; CHECK-LABEL: @PR14601(
1360
1361entry:
1362  %a = alloca i32
1363; CHECK: alloca
1364
1365  %a.i8 = bitcast i32* %a to i8*
1366  call void @llvm.memset.p0i8.i32(i8* %a.i8, i8 0, i32 %x, i1 false)
1367  %v = load i32, i32* %a
1368  ret i32 %v
1369}
1370
1371define void @PR15674(i8* %data, i8* %src, i32 %size) {
1372; Arrange (via control flow) to have unmerged stores of a particular width to
1373; an alloca where we incrementally store from the end of the array toward the
1374; beginning of the array. Ensure that the final integer store, despite being
1375; convertible to the integer type that we end up promoting this alloca toward,
1376; doesn't get widened to a full alloca store.
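;
; Intuition (not part of the test): the store at offset 0 in %bb1 is reached
; from every switch case that stores anything, but the bytes above it are
; written only on the paths for larger sizes, so widening that final store
; into a full i32 store of the alloca would clobber bytes written in the
; earlier blocks.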
1377; CHECK-LABEL: @PR15674(
1378
1379entry:
1380  %tmp = alloca [4 x i8], align 1
1381; CHECK: alloca i32
1382
1383  switch i32 %size, label %end [
1384    i32 4, label %bb4
1385    i32 3, label %bb3
1386    i32 2, label %bb2
1387    i32 1, label %bb1
1388  ]
1389
1390bb4:
1391  %src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
1392  %src.3 = load i8, i8* %src.gep3
1393  %tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
1394  store i8 %src.3, i8* %tmp.gep3
1395; CHECK: store i8
1396
1397  br label %bb3
1398
1399bb3:
1400  %src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
1401  %src.2 = load i8, i8* %src.gep2
1402  %tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
1403  store i8 %src.2, i8* %tmp.gep2
1404; CHECK: store i8
1405
1406  br label %bb2
1407
1408bb2:
1409  %src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
1410  %src.1 = load i8, i8* %src.gep1
1411  %tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
1412  store i8 %src.1, i8* %tmp.gep1
1413; CHECK: store i8
1414
1415  br label %bb1
1416
1417bb1:
1418  %src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
1419  %src.0 = load i8, i8* %src.gep0
1420  %tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
1421  store i8 %src.0, i8* %tmp.gep0
1422; CHECK: store i8
1423
1424  br label %end
1425
1426end:
1427  %tmp.raw = bitcast [4 x i8]* %tmp to i8*
1428  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %data, i8* %tmp.raw, i32 %size, i1 false)
1429  ret void
1430; CHECK: ret void
1431}
1432
1433define void @PR15805(i1 %a, i1 %b) {
1434; CHECK-LABEL: @PR15805(
1435; CHECK-NOT: alloca
1436; CHECK: ret void
1437
1438  %c = alloca i64, align 8
1439  %p.0.c = select i1 undef, i64* %c, i64* %c
1440  %cond.in = select i1 undef, i64* %p.0.c, i64* %c
1441  %cond = load i64, i64* %cond.in, align 8
1442  ret void
1443}
1444
1445define void @PR15805.1(i1 %a, i1 %b) {
1446; Same as the normal PR15805, but rigged to place the use before the def inside
1447; of looping unreachable code. This helps ensure that we aren't sensitive to the
1448; order in which the uses of the alloca are visited.
1449;
1450; CHECK-LABEL: @PR15805.1(
1451; CHECK-NOT: alloca
1452; CHECK: ret void
1453
1454  %c = alloca i64, align 8
1455  br label %exit
1456
1457loop:
1458  %cond.in = select i1 undef, i64* %c, i64* %p.0.c
1459  %p.0.c = select i1 undef, i64* %c, i64* %c
1460  %cond = load i64, i64* %cond.in, align 8
1461  br i1 undef, label %loop, label %exit
1462
1463exit:
1464  ret void
1465}
1466
1467define void @PR16651.1(i8* %a) {
1468; This test case caused a crash due to the volatile memcpy in combination with
1469; lowering to integer loads and stores of a width other than that of the original
1470; memcpy.
1471;
1472; CHECK-LABEL: @PR16651.1(
1473; CHECK: alloca i16
1474; CHECK: alloca i8
1475; CHECK: alloca i8
1476; CHECK: unreachable
1477
1478entry:
1479  %b = alloca i32, align 4
1480  %b.cast = bitcast i32* %b to i8*
1481  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %b.cast, i8* align 4 %a, i32 4, i1 true)
1482  %b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
1483  load i8, i8* %b.gep, align 2
1484  unreachable
1485}
1486
1487define void @PR16651.2() {
1488; This test case caused a crash due to failing to promote given a select that
1489; can't be speculated. It shouldn't be promoted, but we missed that fact when
1490; analyzing whether we could form a vector promotion because that code didn't
1491; bail on select instructions.
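;
; The select below may yield null, so the load through it cannot be speculated
; and the second half of %tv1 has to stay in memory; the point is that the
; vector-promotion analysis reaches the same conclusion instead of crashing.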
1492;
1493; CHECK-LABEL: @PR16651.2(
1494; CHECK: alloca <2 x float>
1495; CHECK: ret void
1496
1497entry:
1498  %tv1 = alloca { <2 x float>, <2 x float> }, align 8
1499  %0 = getelementptr { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
1500  store <2 x float> undef, <2 x float>* %0, align 8
1501  %1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
1502  %cond105.in.i.i = select i1 undef, float* null, float* %1
1503  %cond105.i.i = load float, float* %cond105.in.i.i, align 8
1504  ret void
1505}
1506
1507define void @test23(i32 %x) {
1508; CHECK-LABEL: @test23(
1509; CHECK-NOT: alloca
1510; CHECK: ret void
1511entry:
1512  %a = alloca i32, align 4
1513  store i32 %x, i32* %a, align 4
1514  %gep1 = getelementptr inbounds i32, i32* %a, i32 1
1515  %gep0 = getelementptr inbounds i32, i32* %a, i32 0
1516  %cast1 = bitcast i32* %gep1 to i8*
1517  %cast0 = bitcast i32* %gep0 to i8*
1518  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i1 false)
1519  ret void
1520}
1521
1522define void @PR18615() {
1523; CHECK-LABEL: @PR18615(
1524; CHECK-NOT: alloca
1525; CHECK: ret void
1526entry:
1527  %f = alloca i8
1528  %gep = getelementptr i8, i8* %f, i64 -1
1529  call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i1 false)
1530  ret void
1531}
1532
1533define void @test24(i8* %src, i8* %dst) {
1534; CHECK-LABEL: @test24(
1535; CHECK: alloca i64, align 16
1536; CHECK: load volatile i64, i64* %{{[^,]*}}, align 1, !tbaa [[TAG_0]]
1537; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 16, !tbaa [[TAG_0]]
1538; CHECK: load volatile i64, i64* %{{[^,]*}}, align 16, !tbaa [[TAG_3]]
1539; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 1, !tbaa [[TAG_3]]
1540
1541entry:
1542  %a = alloca i64, align 16
1543  %ptr = bitcast i64* %a to i8*
1544  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i1 true), !tbaa !0
1545  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i1 true), !tbaa !3
1546  ret void
1547}
1548
1549define float @test25() {
1550; Check that we split up stores in order to promote the smaller SSA values. These types
1551; of patterns can arise because LLVM maps small memcpys to integer loads and
1552; stores. If we get a memcpy of an aggregate (such as C and C++ frontends would
1553; produce, but so might any language frontend), this will in many cases turn into
1554; an integer load and store. SROA needs to be extremely powerful to correctly
1555; handle these cases and form splittable and promotable SSA values.
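;
; As a rough sketch (names illustrative, not the actual output), the splitting
; turns
;   %v = load i64, i64* %a
;   store i64 %v, i64* %b
; into per-element pieces along the lines of
;   %lo = load i32, i32* %a.lo
;   %hi = load i32, i32* %a.hi
;   store i32 %lo, i32* %b.lo
;   store i32 %hi, i32* %b.hi
; after which every slice is promotable and the function folds down to the
; constant-float fadd matched below.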
1556;
1557; CHECK-LABEL: @test25(
1558; CHECK-NOT: alloca
1559; CHECK: %[[F1:.*]] = bitcast i32 0 to float
1560; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
1561; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
1562; CHECK: ret float %[[SUM]]
1563
1564entry:
1565  %a = alloca i64
1566  %b = alloca i64
1567  %a.cast = bitcast i64* %a to [2 x float]*
1568  %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
1569  %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
1570  %b.cast = bitcast i64* %b to [2 x float]*
1571  %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
1572  %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
1573  store float 0.0, float* %a.gep1
1574  store float 1.0, float* %a.gep2
1575  %v = load i64, i64* %a
1576  store i64 %v, i64* %b
1577  %f1 = load float, float* %b.gep1
1578  %f2 = load float, float* %b.gep2
1579  %ret = fadd float %f1, %f2
1580  ret float %ret
1581}
1582
1583@complex1 = external global [2 x float]
1584@complex2 = external global [2 x float]
1585
1586define void @test26() {
1587; Test a case of splitting up loads and stores against globals.
1588;
1589; CHECK-LABEL: @test26(
1590; CHECK-NOT: alloca
1591; CHECK: %[[L1:.*]] = load i32, i32* bitcast
1592; CHECK: %[[L2:.*]] = load i32, i32* bitcast
1593; CHECK: %[[F1:.*]] = bitcast i32 %[[L1]] to float
1594; CHECK: %[[F2:.*]] = bitcast i32 %[[L2]] to float
1595; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
1596; CHECK: %[[C1:.*]] = bitcast float %[[SUM]] to i32
1597; CHECK: %[[C2:.*]] = bitcast float %[[SUM]] to i32
1598; CHECK: store i32 %[[C1]], i32* bitcast
1599; CHECK: store i32 %[[C2]], i32* bitcast
1600; CHECK: ret void
1601
1602entry:
1603  %a = alloca i64
1604  %a.cast = bitcast i64* %a to [2 x float]*
1605  %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
1606  %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
1607  %v1 = load i64, i64* bitcast ([2 x float]* @complex1 to i64*)
1608  store i64 %v1, i64* %a
1609  %f1 = load float, float* %a.gep1
1610  %f2 = load float, float* %a.gep2
1611  %sum = fadd float %f1, %f2
1612  store float %sum, float* %a.gep1
1613  store float %sum, float* %a.gep2
1614  %v2 = load i64, i64* %a
1615  store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
1616  ret void
1617}
1618
1619define float @test27() {
1620; Another, more complex case of splittable i64 loads and stores. This example
1621; is a particularly challenging one because the load and store both point into
1622; the alloca SROA is processing, and they overlap but at an offset.
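;
; Working through the offsets: 0.0 is stored at bytes [0,4) and 1.0 at [4,8);
; the i64 load reads bytes [0,8) and the i64 store writes them to bytes [4,12),
; so afterwards bytes [4,8) hold 0.0 and bytes [8,12) hold 1.0, and the final
; fadd is 0.0 + 1.0, as reflected by the constants matched below.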
1623;
1624; CHECK-LABEL: @test27(
1625; CHECK-NOT: alloca
1626; CHECK: %[[F1:.*]] = bitcast i32 0 to float
1627; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
1628; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
1629; CHECK: ret float %[[SUM]]
1630
1631entry:
1632  %a = alloca [12 x i8]
1633  %gep1 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 0
1634  %gep2 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 4
1635  %gep3 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 8
1636  %iptr1 = bitcast i8* %gep1 to i64*
1637  %iptr2 = bitcast i8* %gep2 to i64*
1638  %fptr1 = bitcast i8* %gep1 to float*
1639  %fptr2 = bitcast i8* %gep2 to float*
1640  %fptr3 = bitcast i8* %gep3 to float*
1641  store float 0.0, float* %fptr1
1642  store float 1.0, float* %fptr2
1643  %v = load i64, i64* %iptr1
1644  store i64 %v, i64* %iptr2
1645  %f1 = load float, float* %fptr2
1646  %f2 = load float, float* %fptr3
1647  %ret = fadd float %f1, %f2
1648  ret float %ret
1649}
1650
1651define i32 @PR22093() {
1652; Test that we don't try to pre-split a splittable store of a splittable but
1653; not pre-splittable load over the same alloca. We "handle" this case when the
1654; load is unsplittable but unrelated to this alloca by just generating extra
1655; loads without touching the original, but when the original load was from
1656; this alloca we need to handle it specially to ensure the splits line up
1657; properly for rewriting.
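;
; Loosely, the volatile i16 store pins a 2-byte slice in memory (the i16
; alloca expected below), which constrains how the overlapping i32 load and
; store may be split up.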
1658;
1659; CHECK-LABEL: @PR22093(
1660; CHECK-NOT: alloca
1661; CHECK: alloca i16
1662; CHECK-NOT: alloca
1663; CHECK: store volatile i16
1664
1665entry:
1666  %a = alloca i32
1667  %a.cast = bitcast i32* %a to i16*
1668  store volatile i16 42, i16* %a.cast
1669  %load = load i32, i32* %a
1670  store i32 %load, i32* %a
1671  ret i32 %load
1672}
1673
1674define void @PR22093.2() {
1675; Another case where we end up unable to split a particular set of loads
1676; and stores, and one where ordering even matters. Here we have a load which is
1677; pre-splittable by itself, and the first store is also compatible. But the
1678; second store of the load makes the load unsplittable because of a mismatch of
1679; splits. Because this makes the load unsplittable, we also have to go back and
1680; remove the first store from the presplit candidates as its load won't be
1681; presplit.
1682;
1683; CHECK-LABEL: @PR22093.2(
1684; CHECK-NOT: alloca
1685; CHECK: alloca i16
1686; CHECK-NEXT: alloca i8
1687; CHECK-NOT: alloca
1688; CHECK: store volatile i16
1689; CHECK: store volatile i8
1690
1691entry:
1692  %a = alloca i64
1693  %a.cast1 = bitcast i64* %a to i32*
1694  %a.cast2 = bitcast i64* %a to i16*
1695  store volatile i16 42, i16* %a.cast2
1696  %load = load i32, i32* %a.cast1
1697  store i32 %load, i32* %a.cast1
1698  %a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
1699  %a.cast3 = bitcast i32* %a.gep1 to i8*
1700  store volatile i8 13, i8* %a.cast3
1701  store i32 %load, i32* %a.gep1
1702  ret void
1703}
1704
1705define void @PR23737() {
1706; CHECK-LABEL: @PR23737(
1707; CHECK: store atomic volatile {{.*}} seq_cst
1708; CHECK: load atomic volatile {{.*}} seq_cst
1709entry:
1710  %ptr = alloca i64, align 8
1711  store atomic volatile i64 0, i64* %ptr seq_cst, align 8
1712  %load = load atomic volatile i64, i64* %ptr seq_cst, align 8
1713  ret void
1714}
1715
1716define i16 @PR24463() {
1717; Ensure we can handle a very interesting case where there is an integer-based
1718; rewrite of the uses of the alloca, but where one of the integers involved is
1719; a sub-integer that requires extraction *and* extends past the end of the
1720; alloca. SROA can split the alloca so that no shift or trunc is needed.
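;
; For reference: the alloca covers bytes 0..2, while the i16 load at offset 2
; nominally reaches byte 3, one past the end; splitting the alloca lets SROA
; load just the in-bounds byte and zero-extend it to i16, which is why the
; expected output below has a zext but no lshr or trunc.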
1721;
1722; CHECK-LABEL: @PR24463(
1723; CHECK-NOT: alloca
1724; CHECK-NOT: trunc
1725; CHECK-NOT: lshr
1726; CHECK: %[[ZEXT:.*]] = zext i8 {{.*}} to i16
1727; CHECK: ret i16 %[[ZEXT]]
1728entry:
1729  %alloca = alloca [3 x i8]
1730  %gep1 = getelementptr inbounds [3 x i8], [3 x i8]* %alloca, i64 0, i64 1
1731  %bc1 = bitcast i8* %gep1 to i16*
1732  store i16 0, i16* %bc1
1733  %gep2 = getelementptr inbounds [3 x i8], [3 x i8]* %alloca, i64 0, i64 2
1734  %bc2 = bitcast i8* %gep2 to i16*
1735  %load = load i16, i16* %bc2
1736  ret i16 %load
1737}
1738
1739%struct.STest = type { %struct.SPos, %struct.SPos }
1740%struct.SPos = type { float, float }
1741
1742define void @PR25873(%struct.STest* %outData) {
1743; CHECK-LABEL: @PR25873(
1744; CHECK: store i32 1123418112
1745; CHECK: store i32 1139015680
1746; CHECK: %[[HIZEXT:.*]] = zext i32 1139015680 to i64
1747; CHECK: %[[HISHL:.*]] = shl i64 %[[HIZEXT]], 32
1748; CHECK: %[[HIMASK:.*]] = and i64 undef, 4294967295
1749; CHECK: %[[HIINSERT:.*]] = or i64 %[[HIMASK]], %[[HISHL]]
1750; CHECK: %[[LOZEXT:.*]] = zext i32 1123418112 to i64
1751; CHECK: %[[LOMASK:.*]] = and i64 %[[HIINSERT]], -4294967296
1752; CHECK: %[[LOINSERT:.*]] = or i64 %[[LOMASK]], %[[LOZEXT]]
1753; CHECK: store i64 %[[LOINSERT]]
1754entry:
1755  %tmpData = alloca %struct.STest, align 8
1756  %0 = bitcast %struct.STest* %tmpData to i8*
1757  call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
1758  %x = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 0
1759  store float 1.230000e+02, float* %x, align 8
1760  %y = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 1
1761  store float 4.560000e+02, float* %y, align 4
1762  %m_posB = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 1
1763  %1 = bitcast %struct.STest* %tmpData to i64*
1764  %2 = bitcast %struct.SPos* %m_posB to i64*
1765  %3 = load i64, i64* %1, align 8
1766  store i64 %3, i64* %2, align 8
1767  %4 = bitcast %struct.STest* %outData to i8*
1768  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %0, i64 16, i1 false)
1769  call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
1770  ret void
1771}
1772
1773declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
1774
1775define void @PR27999() unnamed_addr {
1776; CHECK-LABEL: @PR27999(
1777; CHECK: entry-block:
1778; CHECK-NEXT: ret void
1779entry-block:
1780  %0 = alloca [2 x i64], align 8
1781  %1 = bitcast [2 x i64]* %0 to i8*
1782  call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
1783  %2 = getelementptr inbounds [2 x i64], [2 x i64]* %0, i32 0, i32 1
1784  %3 = bitcast i64* %2 to i8*
1785  call void @llvm.lifetime.end.p0i8(i64 8, i8* %3)
1786  ret void
1787}
1788
1789define void @PR29139() {
1790; CHECK-LABEL: @PR29139(
1791; CHECK: bb1:
1792; CHECK-NEXT: ret void
1793bb1:
1794  %e.7.sroa.6.i = alloca i32, align 1
1795  %e.7.sroa.6.0.load81.i = load i32, i32* %e.7.sroa.6.i, align 1
1796  %0 = bitcast i32* %e.7.sroa.6.i to i8*
1797  call void @llvm.lifetime.end.p0i8(i64 2, i8* %0)
1798  ret void
1799}
1800
1801; PR35657 reports an assertion failure with this code
1802define void @PR35657(i64 %v) {
1803; CHECK-LABEL: @PR35657
1804; CHECK: call void @callee16(i16 %{{.*}})
1805; CHECK: call void @callee48(i48 %{{.*}})
1806; CHECK: ret void
1807entry:
1808  %a48 = alloca i48
1809  %a48.cast64 = bitcast i48* %a48 to i64*
1810  store i64 %v, i64* %a48.cast64
1811  %a48.cast16 = bitcast i48* %a48 to i16*
1812  %b0_15 = load i16, i16* %a48.cast16
1813  %a48.cast8 = bitcast i48* %a48 to i8*
1814  %a48_offset2 = getelementptr inbounds i8, i8* %a48.cast8, i64 2
1815  %a48_offset2.cast48 = bitcast i8* %a48_offset2 to i48*
1816  %b16_63 = load i48, i48* %a48_offset2.cast48, align 2
1817  call void @callee16(i16 %b0_15)
1818  call void @callee48(i48 %b16_63)
1819  ret void
1820}
1821
1822declare void @callee16(i16 %a)
1823declare void @callee48(i48 %a)
1824
1825define void @test28(i64 %v) #0 {
1826; SROA should split the first i64 store to avoid additional and/or instructions
1827; when storing into i32 fields.
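;
; As a rough sketch (names illustrative), the spanning i64 store is rewritten
; as two independent halves along the lines of
;   %lo = trunc i64 %v to i32                 ; low half, into field 1
;   %hi.shifted = lshr i64 %v, 32
;   %hi = trunc i64 %hi.shifted to i32        ; high half, into field 2
; rather than being merged with and/or masking, which is what the directives
; below verify.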
1828
1829; CHECK-LABEL: @test28(
1830; CHECK-NOT: alloca
1831; CHECK-NOT: and
1832; CHECK-NOT: or
1833; CHECK:      %[[shift:.*]] = lshr i64 %v, 32
1834; CHECK-NEXT: %{{.*}} = trunc i64 %[[shift]] to i32
1835; CHECK-NEXT: ret void
1836
1837entry:
1838  %t = alloca { i64, i32, i32 }
1839
1840  %b = getelementptr { i64, i32, i32 }, { i64, i32, i32 }* %t, i32 0, i32 1
1841  %0 = bitcast i32* %b to i64*
1842  store i64 %v, i64* %0
1843
1844  %1 = load i32, i32* %b
1845  %c = getelementptr { i64, i32, i32 }, { i64, i32, i32 }* %t, i32 0, i32 2
1846  store i32 %1, i32* %c
1847  ret void
1848}
1849
1850declare void @llvm.lifetime.start.isVoid.i64.p0i8(i64, [10 x float]* nocapture)
1851declare void @llvm.lifetime.end.isVoid.i64.p0i8(i64, [10 x float]* nocapture)
1852@array = dso_local global [10 x float] undef, align 4
1853
1854define void @test29(i32 %num, i32 %tid) {
1855; CHECK-LABEL: @test29(
1856; CHECK-NOT: alloca [10 x float]
1857; CHECK: ret void
1858
1859entry:
1860  %ra = alloca [10 x float], align 4
1861  call void @llvm.lifetime.start.isVoid.i64.p0i8(i64 40, [10 x float]* nonnull %ra)
1862
1863  %cmp1 = icmp sgt i32 %num, 0
1864  br i1 %cmp1, label %bb1, label %bb7
1865
1866bb1:
1867  %tobool = icmp eq i32 %tid, 0
1868  %conv.i = zext i32 %tid to i64
1869  %0 = bitcast [10 x float]* %ra to i32*
1870  %1 = load i32, i32* %0, align 4
1871  %arrayidx5 = getelementptr inbounds [10 x float], [10 x float]* @array, i64 0, i64 %conv.i
1872  %2 = bitcast float* %arrayidx5 to i32*
1873  br label %bb2
1874
1875bb2:
1876  %i.02 = phi i32 [ %num, %bb1 ], [ %sub, %bb5 ]
1877  br i1 %tobool, label %bb3, label %bb4
1878
1879bb3:
1880  br label %bb5
1881
1882bb4:
1883  store i32 %1, i32* %2, align 4
1884  br label %bb5
1885
1886bb5:
1887  %sub = add i32 %i.02, -1
1888  %cmp = icmp sgt i32 %sub, 0
1889  br i1 %cmp, label %bb2, label %bb6
1890
1891bb6:
1892  br label %bb7
1893
1894bb7:
1895  call void @llvm.lifetime.end.isVoid.i64.p0i8(i64 40, [10 x float]* nonnull %ra)
1896  ret void
1897}
1898
1899!0 = !{!1, !1, i64 0, i64 1}
1900!1 = !{!2, i64 1, !"type_0"}
1901!2 = !{!"root"}
1902!3 = !{!4, !4, i64 0, i64 1}
1903!4 = !{!2, i64 1, !"type_3"}
1904!5 = !{!6, !6, i64 0, i64 1}
1905!6 = !{!2, i64 1, !"type_5"}
1906!7 = !{!8, !8, i64 0, i64 1}
1907!8 = !{!2, i64 1, !"type_7"}
1908!9 = !{!10, !10, i64 0, i64 1}
1909!10 = !{!2, i64 1, !"type_9"}
1910!11 = !{!12, !12, i64 0, i64 1}
1911!12 = !{!2, i64 1, !"type_11"}
1912!13 = !{!14, !14, i64 0, i64 1}
1913!14 = !{!2, i64 1, !"type_13"}
1914!15 = !{!16, !16, i64 0, i64 1}
1915!16 = !{!2, i64 1, !"type_15"}
1916!17 = !{!18, !18, i64 0, i64 1}
1917!18 = !{!2, i64 1, !"type_17"}
1918!19 = !{!20, !20, i64 0, i64 1}
1919!20 = !{!2, i64 1, !"type_19"}
1920!21 = !{!22, !22, i64 0, i64 1}
1921!22 = !{!2, i64 1, !"type_21"}
1922!23 = !{!24, !24, i64 0, i64 1}
1923!24 = !{!2, i64 1, !"type_23"}
1924!25 = !{!26, !26, i64 0, i64 1}
1925!26 = !{!2, i64 1, !"type_25"}
1926!27 = !{!28, !28, i64 0, i64 1}
1927!28 = !{!2, i64 1, !"type_27"}
1928!29 = !{!30, !30, i64 0, i64 1}
1929!30 = !{!2, i64 1, !"type_29"}
1930!31 = !{!32, !32, i64 0, i64 1}
1931!32 = !{!2, i64 1, !"type_31"}
1932!33 = !{!34, !34, i64 0, i64 1}
1933!34 = !{!2, i64 1, !"type_33"}
1934!35 = !{!36, !36, i64 0, i64 1}
1935!36 = !{!2, i64 1, !"type_35"}
1936!37 = !{!38, !38, i64 0, i64 1}
1937!38 = !{!2, i64 1, !"type_37"}
1938!39 = !{!40, !40, i64 0, i64 1}
1939!40 = !{!2, i64 1, !"type_39"}
1940!41 = !{!42, !42, i64 0, i64 1}
1941!42 = !{!2, i64 1, !"type_41"}
1942!43 = !{!44, !44, i64 0, i64 1}
1943!44 = !{!2, i64 1, !"type_43"}
1944!45 = !{!46, !46, i64 0, i64 1}
1945!46 = !{!2, i64 1, !"type_45"}
1946!47 = !{!48, !48, i64 0, i64 1}
1947!48 = !{!2, i64 1, !"type_47"}
1948!49 = !{!50, !50, i64 0, i64 1}
1949!50 = !{!2, i64 1, !"type_49"}
1950!51 = !{!52, !52, i64 0, i64 1}
1951!52 = !{!2, i64 1, !"type_51"}
1952!53 = !{!54, !54, i64 0, i64 1}
1953!54 = !{!2, i64 1, !"type_53"}
1954!55 = !{!56, !56, i64 0, i64 1}
1955!56 = !{!2, i64 1, !"type_55"}
1956!57 = !{!58, !58, i64 0, i64 1}
1957!58 = !{!2, i64 1, !"type_57"}
1958!59 = !{!60, !60, i64 0, i64 1}
1959!60 = !{!2, i64 1, !"type_59"}
1960
1961; CHECK-DAG: [[TYPE_0:!.*]] = !{{{.*}}, !"type_0"}
1962; CHECK-DAG: [[TAG_0]] = !{[[TYPE_0]], [[TYPE_0]], i64 0, i64 1}
1963; CHECK-DAG: [[TYPE_3:!.*]] = !{{{.*}}, !"type_3"}
1964; CHECK-DAG: [[TAG_3]] = !{[[TYPE_3]], [[TYPE_3]], i64 0, i64 1}
1965; CHECK-DAG: [[TYPE_5:!.*]] = !{{{.*}}, !"type_5"}
1966; CHECK-DAG: [[TAG_5]] = !{[[TYPE_5]], [[TYPE_5]], i64 0, i64 1}
1967; CHECK-DAG: [[TYPE_7:!.*]] = !{{{.*}}, !"type_7"}
1968; CHECK-DAG: [[TAG_7]] = !{[[TYPE_7]], [[TYPE_7]], i64 0, i64 1}
1969; CHECK-DAG: [[TYPE_9:!.*]] = !{{{.*}}, !"type_9"}
1970; CHECK-DAG: [[TAG_9]] = !{[[TYPE_9]], [[TYPE_9]], i64 0, i64 1}
1971; CHECK-DAG: [[TYPE_11:!.*]] = !{{{.*}}, !"type_11"}
1972; CHECK-DAG: [[TAG_11]] = !{[[TYPE_11]], [[TYPE_11]], i64 0, i64 1}
1973; CHECK-DAG: [[TYPE_13:!.*]] = !{{{.*}}, !"type_13"}
1974; CHECK-DAG: [[TAG_13]] = !{[[TYPE_13]], [[TYPE_13]], i64 0, i64 1}
1975; CHECK-DAG: [[TYPE_15:!.*]] = !{{{.*}}, !"type_15"}
1976; CHECK-DAG: [[TAG_15]] = !{[[TYPE_15]], [[TYPE_15]], i64 0, i64 1}
1977; CHECK-DAG: [[TYPE_17:!.*]] = !{{{.*}}, !"type_17"}
1978; CHECK-DAG: [[TAG_17]] = !{[[TYPE_17]], [[TYPE_17]], i64 0, i64 1}
1979; CHECK-DAG: [[TYPE_19:!.*]] = !{{{.*}}, !"type_19"}
1980; CHECK-DAG: [[TAG_19]] = !{[[TYPE_19]], [[TYPE_19]], i64 0, i64 1}
1981; CHECK-DAG: [[TYPE_21:!.*]] = !{{{.*}}, !"type_21"}
1982; CHECK-DAG: [[TAG_21]] = !{[[TYPE_21]], [[TYPE_21]], i64 0, i64 1}
1983; CHECK-DAG: [[TYPE_23:!.*]] = !{{{.*}}, !"type_23"}
1984; CHECK-DAG: [[TAG_23]] = !{[[TYPE_23]], [[TYPE_23]], i64 0, i64 1}
1985; CHECK-DAG: [[TYPE_25:!.*]] = !{{{.*}}, !"type_25"}
1986; CHECK-DAG: [[TAG_25]] = !{[[TYPE_25]], [[TYPE_25]], i64 0, i64 1}
1987; CHECK-DAG: [[TYPE_27:!.*]] = !{{{.*}}, !"type_27"}
1988; CHECK-DAG: [[TAG_27]] = !{[[TYPE_27]], [[TYPE_27]], i64 0, i64 1}
1989; CHECK-DAG: [[TYPE_29:!.*]] = !{{{.*}}, !"type_29"}
1990; CHECK-DAG: [[TAG_29]] = !{[[TYPE_29]], [[TYPE_29]], i64 0, i64 1}
1991; CHECK-DAG: [[TYPE_31:!.*]] = !{{{.*}}, !"type_31"}
1992; CHECK-DAG: [[TAG_31]] = !{[[TYPE_31]], [[TYPE_31]], i64 0, i64 1}
1993; CHECK-DAG: [[TYPE_33:!.*]] = !{{{.*}}, !"type_33"}
1994; CHECK-DAG: [[TAG_33]] = !{[[TYPE_33]], [[TYPE_33]], i64 0, i64 1}
1995; CHECK-DAG: [[TYPE_35:!.*]] = !{{{.*}}, !"type_35"}
1996; CHECK-DAG: [[TAG_35]] = !{[[TYPE_35]], [[TYPE_35]], i64 0, i64 1}
1997; CHECK-DAG: [[TYPE_37:!.*]] = !{{{.*}}, !"type_37"}
1998; CHECK-DAG: [[TAG_37]] = !{[[TYPE_37]], [[TYPE_37]], i64 0, i64 1}
1999; CHECK-DAG: [[TYPE_39:!.*]] = !{{{.*}}, !"type_39"}
2000; CHECK-DAG: [[TAG_39]] = !{[[TYPE_39]], [[TYPE_39]], i64 0, i64 1}
2001; CHECK-DAG: [[TYPE_41:!.*]] = !{{{.*}}, !"type_41"}
2002; CHECK-DAG: [[TAG_41]] = !{[[TYPE_41]], [[TYPE_41]], i64 0, i64 1}
2003; CHECK-DAG: [[TYPE_43:!.*]] = !{{{.*}}, !"type_43"}
2004; CHECK-DAG: [[TAG_43]] = !{[[TYPE_43]], [[TYPE_43]], i64 0, i64 1}
2005; CHECK-DAG: [[TYPE_45:!.*]] = !{{{.*}}, !"type_45"}
2006; CHECK-DAG: [[TAG_45]] = !{[[TYPE_45]], [[TYPE_45]], i64 0, i64 1}
2007; CHECK-DAG: [[TYPE_47:!.*]] = !{{{.*}}, !"type_47"}
2008; CHECK-DAG: [[TAG_47]] = !{[[TYPE_47]], [[TYPE_47]], i64 0, i64 1}
2009; CHECK-DAG: [[TYPE_49:!.*]] = !{{{.*}}, !"type_49"}
2010; CHECK-DAG: [[TAG_49]] = !{[[TYPE_49]], [[TYPE_49]], i64 0, i64 1}
2011; CHECK-DAG: [[TYPE_51:!.*]] = !{{{.*}}, !"type_51"}
2012; CHECK-DAG: [[TAG_51]] = !{[[TYPE_51]], [[TYPE_51]], i64 0, i64 1}
2013; CHECK-DAG: [[TYPE_53:!.*]] = !{{{.*}}, !"type_53"}
2014; CHECK-DAG: [[TAG_53]] = !{[[TYPE_53]], [[TYPE_53]], i64 0, i64 1}
2015; CHECK-DAG: [[TYPE_55:!.*]] = !{{{.*}}, !"type_55"}
2016; CHECK-DAG: [[TAG_55]] = !{[[TYPE_55]], [[TYPE_55]], i64 0, i64 1}
2017; CHECK-DAG: [[TYPE_57:!.*]] = !{{{.*}}, !"type_57"}
2018; CHECK-DAG: [[TAG_57]] = !{[[TYPE_57]], [[TYPE_57]], i64 0, i64 1}
2019; CHECK-DAG: [[TYPE_59:!.*]] = !{{{.*}}, !"type_59"}
2020; CHECK-DAG: [[TAG_59]] = !{[[TYPE_59]], [[TYPE_59]], i64 0, i64 1}
2021