1; "PLAIN" - No optimizations. This tests the default target layout
2; constant folder.
3; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
4
5; "OPT" - Optimizations but no targetdata. This tests default target layout
6; folding in the optimizers.
7; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=OPT %s
8
9; "TO" - Optimizations and targetdata. This tests target-dependent
10; folding in the optimizers.
11; RUN: opt -S -o - -instcombine -globalopt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
12
13; "SCEV" - ScalarEvolution with default target layout
14; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
15
16
17; The automatic constant folder in opt does not have targetdata access, so
18; it can't fold gep arithmetic, in general. However, the constant folder run
19; from instcombine and global opt can use targetdata.
20
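; The globals below step one or two i8/i1 elements back from inttoptr 1 and
; from null. As the OPT and TO lines show, the optimizers fold these to null
; and to the all-ones pointer inttoptr (i64 -1).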
; PLAIN: @G8 = global i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -1)
; PLAIN: @G1 = global i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -1)
; PLAIN: @F8 = global i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -2)
; PLAIN: @F1 = global i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -2)
; PLAIN: @H8 = global i8* getelementptr (i8, i8* null, i32 -1)
; PLAIN: @H1 = global i1* getelementptr (i1, i1* null, i32 -1)
; OPT: @G8 = global i8* null
; OPT: @G1 = global i1* null
; OPT: @F8 = global i8* inttoptr (i64 -1 to i8*)
; OPT: @F1 = global i1* inttoptr (i64 -1 to i1*)
; OPT: @H8 = global i8* inttoptr (i64 -1 to i8*)
; OPT: @H1 = global i1* inttoptr (i64 -1 to i1*)
; TO: @G8 = global i8* null
; TO: @G1 = global i1* null
; TO: @F8 = global i8* inttoptr (i64 -1 to i8*)
; TO: @F1 = global i1* inttoptr (i64 -1 to i1*)
; TO: @H8 = global i8* inttoptr (i64 -1 to i8*)
; TO: @H1 = global i1* inttoptr (i64 -1 to i1*)

@G8 = global i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -1)
@G1 = global i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -1)
@F8 = global i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -2)
@F1 = global i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -2)
@H8 = global i8* getelementptr (i8, i8* inttoptr (i32 0 to i8*), i32 -1)
@H1 = global i1* getelementptr (i1, i1* inttoptr (i32 0 to i1*), i32 -1)

; The target-independent folder should be able to do some clever
; simplifications on sizeof, alignof, and offsetof expressions. The
; target-dependent folder should fold these down to constants.

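; These constants use the usual null-based GEP idioms: sizeof(T) is written as
; ptrtoint of a GEP one T past null, and field offsets as GEPs into a struct
; at null. With 8-byte doubles, @a below works out to 2310 * 8 = 18480,
; matching the OPT and TO lines.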
; PLAIN: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2310)
; PLAIN: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64)
; PLAIN: @c = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2)
; PLAIN: @d = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 11)
; PLAIN: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }, { double, float, double, double }* null, i64 0, i32 2) to i64)
; PLAIN: @f = constant i64 1
; PLAIN: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64)
; PLAIN: @h = constant i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)
; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }, { i1, i1* }* null, i64 0, i32 1) to i64)
; OPT: @a = constant i64 18480
; OPT: @b = constant i64 8
; OPT: @c = constant i64 16
; OPT: @d = constant i64 88
; OPT: @e = constant i64 16
; OPT: @f = constant i64 1
; OPT: @g = constant i64 8
; OPT: @h = constant i64 8
; OPT: @i = constant i64 8
; TO: @a = constant i64 18480
; TO: @b = constant i64 8
; TO: @c = constant i64 16
; TO: @d = constant i64 88
; TO: @e = constant i64 16
; TO: @f = constant i64 1
; TO: @g = constant i64 8
; TO: @h = constant i64 8
; TO: @i = constant i64 8

@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}, {[7 x double], [7 x double]}* null, i64 11) to i64), i64 5))
@b = constant i64 ptrtoint ([13 x double]* getelementptr ({i1, [13 x double]}, {i1, [13 x double]}* null, i64 0, i32 1) to i64)
@c = constant i64 ptrtoint (double* getelementptr ({double, double, double, double}, {double, double, double, double}* null, i64 0, i32 2) to i64)
@d = constant i64 ptrtoint (double* getelementptr ([13 x double], [13 x double]* null, i64 0, i32 11) to i64)
@e = constant i64 ptrtoint (double* getelementptr ({double, float, double, double}, {double, float, double, double}* null, i64 0, i32 2) to i64)
@f = constant i64 ptrtoint (<{ i16, i128 }>* getelementptr ({i1, <{ i16, i128 }>}, {i1, <{ i16, i128 }>}* null, i64 0, i32 1) to i64)
@g = constant i64 ptrtoint ({double, double}* getelementptr ({i1, {double, double}}, {i1, {double, double}}* null, i64 0, i32 1) to i64)
@h = constant i64 ptrtoint (double** getelementptr (double*, double** null, i64 1) to i64)
@i = constant i64 ptrtoint (double** getelementptr ({i1, double*}, {i1, double*}* null, i64 0, i32 1) to i64)

; The target-dependent folder should cast GEP indices to pointer-sized integers.

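; With 64-bit pointers, the i32 indices below are widened to i64 when the GEPs
; are folded; each case is a byte offset of 8, as the OPT and TO lines show.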
; PLAIN: @M = constant i64* getelementptr (i64, i64* null, i32 1)
; PLAIN: @N = constant i64* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 0, i32 1)
; PLAIN: @O = constant i64* getelementptr ([2 x i64], [2 x i64]* null, i32 0, i32 1)
; OPT: @M = constant i64* inttoptr (i64 8 to i64*)
; OPT: @N = constant i64* inttoptr (i64 8 to i64*)
; OPT: @O = constant i64* inttoptr (i64 8 to i64*)
; TO: @M = constant i64* inttoptr (i64 8 to i64*)
; TO: @N = constant i64* inttoptr (i64 8 to i64*)
; TO: @O = constant i64* inttoptr (i64 8 to i64*)

@M = constant i64* getelementptr (i64, i64* null, i32 1)
@N = constant i64* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 0, i32 1)
@O = constant i64* getelementptr ([2 x i64], [2 x i64]* null, i32 0, i32 1)

; Fold GEP of a GEP. Very simple cases are folded without targetdata.

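; @Y nests two GEPs of @ext with index 1 apiece, which combine into a single
; GEP with index 2; @Z's trailing i32 step folds into the inner GEP's final
; field index.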
; PLAIN: @Y = global [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 2)
; PLAIN: @Z = global i32* getelementptr inbounds (i32, i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
; OPT: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 2)
; OPT: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; TO: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 2)
; TO: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)

@ext = external global [3 x { i32, i32 }]
@Y = global [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 1), i64 1)
@Z = global i32* getelementptr inbounds (i32, i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)

; Duplicate all of the above as function return values rather than
; global initializers.

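; The no-op bitcasts in the functions below simply give each constant
; expression a use inside a function body, so that it is visited as an
; instruction operand by instcombine and ScalarEvolution.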
; PLAIN: define i8* @goo8() #0 {
; PLAIN:   %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
; PLAIN:   ret i8* %t
; PLAIN: }
; PLAIN: define i1* @goo1() #0 {
; PLAIN:   %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
; PLAIN:   ret i1* %t
; PLAIN: }
; PLAIN: define i8* @foo8() #0 {
; PLAIN:   %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
; PLAIN:   ret i8* %t
; PLAIN: }
; PLAIN: define i1* @foo1() #0 {
; PLAIN:   %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
; PLAIN:   ret i1* %t
; PLAIN: }
; PLAIN: define i8* @hoo8() #0 {
; PLAIN:   %t = bitcast i8* getelementptr (i8, i8* null, i32 -1) to i8*
; PLAIN:   ret i8* %t
; PLAIN: }
; PLAIN: define i1* @hoo1() #0 {
; PLAIN:   %t = bitcast i1* getelementptr (i1, i1* null, i32 -1) to i1*
; PLAIN:   ret i1* %t
; PLAIN: }
; OPT: define i8* @goo8() #0 {
; OPT:   ret i8* null
; OPT: }
; OPT: define i1* @goo1() #0 {
; OPT:   ret i1* null
; OPT: }
; OPT: define i8* @foo8() #0 {
; OPT:   ret i8* inttoptr (i64 -1 to i8*)
; OPT: }
; OPT: define i1* @foo1() #0 {
; OPT:   ret i1* inttoptr (i64 -1 to i1*)
; OPT: }
; OPT: define i8* @hoo8() #0 {
; OPT:   ret i8* inttoptr (i64 -1 to i8*)
; OPT: }
; OPT: define i1* @hoo1() #0 {
; OPT:   ret i1* inttoptr (i64 -1 to i1*)
; OPT: }
; TO: define i8* @goo8() #0 {
; TO:   ret i8* null
; TO: }
; TO: define i1* @goo1() #0 {
; TO:   ret i1* null
; TO: }
; TO: define i8* @foo8() #0 {
; TO:   ret i8* inttoptr (i64 -1 to i8*)
; TO: }
; TO: define i1* @foo1() #0 {
; TO:   ret i1* inttoptr (i64 -1 to i1*)
; TO: }
; TO: define i8* @hoo8() #0 {
; TO:   ret i8* inttoptr (i64 -1 to i8*)
; TO: }
; TO: define i1* @hoo1() #0 {
; TO:   ret i1* inttoptr (i64 -1 to i1*)
; TO: }
; SCEV: Classifying expressions for: @goo8
; SCEV:   %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
; SCEV:   -->  (-1 + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @goo1
; SCEV:   %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
; SCEV:   -->  (-1 + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @foo8
; SCEV:   %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
; SCEV:   -->  (-2 + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @foo1
; SCEV:   %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
; SCEV:   -->  (-2 + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @hoo8
; SCEV:   -->  -1
; SCEV: Classifying expressions for: @hoo1
; SCEV:   -->  -1

define i8* @goo8() nounwind {
  %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
  ret i8* %t
}
define i1* @goo1() nounwind {
  %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
  ret i1* %t
}
define i8* @foo8() nounwind {
  %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
  ret i8* %t
}
define i1* @foo1() nounwind {
  %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
  ret i1* %t
}
define i8* @hoo8() nounwind {
  %t = bitcast i8* getelementptr (i8, i8* inttoptr (i32 0 to i8*), i32 -1) to i8*
  ret i8* %t
}
define i1* @hoo1() nounwind {
  %t = bitcast i1* getelementptr (i1, i1* inttoptr (i32 0 to i1*), i32 -1) to i1*
  ret i1* %t
}

; PLAIN: define i64 @fa() #0 {
; PLAIN:   %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2310) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fb() #0 {
; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fc() #0 {
; PLAIN:   %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fd() #0 {
; PLAIN:   %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 11) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fe() #0 {
; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }, { double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @ff() #0 {
; PLAIN:   %t = bitcast i64 1 to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fg() #0 {
; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fh() #0 {
; PLAIN:   %t = bitcast i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; PLAIN: define i64 @fi() #0 {
; PLAIN:   %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }, { i1, i1* }* null, i64 0, i32 1) to i64) to i64
; PLAIN:   ret i64 %t
; PLAIN: }
; OPT: define i64 @fa() #0 {
; OPT:   ret i64 18480
; OPT: }
; OPT: define i64 @fb() #0 {
; OPT:   ret i64 8
; OPT: }
; OPT: define i64 @fc() #0 {
; OPT:   ret i64 16
; OPT: }
; OPT: define i64 @fd() #0 {
; OPT:   ret i64 88
; OPT: }
; OPT: define i64 @fe() #0 {
; OPT:   ret i64 16
; OPT: }
; OPT: define i64 @ff() #0 {
; OPT:   ret i64 1
; OPT: }
; OPT: define i64 @fg() #0 {
; OPT:   ret i64 8
; OPT: }
; OPT: define i64 @fh() #0 {
; OPT:   ret i64 8
; OPT: }
; OPT: define i64 @fi() #0 {
; OPT:   ret i64 8
; OPT: }
; TO: define i64 @fa() #0 {
; TO:   ret i64 18480
; TO: }
; TO: define i64 @fb() #0 {
; TO:   ret i64 8
; TO: }
; TO: define i64 @fc() #0 {
; TO:   ret i64 16
; TO: }
; TO: define i64 @fd() #0 {
; TO:   ret i64 88
; TO: }
; TO: define i64 @fe() #0 {
; TO:   ret i64 16
; TO: }
; TO: define i64 @ff() #0 {
; TO:   ret i64 1
; TO: }
; TO: define i64 @fg() #0 {
; TO:   ret i64 8
; TO: }
; TO: define i64 @fh() #0 {
; TO:   ret i64 8
; TO: }
; TO: define i64 @fi() #0 {
; TO:   ret i64 8
; TO: }
; SCEV: Classifying expressions for: @fa
; SCEV:   %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2310) to i64
; SCEV:   -->  (2310 * sizeof(double))
; SCEV: Classifying expressions for: @fb
; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
; SCEV:   -->  alignof(double)
; SCEV: Classifying expressions for: @fc
; SCEV:   %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2) to i64
; SCEV:   -->  (2 * sizeof(double))
; SCEV: Classifying expressions for: @fd
; SCEV:   %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 11) to i64
; SCEV:   -->  (11 * sizeof(double))
; SCEV: Classifying expressions for: @fe
; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }, { double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; SCEV:   -->  offsetof({ double, float, double, double }, 2)
; SCEV: Classifying expressions for: @ff
; SCEV:   %t = bitcast i64 1 to i64
; SCEV:   -->  1
; SCEV: Classifying expressions for: @fg
; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
; SCEV:   -->  alignof(double)
; SCEV: Classifying expressions for: @fh
; SCEV:   %t = bitcast i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64) to i64
; SCEV:   -->  sizeof(i1*)
; SCEV: Classifying expressions for: @fi
; SCEV:   %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }, { i1, i1* }* null, i64 0, i32 1) to i64) to i64
; SCEV:   -->  alignof(i1*)

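; ScalarEvolution recognizes the null-based GEP idioms in the functions below
; and, as the SCEV lines above show, prints them symbolically as sizeof,
; alignof, and offsetof expressions.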
define i64 @fa() nounwind {
  %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}, {[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64
  ret i64 %t
}
define i64 @fb() nounwind {
  %t = bitcast i64 ptrtoint ([13 x double]* getelementptr ({i1, [13 x double]}, {i1, [13 x double]}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fc() nounwind {
  %t = bitcast i64 ptrtoint (double* getelementptr ({double, double, double, double}, {double, double, double, double}* null, i64 0, i32 2) to i64) to i64
  ret i64 %t
}
define i64 @fd() nounwind {
  %t = bitcast i64 ptrtoint (double* getelementptr ([13 x double], [13 x double]* null, i64 0, i32 11) to i64) to i64
  ret i64 %t
}
define i64 @fe() nounwind {
  %t = bitcast i64 ptrtoint (double* getelementptr ({double, float, double, double}, {double, float, double, double}* null, i64 0, i32 2) to i64) to i64
  ret i64 %t
}
define i64 @ff() nounwind {
  %t = bitcast i64 ptrtoint (<{ i16, i128 }>* getelementptr ({i1, <{ i16, i128 }>}, {i1, <{ i16, i128 }>}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fg() nounwind {
  %t = bitcast i64 ptrtoint ({double, double}* getelementptr ({i1, {double, double}}, {i1, {double, double}}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fh() nounwind {
  %t = bitcast i64 ptrtoint (double** getelementptr (double*, double** null, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fi() nounwind {
  %t = bitcast i64 ptrtoint (double** getelementptr ({i1, double*}, {i1, double*}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}

; PLAIN: define i64* @fM() #0 {
; PLAIN:   %t = bitcast i64* getelementptr (i64, i64* null, i32 1) to i64*
; PLAIN:   ret i64* %t
; PLAIN: }
; PLAIN: define i64* @fN() #0 {
; PLAIN:   %t = bitcast i64* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 0, i32 1) to i64*
; PLAIN:   ret i64* %t
; PLAIN: }
; PLAIN: define i64* @fO() #0 {
; PLAIN:   %t = bitcast i64* getelementptr ([2 x i64], [2 x i64]* null, i32 0, i32 1) to i64*
; PLAIN:   ret i64* %t
; PLAIN: }
; OPT: define i64* @fM() #0 {
; OPT:   ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; OPT: define i64* @fN() #0 {
; OPT:   ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; OPT: define i64* @fO() #0 {
; OPT:   ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; TO: define i64* @fM() #0 {
; TO:   ret i64* inttoptr (i64 8 to i64*)
; TO: }
; TO: define i64* @fN() #0 {
; TO:   ret i64* inttoptr (i64 8 to i64*)
; TO: }
; TO: define i64* @fO() #0 {
; TO:   ret i64* inttoptr (i64 8 to i64*)
; TO: }
; SCEV: Classifying expressions for: @fM
; SCEV:   %t = bitcast i64* getelementptr (i64, i64* null, i32 1) to i64*
; SCEV:   -->  8
; SCEV: Classifying expressions for: @fN
; SCEV:   %t = bitcast i64* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 0, i32 1) to i64*
; SCEV:   -->  8
; SCEV: Classifying expressions for: @fO
; SCEV:   %t = bitcast i64* getelementptr ([2 x i64], [2 x i64]* null, i32 0, i32 1) to i64*
; SCEV:   -->  8

define i64* @fM() nounwind {
  %t = bitcast i64* getelementptr (i64, i64* null, i32 1) to i64*
  ret i64* %t
}
define i64* @fN() nounwind {
  %t = bitcast i64* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 0, i32 1) to i64*
  ret i64* %t
}
define i64* @fO() nounwind {
  %t = bitcast i64* getelementptr ([2 x i64], [2 x i64]* null, i32 0, i32 1) to i64*
  ret i64* %t
}

; PLAIN: define i32* @fZ() #0 {
; PLAIN:   %t = bitcast i32* getelementptr inbounds (i32, i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
; PLAIN:   ret i32* %t
; PLAIN: }
; OPT: define i32* @fZ() #0 {
; OPT:   ret i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; OPT: }
; TO: define i32* @fZ() #0 {
; TO:   ret i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; TO: }
; SCEV: Classifying expressions for: @fZ
; SCEV:   %t = bitcast i32* getelementptr inbounds (i32, i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
; SCEV:   -->  (12 + @ext)

define i32* @fZ() nounwind {
  %t = bitcast i32* getelementptr inbounds (i32, i32* getelementptr inbounds ([3 x { i32, i32 }], [3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
  ret i32* %t
}

; PR15262 - Check GEP folding with casts between address spaces.

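; Note that only the same-address-space case below folds to an inbounds GEP of
; @p0; the cross-address-space result keeps the addrspacecast and drops the
; inbounds flag, as the OPT lines inside the functions show.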
@p0 = global [4 x i8] zeroinitializer, align 1
@p12 = addrspace(12) global [4 x i8] zeroinitializer, align 1

define i8* @different_addrspace() nounwind noinline {
; OPT: different_addrspace
  %p = getelementptr inbounds i8, i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*),
                                  i32 2
  ret i8* %p
; OPT: ret i8* getelementptr ([4 x i8], [4 x i8]* addrspacecast ([4 x i8] addrspace(12)* @p12 to [4 x i8]*), i64 0, i64 2)
}

define i8* @same_addrspace() nounwind noinline {
; OPT: same_addrspace
  %p = getelementptr inbounds i8, i8* bitcast ([4 x i8] * @p0 to i8*), i32 2
  ret i8* %p
; OPT: ret i8* getelementptr inbounds ([4 x i8], [4 x i8]* @p0, i64 0, i64 2)
}

@gv1 = internal global i32 1
@gv2 = internal global [1 x i32] [ i32 2 ]
@gv3 = internal global [1 x i32] [ i32 2 ]

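; @gv1, @gv2, and @gv3 are distinct globals, so the pointer equality tests in
; the two functions below fold to false, as the PLAIN lines check.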
; Handled by the target-independent constant folder
define i1 @gv_gep_vs_gv() {
  ret i1 icmp eq (i32* getelementptr inbounds ([1 x i32], [1 x i32]* @gv2, i32 0, i32 0), i32* @gv1)
}
; PLAIN: gv_gep_vs_gv
; PLAIN: ret i1 false

define i1 @gv_gep_vs_gv_gep() {
  ret i1 icmp eq (i32* getelementptr inbounds ([1 x i32], [1 x i32]* @gv2, i32 0, i32 0), i32* getelementptr inbounds ([1 x i32], [1 x i32]* @gv3, i32 0, i32 0))
}
; PLAIN: gv_gep_vs_gv_gep
; PLAIN: ret i1 false

; CHECK: attributes #0 = { nounwind }