; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"

@A = common global [2000 x double] zeroinitializer, align 16
@B = common global [2000 x double] zeroinitializer, align 16
@C = common global [2000 x float] zeroinitializer, align 16
@D = common global [2000 x float] zeroinitializer, align 16

; Currently SCEV isn't smart enough to figure out that the accesses
; A[3*i], A[3*i+1] and A[3*i+2] are consecutive, but in the future that
; will hopefully be fixed. For now, check that this isn't vectorized.
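;
; A C source sketch that plausibly corresponds to the IR below (an assumption
; for illustration; the arrays mirror @A/@B above, and actual clang output
; may differ):
;
;  double A[2000], B[2000];
;  void foo_3double(int u) {
;    int i = 3 * u;
;    A[i]     = A[i]     + B[i];
;    A[i + 1] = A[i + 1] + B[i + 1];
;    A[i + 2] = A[i + 2] + B[i + 2];
;  }
;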
; Function Attrs: nounwind ssp uwtable
define void @foo_3double(i32 %u) #0 {
; CHECK-LABEL: @foo_3double(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 3
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[ARRAYIDX4]], align 8
; CHECK-NEXT:    [[ADD5:%.*]] = fadd double [[TMP0]], [[TMP1]]
; CHECK-NEXT:    store double [[ADD5]], double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[MUL]], 1
; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[ARRAYIDX13]], align 8
; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP3:%.*]] = load double, double* [[ARRAYIDX17]], align 8
; CHECK-NEXT:    [[ADD18:%.*]] = fadd double [[TMP2]], [[TMP3]]
; CHECK-NEXT:    store double [[ADD18]], double* [[ARRAYIDX13]], align 8
; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[MUL]], 2
; CHECK-NEXT:    [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64
; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM25]]
; CHECK-NEXT:    [[TMP4:%.*]] = load double, double* [[ARRAYIDX26]], align 8
; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM25]]
; CHECK-NEXT:    [[TMP5:%.*]] = load double, double* [[ARRAYIDX30]], align 8
; CHECK-NEXT:    [[ADD31:%.*]] = fadd double [[TMP4]], [[TMP5]]
; CHECK-NEXT:    store double [[ADD31]], double* [[ARRAYIDX26]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul nsw i32 %u, 3
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add11 = add nsw i32 %mul, 1
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  %add24 = add nsw i32 %mul, 2
  %idxprom25 = sext i32 %add24 to i64
  %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
  %4 = load double, double* %arrayidx26, align 8
  %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
  %5 = load double, double* %arrayidx30, align 8
  %add31 = fadd double %4, %5
  store double %add31, double* %arrayidx26, align 8
  ret void
}

; SCEV should be able to tell that accesses A[C2*i + C1], A[C2*i + C1 + 1], ...
; are consecutive, provided C2 is a power of 2 and 0 <= C1 < C2.
; Thus, the following code should be vectorized.
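;
; A C source sketch for this case (assumed, reusing the arrays from the
; sketch above); the factor 2 is a power of 2, so the two accesses form one
; consecutive <2 x double> pair:
;
;  void foo_2double(int u) {
;    int i = 2 * u;
;    A[i]     = A[i]     + B[i];
;    A[i + 1] = A[i + 1] + B[i + 1];
;  }
;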
; Function Attrs: nounwind ssp uwtable
define void @foo_2double(i32 %u) #0 {
; CHECK-LABEL: @foo_2double(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[MUL]], 1
; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul nsw i32 %u, 2
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add11 = add nsw i32 %mul, 1
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  ret void
}

; Similar to the previous test, but with a different data type.
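;
; A C source sketch for the float version (assumed; the arrays mirror @C/@D
; above), giving four consecutive accesses and a <4 x float> opportunity:
;
;  float C[2000], D[2000];
;  void foo_4float(int u) {
;    int i = 4 * u;
;    C[i]     = C[i]     + D[i];
;    C[i + 1] = C[i + 1] + D[i + 1];
;    C[i + 2] = C[i + 2] + D[i + 2];
;    C[i + 3] = C[i + 3] + D[i + 3];
;  }
;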
; Function Attrs: nounwind ssp uwtable
define void @foo_4float(i32 %u) #0 {
; CHECK-LABEL: @foo_4float(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 4
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[MUL]], 1
; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[MUL]], 2
; CHECK-NEXT:    [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64
; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM25]]
; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM25]]
; CHECK-NEXT:    [[ADD37:%.*]] = add nsw i32 [[MUL]], 3
; CHECK-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[ADD37]] to i64
; CHECK-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM38]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[ARRAYIDX43:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM38]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[ARRAYIDX4]] to <4 x float>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP4]], <4 x float>* [[TMP5]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul nsw i32 %u, 4
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
  %0 = load float, float* %arrayidx, align 4
  %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
  %1 = load float, float* %arrayidx4, align 4
  %add5 = fadd float %0, %1
  store float %add5, float* %arrayidx, align 4
  %add11 = add nsw i32 %mul, 1
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
  %2 = load float, float* %arrayidx13, align 4
  %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
  %3 = load float, float* %arrayidx17, align 4
  %add18 = fadd float %2, %3
  store float %add18, float* %arrayidx13, align 4
  %add24 = add nsw i32 %mul, 2
  %idxprom25 = sext i32 %add24 to i64
  %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
  %4 = load float, float* %arrayidx26, align 4
  %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
  %5 = load float, float* %arrayidx30, align 4
  %add31 = fadd float %4, %5
  store float %add31, float* %arrayidx26, align 4
  %add37 = add nsw i32 %mul, 3
  %idxprom38 = sext i32 %add37 to i64
  %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
  %6 = load float, float* %arrayidx39, align 4
  %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
  %7 = load float, float* %arrayidx43, align 4
  %add44 = fadd float %6, %7
  store float %add44, float* %arrayidx39, align 4
  ret void
}

; Similar to the previous tests, but now we are dealing with AddRec SCEV.
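;
; A C source sketch for the loop version (assumed): the index is now an
; induction variable, so the access SCEVs become AddRecs like {0,+,2}<%for.body>:
;
;  int foo_loop(double *A, int n) {
;    double sum = 0.0;
;    for (int i = 0; i < n; i++)
;      sum += 7.0 * A[2 * i] + 7.0 * A[2 * i + 1];
;    return sum;
;  }
;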
; Function Attrs: nounwind ssp uwtable
define i32 @foo_loop(double* %A, i32 %n) #0 {
; CHECK-LABEL: @foo_loop(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[SUM:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store double* [[A:%.*]], double** [[A_ADDR]], align 8
; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
; CHECK-NEXT:    store double 0.000000e+00, double* [[SUM]], align 8
; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[MUL]], 1
; CHECK-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[ADD]] to i64
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM3]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
; CHECK-NEXT:    store double [[ADD7]], double* [[SUM]], align 8
; CHECK-NEXT:    [[INC]] = add nsw i32 [[TMP0]], 1
; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
entry:
  %A.addr = alloca double*, align 8
  %n.addr = alloca i32, align 4
  %sum = alloca double, align 8
  %i = alloca i32, align 4
  store double* %A, double** %A.addr, align 8
  store i32 %n, i32* %n.addr, align 4
  store double 0.000000e+00, double* %sum, align 8
  store i32 0, i32* %i, align 4
  %cmp1 = icmp slt i32 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
  %mul = mul nsw i32 %0, 2
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
  %2 = load double, double* %arrayidx, align 8
  %mul1 = fmul double 7.000000e+00, %2
  %add = add nsw i32 %mul, 1
  %idxprom3 = sext i32 %add to i64
  %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
  %3 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double 7.000000e+00, %3
  %add6 = fadd double %mul1, %mul5
  %add7 = fadd double %1, %add6
  store double %add7, double* %sum, align 8
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %i, align 4
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi double [ %add7, %for.body ]
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
  %conv = fptosi double %.lcssa to i32
  ret i32 %conv
}

; Similar to foo_2double, but with a non-power-of-2 factor and potential
; wrapping (either both indices wrap or neither does).
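;
; A C source sketch for this case (assumed). Note that the IR below uses
; plain `mul`/`add` without nsw flags to model the potential wrapping, so
; direct clang output from this signed-arithmetic source would differ:
;
;  void foo_2double_non_power_of_2(int u) {
;    int i = 6 * u + 6;
;    A[i]     = A[i]     + B[i];
;    A[i + 1] = A[i + 1] + B[i + 1];
;  }
;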
; Function Attrs: nounwind ssp uwtable
define void @foo_2double_non_power_of_2(i32 %u) #0 {
; CHECK-LABEL: @foo_2double_non_power_of_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[U]], 6
; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[MUL]], 6
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD6]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD7:%.*]] = add i32 [[MUL]], 7
; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD7]] to i64
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul i32 %u, 6
  %add6 = add i32 %mul, 6
  %idxprom = sext i32 %add6 to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add7 = add i32 %mul, 7
  %idxprom12 = sext i32 %add7 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  ret void
}

; Similar to foo_2double_non_power_of_2 but with zext's instead of sext's
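;
; A C source sketch for the zext variant (assumed): an unsigned index makes
; the i32-to-i64 promotion a zext rather than a sext:
;
;  void foo_2double_non_power_of_2_zext(int u) {
;    unsigned i = 6u * (unsigned)u + 6u;
;    A[i]     = A[i]     + B[i];
;    A[i + 1] = A[i + 1] + B[i + 1];
;  }
;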
; Function Attrs: nounwind ssp uwtable
define void @foo_2double_non_power_of_2_zext(i32 %u) #0 {
; CHECK-LABEL: @foo_2double_non_power_of_2_zext(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[U]], 6
; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[MUL]], 6
; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[ADD6]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD7:%.*]] = add i32 [[MUL]], 7
; CHECK-NEXT:    [[IDXPROM12:%.*]] = zext i32 [[ADD7]] to i64
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul i32 %u, 6
  %add6 = add i32 %mul, 6
  %idxprom = zext i32 %add6 to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add7 = add i32 %mul, 7
  %idxprom12 = zext i32 %add7 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  ret void
}

; Similar to foo_2double_non_power_of_2, but now we are dealing with AddRec SCEV.
; Alternatively, this is like foo_loop, but with a non-power-of-2 factor and
; potential wrapping (either both indices wrap or neither does).
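;
; A C source sketch for this loop (assumed; as above, the IR keeps the
; multiply and adds free of nsw flags so both indices may wrap together):
;
;  int foo_loop_non_power_of_2(double *A, int n) {
;    double sum = 0.0;
;    for (int i = 0; i < n; i++)
;      sum += 7.0 * A[12 * i + 5] + 7.0 * A[12 * i + 6];
;    return sum;
;  }
;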
; Function Attrs: nounwind ssp uwtable
define i32 @foo_loop_non_power_of_2(double* %A, i32 %n) #0 {
; CHECK-LABEL: @foo_loop_non_power_of_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[SUM:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store double* [[A:%.*]], double** [[A_ADDR]], align 8
; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
; CHECK-NEXT:    store double 0.000000e+00, double* [[SUM]], align 8
; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0]], 12
; CHECK-NEXT:    [[ADD_5:%.*]] = add i32 [[MUL]], 5
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD_5]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD_6:%.*]] = add i32 [[MUL]], 6
; CHECK-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[ADD_6]] to i64
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM3]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
; CHECK-NEXT:    store double [[ADD7]], double* [[SUM]], align 8
; CHECK-NEXT:    [[INC]] = add i32 [[TMP0]], 1
; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
entry:
  %A.addr = alloca double*, align 8
  %n.addr = alloca i32, align 4
  %sum = alloca double, align 8
  %i = alloca i32, align 4
  store double* %A, double** %A.addr, align 8
  store i32 %n, i32* %n.addr, align 4
  store double 0.000000e+00, double* %sum, align 8
  store i32 0, i32* %i, align 4
  %cmp1 = icmp slt i32 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
  %mul = mul i32 %0, 12
  %add.5 = add i32 %mul, 5
  %idxprom = sext i32 %add.5 to i64
  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
  %2 = load double, double* %arrayidx, align 8
  %mul1 = fmul double 7.000000e+00, %2
  %add.6 = add i32 %mul, 6
  %idxprom3 = sext i32 %add.6 to i64
  %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
  %3 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double 7.000000e+00, %3
  %add6 = fadd double %mul1, %mul5
  %add7 = fadd double %1, %add6
  store double %add7, double* %sum, align 8
  %inc = add i32 %0, 1
  store i32 %inc, i32* %i, align 4
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi double [ %add7, %for.body ]
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
  %conv = fptosi double %.lcssa to i32
  ret i32 %conv
}

; This is generated by `clang -std=c11 -Wpedantic -Wall -O3 main.c -S -o - -emit-llvm`
; with !{!"clang version 7.0.0 (trunk 337339) (llvm/trunk 337344)"}, after stripping
; off the !tbaa metadata nodes to fit the rest of the test file, where `cat main.c` is:
;
;  double bar(double *a, unsigned n) {
;    double x = 0.0;
;    double y = 0.0;
;    for (unsigned i = 0; i < n; i += 2) {
;      x += a[i];
;      y += a[i + 1];
;    }
;    return x * y;
;  }
;
; The resulting IR is similar to @foo_loop, but with zext's instead of sext's.
;
; Make sure we are able to vectorize this from now on:
;
define double @bar(double* nocapture readonly %a, i32 %n) local_unnamed_addr #0 {
; CHECK-LABEL: @bar(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP15:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x double> [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP0]], i32 1
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret double [[MUL]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_018:%.*]] = phi i32 [ [[ADD5:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = phi <2 x double> [ [[TMP6]], [[FOR_BODY]] ], [ zeroinitializer, [[ENTRY]] ]
; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I_018]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[ADD1:%.*]] = or i32 [[I_018]], 1
; CHECK-NEXT:    [[IDXPROM2:%.*]] = zext i32 [[ADD1]] to i64
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM2]]
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8
; CHECK-NEXT:    [[TMP6]] = fadd <2 x double> [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[ADD5]] = add i32 [[I_018]], 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD5]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
;
entry:
  %cmp15 = icmp eq i32 %n, 0
  br i1 %cmp15, label %for.cond.cleanup, label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  %x.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %y.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add4, %for.body ]
  %mul = fmul double %x.0.lcssa, %y.0.lcssa
  ret double %mul

for.body:                                         ; preds = %entry, %for.body
  %i.018 = phi i32 [ %add5, %for.body ], [ 0, %entry ]
  %y.017 = phi double [ %add4, %for.body ], [ 0.000000e+00, %entry ]
  %x.016 = phi double [ %add, %for.body ], [ 0.000000e+00, %entry ]
  %idxprom = zext i32 %i.018 to i64
  %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %add = fadd double %x.016, %0
  %add1 = or i32 %i.018, 1
  %idxprom2 = zext i32 %add1 to i64
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 %idxprom2
  %1 = load double, double* %arrayidx3, align 8
  %add4 = fadd double %y.017, %1
  %add5 = add i32 %i.018, 2
  %cmp = icmp ult i32 %add5, %n
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; Globals/constant expressions are not normal constants.
; They should not be treated as the usual vectorization candidates.

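; A C-level sketch of the two tests below (assumed): the stored values are
; addresses of globals, i.e. constant expressions rather than plain constants:
;
;  extern int g1, g2;
;  void PR33958(int **p) {
;    p[0] = &g1;
;    p[1] = &g2;
;  }
;  void store_constant_expression(long *p) {
;    p[0] = (long)&g1;
;    p[1] = (long)&g2;
;  }
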
@g1 = external global i32, align 4
@g2 = external global i32, align 4

define void @PR33958(i32** nocapture %p) {
; CHECK-LABEL: @PR33958(
; CHECK-NEXT:    store i32* @g1, i32** [[P:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32*, i32** [[P]], i64 1
; CHECK-NEXT:    store i32* @g2, i32** [[ARRAYIDX1]], align 8
; CHECK-NEXT:    ret void
;
  store i32* @g1, i32** %p, align 8
  %arrayidx1 = getelementptr inbounds i32*, i32** %p, i64 1
  store i32* @g2, i32** %arrayidx1, align 8
  ret void
}

define void @store_constant_expression(i64* %p) {
; CHECK-LABEL: @store_constant_expression(
; CHECK-NEXT:    store i64 ptrtoint (i32* @g1 to i64), i64* [[P:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1
; CHECK-NEXT:    store i64 ptrtoint (i32* @g2 to i64), i64* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    ret void
;
  store i64 ptrtoint (i32* @g1 to i64), i64* %p, align 8
  %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
  store i64 ptrtoint (i32* @g2 to i64), i64* %arrayidx1, align 8
  ret void
}

attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.5.0 "}