; RUN: opt -loop-vectorize -mtriple=thumbv7s-apple-ios6.0.0 -S -enable-interleaved-mem-accesses=false < %s | FileCheck %s
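; Interleaved-access vectorization is disabled on the RUN line, so the
; strided loads in the loop below are costed as individual (expensive)
; strided/gather accesses rather than as an interleaved group.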

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"

@kernel = global [512 x float] zeroinitializer, align 4
@kernel2 = global [512 x float] zeroinitializer, align 4
@kernel3 = global [512 x float] zeroinitializer, align 4
@kernel4 = global [512 x float] zeroinitializer, align 4
@src_data = global [1536 x float] zeroinitializer, align 4
@r_ = global i8 0, align 4
@g_ = global i8 0, align 4
@b_ = global i8 0, align 4

; We don't want to vectorize most loops containing gathers because they are
; expensive. This function represents a point where vectorization starts to
; become beneficial.
; Make sure we are conservative and don't vectorize it.
; CHECK-NOT: <2 x float>
; CHECK-NOT: <4 x float>

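; For reference, a rough C equivalent of the kernel below (reconstructed from
; the IR; names and the exact expression shapes are an approximation, not the
; original source):
;
;   void test(unsigned long size, unsigned long offset) {
;     float r = 0.f, g = 0.f, b = 0.f;
;     for (unsigned long v = 0; v < size; ++v) {
;       float k = kernel[v] * kernel2[v] * kernel3[v] * kernel4[v];
;       r += src_data[3 * (v + offset)] * k;
;       g += src_data[3 * (v + offset) + 1] * k;
;       b += src_data[3 * (v + offset) + 2] * k;
;     }
;     r_ = (unsigned char)r;
;     g_ = (unsigned char)g;
;     b_ = (unsigned char)b;
;   }
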
define void @_Z4testmm(i32 %size, i32 %offset) {
entry:
  %cmp53 = icmp eq i32 %size, 0
  br i1 %cmp53, label %for.end, label %for.body.lr.ph

for.body.lr.ph:
  br label %for.body

for.body:
  %r.057 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add10, %for.body ]
  %g.056 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add20, %for.body ]
  %v.055 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
  %add = add i32 %v.055, %offset
  %mul = mul i32 %add, 3
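  ; %mul = 3 * (%v.055 + %offset), so the three @src_data loads below access
  ; elements %mul, %mul+1 and %mul+2: stride-3 accesses that would have to be
  ; vectorized as gathers.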
  %arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %mul
  %0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i32 0, i32 %v.055
  %1 = load float, float* %arrayidx2, align 4
  %mul3 = fmul fast float %0, %1
  %arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i32 0, i32 %v.055
  %2 = load float, float* %arrayidx4, align 4
  %mul5 = fmul fast float %mul3, %2
  %arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i32 0, i32 %v.055
  %3 = load float, float* %arrayidx6, align 4
  %mul7 = fmul fast float %mul5, %3
  %arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i32 0, i32 %v.055
  %4 = load float, float* %arrayidx8, align 4
  %mul9 = fmul fast float %mul7, %4
  %add10 = fadd fast float %r.057, %mul9
  %arrayidx.sum = add i32 %mul, 1
  %arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum
  %5 = load float, float* %arrayidx11, align 4
  %mul13 = fmul fast float %1, %5
  %mul15 = fmul fast float %2, %mul13
  %mul17 = fmul fast float %3, %mul15
  %mul19 = fmul fast float %4, %mul17
  %add20 = fadd fast float %g.056, %mul19
  %arrayidx.sum52 = add i32 %mul, 2
  %arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum52
  %6 = load float, float* %arrayidx21, align 4
  %mul23 = fmul fast float %1, %6
  %mul25 = fmul fast float %2, %mul23
  %mul27 = fmul fast float %3, %mul25
  %mul29 = fmul fast float %4, %mul27
  %add30 = fadd fast float %b.054, %mul29
  %inc = add i32 %v.055, 1
  %exitcond = icmp ne i32 %inc, %size
  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:
  %add30.lcssa = phi float [ %add30, %for.body ]
  %add20.lcssa = phi float [ %add20, %for.body ]
  %add10.lcssa = phi float [ %add10, %for.body ]
  %phitmp = fptoui float %add10.lcssa to i8
  %phitmp60 = fptoui float %add20.lcssa to i8
  %phitmp61 = fptoui float %add30.lcssa to i8
  br label %for.end

for.end:
  %r.0.lcssa = phi i8 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %g.0.lcssa = phi i8 [ %phitmp60, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %b.0.lcssa = phi i8 [ %phitmp61, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  store i8 %r.0.lcssa, i8* @r_, align 4
  store i8 %g.0.lcssa, i8* @g_, align 4
  store i8 %b.0.lcssa, i8* @b_, align 4
  ret void
}