; RUN: opt -mtriple=thumbv8.1m.main-arm-eabihf -mattr=+mve.fp -loop-vectorize -tail-predication=enabled -S < %s | \
; RUN:  FileCheck %s

; Check that the loop hint 'predicate.enable' can overrule the TTI hook. For
; this test case, the TTI hook rejects tail-predication:
;
;   ARMHWLoops: Trip count does not fit into 32bits
;   preferPredicateOverEpilogue: hardware-loop is not profitable.
;
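; For reference, a rough C equivalent of this loop is sketched below (an
; assumption for illustration, not necessarily the exact source this IR was
; generated from). The 64-bit induction variable presumably gives the loop an
; i64 trip count, which is what ARMHWLoops objects to above:
;
;   void tail_folding(int *restrict A, const int *restrict B, const int *restrict C) {
;     for (long long i = 0; i < 430; i++)   /* i64 induction variable */
;       A[i] = B[i] + C[i];
;   }
;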
define dso_local void @tail_folding(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) {
; CHECK-LABEL: tail_folding(
; CHECK:       vector.body:
; CHECK-NOT:   call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
; CHECK-NOT:   call void @llvm.masked.store.v4i32.p0v4i32(
; CHECK:       br i1 %{{.*}}, label %{{.*}}, label %vector.body
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %0
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 430
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; The same test case, but now with predicate.enable = true, should get
; tail-folded.
;
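; A rough C equivalent of this second loop (again a sketch, not the exact
; origin of this IR) would carry the clang loop pragma that lowers to the
; llvm.loop.vectorize.predicate.enable and llvm.loop.vectorize.enable metadata
; used at the end of this file:
;
;   void predicate_loop_hint(int *restrict A, const int *restrict B, const int *restrict C) {
;     #pragma clang loop vectorize(enable) vectorize_predicate(enable)
;     for (long long i = 0; i < 430; i++)
;       A[i] = B[i] + C[i];
;   }
;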
define dso_local void @predicate_loop_hint(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) {
; CHECK-LABEL: predicate_loop_hint(
; CHECK:       vector.body:
; CHECK:         %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK:         %[[ELEM0:.*]] = add i64 %index, 0
; CHECK:         %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %[[ELEM0]], i64 430)
; CHECK:         %[[WML1:.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32({{.*}}<4 x i1> %active.lane.mask
; CHECK:         %[[WML2:.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32({{.*}}<4 x i1> %active.lane.mask
; CHECK:         %[[ADD:.*]] = add nsw <4 x i32> %[[WML2]], %[[WML1]]
; CHECK:         call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %[[ADD]], {{.*}}<4 x i1> %active.lane.mask
; CHECK:         %index.next = add i64 %index, 4
; CHECK:         br i1 %{{.*}}, label %{{.*}}, label %vector.body
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %0
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 430
  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !6
}

; CHECK:      !0 = distinct !{!0, !1}
; CHECK-NEXT: !1 = !{!"llvm.loop.isvectorized", i32 1}
; CHECK-NEXT: !2 = distinct !{!2, !3, !1}
; CHECK-NEXT: !3 = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK-NEXT: !4 = distinct !{!4, !1}
; CHECK-NEXT: !5 = distinct !{!5, !3, !1}

!6 = distinct !{!6, !7, !8}
!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!8 = !{!"llvm.loop.vectorize.enable", i1 true}
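; !6 is the loop metadata attached via !llvm.loop !6 on the second loop's
; backedge: !7 requests predicated (tail-folded) vectorization and !8 enables
; vectorization. These input hints are distinct from the compiler-generated
; !0-!5 output metadata checked above.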