; RUN: opt < %s -S -loop-unroll -mcpu=nehalem | FileCheck %s
; RUN: opt < %s -S -loop-unroll -mcpu=core -unroll-runtime=0 | FileCheck -check-prefix=CHECK-NOUNRL %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define void @foo(i32* noalias nocapture readnone %ip, double %alpha, double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 {
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds double, double* %b, i64 %index
  %1 = bitcast double* %0 to <2 x double>*
  %wide.load = load <2 x double>, <2 x double>* %1, align 8
  %.sum9 = or i64 %index, 2
  %2 = getelementptr double, double* %b, i64 %.sum9
  %3 = bitcast double* %2 to <2 x double>*
  %wide.load8 = load <2 x double>, <2 x double>* %3, align 8
  %4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
  %5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
  %6 = getelementptr inbounds double, double* %a, i64 %index
  %7 = bitcast double* %6 to <2 x double>*
  store <2 x double> %4, <2 x double>* %7, align 8
  %.sum10 = or i64 %index, 2
  %8 = getelementptr double, double* %a, i64 %.sum10
  %9 = bitcast double* %8 to <2 x double>*
  store <2 x double> %5, <2 x double>* %9, align 8
  %index.next = add i64 %index, 4
  %10 = icmp eq i64 %index.next, 1600
  br i1 %10, label %for.end, label %vector.body

; FIXME: We should probably unroll this loop by a factor of 2, but the cost
; model needs to be fixed to account for instructions likely to be folded
; as part of an addressing mode.
; CHECK-LABEL: @foo
; CHECK-NOUNRL-LABEL: @foo

for.end:                                          ; preds = %vector.body
  ret void
}

define void @bar(i32* noalias nocapture readnone %ip, double %alpha, double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 {
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %v0 = getelementptr inbounds double, double* %b, i64 %index
  %v1 = bitcast double* %v0 to <2 x double>*
  %wide.load = load <2 x double>, <2 x double>* %v1, align 8
  %v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
  %v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
  %v6 = getelementptr inbounds double, double* %a, i64 %index
  %v7 = bitcast double* %v6 to <2 x double>*
  store <2 x double> %v5, <2 x double>* %v7, align 8
  %index.next = add i64 %index, 2
  %v10 = icmp eq i64 %index.next, 1600
  br i1 %v10, label %for.end, label %vector.body

; FIXME: We should probably unroll this loop by a factor of 2, but the cost
; model first needs to be fixed to account for instructions likely to be folded
; as part of an addressing mode.

; CHECK-LABEL: @bar
; CHECK: fadd
; CHECK-NEXT: fmul
; CHECK: fadd
; CHECK-NEXT: fmul

; CHECK-NOUNRL-LABEL: @bar
; CHECK-NOUNRL: fadd
; CHECK-NOUNRL-NEXT: fmul
; CHECK-NOUNRL-NOT: fadd

for.end:                                          ; preds = %vector.body
  ret void
}

define zeroext i16 @test1(i16* nocapture readonly %arr, i32 %n) #0 {
entry:
  %cmp25 = icmp eq i32 %n, 0
  br i1 %cmp25, label %for.end, label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, i16* %arr, i64 %indvars.iv
  %0 = load i16, i16* %arrayidx, align 2
  %mul = shl i16 %0, 1
  %add = add i16 %mul, %reduction.026
  %sext = mul i64 %indvars.iv, 12884901888
  %idxprom3 = ashr exact i64 %sext, 32
  %arrayidx4 = getelementptr inbounds i16, i16* %arr, i64 %idxprom3
  %1 = load i16, i16* %arrayidx4, align 2
  %mul2 = shl i16 %1, 1
  %add7 = add i16 %add, %mul2
  %sext28 = mul i64 %indvars.iv, 21474836480
  %idxprom10 = ashr exact i64 %sext28, 32
  %arrayidx11 = getelementptr inbounds i16, i16* %arr, i64 %idxprom10
  %2 = load i16, i16* %arrayidx11, align 2
  %mul3 = shl i16 %2, 1
  %add14 = add i16 %add7, %mul3
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %reduction.0.lcssa = phi i16 [ 0, %entry ], [ %add14, %for.body ]
  ret i16 %reduction.0.lcssa

; This loop is too large to be partially unrolled (size=16)

; CHECK-LABEL: @test1
; CHECK: br
; CHECK: br
; CHECK: br
; CHECK: br
; CHECK-NOT: br

; CHECK-NOUNRL-LABEL: @test1
; CHECK-NOUNRL: br
; CHECK-NOUNRL: br
; CHECK-NOUNRL: br
; CHECK-NOUNRL: br
; CHECK-NOUNRL-NOT: br
}

attributes #0 = { nounwind uwtable }