; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-optree-normalize-phi=true -polly-optree -analyze < %s | FileCheck %s -match-full-lines

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define internal fastcc void @kernel_atax([2100 x double]* nocapture readonly %A, double* nocapture readonly %x, double* nocapture %y, double* nocapture %tmp) unnamed_addr #0 {
entry:
  br label %entry.split

entry.split:                                      ; preds = %entry
  %y15 = bitcast double* %y to i8*
  call void @llvm.memset.p0i8.i64(i8* %y15, i8 0, i64 16800, i32 8, i1 false)
  br label %for.body3

for.body3:                                        ; preds = %for.inc40, %entry.split
  %indvars.iv8 = phi i64 [ 0, %entry.split ], [ %indvars.iv.next9, %for.inc40 ]
  %arrayidx5 = getelementptr inbounds double, double* %tmp, i64 %indvars.iv8
  store double 0.000000e+00, double* %arrayidx5, align 8, !tbaa !6
  br label %for.body8

for.body8:                                        ; preds = %for.body8, %for.body3
  %0 = phi double [ 0.000000e+00, %for.body3 ], [ %add, %for.body8 ]
  %indvars.iv = phi i64 [ 0, %for.body3 ], [ %indvars.iv.next, %for.body8 ]
  %arrayidx14 = getelementptr inbounds [2100 x double], [2100 x double]* %A, i64 %indvars.iv8, i64 %indvars.iv
  %1 = load double, double* %arrayidx14, align 8, !tbaa !6
  %arrayidx16 = getelementptr inbounds double, double* %x, i64 %indvars.iv
  %2 = load double, double* %arrayidx16, align 8, !tbaa !6
  %mul = fmul double %1, %2
  %add = fadd double %0, %mul
  store double %add, double* %arrayidx5, align 8, !tbaa !6
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 2
  br i1 %exitcond, label %for.end21, label %for.body8

for.end21:                                        ; preds = %for.body8
  br label %for.body24

for.body24:                                       ; preds = %for.body24.for.body24_crit_edge, %for.end21
  %3 = phi double [ %add, %for.end21 ], [ %.pre, %for.body24.for.body24_crit_edge ]
  %indvars.iv5 = phi i64 [ 0, %for.end21 ], [ %indvars.iv.next6, %for.body24.for.body24_crit_edge ]
  %arrayidx26 = getelementptr inbounds double, double* %y, i64 %indvars.iv5
  %4 = load double, double* %arrayidx26, align 8, !tbaa !6
  %arrayidx30 = getelementptr inbounds [2100 x double], [2100 x double]* %A, i64 %indvars.iv8, i64 %indvars.iv5
  %5 = load double, double* %arrayidx30, align 8, !tbaa !6
  %mul33 = fmul double %5, %3
  %add34 = fadd double %4, %mul33
  store double %add34, double* %arrayidx26, align 8, !tbaa !6
  %indvars.iv.next6 = add nuw nsw i64 %indvars.iv5, 1
  %exitcond7 = icmp eq i64 %indvars.iv.next6, 2
  br i1 %exitcond7, label %for.inc40, label %for.body24.for.body24_crit_edge

for.body24.for.body24_crit_edge:                  ; preds = %for.body24
  %.pre = load double, double* %arrayidx5, align 8, !tbaa !6
  br label %for.body24

for.inc40:                                        ; preds = %for.body24
  %indvars.iv.next9 = add nuw nsw i64 %indvars.iv8, 1
  %exitcond10 = icmp eq i64 %indvars.iv.next9, 2
  br i1 %exitcond10, label %for.end42, label %for.body3

for.end42:                                        ; preds = %for.inc40
  ret void
}

; Function Attrs: argmemonly nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1) #1

attributes #0 = { noinline norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }

!llvm.module.flags = !{!0}
!llvm.ident = !{!1}

!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{!"clang version 6.0.0 (trunk 312565) (llvm/trunk 312564)"}
!2 = !{!3, !3, i64 0}
!3 = !{!"any pointer", !4, i64 0}
!4 = !{!"omnipotent char", !5, i64 0}
!5 = !{!"Simple C/C++ TBAA"}
!6 = !{!7, !7, i64 0}
!7 = !{!"double", !4, i64 0}


; CHECK: Statistics {
; CHECK:     Operand trees forwarded: 2
; CHECK:     Statements with forwarded operand trees: 2
; CHECK: }

; CHECK-NEXT: After statements {
; CHECK-NEXT:     Stmt_for_body3
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body3[i0] -> MemRef_tmp[i0] };
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_body3[i0] -> MemRef1__phi[] };
; CHECK-NEXT:             Instructions {
; CHECK-NEXT:                   store double 0.000000e+00, double* %arrayidx5, align 8, !tbaa !2
; CHECK-NEXT:             }
; CHECK-NEXT:     Stmt_for_body8
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_body8[i0, i1] -> MemRef1__phi[] };
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_body8[i0, i1] -> MemRef1__phi[] };
; CHECK-NEXT:                new: { Stmt_for_body8[i0, i1] -> MemRef_tmp[i0] };
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body8[i0, i1] -> MemRef_A[i0, i1] };
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body8[i0, i1] -> MemRef_x[i1] };
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body8[i0, i1] -> MemRef_tmp[i0] };
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_body8[i0, i1] -> MemRef_add[] };
; CHECK-NEXT:             Instructions {
; CHECK-NEXT:                   %0 = phi double [ 0.000000e+00, %for.body3 ], [ %add, %for.body8 ]
; CHECK-NEXT:                   %1 = load double, double* %arrayidx14, align 8, !tbaa !2
; CHECK-NEXT:                   %2 = load double, double* %arrayidx16, align 8, !tbaa !2
; CHECK-NEXT:                   %mul = fmul double %1, %2
; CHECK-NEXT:                   %add = fadd double %0, %mul
; CHECK-NEXT:                   store double %add, double* %arrayidx5, align 8, !tbaa !2
; CHECK-NEXT:                   %exitcond = icmp eq i64 %indvars.iv.next, 2
; CHECK-NEXT:             }
; CHECK-NEXT:     Stmt_for_end21
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_end21[i0] -> MemRef_add[] };
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_end21[i0] -> MemRef5__phi[] };
; CHECK-NEXT:             Instructions {
; CHECK-NEXT:             }
; CHECK-NEXT:     Stmt_for_body24
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_body24[i0, i1] -> MemRef5__phi[] };
; CHECK-NEXT:                new: { Stmt_for_body24[i0, i1] -> MemRef_tmp[i0] };
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body24[i0, i1] -> MemRef_y[i1] };
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body24[i0, i1] -> MemRef_A[i0, i1] };
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body24[i0, i1] -> MemRef_y[i1] };
; CHECK-NEXT:             Instructions {
; CHECK-NEXT:                   %3 = phi double [ %add, %for.end21 ], [ %.pre, %for.body24.for.body24_crit_edge ]
; CHECK-NEXT:                   %4 = load double, double* %arrayidx26, align 8, !tbaa !2
; CHECK-NEXT:                   %5 = load double, double* %arrayidx30, align 8, !tbaa !2
; CHECK-NEXT:                   %mul33 = fmul double %5, %3
; CHECK-NEXT:                   %add34 = fadd double %4, %mul33
; CHECK-NEXT:                   store double %add34, double* %arrayidx26, align 8, !tbaa !2
; CHECK-NEXT:                   %exitcond7 = icmp eq i64 %indvars.iv.next6, 2
; CHECK-NEXT:             }
; CHECK-NEXT:     Stmt_for_body24_for_body24_crit_edge
; CHECK-NEXT:             MustWriteAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:                 { Stmt_for_body24_for_body24_crit_edge[i0, i1] -> MemRef5__phi[] };
; CHECK-NEXT:             ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:                 { Stmt_for_body24_for_body24_crit_edge[i0, i1] -> MemRef_tmp[i0] };
; CHECK-NEXT:             Instructions {
; CHECK-NEXT:                   %.pre = load double, double* %arrayidx5, align 8, !tbaa !2
; CHECK-NEXT:             }
; CHECK-NEXT: }
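
; What the CHECK lines verify (a summary inferred from the output above): the
; scalar reads of the PHI values MemRef1__phi (%0 in for.body8) and MemRef5__phi
; (%3 in for.body24) each gain a "new:" access relation that instead reads the
; array element MemRef_tmp[i0], i.e. %tmp[%indvars.iv8] written through
; %arrayidx5. These two forwarded PHI reads account for the statistics
; "Operand trees forwarded: 2" and "Statements with forwarded operand trees: 2".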