; RUN: opt -basicaa -loop-distribute -enable-loop-distribute -verify-loop-info -verify-dom-info -S \
; RUN:   < %s | FileCheck %s

; RUN: opt -basicaa -loop-distribute -enable-loop-distribute -loop-vectorize -force-vector-width=4 \
; RUN:   -verify-loop-info -verify-dom-info -S < %s | \
; RUN:   FileCheck --check-prefix=VECTORIZE %s

; The memcheck version of basic.ll.  We should distribute and vectorize the
; second part of this loop with 5 memchecks (A+1 x {C, D, E} + C x {A, B}):
;
;   for (i = 0; i < n; i++) {
;     A[i + 1] = A[i] * B[i];
; -------------------------------
;     C[i] = D[i] * E[i];
;   }

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"

@B = common global i32* null, align 8
@A = common global i32* null, align 8
@C = common global i32* null, align 8
@D = common global i32* null, align 8
@E = common global i32* null, align 8

define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

; We have two compares for each array overlap check; with 5 checks that
; would be 10 compares.  Since the checks against A and A + 4 get merged
; into a single check, we end up with a total of 8 compares.
;
; CHECK: for.body.lver.check:
; CHECK: = icmp
; CHECK: = icmp

; CHECK: = icmp
; CHECK: = icmp

; CHECK: = icmp
; CHECK: = icmp

; CHECK: = icmp
; CHECK: = icmp

; CHECK-NOT: = icmp
; CHECK: br i1 %memcheck.conflict, label %for.body.ph.lver.orig, label %for.body.ph.ldist1

; The non-distributed loop that the memchecks fall back on.

; CHECK: for.body.ph.lver.orig:
; CHECK:   br label %for.body.lver.orig
; CHECK: for.body.lver.orig:
; CHECK:   br i1 %exitcond.lver.orig, label %for.end, label %for.body.lver.orig

; Verify the two distributed loops.

; CHECK: for.body.ph.ldist1:
; CHECK:   br label %for.body.ldist1
; CHECK: for.body.ldist1:
; CHECK:   %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
; CHECK:   br i1 %exitcond.ldist1, label %for.body.ph, label %for.body.ldist1

; CHECK: for.body.ph:
; CHECK:   br label %for.body
; CHECK: for.body:
; CHECK:   %mulC = mul i32 %loadD, %loadE
; CHECK: for.end:


; VECTORIZE: mul <4 x i32>

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
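
; A rough source-level sketch of the two loops we expect after distribution,
; assuming the memchecks find no conflict.  This mirrors the header comment
; and is not itself checked by FileCheck; n is 20 in the IR above.
;
;   /* for.body.ldist1: runs first; kept scalar because of the
;      distance-1 loop-carried dependence from A[i + 1] to A[i] */
;   for (i = 0; i < n; i++)
;     A[i + 1] = A[i] * B[i];
;
;   /* for.body: independent accesses, vectorizable at width 4 */
;   for (i = 0; i < n; i++)
;     C[i] = D[i] * E[i];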