; RUN: opt -basicaa -scoped-noalias -loop-vectorize -licm -force-vector-width=2 \
; RUN:     -force-vector-interleave=1 -S < %s | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; In order to vectorize the inner loop, it needs to be versioned with
; memchecks between {A} x {B, C} first:
;
;   for (i = 0; i < n; i++)
;     for (j = 0; j < m; j++)
;       A[j] += B[i] + C[j];
;
; Since in the versioned vector loop A and B can no longer alias, B[i] can be
; LICM'ed from the inner loop.

define void @f(i32* %a, i32* %b, i32* %c) {
entry:
  br label %outer

outer:
  %i.2 = phi i64 [ 0, %entry ], [ %i, %inner.end ]
  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %i.2
  br label %inner.ph

inner.ph:
; CHECK: vector.ph:
; CHECK: load i32, i32* %arrayidxB,
; CHECK: br label %vector.body
  br label %inner

inner:
  %j.2 = phi i64 [ 0, %inner.ph ], [ %j, %inner ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %j.2
  %loadA = load i32, i32* %arrayidxA, align 4

  %loadB = load i32, i32* %arrayidxB, align 4

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %j.2
  %loadC = load i32, i32* %arrayidxC, align 4

  %add = add nuw i32 %loadA, %loadB
  %add2 = add nuw i32 %add, %loadC

  store i32 %add2, i32* %arrayidxA, align 4

  %j = add nuw nsw i64 %j.2, 1
  %cond1 = icmp eq i64 %j, 20
  br i1 %cond1, label %inner.end, label %inner

inner.end:
  %i = add nuw nsw i64 %i.2, 1
  %cond2 = icmp eq i64 %i, 30
  br i1 %cond2, label %outer.end, label %outer

outer.end:
  ret void
}
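
; The CHECK lines above verify that the load through %arrayidxB lands in
; vector.ph, i.e. ahead of the vectorized inner loop. As a rough sketch of the
; versioned shape (assumed form for illustration, not the exact IR the passes
; emit), the transformation described in the header comment looks like:
;
;   if (A does not overlap {B, C}) {      // runtime memchecks
;     t = B[i];                           // hoisted out of the loop by LICM
;     for (j = 0; j < m; j++)             // vectorized with VF = 2
;       A[j] += t + C[j];
;   } else {
;     for (j = 0; j < m; j++)             // original scalar loop
;       A[j] += B[i] + C[j];
;   }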