; RUN: opt -basicaa -loop-accesses -analyze < %s | FileCheck %s
; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s

; In:
;
;   store_ptr = A;
;   load_ptr = &A[2];
;   for (i = 0; i < n; i++)
;     *store_ptr++ = *load_ptr++ * 10;  // A[i] = A[i+2] * 10
;
; Make sure we look through the PHIs to conclude that store_ptr and load_ptr
; both have A as their underlying object.  The dependence is safe for
; vectorization, requiring no memchecks.
;
; Otherwise we would try to prove independence with a memcheck that will
; always fail.
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"

; CHECK: Memory dependences are safe{{$}}

define void @f(i8* noalias %A, i64 %width) {
for.body.preheader:
  %A_ahead = getelementptr inbounds i8, i8* %A, i64 2
  br label %for.body

for.body:
  %i = phi i64 [ %i.1, %for.body ], [ 0, %for.body.preheader ]
  %load_ptr = phi i8* [ %load_ptr.1, %for.body ], [ %A_ahead, %for.body.preheader ]
  %store_ptr = phi i8* [ %store_ptr.1, %for.body ], [ %A, %for.body.preheader ]

  %loadA = load i8, i8* %load_ptr, align 1

  %mul = mul i8 %loadA, 10

  store i8 %mul, i8* %store_ptr, align 1

  %load_ptr.1 = getelementptr inbounds i8, i8* %load_ptr, i64 1
  %store_ptr.1 = getelementptr inbounds i8, i8* %store_ptr, i64 1
  %i.1 = add nuw i64 %i, 1

  %exitcond = icmp eq i64 %i.1, %width
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}