; Test that LICM correctly detects conflicting accesses to memory in deeply
; nested subloops. The legacy PM handles this through a retained map of alias
; information for inner loops; the new PM recomputes that information for each
; loop.
;
; RUN: opt -S -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' < %s | FileCheck %s
; RUN: opt -S -basicaa -licm < %s | FileCheck %s

define i32 @test(i32* %a, i64 %n.0, i64 %n.0.0, i64 %n.0.0.0, i64 %n.0.0.0.0) nounwind uwtable {
; CHECK-LABEL: define i32 @test
entry:
  %b = alloca i32
  %c = alloca i32
  %a.i8 = bitcast i32* %a to i8*
  %b.i8 = bitcast i32* %b to i8*
  %c.i8 = bitcast i32* %c to i8*
  br label %l.0.header
; CHECK: %b = alloca i32
; CHECK: %c = alloca i32
; CHECK: %[[AI8:.*]] = bitcast i32* %a to i8*
; CHECK: %[[BI8:.*]] = bitcast i32* %b to i8*
; CHECK: %[[CI8:.*]] = bitcast i32* %c to i8*
; CHECK-NOT: load
; CHECK: br

l.0.header:
  %iv.0 = phi i64 [ %iv.0.next, %l.0.latch ], [ 0, %entry ]
  %iv.0.next = add i64 %iv.0, 1
  %exitcond.0 = icmp eq i64 %iv.0.next, %n.0
  %a.val = load i32, i32* %a
  store i32 %a.val, i32* %b
  %c.val = trunc i64 %iv.0 to i32
  store i32 %c.val, i32* %c
  br label %l.0.0.header
; CHECK: %[[AV:.*]] = load i32, i32* %a
; CHECK: store i32 %[[AV]], i32* %b
; CHECK: %[[CT:.*]] = trunc i64 {{.*}} to i32
; CHECK: store i32 %[[CT]], i32* %c
; CHECK: br

l.0.0.header:
  %iv.0.0 = phi i64 [ %iv.0.0.next, %l.0.0.latch ], [ 0, %l.0.header ]
  %iv.0.0.next = add i64 %iv.0.0, 1
  %exitcond.0.0 = icmp eq i64 %iv.0.0.next, %n.0.0
  br label %l.0.0.0.header
; CHECK: br

l.0.0.0.header:
  %iv.0.0.0 = phi i64 [ %iv.0.0.0.next, %l.0.0.0.header ], [ 0, %l.0.0.header ]
  %iv.0.0.0.next = add i64 %iv.0.0.0, 1
  %exitcond.0.0.0 = icmp eq i64 %iv.0.0.0.next, %n.0.0.0
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8, i8* %c.i8, i64 4, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %b.i8, i8* %c.i8, i64 4, i1 false)
  br i1 %exitcond.0.0.0, label %l.0.0.0.header, label %l.0.0.latch
; CHECK: call void @llvm.memcpy.{{.*}}(i8* %[[AI8]], i8* %[[CI8]], i64 4
; CHECK: call void @llvm.memcpy.{{.*}}(i8* %[[BI8]], i8* %[[CI8]], i64 4
; CHECK: br

l.0.0.latch:
  br i1 %exitcond.0.0, label %l.0.0.header, label %l.0.latch
; CHECK: br

l.0.latch:
  %b.val = load i32, i32* %b
  br i1 %exitcond.0, label %exit, label %l.0.header
; CHECK: %[[BV:.*]] = load i32, i32* %b
; CHECK: br

exit:
  %result.lcssa = phi i32 [ %b.val, %l.0.latch ]
  ret i32 %result.lcssa
; CHECK: %[[LCSSA:.*]] = phi i32 [ %[[BV]], %{{.*}} ]
; CHECK: ret i32 %[[LCSSA]]
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
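
; For reference, a rough C-level sketch of the loop nest in @test above. It is
; illustrative only and kept entirely in comments, so it is not part of the
; test input; the variable names and the use of <string.h> memcpy are an
; approximate rendering, not taken from the source. The "loops while equal"
; conditions on the two inner do-while loops mirror the branch conditions in
; the IR verbatim.
;
;   #include <string.h>
;
;   int test(int *a, long n0, long n00, long n000, long n0000) {
;     int b, c;
;     long i = 0, j, k;
;     do {                        /* l.0.header .. l.0.latch        */
;       b = *a;                   /* the load LICM must not hoist   */
;       c = (int)i;
;       j = 0;
;       do {                      /* l.0.0.header .. l.0.0.latch    */
;         k = 0;
;         do {                    /* l.0.0.0.header (self-loop)     */
;           memcpy(a, &c, 4);     /* clobbers *a                    */
;           memcpy(&b, &c, 4);    /* clobbers b                     */
;         } while (++k == n000);
;       } while (++j == n00);
;     } while (++i != n0);
;     return b;                   /* %b.val reloaded in l.0.latch   */
;   }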