; REQUIRES: asserts
;
; The Cortex-A57 machine model will avoid scheduling load instructions in
; succession because loads on the A57 have a latency of 4 cycles and they all
; issue to the same pipeline. Instead, it will move other instructions between
; the loads to avoid unnecessary stalls. The generic machine model schedules 4
; loads consecutively for this case and will cause stalls.
;
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
; CHECK: ********** MI Scheduling **********
; CHECK: main:%bb.2
; CHECK: LDR
; CHECK: Latency : 4
; CHECK: *** Final schedule for %bb.2 ***
; CHECK: LDR
; CHECK: LDR
; CHECK-NOT: LDR
; CHECK: {{.*}}
; CHECK: ********** MI Scheduling **********

@main.x = private unnamed_addr constant [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 4
@main.y = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2], align 4

; Function Attrs: nounwind
define i32 @main() #0 {
entry:
  %retval = alloca i32, align 4
  %x = alloca [8 x i32], align 4
  %y = alloca [8 x i32], align 4
  %i = alloca i32, align 4
  %xx = alloca i32, align 4
  %yy = alloca i32, align 4
  store i32 0, i32* %retval
  %0 = bitcast [8 x i32]* %x to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false)
  %1 = bitcast [8 x i32]* %y to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false)
  store i32 0, i32* %xx, align 4
  store i32 0, i32* %yy, align 4
  store i32 0, i32* %i, align 4
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %2 = load i32, i32* %i, align 4
  %cmp = icmp slt i32 %2, 8
  br i1 %cmp, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  %3 = load i32, i32* %yy, align 4
  %4 = load i32, i32* %i, align 4
  %idxprom = sext i32 %4 to i64
  %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
  %5 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %5, 1
  store i32 %add, i32* %xx, align 4
  %6 = load i32, i32* %xx, align 4
  %add1 = add nsw i32 %6, 12
  store i32 %add1, i32* %xx, align 4
  %7 = load i32, i32* %xx, align 4
  %add2 = add nsw i32 %7, 23
  store i32 %add2, i32* %xx, align 4
  %8 = load i32, i32* %xx, align 4
  %add3 = add nsw i32 %8, 34
  store i32 %add3, i32* %xx, align 4
  %9 = load i32, i32* %i, align 4
  %idxprom4 = sext i32 %9 to i64
  %arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
  %10 = load i32, i32* %arrayidx5, align 4

  %add4 = add nsw i32 %9, %add
  %add5 = add nsw i32 %10, %add1
  %add6 = add nsw i32 %add4, %add5

  %add7 = add nsw i32 %9, %add3
  %add8 = add nsw i32 %10, %add4
  %add9 = add nsw i32 %add7, %add8

  %add10 = add nsw i32 %9, %add6
  %add11 = add nsw i32 %10, %add7
  %add12 = add nsw i32 %add10, %add11

  %add13 = add nsw i32 %9, %add9
  %add14 = add nsw i32 %10, %add10
  %add15 = add nsw i32 %add13, %add14

  store i32 %add15, i32* %xx, align 4

  %div = sdiv i32 %4, %5

  store i32 %div, i32* %yy, align 4

  br label %for.inc

for.inc:                                          ; preds = %for.body
  %11 = load i32, i32* %i, align 4
  %inc = add nsw i32 %11, 1
  store i32 %inc, i32* %i, align 4
  br label %for.cond

for.end:                                          ; preds = %for.cond
  %12 = load i32, i32* %xx, align 4
  %13 = load i32, i32* %yy, align 4
  %add67 = add nsw i32 %12, %13
  ret i32 %add67
}

; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }