# Basic machine sched model test for Thumb2 int instructions
# RUN: llc -o /dev/null %s -mtriple=thumbv7-eabi -mcpu=swift -run-pass machine-scheduler -enable-misched -verify-misched \
# RUN:   -debug-only=machine-scheduler 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_SWIFT
# RUN: llc -o /dev/null %s -mtriple=thumbv7--eabi -mcpu=cortex-a9 -run-pass machine-scheduler -enable-misched -verify-misched \
# RUN:   -debug-only=machine-scheduler 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_A9
# RUN: llc -o /dev/null %s -mtriple=thumbv8r-eabi -mcpu=cortex-r52 -run-pass machine-scheduler -enable-misched -verify-misched \
# RUN:   -debug-only=machine-scheduler 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_R52
# REQUIRES: asserts
--- |
  ; ModuleID = 'foo.ll'
  source_filename = "foo.ll"
  target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
  target triple = "thumbv7---eabi"

  @g1 = common global i32 0, align 4
  @g2 = common global i32 0, align 4

  define i64 @foo(i16 signext %a, i16 signext %b) {
  entry:
    %0 = load i32, i32* @g1, align 4
    %1 = load i32, i32* @g2, align 4
    %2 = add nuw nsw i32 %0, %0
    %3 = sdiv i32 %2, %1
    store i32 %3, i32* @g1, align 4
    %d = mul nsw i16 %a, %a
    %e = mul nsw i16 %b, %b
    %f = add nuw nsw i16 %e, %d
    %c = zext i16 %f to i32
    %mul8 = mul nsw i32 %c, %3
    %mul9 = mul nsw i32 %mul8, %mul8
    %add10 = add nuw nsw i32 %mul9, %mul8
    %conv1130 = zext i32 %add10 to i64
    %mul12 = mul nuw nsw i64 %conv1130, %conv1130
    %mul13 = mul nsw i64 %mul12, %mul12
    %add14 = add nuw nsw i64 %mul13, %mul12
    ret i64 %add14
  }
#
# CHECK: ********** MI Scheduling **********
# CHECK: SU(2): %2:rgpr = t2MOVi32imm @g1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 2
# CHECK_R52: Latency : 2
#
# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, $noreg :: (dereferenceable load 4 from @g1)
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 3
# CHECK_R52: Latency : 4
#
# CHECK: SU(6): %6:rgpr = t2ADDrr %3:rgpr, %3:rgpr, 14, $noreg, $noreg
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3

# CHECK: SU(7): %7:rgpr = t2SDIV %6:rgpr, %5:rgpr, 14, $noreg
# CHECK_A9: Latency : 0
# CHECK_SWIFT: Latency : 14
# CHECK_R52: Latency : 8

# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, $noreg :: (store 4 into @g1)
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 0
# CHECK_R52: Latency : 4
#
# CHECK: SU(9): %8:rgpr = t2SMULBB %1:rgpr, %1:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
# CHECK: SU(10): %9:rgpr = t2SMLABB %0:rgpr, %0:rgpr, %8:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
# CHECK: SU(11): %10:rgpr = t2UXTH %9:rgpr, 0, 14, $noreg
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
# CHECK: SU(12): %11:rgpr = t2MUL %10:rgpr, %7:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
# CHECK: SU(13): %12:rgpr = t2MLA %11:rgpr, %11:rgpr, %11:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12:rgpr, %12:rgpr, 14, $noreg
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12:rgpr, %12:rgpr, %19:rgpr(tied-def 0), %20:rgpr(tied-def 1), 14, $noreg
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 7
# CHECK_R52: Latency : 4
# CHECK: ** ScheduleDAGMILive::schedule picking next node
...
---
# MIR for @foo: pre-RA virtual-register code fed to the machine scheduler.
name: foo
alignment: 2
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: rgpr }
  - { id: 1, class: rgpr }
  - { id: 2, class: rgpr }
  - { id: 3, class: rgpr }
  - { id: 4, class: rgpr }
  - { id: 5, class: rgpr }
  - { id: 6, class: rgpr }
  - { id: 7, class: rgpr }
  - { id: 8, class: rgpr }
  - { id: 9, class: rgpr }
  - { id: 10, class: rgpr }
  - { id: 11, class: rgpr }
  - { id: 12, class: rgpr }
  - { id: 13, class: rgpr }
  - { id: 14, class: rgpr }
  - { id: 15, class: rgpr }
  - { id: 16, class: rgpr }
  - { id: 17, class: rgpr }
  - { id: 18, class: rgpr }
  - { id: 19, class: rgpr }
  - { id: 20, class: rgpr }
liveins:
  - { reg: '$r0', virtual-reg: '%0' }
  - { reg: '$r1', virtual-reg: '%1' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0.entry:
    liveins: $r0, $r1

    %1 = COPY $r1
    %0 = COPY $r0
    %2 = t2MOVi32imm @g1
    %3 = t2LDRi12 %2, 0, 14, $noreg :: (dereferenceable load 4 from @g1)
    %4 = t2MOVi32imm @g2
    %5 = t2LDRi12 %4, 0, 14, $noreg :: (dereferenceable load 4 from @g2)
    %6 = t2ADDrr %3, %3, 14, $noreg, $noreg
    %7 = t2SDIV %6, %5, 14, $noreg
    t2STRi12 %7, %2, 0, 14, $noreg :: (store 4 into @g1)
    %8 = t2SMULBB %1, %1, 14, $noreg
    %9 = t2SMLABB %0, %0, %8, 14, $noreg
    %10 = t2UXTH %9, 0, 14, $noreg
    %11 = t2MUL %10, %7, 14, $noreg
    %12 = t2MLA %11, %11, %11, 14, $noreg
    %13, %14 = t2UMULL %12, %12, 14, $noreg
    %19, %16 = t2UMULL %13, %13, 14, $noreg
    %17 = t2MLA %13, %14, %16, 14, $noreg
    %20 = t2MLA %13, %14, %17, 14, $noreg
    %19, %20 = t2UMLAL %12, %12, %19, %20, 14, $noreg
    $r0 = COPY %19
    $r1 = COPY %20
    tBX_RET 14, $noreg, implicit $r0, implicit $r1

...