# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel -run-pass=instruction-select %s -o - | FileCheck %s

...
---
name:            uaddo_s32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2

    ; CHECK-LABEL: name: uaddo_s32
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
    ; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
    ; CHECK: $w0 = COPY [[UBFMWri1]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %3:gpr(s32), %4:gpr(s1) = G_UADDO %0, %1
    %5:gpr(s8) = G_ZEXT %4(s1)
    %6:gpr(s32) = G_ZEXT %5(s8)
    $w0 = COPY %6(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1, $x2

    ; CHECK-LABEL: name: uaddo_s64
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
    ; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
    ; CHECK: $w0 = COPY [[UBFMWri1]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %3:gpr(s64), %4:gpr(s1) = G_UADDO %0, %1
    %5:gpr(s8) = G_ZEXT %4(s1)
    %6:gpr(s32) = G_ZEXT %5(s8)
    $w0 = COPY %6(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWri when we can fold in a constant.
    ;
    ; CHECK-LABEL: name: uaddo_s32_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %copy:gpr32sp = COPY $w0
    ; CHECK: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK: $w0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 16
    %add:gpr(s32), %overflow:gpr(s1) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_shifted
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWrs when we can fold in a shift.
    ;
    ; CHECK-LABEL: name: uaddo_s32_shifted
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %copy1:gpr32 = COPY $w0
    ; CHECK: %copy2:gpr32 = COPY $w1
    ; CHECK: %add:gpr32 = ADDSWrs %copy1, %copy2, 16, implicit-def $nzcv
    ; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK: $w0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $w0
    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %constant:gpr(s32) = G_CONSTANT i32 16
    %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
    %add:gpr(s32), %overflow:gpr(s1) = G_UADDO %copy1, %shift
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_neg_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get SUBSWri when we can fold in a negative constant.
    ;
    ; CHECK-LABEL: name: uaddo_s32_neg_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %copy:gpr32sp = COPY $w0
    ; CHECK: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK: $w0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 -16
    %add:gpr(s32), %overflow:gpr(s1) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_arith_extended
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $x0
    ; Check that we get ADDSXrx.
    ; CHECK-LABEL: name: uaddo_arith_extended
    ; CHECK: liveins: $w0, $x0
    ; CHECK: %reg0:gpr64sp = COPY $x0
    ; CHECK: %reg1:gpr32 = COPY $w0
    ; CHECK: %add:gpr64 = ADDSXrx %reg0, %reg1, 18, implicit-def $nzcv
    ; CHECK: %flags:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK: $x0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $x0
    %reg0:gpr(s64) = COPY $x0
    %reg1:gpr(s32) = COPY $w0
    %ext:gpr(s64) = G_ZEXT %reg1(s32)
    %cst:gpr(s64) = G_CONSTANT i64 2
    %shift:gpr(s64) = G_SHL %ext, %cst(s64)
    %add:gpr(s64), %flags:gpr(s1) = G_UADDO %reg0, %shift
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0