# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -run-pass arm-mve-vpt-opts %s -o - | FileCheck %s

--- |
  target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
  target triple = "thumbv8.1m.main-none-none-eabi"

  ; Functions are intentionally left blank - see the MIR sequences below.

  define arm_aapcs_vfpcc <4 x float> @vcmp_with_opposite_cond(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @vcmp_with_opposite_cond_and_swapped_operands(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @triple_vcmp(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @killed_vccr_values(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @predicated_vcmps(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @flt_with_swapped_operands(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @different_opcodes(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @incorrect_condcode(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @vpr_or_vccr_write_between_vcmps(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_multi(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_predicated_vpnots(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_copies(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_vpnot_reordering(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_stop_after_write(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  attributes #0 = { "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" }
...
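# The MIR functions below exercise two parts of the arm-mve-vpt-opts pass:
# the vcmp_* functions cover rewriting a "VPNOT-like VCMP" (a VCMP whose
# condition is the opposite of a preceding VCMP on the same operands) into a
# VPNOT, and the spill_prevention* functions cover reusing/inserting VPNOTs so
# that two opposite VCCR values do not both need to stay live.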
---
name: vcmp_with_opposite_cond
alignment: 4
body: |
  ; CHECK-LABEL: name: vcmp_with_opposite_cond
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf16_]], 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPf32_:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf32_]], 0, $noreg
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPi16_:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi16_]], 0, $noreg
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPi32_:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi32_]], 0, $noreg
  ; CHECK: bb.4:
  ; CHECK: successors: %bb.5(0x80000000)
  ; CHECK: [[MVE_VCMPi8_:%[0-9]+]]:vccr = MVE_VCMPi8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi8_]], 0, $noreg
  ; CHECK: bb.5:
  ; CHECK: successors: %bb.6(0x80000000)
  ; CHECK: [[MVE_VCMPs16_:%[0-9]+]]:vccr = MVE_VCMPs16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs16_]], 0, $noreg
  ; CHECK: bb.6:
  ; CHECK: successors: %bb.7(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: bb.7:
  ; CHECK: successors: %bb.8(0x80000000)
  ; CHECK: [[MVE_VCMPs8_:%[0-9]+]]:vccr = MVE_VCMPs8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs8_]], 0, $noreg
  ; CHECK: bb.8:
  ; CHECK: successors: %bb.9(0x80000000)
  ; CHECK: [[MVE_VCMPu16_:%[0-9]+]]:vccr = MVE_VCMPu16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu16_]], 0, $noreg
  ; CHECK: bb.9:
  ; CHECK: successors: %bb.10(0x80000000)
  ; CHECK: [[MVE_VCMPu32_:%[0-9]+]]:vccr = MVE_VCMPu32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT9:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu32_]], 0, $noreg
  ; CHECK: bb.10:
  ; CHECK: successors: %bb.11(0x80000000)
  ; CHECK: [[MVE_VCMPu8_:%[0-9]+]]:vccr = MVE_VCMPu8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT10:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8_]], 0, $noreg
  ; CHECK: bb.11:
  ; CHECK: successors: %bb.12(0x80000000)
  ; CHECK: [[MVE_VCMPf16r:%[0-9]+]]:vccr = MVE_VCMPf16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT11:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf16r]], 0, $noreg
  ; CHECK: bb.12:
  ; CHECK: successors: %bb.13(0x80000000)
  ; CHECK: [[MVE_VCMPf32r:%[0-9]+]]:vccr = MVE_VCMPf32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT12:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf32r]], 0, $noreg
  ; CHECK: bb.13:
  ; CHECK: successors: %bb.14(0x80000000)
  ; CHECK: [[MVE_VCMPi16r:%[0-9]+]]:vccr = MVE_VCMPi16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT13:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi16r]], 0, $noreg
  ; CHECK: bb.14:
  ; CHECK: successors: %bb.15(0x80000000)
  ; CHECK: [[MVE_VCMPi32r:%[0-9]+]]:vccr = MVE_VCMPi32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT14:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi32r]], 0, $noreg
  ; CHECK: bb.15:
  ; CHECK: successors: %bb.16(0x80000000)
  ; CHECK: [[MVE_VCMPi8r:%[0-9]+]]:vccr = MVE_VCMPi8r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT15:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi8r]], 0, $noreg
  ; CHECK: bb.16:
  ; CHECK: successors: %bb.17(0x80000000)
  ; CHECK: [[MVE_VCMPs16r:%[0-9]+]]:vccr = MVE_VCMPs16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT16:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs16r]], 0, $noreg
  ; CHECK: bb.17:
  ; CHECK: successors: %bb.18(0x80000000)
  ; CHECK: [[MVE_VCMPs32r:%[0-9]+]]:vccr = MVE_VCMPs32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT17:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32r]], 0, $noreg
  ; CHECK: bb.18:
  ; CHECK: successors: %bb.19(0x80000000)
  ; CHECK: [[MVE_VCMPs8r:%[0-9]+]]:vccr = MVE_VCMPs8r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT18:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs8r]], 0, $noreg
  ; CHECK: bb.19:
  ; CHECK: successors: %bb.20(0x80000000)
  ; CHECK: [[MVE_VCMPu16r:%[0-9]+]]:vccr = MVE_VCMPu16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT19:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu16r]], 0, $noreg
  ; CHECK: bb.20:
  ; CHECK: successors: %bb.21(0x80000000)
  ; CHECK: [[MVE_VCMPu32r:%[0-9]+]]:vccr = MVE_VCMPu32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT20:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu32r]], 0, $noreg
  ; CHECK: bb.21:
  ; CHECK: successors: %bb.22(0x80000000)
  ; CHECK: [[MVE_VCMPu8r:%[0-9]+]]:vccr = MVE_VCMPu8r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT21:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8r]], 0, $noreg
  ; CHECK: bb.22:
  ; CHECK: [[MVE_VCMPu8r1:%[0-9]+]]:vccr = MVE_VCMPu8r %1:mqpr, $zr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT22:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8r1]], 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that VCMPs with an opposite condition are correctly converted into VPNOTs.
  ;
  bb.0:
    %3:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %4:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.1:
    %5:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %6:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.2:
    %7:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %8:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.3:
    %9:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.4:
    %11:vccr = MVE_VCMPi8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %12:vccr = MVE_VCMPi8 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.5:
    %13:vccr = MVE_VCMPs16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %14:vccr = MVE_VCMPs16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.6:
    %15:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %16:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.7:
    %17:vccr = MVE_VCMPs8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %18:vccr = MVE_VCMPs8 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.8:
    %19:vccr = MVE_VCMPu16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %20:vccr = MVE_VCMPu16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.9:
    %21:vccr = MVE_VCMPu32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %22:vccr = MVE_VCMPu32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.10:
    %23:vccr = MVE_VCMPu8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %24:vccr = MVE_VCMPu8 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.11:
    %25:vccr = MVE_VCMPf16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %26:vccr = MVE_VCMPf16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.12:
    %27:vccr = MVE_VCMPf32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %28:vccr = MVE_VCMPf32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.13:
    %29:vccr = MVE_VCMPi16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %30:vccr = MVE_VCMPi16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.14:
    %31:vccr = MVE_VCMPi32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %32:vccr = MVE_VCMPi32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.15:
    %33:vccr = MVE_VCMPi8r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %34:vccr = MVE_VCMPi8r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.16:
    %35:vccr = MVE_VCMPs16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %36:vccr = MVE_VCMPs16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.17:
    %37:vccr = MVE_VCMPs32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %38:vccr = MVE_VCMPs32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.18:
    %39:vccr = MVE_VCMPs8r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %40:vccr = MVE_VCMPs8r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.19:
    %41:vccr = MVE_VCMPu16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %42:vccr = MVE_VCMPu16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.20:
    %43:vccr = MVE_VCMPu32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %44:vccr = MVE_VCMPu32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.21:
    %45:vccr = MVE_VCMPu8r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %46:vccr = MVE_VCMPu8r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.22:
    ; There shouldn't be any exception for $zr, so the second VCMP should
    ; be transformed into a VPNOT.
    %47:vccr = MVE_VCMPu8r %0:mqpr, $zr, 10, 0, $noreg
    %48:vccr = MVE_VCMPu8r %0:mqpr, $zr, 11, 0, $noreg

    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: vcmp_with_opposite_cond_and_swapped_operands
alignment: 4
body: |
  ; CHECK-LABEL: name: vcmp_with_opposite_cond_and_swapped_operands
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPi16_:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi16_]], 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPi32_:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi32_]], 0, $noreg
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPi8_:%[0-9]+]]:vccr = MVE_VCMPi8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi8_]], 0, $noreg
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPs16_:%[0-9]+]]:vccr = MVE_VCMPs16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs16_]], 0, $noreg
  ; CHECK: bb.4:
  ; CHECK: successors: %bb.5(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: bb.5:
  ; CHECK: successors: %bb.6(0x80000000)
  ; CHECK: [[MVE_VCMPs8_:%[0-9]+]]:vccr = MVE_VCMPs8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs8_]], 0, $noreg
  ; CHECK: bb.6:
  ; CHECK: successors: %bb.7(0x80000000)
  ; CHECK: [[MVE_VCMPu16_:%[0-9]+]]:vccr = MVE_VCMPu16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu16_]], 0, $noreg
  ; CHECK: bb.7:
  ; CHECK: successors: %bb.8(0x80000000)
  ; CHECK: [[MVE_VCMPu32_:%[0-9]+]]:vccr = MVE_VCMPu32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu32_]], 0, $noreg
  ; CHECK: bb.8:
  ; CHECK: [[MVE_VCMPu8_:%[0-9]+]]:vccr = MVE_VCMPu8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8_]], 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that VCMPs with an opposite condition and swapped operands are
  ; correctly converted into VPNOTs.
  ;
  bb.0:
    %2:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPi16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.1:
    %4:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %5:vccr = MVE_VCMPi32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.2:
    %6:vccr = MVE_VCMPi8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %7:vccr = MVE_VCMPi8 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.3:
    %8:vccr = MVE_VCMPs16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VCMPs16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.4:
    %10:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %11:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.5:
    %12:vccr = MVE_VCMPs8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %13:vccr = MVE_VCMPs8 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.6:
    %14:vccr = MVE_VCMPu16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %15:vccr = MVE_VCMPu16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.7:
    %16:vccr = MVE_VCMPu32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %17:vccr = MVE_VCMPu32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.8:
    %18:vccr = MVE_VCMPu8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %19:vccr = MVE_VCMPu8 %1:mqpr, %0:mqpr, 12, 0, $noreg

    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: triple_vcmp
alignment: 4
body: |
  ;
  ; Tests that, when there are 2 "VPNOT-like VCMPs" in a row, only the first
  ; becomes a VPNOT.
  ;
  bb.0:
    ; CHECK-LABEL: name: triple_vcmp
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 12, 0, $noreg
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %4:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: killed_vccr_values
alignment: 4
body: |
  ; CHECK-LABEL: name: killed_vccr_values
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VCMPf16_]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf16_]], 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT1]], 0, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR2]]
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT3]], undef [[MVE_VORR3]]
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT3]], 0, $noreg
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR3]], [[MVE_VORR3]], 1, [[MVE_VPNOT4]], undef [[MVE_VORR4]]
  ; CHECK: bb.3:
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_2]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT5]], undef [[MVE_VORR5]]
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT5]], 0, $noreg
  ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR6]]
  ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR7]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  bb.0:
    ;
    ; Tests that, if the result of the VCMP is killed before the
    ; second VCMP (that will be converted into a VPNOT) is found,
    ; the kill flag is removed.
    ;
    %2:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, killed %2:vccr, undef %3:mqpr
    %4:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 11, 0, $noreg
  bb.1:
    ;
    ; Tests that, if the result of the VCMP that has been replaced with a
    ; VPNOT is killed (before the insertion of the second VPNOT),
    ; the kill flag is removed.
    ;
    %5:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %6:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %7:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, killed %6:vccr, undef %7:mqpr
    %8:mqpr = MVE_VORR %7:mqpr, %7:mqpr, 1, %5:vccr, undef %8:mqpr
  bb.2:
    ;
    ; Tests that the kill flag is removed when inserting a VPNOT for
    ; an instruction.
    ;
    %9:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %11:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %10:vccr, undef %11:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 1, killed %9:vccr, undef %12:mqpr
  bb.3:
    ;
    ; Tests that the kill flag is correctly removed when replacing a use
    ; of the opposite VCCR value with the last VPNOT's result
    ;
    %13:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %14:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %15:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %14:vccr, undef %15:mqpr
    %16:mqpr = MVE_VORR %15:mqpr, %15:mqpr, 1, %13:vccr, undef %16:mqpr
    %17:mqpr = MVE_VORR %16:mqpr, %16:mqpr, 1, killed %13:vccr, undef %17:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: predicated_vcmps
alignment: 4
body: |
  ; CHECK-LABEL: name: predicated_vcmps
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPi16_:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi16_1:%[0-9]+]]:vccr = MVE_VCMPi16 %2:mqpr, %1:mqpr, 12, 1, [[MVE_VCMPi16_]]
  ; CHECK: [[MVE_VCMPi16_2:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi16_]]
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPi32_:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi32_1:%[0-9]+]]:vccr = MVE_VCMPi32 %2:mqpr, %1:mqpr, 12, 1, [[MVE_VCMPi32_]]
  ; CHECK: [[MVE_VCMPi32_2:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi32_]]
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf16_1:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPf16_]]
  ; CHECK: [[MVE_VCMPf16_2:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPf16_]]
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPf32_:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf32_1:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPf32_]]
  ; CHECK: [[MVE_VCMPf32_2:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPf32_]]
  ; CHECK: bb.4:
  ; CHECK: successors: %bb.5(0x80000000)
  ; CHECK: [[MVE_VCMPi16_3:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi16_4:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPi16_3]]
  ; CHECK: [[MVE_VCMPi16_5:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi16_3]]
  ; CHECK: bb.5:
  ; CHECK: [[MVE_VCMPi32_3:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi32_4:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPi32_3]]
  ; CHECK: [[MVE_VCMPi32_5:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi32_3]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that predicated VCMPs are not replaced.
  ;
  bb.0:
    %2:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPi16 %1:mqpr, %0:mqpr, 12, 1, %2:vccr
    %4:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 1, %2:vccr

  bb.1:
    %5:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %6:vccr = MVE_VCMPi32 %1:mqpr, %0:mqpr, 12, 1, %5:vccr
    %7:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 1, %5:vccr

  bb.2:
    %8:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 11, 1, %8:vccr
    %10:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 1, %8:vccr

  bb.3:
    %11:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %12:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 11, 1, %11:vccr
    %13:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 1, %11:vccr

  bb.4:
    %14:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %15:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 11, 1, %14:vccr
    %16:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 1, %14:vccr

  bb.5:
    %17:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %18:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 11, 1, %17:vccr
    %19:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 1, %17:vccr

    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: flt_with_swapped_operands
alignment: 4
body: |
  ; CHECK-LABEL: name: flt_with_swapped_operands
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf16_1:%[0-9]+]]:vccr = MVE_VCMPf16 %2:mqpr, %1:mqpr, 12, 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPf32_:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf32_1:%[0-9]+]]:vccr = MVE_VCMPf32 %2:mqpr, %1:mqpr, 12, 0, $noreg
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPf16_2:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf16_3:%[0-9]+]]:vccr = MVE_VCMPf16 %2:mqpr, %1:mqpr, 11, 0, $noreg
  ; CHECK: bb.3:
  ; CHECK: [[MVE_VCMPf32_2:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf32_3:%[0-9]+]]:vccr = MVE_VCMPf32 %2:mqpr, %1:mqpr, 11, 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that float VCMPs with an opposite condition and swapped operands
  ; are not transformed into VPNOTs.
  ;
  bb.0:
    %2:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPf16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.1:
    %4:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %5:vccr = MVE_VCMPf32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.2:
    %6:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %7:vccr = MVE_VCMPf16 %1:mqpr, %0:mqpr, 11, 0, $noreg

  bb.3:
    %8:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VCMPf32 %1:mqpr, %0:mqpr, 11, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: different_opcodes
alignment: 4
body: |
  ;
  ; Tests that a "VPNOT-like VCMP" with an opcode different from the previous VCMP
  ; is not transformed into a VPNOT.
  ;
  bb.0:
    ; CHECK-LABEL: name: different_opcodes
    ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 0, 0, $noreg
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 1, 1, $noreg
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 0, 0, $noreg
    %3:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 1, 1, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: incorrect_condcode
alignment: 4
body: |
  ; CHECK-LABEL: name: incorrect_condcode
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 11, 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPs32_3:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 12, 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that a VCMP is not transformed into a VPNOT if its CondCode is not
  ; the opposite CondCode.
  ;
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 11, 0, $noreg
  bb.1:
    %4:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %5:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 12, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: vpr_or_vccr_write_between_vcmps
alignment: 4
body: |
  ;
  ; Tests that a "VPNOT-like VCMP" will not be transformed into a VPNOT if
  ; VCCR/VPR is written to in-between.
  ;
  bb.0:
    ; CHECK-LABEL: name: vpr_or_vccr_write_between_vcmps
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 12, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT killed [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 10, 0, $noreg
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 12, 0, $noreg
    %3:vccr = MVE_VPNOT killed %2:vccr, 0, $noreg
    %4:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 10, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT]], 0, $noreg
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT1]], 0, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT2]], 0, $noreg
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR2]], [[MVE_VORR2]], 1, [[MVE_VPNOT3]], undef [[MVE_VORR3]]
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT3]], 0, $noreg
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR3]], [[MVE_VORR3]], 1, [[MVE_VPNOT4]], undef [[MVE_VORR4]]
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT5]], undef [[MVE_VORR5]]
  ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 0, $noreg, undef [[MVE_VORR6]]
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT5]], 0, $noreg
  ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR7]]
  ; CHECK: [[MVE_VORR8:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR7]], [[MVE_VORR7]], 0, $noreg, undef [[MVE_VORR8]]
  ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT6]], 0, $noreg
  ; CHECK: [[MVE_VORR9:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR8]], [[MVE_VORR8]], 1, [[MVE_VPNOT7]], undef [[MVE_VORR9]]
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_2]], 0, $noreg
  ; CHECK: [[MVE_VORR10:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT8]], undef [[MVE_VORR10]]
  ; CHECK: [[MVE_VORR11:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR10]], [[MVE_VORR10]], 1, [[MVE_VPNOT8]], undef [[MVE_VORR11]]
  ; CHECK: [[MVE_VPNOT9:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT8]], 0, $noreg
  ; CHECK: [[MVE_VORR12:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR11]], [[MVE_VORR11]], 1, [[MVE_VPNOT9]], undef [[MVE_VORR12]]
  ; CHECK: [[MVE_VORR13:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR12]], [[MVE_VORR12]], 1, [[MVE_VPNOT9]], undef [[MVE_VORR13]]
  ; CHECK: [[MVE_VPNOT10:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT9]], 0, $noreg
  ; CHECK: [[MVE_VORR14:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR13]], [[MVE_VORR13]], 1, [[MVE_VPNOT10]], undef [[MVE_VORR14]]
  ; CHECK: [[MVE_VORR15:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR14]], [[MVE_VORR14]], 1, [[MVE_VPNOT10]], undef [[MVE_VORR15]]
  ; CHECK: [[MVE_VPNOT11:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT10]], 0, $noreg
  ; CHECK: [[MVE_VORR16:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR15]], [[MVE_VORR15]], 1, [[MVE_VPNOT11]], undef [[MVE_VORR16]]
  ; CHECK: [[MVE_VORR17:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR16]], [[MVE_VORR16]], 1, [[MVE_VPNOT11]], undef [[MVE_VORR17]]
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPs32_3:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT12:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_3]], 0, $noreg
  ; CHECK: [[MVE_VORR18:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT12]], undef [[MVE_VORR11]]
  ; CHECK: [[MVE_VPNOT13:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT12]], 0, $noreg
  ; CHECK: [[MVE_VORR19:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT13]], undef [[MVE_VORR19]]
  ; CHECK: bb.4:
  ; CHECK: [[VMSR_P0_:%[0-9]+]]:vccr = VMSR_P0 killed %32:gpr, 14 /* CC::al */, $noreg
  ; CHECK: [[MVE_VPNOT14:%[0-9]+]]:vccr = MVE_VPNOT [[VMSR_P0_]], 0, $noreg
  ; CHECK: [[MVE_VORR20:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR19]], [[MVE_VORR19]], 1, [[MVE_VPNOT14]], undef [[MVE_VORR20]]
  ; CHECK: [[MVE_VPNOT15:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT14]], 0, $noreg
  ; CHECK: [[MVE_VORR21:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR20]], [[MVE_VORR20]], 1, [[MVE_VPNOT15]], undef [[MVE_VORR21]]
  ; CHECK: [[MVE_VPNOT16:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT15]], 0, $noreg
  ; CHECK: [[MVE_VORR22:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR21]], [[MVE_VORR21]], 1, [[MVE_VPNOT16]], undef [[MVE_VORR22]]
  ; CHECK: [[MVE_VPNOT17:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT16]], 0, $noreg
  ; CHECK: [[MVE_VORR23:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR22]], [[MVE_VORR22]], 1, [[MVE_VPNOT17]], undef [[MVE_VORR23]]
  ; CHECK: [[MVE_VPNOT18:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT17]], 0, $noreg
  ; CHECK: [[MVE_VORR24:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR23]], [[MVE_VORR23]], 1, [[MVE_VPNOT18]], undef [[MVE_VORR24]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  bb.0:
    ;
    ; Basic test case
    ;
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, %3:vccr, undef %6:mqpr
    %7:mqpr = MVE_VORR %6:mqpr, %6:mqpr, 1, %2:vccr, undef %7:mqpr
    %8:mqpr = MVE_VORR %7:mqpr, %7:mqpr, 1, %3:vccr, undef %8:mqpr
  bb.1:
    ;
    ; Tests that unpredicated instructions in the middle of the block
    ; don't interfere with the replacement.
    ;
    %9:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VPNOT %9:vccr, 0, $noreg
    %11:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %10:vccr, undef %11:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 0, $noreg, undef %12:mqpr
    %13:mqpr = MVE_VORR %12:mqpr, %12:mqpr, 1, %9:vccr, undef %13:mqpr
    %14:mqpr = MVE_VORR %13:mqpr, %13:mqpr, 0, $noreg, undef %14:mqpr
    %15:mqpr = MVE_VORR %14:mqpr, %14:mqpr, 1, %10:vccr, undef %15:mqpr
  bb.2:
    ;
    ; Tests that all uses of the register are replaced, even when it's used
    ; multiple times in a row.
    ;
    %16:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %17:vccr = MVE_VPNOT %16:vccr, 0, $noreg
    %18:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %17:vccr, undef %18:mqpr
    %19:mqpr = MVE_VORR %18:mqpr, %18:mqpr, 1, %17:vccr, undef %19:mqpr
    %20:mqpr = MVE_VORR %19:mqpr, %19:mqpr, 1, %16:vccr, undef %20:mqpr
    %21:mqpr = MVE_VORR %20:mqpr, %20:mqpr, 1, %16:vccr, undef %21:mqpr
    %22:mqpr = MVE_VORR %21:mqpr, %21:mqpr, 1, %17:vccr, undef %22:mqpr
    %23:mqpr = MVE_VORR %22:mqpr, %22:mqpr, 1, %17:vccr, undef %23:mqpr
    %24:mqpr = MVE_VORR %23:mqpr, %23:mqpr, 1, %16:vccr, undef %24:mqpr
    %25:mqpr = MVE_VORR %24:mqpr, %24:mqpr, 1, %16:vccr, undef %25:mqpr
  bb.3:
    ;
    ; Tests that already present VPNOTs are "registered" by the pass so
    ; it does not insert a useless VPNOT.
    ;
    %26:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %27:vccr = MVE_VPNOT %26:vccr, 0, $noreg
    %28:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %27:vccr, undef %19:mqpr
    %29:vccr = MVE_VPNOT %27:vccr, 0, $noreg
    %30:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %26:vccr, undef %30:mqpr
  bb.4:
    ;
    ; Tests that the pass works with instructions other than vcmp.
    ;
    %32:vccr = VMSR_P0 killed %31:gpr, 14, $noreg
    %33:vccr = MVE_VPNOT %32:vccr, 0, $noreg
    %34:mqpr = MVE_VORR %30:mqpr, %30:mqpr, 1, %33:vccr, undef %34:mqpr
    %35:mqpr = MVE_VORR %34:mqpr, %34:mqpr, 1, %32:vccr, undef %35:mqpr
    %36:mqpr = MVE_VORR %35:mqpr, %35:mqpr, 1, %33:vccr, undef %36:mqpr
    %37:mqpr = MVE_VORR %36:mqpr, %36:mqpr, 1, %32:vccr, undef %37:mqpr
    %38:mqpr = MVE_VORR %37:mqpr, %37:mqpr, 1, %33:vccr, undef %38:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_multi
alignment: 4
body: |
  bb.0:
    ;
    ; Tests that multiple groups of predicated instructions in the same basic block are optimized.
    ;
    ; CHECK-LABEL: name: spill_prevention_multi
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
    ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT]], 0, $noreg
    ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
    ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT1]], 0, $noreg
    ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR2]]
    ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT2]], 0, $noreg
    ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR2]], [[MVE_VORR2]], 1, [[MVE_VPNOT3]], undef [[MVE_VORR3]]
    ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR4]]
    ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
    ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR4]], [[MVE_VORR4]], 1, [[MVE_VPNOT4]], undef [[MVE_VORR5]]
    ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT4]], 0, $noreg
    ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 1, [[MVE_VPNOT5]], undef [[MVE_VORR6]]
    ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT5]], 0, $noreg
    ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR7]]
    ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_2]], 0, $noreg
    ; CHECK: [[MVE_VORR8:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT7]], undef [[MVE_VORR8]]
    ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT7]], 0, $noreg
    ; CHECK: [[MVE_VORR9:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR8]], [[MVE_VORR8]], 1, [[MVE_VPNOT8]], undef [[MVE_VORR9]]
    ; CHECK: [[MVE_VPNOT9:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT8]], 0, $noreg
    ; CHECK: [[MVE_VORR10:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR9]], [[MVE_VORR9]], 1, [[MVE_VPNOT9]], undef [[MVE_VORR10]]
    ; CHECK: [[MVE_VPNOT10:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT9]], 0, $noreg
    ; CHECK: [[MVE_VORR11:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR10]], [[MVE_VORR10]], 1, [[MVE_VPNOT10]], undef [[MVE_VORR11]]
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, %3:vccr, undef %6:mqpr
    %7:mqpr = MVE_VORR %6:mqpr, %6:mqpr, 1, %2:vccr, undef %7:mqpr
    %8:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VPNOT %8:vccr, 0, $noreg
    %10:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %8:vccr, undef %10:mqpr
    %11:mqpr = MVE_VORR %10:mqpr, %10:mqpr, 1, %9:vccr, undef %11:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 1, %8:vccr, undef %12:mqpr
    %13:mqpr = MVE_VORR %12:mqpr, %12:mqpr, 1, %9:vccr, undef %13:mqpr
    %14:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %15:vccr = MVE_VPNOT %14:vccr, 0, $noreg
    %16:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %15:vccr, undef %16:mqpr
    %17:mqpr = MVE_VORR %16:mqpr, %16:mqpr, 1, %14:vccr, undef %17:mqpr
    %18:mqpr = MVE_VORR %17:mqpr, %17:mqpr, 1, %15:vccr, undef %18:mqpr
    %19:mqpr = MVE_VORR %18:mqpr, %18:mqpr, 1, %14:vccr, undef %19:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_predicated_vpnots
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention_predicated_vpnots
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 1, [[MVE_VCMPs32_]]
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT]], undef [[MVE_VORR1]]
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 1, [[MVE_VCMPs32_1]]
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VPNOT1]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %2:mqpr, %1:mqpr, 1, [[MVE_VPNOT1]], undef [[MVE_VORR2]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that predicated VPNOTs are not considered by this pass
  ; (This means that these examples should not be optimized.)
  ;
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 1, %2:vccr
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %2:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %3:vccr, undef %5:mqpr
  bb.1:
    %12:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %13:vccr = MVE_VPNOT %12:vccr, 1, %12:vccr
    %14:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, %13:vccr, undef %14:mqpr
    %15:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %12:vccr, undef %15:mqpr
    %16:mqpr = MVE_VORR %1:mqpr, %0:mqpr, 1, %13:vccr, undef %16:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_copies
alignment: 4
body: |
  ;
  ; Tests that VPNOTs are replaced by a COPY instead of inserting a VPNOT
  ; (which would result in a double VPNOT).
  ;
  bb.0:
    ; CHECK-LABEL: name: spill_prevention_copies
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
    ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR1]]
    ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR2]]
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %6:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %5:vccr, undef %6:mqpr
    %7:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %8:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %7:vccr, undef %8:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_vpnot_reordering
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention_vpnot_reordering
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VCMPs32_]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR %2:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_]], undef [[MVE_VORR1]]
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR1]], 1, [[MVE_VPNOT]], undef [[MVE_VORR2]]
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR3]]
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR %2:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR4]]
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR3]], [[MVE_VORR4]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR5]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that the first VPNOT is moved down when the result of the VCMP is used
  ; before the first usage of the VPNOT's result.
  ;
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, %2:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %1:mqpr, %0:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %4:mqpr, %5:mqpr, 1, %3:vccr, undef %6:mqpr
  bb.1:
    ; Test again with a "killed" flag to check if it's properly removed.
    %7:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %8:vccr = MVE_VPNOT %7:vccr, 0, $noreg
    %9:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, %7:vccr, undef %9:mqpr
    %10:mqpr = MVE_VORR %1:mqpr, %0:mqpr, 1, killed %7:vccr, undef %10:mqpr
    %11:mqpr = MVE_VORR %9:mqpr, %10:mqpr, 1, %8:vccr, undef %11:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_stop_after_write
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention_stop_after_write
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT]], 0, $noreg
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
  ; CHECK: [[VMSR_P0_:%[0-9]+]]:vccr = VMSR_P0 killed %7:gpr, 14 /* CC::al */, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VCMPs32_]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR2]], [[MVE_VORR2]], 1, [[MVE_VPNOT]], undef [[MVE_VORR3]]
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT2]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT2]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR4]], [[MVE_VORR4]], 1, [[MVE_VPNOT3]], undef [[MVE_VORR5]]
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR6]]
  ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR7]]
  ; CHECK: [[MVE_VORR8:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR7]], [[MVE_VORR7]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR8]]
  ;
  ; Tests that the optimisation stops when it sees an instruction
  ; that writes to VPR, and that doesn't use any of the registers we care about.
  ;
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:vccr = VMSR_P0 killed %20:gpr, 14, $noreg
    %7:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, %2:vccr, undef %7:mqpr
    %8:mqpr = MVE_VORR %7:mqpr, %7:mqpr, 1, %3:vccr, undef %8:mqpr
  bb.1:
    %9:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VPNOT %9:vccr, 0, $noreg
    %11:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %10:vccr, undef %4:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 1, %9:vccr, undef %12:mqpr
    %13:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 10, 0, $noreg
    %14:mqpr = MVE_VORR %12:mqpr, %12:mqpr, 1, %10:vccr, undef %14:mqpr
    %15:mqpr = MVE_VORR %14:mqpr, %14:mqpr, 1, %9:vccr, undef %15:mqpr
    %16:mqpr = MVE_VORR %15:mqpr, %15:mqpr, 1, %10:vccr, undef %16:mqpr
...