# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -verify-machineinstrs -O0 -run-pass=legalizer -mtriple aarch64-unknown-unknown -o - | FileCheck %s
...
# A G_SELECT with a vector condition is lowered to a bitmask blend: the
# <2 x s1> compare result is sign-extended per lane (G_SHL then G_ASHR by
# the lane width minus one), and the two operands are combined with
# G_AND/G_XOR/G_OR.
---
name: v2s64
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: v2s64
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[COPY]](<2 x s64>), [[BUILD_VECTOR]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY2]], [[BUILD_VECTOR1]](<2 x s64>)
    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<2 x s64>)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C2]](s64), [[C2]](s64)
    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $q0 = COPY [[OR]](<2 x s64>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %3:_(s64) = G_CONSTANT i64 0
    %2:_(<2 x s64>) = G_BUILD_VECTOR %3(s64), %3(s64)
    %4:_(<2 x s1>) = G_ICMP intpred(sgt), %0(<2 x s64>), %2
    %5:_(<2 x s64>) = G_SELECT %4(<2 x s1>), %1, %0
    $q0 = COPY %5(<2 x s64>)
    RET_ReallyLR implicit $q0

...
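# Same bitmask-based lowering, exercised below on a 64-bit vector
# (<2 x s32> in $d0/$d1) with a per-lane shift amount of 31.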
---
name: v2s32
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: v2s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s32>) = G_ICMP intpred(sgt), [[COPY]](<2 x s32>), [[BUILD_VECTOR]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[ICMP]](<2 x s32>)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s32>) = G_SHL [[COPY2]], [[BUILD_VECTOR1]](<2 x s32>)
    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s32>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<2 x s32>)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32)
    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $d0 = COPY [[OR]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %3:_(s32) = G_CONSTANT i32 0
    %2:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %3(s32)
    %4:_(<2 x s1>) = G_ICMP intpred(sgt), %0(<2 x s32>), %2
    %5:_(<2 x s32>) = G_SELECT %4(<2 x s1>), %1, %0
    $d0 = COPY %5(<2 x s32>)
    RET_ReallyLR implicit $d0

...
# The same lowering applies per lane to <16 x s8>, with a shift amount of 7.
---
name: v16s8
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: v16s8
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
    ; CHECK: [[ICMP:%[0-9]+]]:_(<16 x s8>) = G_ICMP intpred(sgt), [[COPY]](<16 x s8>), [[BUILD_VECTOR]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(<16 x s8>) = COPY [[ICMP]](<16 x s8>)
    ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
    ; CHECK: [[SHL:%[0-9]+]]:_(<16 x s8>) = G_SHL [[COPY2]], [[BUILD_VECTOR1]](<16 x s8>)
    ; CHECK: [[ASHR:%[0-9]+]]:_(<16 x s8>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<16 x s8>)
    ; CHECK: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
    ; CHECK: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<16 x s8>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $q0 = COPY [[OR]](<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %3:_(s8) = G_CONSTANT i8 0
    %2:_(<16 x s8>) = G_BUILD_VECTOR %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8)
    %4:_(<16 x s1>) = G_ICMP intpred(sgt), %0(<16 x s8>), %2
    %5:_(<16 x s8>) = G_SELECT %4(<16 x s1>), %1, %0
    $q0 = COPY %5(<16 x s8>)
    RET_ReallyLR implicit $q0
...
# A G_SELECT with a scalar condition and vector operands: the s1 condition
# is sign-extended with G_SEXT_INREG, broadcast to all lanes via
# G_INSERT_VECTOR_ELT + G_SHUFFLE_VECTOR, and then used as the blend mask.
---
name: scalar_mask
alignment: 4
tracksRegLiveness: true
liveins:
  - { reg: '$w0' }
  - { reg: '$q0' }
body: |
  bb.1:
    liveins: $q0, $w0

    ; CHECK-LABEL: name: scalar_mask
    ; CHECK: liveins: $q0, $w0
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4100
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 1
    ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[SEXT_INREG]](s32), [[C2]](s64)
    ; CHECK: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
    ; CHECK: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
    ; CHECK: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[SHUF]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[BUILD_VECTOR]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $q0 = COPY [[OR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(<4 x s32>) = COPY $q0
    %2:_(s32) = G_CONSTANT i32 4100
    %6:_(s32) = G_FCONSTANT float 0.000000e+00
    %5:_(<4 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32), %6(s32), %6(s32)
    %3:_(s1) = G_ICMP intpred(eq), %0(s32), %2
    %4:_(<4 x s32>) = G_SELECT %3(s1), %1, %5
    $q0 = COPY %4(<4 x s32>)
    RET_ReallyLR implicit $q0

...