# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
#
# Check that we can combine a G_SHUFFLE_VECTOR into a G_EXT.

...
---
name: v8s8_cst3
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1
    ; Mask (3..10) reads 8 contiguous lanes of %v1:%v2 starting at lane 3,
    ; so this is EXT with a byte offset of 3 (1-byte elements).
    ;
    ; CHECK-LABEL: name: v8s8_cst3
    ; CHECK: liveins: $d0, $d1
    ; CHECK: %v1:_(<8 x s8>) = COPY $d0
    ; CHECK: %v2:_(<8 x s8>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
    ; CHECK: $d0 = COPY %shuf(<8 x s8>)
    ; CHECK: RET_ReallyLR implicit $d0
    %v1:_(<8 x s8>) = COPY $d0
    %v2:_(<8 x s8>) = COPY $d1
    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(3, 4, 5, 6, 7, 8, 9, 10)
    $d0 = COPY %shuf(<8 x s8>)
    RET_ReallyLR implicit $d0
...
---
name: v8s8_cst5
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1
    ; Mask (13,14,15,0..4) starts at lane 13, i.e. lane 5 of %v2, wrapping
    ; into %v1 — so the operands are swapped and the offset is 5.
    ;
    ; CHECK-LABEL: name: v8s8_cst5
    ; CHECK: liveins: $d0, $d1
    ; CHECK: %v1:_(<8 x s8>) = COPY $d0
    ; CHECK: %v2:_(<8 x s8>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
    ; CHECK: $d0 = COPY %shuf(<8 x s8>)
    ; CHECK: RET_ReallyLR implicit $d0
    %v1:_(<8 x s8>) = COPY $d0
    %v2:_(<8 x s8>) = COPY $d1
    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(13, 14, 15, 0, 1, 2, 3, 4)
    $d0 = COPY %shuf(<8 x s8>)
    RET_ReallyLR implicit $d0
...
---
name: v16s8_cst3
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; Mask (3..18) reads 16 contiguous lanes of %v1:%v2 starting at lane 3,
    ; so this is EXT with a byte offset of 3 (1-byte elements).
    ;
    ; CHECK-LABEL: name: v16s8_cst3
    ; CHECK: liveins: $q0, $q1
    ; CHECK: %v1:_(<16 x s8>) = COPY $q0
    ; CHECK: %v2:_(<16 x s8>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK: %shuf:_(<16 x s8>) = G_EXT %v1, %v2, [[C]](s32)
    ; CHECK: $q0 = COPY %shuf(<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %v1:_(<16 x s8>) = COPY $q0
    %v2:_(<16 x s8>) = COPY $q1
    %shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)
    $q0 = COPY %shuf(<16 x s8>)
    RET_ReallyLR implicit $q0
...
---
name: v16s8_cst7
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; Mask (23..31,0..6) starts at lane 23, i.e. lane 7 of %v2, wrapping
    ; into %v1 — so the operands are swapped and the offset is 7.
    ;
    ; CHECK-LABEL: name: v16s8_cst7
    ; CHECK: liveins: $q0, $q1
    ; CHECK: %v1:_(<16 x s8>) = COPY $q0
    ; CHECK: %v2:_(<16 x s8>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
    ; CHECK: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
    ; CHECK: $q0 = COPY %shuf(<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %v1:_(<16 x s8>) = COPY $q0
    %v2:_(<16 x s8>) = COPY $q1
    %shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6)
    $q0 = COPY %shuf(<16 x s8>)
    RET_ReallyLR implicit $q0
...
---
name: v4s16_cst6
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1
    ; 2-byte elements: start index 3 -> byte offset 3 * 2 = 6.
    ;
    ; CHECK-LABEL: name: v4s16_cst6
    ; CHECK: liveins: $d0, $d1
    ; CHECK: %v1:_(<4 x s16>) = COPY $d0
    ; CHECK: %v2:_(<4 x s16>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
    ; CHECK: %shuf:_(<4 x s16>) = G_EXT %v1, %v2, [[C]](s32)
    ; CHECK: $d0 = COPY %shuf(<4 x s16>)
    ; CHECK: RET_ReallyLR implicit $d0
    %v1:_(<4 x s16>) = COPY $d0
    %v2:_(<4 x s16>) = COPY $d1
    %shuf:_(<4 x s16>) = G_SHUFFLE_VECTOR %v1(<4 x s16>), %v2, shufflemask(3, 4, 5, 6)
    $d0 = COPY %shuf(<4 x s16>)
    RET_ReallyLR implicit $d0
...
---
name: v4s32_cst12
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; 4-byte elements: start index 3 -> byte offset 3 * 4 = 12.
    ;
    ; CHECK-LABEL: name: v4s32_cst12
    ; CHECK: liveins: $q0, $q1
    ; CHECK: %v1:_(<4 x s32>) = COPY $q0
    ; CHECK: %v2:_(<4 x s32>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
    ; CHECK: %shuf:_(<4 x s32>) = G_EXT %v1, %v2, [[C]](s32)
    ; CHECK: $q0 = COPY %shuf(<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %v1:_(<4 x s32>) = COPY $q0
    %v2:_(<4 x s32>) = COPY $q1
    %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %v1(<4 x s32>), %v2, shufflemask(3, 4, 5, 6)
    $q0 = COPY %shuf(<4 x s32>)
    RET_ReallyLR implicit $q0
...
---
name: undef_elts_should_match_1
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1
    ; Undef shuffle indices should not prevent matching G_EXT.
    ; We should get a constant 3 here.
    ;
    ; CHECK-LABEL: name: undef_elts_should_match_1
    ; CHECK: liveins: $d0, $d1
    ; CHECK: %v1:_(<8 x s8>) = COPY $d0
    ; CHECK: %v2:_(<8 x s8>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
    ; CHECK: $d0 = COPY %shuf(<8 x s8>)
    ; CHECK: RET_ReallyLR implicit $d0
    %v1:_(<8 x s8>) = COPY $d0
    %v2:_(<8 x s8>) = COPY $d1
    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(3, -1, -1, 6, 7, 8, 9, 10)
    $d0 = COPY %shuf(<8 x s8>)
    RET_ReallyLR implicit $d0
...
---
name: undef_elts_should_match_2
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1
    ; Undef shuffle indices should not prevent matching G_EXT.
    ; We should get a constant 6 here.
    ;
    ; CHECK-LABEL: name: undef_elts_should_match_2
    ; CHECK: liveins: $d0, $d1
    ; CHECK: %v1:_(<8 x s8>) = COPY $d0
    ; CHECK: %v2:_(<8 x s8>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
    ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
    ; CHECK: $d0 = COPY %shuf(<8 x s8>)
    ; CHECK: RET_ReallyLR implicit $d0
    %v1:_(<8 x s8>) = COPY $d0
    %v2:_(<8 x s8>) = COPY $d1
    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(-1, -1, -1, -1, 2, 3, 4, 5)
    $d0 = COPY %shuf(<8 x s8>)
    RET_ReallyLR implicit $d0
...
---
name: undef_elts_should_match_3
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; Undef shuffle indices should not prevent matching G_EXT.
    ; We should get a constant 7 here.
    ; CHECK-LABEL: name: undef_elts_should_match_3
    ; CHECK: liveins: $q0, $q1
    ; CHECK: %v1:_(<16 x s8>) = COPY $q0
    ; CHECK: %v2:_(<16 x s8>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
    ; CHECK: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
    ; CHECK: $q0 = COPY %shuf(<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %v1:_(<16 x s8>) = COPY $q0
    %v2:_(<16 x s8>) = COPY $q1
    %shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(23, 24, 25, 26, -1, -1, 29, 30, 31, 0, 1, 2, 3, 4, -1, 6)
    $q0 = COPY %shuf(<16 x s8>)
    RET_ReallyLR implicit $q0
...
---
name: undef_elts_should_match_4
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; Undef shuffle indices should not prevent matching G_EXT.
    ; We should get a constant 10 here (start index 5 with 2-byte elements).
    ; CHECK-LABEL: name: undef_elts_should_match_4
    ; CHECK: liveins: $q0, $q1
    ; CHECK: %v1:_(<8 x s16>) = COPY $q0
    ; CHECK: %v2:_(<8 x s16>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
    ; CHECK: %shuf:_(<8 x s16>) = G_EXT %v2, %v1, [[C]](s32)
    ; CHECK: $q0 = COPY %shuf(<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %v1:_(<8 x s16>) = COPY $q0
    %v2:_(<8 x s16>) = COPY $q1
    %shuf:_(<8 x s16>) = G_SHUFFLE_VECTOR %v1(<8 x s16>), %v2, shufflemask(-1, -1, -1, -1, 1, 2, 3, 4)
    $q0 = COPY %shuf(<8 x s16>)
    RET_ReallyLR implicit $q0
...
---
name: all_undef
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; We expect at least one defined element in the shuffle mask.
    ; With every index undef, no G_EXT is formed; the lowering emits
    ; G_REV64 here instead.
    ;
    ; CHECK-LABEL: name: all_undef
    ; CHECK: liveins: $q0, $q1
    ; CHECK: %v1:_(<8 x s16>) = COPY $q0
    ; CHECK: %shuf:_(<8 x s16>) = G_REV64 %v1
    ; CHECK: $q0 = COPY %shuf(<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %v1:_(<8 x s16>) = COPY $q0
    %v2:_(<8 x s16>) = COPY $q1
    %shuf:_(<8 x s16>) = G_SHUFFLE_VECTOR %v1(<8 x s16>), %v2, shufflemask(-1, -1, -1, -1, -1, -1, -1, -1)
    $q0 = COPY %shuf(<8 x s16>)
    RET_ReallyLR implicit $q0
...