# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL

--- |
  define void @test_insert_128_idx0() {
    ret void
  }

  define void @test_insert_128_idx0_undef() {
    ret void
  }

  define void @test_insert_128_idx1() {
    ret void
  }

  define void @test_insert_128_idx1_undef() {
    ret void
  }

  define void @test_insert_256_idx0() {
    ret void
  }

  define void @test_insert_256_idx0_undef() {
    ret void
  }

  define void @test_insert_256_idx1() {
    ret void
  }

  define void @test_insert_256_idx1_undef() {
    ret void
  }

...
---
# Insert a 128-bit subvector at bit offset 0 of a 512-bit vector; selects to
# VINSERTF32x4Zrr with immediate 0.
name:            test_insert_128_idx0
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $ymm1

    ; ALL-LABEL: name: test_insert_128_idx0
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 0
    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = COPY $zmm0
    %1(<4 x s32>) = COPY $xmm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0

...
---
# Insert into an IMPLICIT_DEF destination at offset 0: the insert folds into a
# plain subregister (sub_xmm) copy instead of a VINSERTF32x4Zrr.
name:            test_insert_128_idx0_undef
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_insert_128_idx0_undef
    ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
    ; ALL: undef %2.sub_xmm:vr512 = COPY [[COPY]]
    ; ALL: $zmm0 = COPY %2
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<4 x s32>) = COPY $xmm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0

...
---
# Insert a 128-bit subvector at bit offset 128 (lane index 1) of a 512-bit
# vector; selects to VINSERTF32x4Zrr with immediate 1.
name:            test_insert_128_idx1
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_insert_128_idx1
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 1
    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = COPY $zmm0
    %1(<4 x s32>) = COPY $xmm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0
...
---
# Insert at lane 1 into an IMPLICIT_DEF destination: unlike the idx0 case this
# cannot fold to a subregister copy, so VINSERTF32x4Zrr is still emitted.
name:            test_insert_128_idx1_undef
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_insert_128_idx1_undef
    ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
    ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[DEF]], [[COPY]], 1
    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<4 x s32>) = COPY $xmm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0
...
---
# Insert a 256-bit subvector at bit offset 0 of a 512-bit vector; selects to
# VINSERTF64x4Zrr with immediate 0.
name:            test_insert_256_idx0
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $ymm1

    ; ALL-LABEL: name: test_insert_256_idx0
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 0
    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = COPY $zmm0
    %1(<8 x s32>) = COPY $ymm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0

...
---
# Insert a 256-bit subvector into an IMPLICIT_DEF destination at offset 0: the
# insert folds into a plain subregister (sub_ymm) copy.
name:            test_insert_256_idx0_undef
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_insert_256_idx0_undef
    ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
    ; ALL: undef %2.sub_ymm:vr512 = COPY [[COPY]]
    ; ALL: $zmm0 = COPY %2
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = COPY $ymm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0

...
---
# Insert a 256-bit subvector at bit offset 256 (lane index 1) of a 512-bit
# vector; selects to VINSERTF64x4Zrr with immediate 1.
name:            test_insert_256_idx1
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_insert_256_idx1
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 1
    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = COPY $zmm0
    %1(<8 x s32>) = COPY $ymm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0
...
---
# Insert a 256-bit subvector at lane 1 into an IMPLICIT_DEF destination: cannot
# fold to a subregister copy, so VINSERTF64x4Zrr is still emitted.
name:            test_insert_256_idx1_undef
alignment:       4
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_insert_256_idx1_undef
    ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
    ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[DEF]], [[COPY]], 1
    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
    ; ALL: RET 0, implicit $ymm0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = COPY $ymm1
    %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $ymm0
...