; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
;
; Just one 32-bit run to make sure we do reasonable things.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX

define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_2f64_23:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 32(%rdi), %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_2f64_23:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 32(%eax), %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
  %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 3
  %val0 = load <2 x double>, <2 x double>* %ptr0
  %val1 = load <2 x double>, <2 x double>* %ptr1
  %res = shufflevector <2 x double> %val0, <2 x double> %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %res
}

define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_2f64_2z:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_2f64_2z:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovaps 32(%eax), %xmm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
  %val0 = load <2 x double>, <2 x double>* %ptr0
  %res = shufflevector <2 x double> %val0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %res
}

define <4 x double> @merge_4f64_f64_2345(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_2345:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 16(%rdi), %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_f64_2345:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 16(%eax), %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds double, double* %ptr, i64 2
  %ptr1 = getelementptr inbounds double, double* %ptr, i64 3
  %ptr2 = getelementptr inbounds double, double* %ptr, i64 4
  %ptr3 = getelementptr inbounds double, double* %ptr, i64 5
  %val0 = load double, double* %ptr0
  %val1 = load double, double* %ptr1
  %val2 = load double, double* %ptr2
  %val3 = load double, double* %ptr3
  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  %res1 = insertelement <4 x double> %res0, double %val1, i32 1
  %res2 = insertelement <4 x double> %res1, double %val2, i32 2
  %res3 = insertelement <4 x double> %res2, double %val3, i32 3
  ret <4 x double> %res3
}

define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_3zuu:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_f64_3zuu:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
  %val0 = load double, double* %ptr0
  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  %res1 = insertelement <4 x double> %res0, double 0.0, i32 1
  ret <4 x double> %res1
}

define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_34uu:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 24(%rdi), %xmm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_f64_34uu:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 24(%eax), %xmm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
  %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
  %val0 = load double, double* %ptr0
  %val1 = load double, double* %ptr1
  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  %res1 = insertelement <4 x double> %res0, double %val1, i32 1
  ret <4 x double> %res1
}

define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_45zz:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 32(%rdi), %xmm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_f64_45zz:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 32(%eax), %xmm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds double, double* %ptr, i64 4
  %ptr1 = getelementptr inbounds double, double* %ptr, i64 5
  %val0 = load double, double* %ptr0
  %val1 = load double, double* %ptr1
  %res0 = insertelement <4 x double> zeroinitializer, double %val0, i32 0
  %res1 = insertelement <4 x double> %res0, double %val1, i32 1
  ret <4 x double> %res1
}

define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_34z6:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_f64_34z6:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
  %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
  %ptr3 = getelementptr inbounds double, double* %ptr, i64 6
  %val0 = load double, double* %ptr0
  %val1 = load double, double* %ptr1
  %val3 = load double, double* %ptr3
  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  %res1 = insertelement <4 x double> %res0, double %val1, i32 1
  %res2 = insertelement <4 x double> %res1, double 0.0, i32 2
  %res3 = insertelement <4 x double> %res2, double %val3, i32 3
  ret <4 x double> %res3
}

define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_2i64_3z:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps 48(%rdi), %xmm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4i64_2i64_3z:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovaps 48(%eax), %xmm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 3
  %val0 = load <2 x i64>, <2 x i64>* %ptr0
  %res = shufflevector <2 x i64> %val0, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i64> %res
}

define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_i64_1234:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 8(%rdi), %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4i64_i64_1234:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 8(%eax), %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
  %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 2
  %ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
  %ptr3 = getelementptr inbounds i64, i64* %ptr, i64 4
  %val0 = load i64, i64* %ptr0
  %val1 = load i64, i64* %ptr1
  %val2 = load i64, i64* %ptr2
  %val3 = load i64, i64* %ptr3
  %res0 = insertelement <4 x i64> undef, i64 %val0, i32 0
  %res1 = insertelement <4 x i64> %res0, i64 %val1, i32 1
  %res2 = insertelement <4 x i64> %res1, i64 %val2, i32 2
  %res3 = insertelement <4 x i64> %res2, i64 %val3, i32 3
  ret <4 x i64> %res3
}

define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_i64_1zzu:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4i64_i64_1zzu:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
  %val0 = load i64, i64* %ptr0
  %res0 = insertelement <4 x i64> undef, i64 %val0, i32 0
  %res1 = insertelement <4 x i64> %res0, i64 0, i32 1
  %res2 = insertelement <4 x i64> %res1, i64 0, i32 2
  ret <4 x i64> %res2
}

define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_i64_23zz:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 16(%rdi), %xmm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4i64_i64_23zz:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 16(%eax), %xmm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 2
  %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 3
  %val0 = load i64, i64* %ptr0
  %val1 = load i64, i64* %ptr1
  %res0 = insertelement <4 x i64> zeroinitializer, i64 %val0, i32 0
  %res1 = insertelement <4 x i64> %res0, i64 %val1, i32 1
  ret <4 x i64> %res1
}

define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_2f32_23z5:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups 16(%rdi), %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8f32_2f32_23z5:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups 16(%eax), %xmm0
; X86-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X86-AVX-NEXT:    vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
; X86-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
  %ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
  %ptr3 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 5
  %val0 = load <2 x float>, <2 x float>* %ptr0
  %val1 = load <2 x float>, <2 x float>* %ptr1
  %val3 = load <2 x float>, <2 x float>* %ptr3
  %res01 = shufflevector <2 x float> %val0, <2 x float> %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %res23 = shufflevector <2 x float> zeroinitializer, <2 x float> %val3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %res = shufflevector <4 x float> %res01, <4 x float> %res23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %res
}

define <8 x float> @merge_8f32_4f32_z2(<4 x float>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_4f32_z2:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8f32_4f32_z2:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vinsertf128 $1, 32(%eax), %ymm0, %ymm0
; X86-AVX-NEXT:    retl
  %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
  %val1 = load <4 x float>, <4 x float>* %ptr1
  %res = shufflevector <4 x float> zeroinitializer, <4 x float> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %res
}

define <8 x float> @merge_8f32_f32_12zzuuzz(float* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_f32_12zzuuzz:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8f32_f32_12zzuuzz:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds float, float* %ptr, i64 1
  %ptr1 = getelementptr inbounds float, float* %ptr, i64 2
  %val0 = load float, float* %ptr0
  %val1 = load float, float* %ptr1
  %res0 = insertelement <8 x float> undef, float %val0, i32 0
  %res1 = insertelement <8 x float> %res0, float %val1, i32 1
  %res2 = insertelement <8 x float> %res1, float 0.0, i32 2
  %res3 = insertelement <8 x float> %res2, float 0.0, i32 3
  %res6 = insertelement <8 x float> %res3, float 0.0, i32 6
  %res7 = insertelement <8 x float> %res6, float 0.0, i32 7
  ret <8 x float> %res7
}

define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_f32_1u3u5zu8:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds float, float* %ptr, i64 1
  %ptr2 = getelementptr inbounds float, float* %ptr, i64 3
  %ptr4 = getelementptr inbounds float, float* %ptr, i64 5
  %ptr7 = getelementptr inbounds float, float* %ptr, i64 8
  %val0 = load float, float* %ptr0
  %val2 = load float, float* %ptr2
  %val4 = load float, float* %ptr4
  %val7 = load float, float* %ptr7
  %res0 = insertelement <8 x float> undef, float %val0, i32 0
  %res2 = insertelement <8 x float> %res0, float %val2, i32 2
  %res4 = insertelement <8 x float> %res2, float %val4, i32 4
  %res5 = insertelement <8 x float> %res4, float 0.0, i32 5
  %res7 = insertelement <8 x float> %res5, float %val7, i32 7
  ret <8 x float> %res7
}

define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8i32_4i32_z3:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8i32_4i32_z3:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vinsertf128 $1, 48(%eax), %ymm0, %ymm0
; X86-AVX-NEXT:    retl
  %ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
  %val1 = load <4 x i32>, <4 x i32>* %ptr1
  %res = shufflevector <4 x i32> zeroinitializer, <4 x i32> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %res
}

define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8i32_i32_56zz9uzz:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8i32_i32_56zz9uzz:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 5
  %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 6
  %ptr4 = getelementptr inbounds i32, i32* %ptr, i64 9
  %val0 = load i32, i32* %ptr0
  %val1 = load i32, i32* %ptr1
  %val4 = load i32, i32* %ptr4
  %res0 = insertelement <8 x i32> undef, i32 %val0, i32 0
  %res1 = insertelement <8 x i32> %res0, i32 %val1, i32 1
  %res2 = insertelement <8 x i32> %res1, i32 0, i32 2
  %res3 = insertelement <8 x i32> %res2, i32 0, i32 3
  %res4 = insertelement <8 x i32> %res3, i32 %val4, i32 4
  %res6 = insertelement <8 x i32> %res4, i32 0, i32 6
  %res7 = insertelement <8 x i32> %res6, i32 0, i32 7
  ret <8 x i32> %res7
}

define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8i32_i32_1u3u5zu8:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
  %ptr2 = getelementptr inbounds i32, i32* %ptr, i64 3
  %ptr4 = getelementptr inbounds i32, i32* %ptr, i64 5
  %ptr7 = getelementptr inbounds i32, i32* %ptr, i64 8
  %val0 = load i32, i32* %ptr0
  %val2 = load i32, i32* %ptr2
  %val4 = load i32, i32* %ptr4
  %val7 = load i32, i32* %ptr7
  %res0 = insertelement <8 x i32> undef, i32 %val0, i32 0
  %res2 = insertelement <8 x i32> %res0, i32 %val2, i32 2
  %res4 = insertelement <8 x i32> %res2, i32 %val4, i32 4
  %res5 = insertelement <8 x i32> %res4, i32 0, i32 5
  %res7 = insertelement <8 x i32> %res5, i32 %val7, i32 7
  ret <8 x i32> %res7
}

define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 8
  %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 9
  %val0 = load i16, i16* %ptr0
  %val1 = load i16, i16* %ptr1
  %res0 = insertelement <16 x i16> undef, i16 %val0, i16 0
  %res1 = insertelement <16 x i16> %res0, i16 %val1, i16 1
  %res2 = insertelement <16 x i16> %res1, i16 0, i16 2
  %res3 = insertelement <16 x i16> %res2, i16 0, i16 3
  %res4 = insertelement <16 x i16> %res3, i16 0, i16 4
  %resF = insertelement <16 x i16> %res4, i16 0, i16 15
  ret <16 x i16> %resF
}

define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
  %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
  %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
  %val0 = load i16, i16* %ptr0
  %val1 = load i16, i16* %ptr1
  %val3 = load i16, i16* %ptr3
  %res0 = insertelement <16 x i16> undef, i16 %val0, i16 0
  %res1 = insertelement <16 x i16> %res0, i16 %val1, i16 1
  %res3 = insertelement <16 x i16> %res1, i16 %val3, i16 3
  ret <16 x i16> %res3
}

define <16 x i16> @merge_16i16_i16_0uu3uuuuuuuuCuEF(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups (%rdi), %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups (%eax), %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
  %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
  %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
  %ptrE = getelementptr inbounds i16, i16* %ptr, i64 14
  %ptrF = getelementptr inbounds i16, i16* %ptr, i64 15
  %val0 = load i16, i16* %ptr0
  %val3 = load i16, i16* %ptr3
  %valC = load i16, i16* %ptrC
  %valE = load i16, i16* %ptrE
  %valF = load i16, i16* %ptrF
  %res0 = insertelement <16 x i16> undef, i16 %val0, i16 0
  %res3 = insertelement <16 x i16> %res0, i16 %val3, i16 3
  %resC = insertelement <16 x i16> %res3, i16 %valC, i16 12
  %resE = insertelement <16 x i16> %resC, i16 %valE, i16 14
  %resF = insertelement <16 x i16> %resE, i16 %valF, i16 15
  ret <16 x i16> %resF
}

define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups (%rdi), %ymm0
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups (%eax), %ymm0
; X86-AVX-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
  %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
  %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
  %ptrE = getelementptr inbounds i16, i16* %ptr, i64 14
  %ptrF = getelementptr inbounds i16, i16* %ptr, i64 15
  %val0 = load i16, i16* %ptr0
  %val3 = load i16, i16* %ptr3
  %valC = load i16, i16* %ptrC
  %valE = load i16, i16* %ptrE
  %valF = load i16, i16* %ptrF
  %res0 = insertelement <16 x i16> undef, i16 %val0, i16 0
  %res3 = insertelement <16 x i16> %res0, i16 %val3, i16 3
  %res4 = insertelement <16 x i16> %res3, i16 0, i16 4
  %res5 = insertelement <16 x i16> %res4, i16 0, i16 5
  %resC = insertelement <16 x i16> %res5, i16 %valC, i16 12
  %resD = insertelement <16 x i16> %resC, i16 0, i16 13
  %resE = insertelement <16 x i16> %resD, i16 %valE, i16 14
  %resF = insertelement <16 x i16> %resE, i16 %valF, i16 15
  ret <16 x i16> %resF
}

define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 4
  %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 5
  %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 7
  %val0 = load i8, i8* %ptr0
  %val1 = load i8, i8* %ptr1
  %val3 = load i8, i8* %ptr3
  %res0 = insertelement <32 x i8> undef, i8 %val0, i8 0
  %res1 = insertelement <32 x i8> %res0, i8 %val1, i8 1
  %res3 = insertelement <32 x i8> %res1, i8 %val3, i8 3
  ret <32 x i8> %res3
}

define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 2
  %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 3
  %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 5
  %val0 = load i8, i8* %ptr0
  %val1 = load i8, i8* %ptr1
  %val3 = load i8, i8* %ptr3
  %res0 = insertelement <32 x i8> undef, i8 %val0, i8 0
  %res1 = insertelement <32 x i8> %res0, i8 %val1, i8 1
  %res3 = insertelement <32 x i8> %res1, i8 %val3, i8 3
  %resE = insertelement <32 x i8> %res3, i8 0, i8 14
  %resF = insertelement <32 x i8> %resE, i8 0, i8 15
  %resG = insertelement <32 x i8> %resF, i8 0, i8 16
  %resH = insertelement <32 x i8> %resG, i8 0, i8 17
  ret <32 x i8> %resH
}

;
; consecutive loads including any/all volatiles may not be combined
;

define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_34uz_volatile:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: merge_4f64_f64_34uz_volatile:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
  %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
  %val0 = load volatile double, double* %ptr0
  %val1 = load volatile double, double* %ptr1
  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  %res1 = insertelement <4 x double> %res0, double %val1, i32 1
  %res3 = insertelement <4 x double> %res1, double 0.0, i32 3
  ret <4 x double> %res3
}

define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind uwtable noinline ssp {
; AVX1-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; AVX1:       # %bb.0:
; AVX1-NEXT:    movzwl (%rdi), %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX1-NEXT:    vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX1-NEXT:    vpinsrw $7, 30(%rdi), %xmm0, %xmm0
; AVX1-NEXT:    vmovd %eax, %xmm1
; AVX1-NEXT:    vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; AVX2:       # %bb.0:
; AVX2-NEXT:    movzwl (%rdi), %eax
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX2-NEXT:    vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX2-NEXT:    vpinsrw $7, 30(%rdi), %xmm0, %xmm0
; AVX2-NEXT:    vmovd %eax, %xmm1
; AVX2-NEXT:    vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    movzwl (%rdi), %eax
; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT:    vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX512F-NEXT:    vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX512F-NEXT:    vpinsrw $7, 30(%rdi), %xmm0, %xmm0
; AVX512F-NEXT:    vmovd %eax, %xmm1
; AVX512F-NEXT:    vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT:    retq
;
; X86-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movzwl (%eax), %ecx
; X86-AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vpinsrw $4, 24(%eax), %xmm0, %xmm0
; X86-AVX-NEXT:    vpinsrw $6, 28(%eax), %xmm0, %xmm0
; X86-AVX-NEXT:    vpinsrw $7, 30(%eax), %xmm0, %xmm0
; X86-AVX-NEXT:    vmovd %ecx, %xmm1
; X86-AVX-NEXT:    vpinsrw $3, 6(%eax), %xmm1, %xmm1
; X86-AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX-NEXT:    retl
  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
  %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
  %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
  %ptrE = getelementptr inbounds i16, i16* %ptr, i64 14
  %ptrF = getelementptr inbounds i16, i16* %ptr, i64 15
  %val0 = load volatile i16, i16* %ptr0
  %val3 = load i16, i16* %ptr3
  %valC = load i16, i16* %ptrC
  %valE = load i16, i16* %ptrE
  %valF = load volatile i16, i16* %ptrF
  %res0 = insertelement <16 x i16> undef, i16 %val0, i16 0
  %res3 = insertelement <16 x i16> %res0, i16 %val3, i16 3
  %res4 = insertelement <16 x i16> %res3, i16 0, i16 4
  %res5 = insertelement <16 x i16> %res4, i16 0, i16 5
  %resC = insertelement <16 x i16> %res5, i16 %valC, i16 12
  %resD = insertelement <16 x i16> %resC, i16 0, i16 13
  %resE = insertelement <16 x i16> %resD, i16 %valE, i16 14
  %resF = insertelement <16 x i16> %resE, i16 %valF, i16 15
  ret <16 x i16> %resF
}

;
; Volatile tests.
;

@l = external dso_local global <32 x i8>, align 32

define <2 x i8> @PR42846(<2 x i8>* %j, <2 x i8> %k) {
; AVX-LABEL: PR42846:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa {{.*}}(%rip), %ymm0
; AVX-NEXT:    vpextrw $0, %xmm0, (%rdi)
; AVX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; X86-AVX-LABEL: PR42846:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovdqa l, %ymm0
; X86-AVX-NEXT:    vpextrw $0, %xmm0, (%eax)
; X86-AVX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl
  %t0 = load volatile <32 x i8>, <32 x i8>* @l, align 32
  %shuffle = shufflevector <32 x i8> %t0, <32 x i8> undef, <2 x i32> <i32 0, i32 1>
  store <2 x i8> %shuffle, <2 x i8>* %j, align 2
  ret <2 x i8> %shuffle
}