; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64

define <4 x double> @test_buildvector_v4f64(double %a0, double %a1, double %a2, double %a3) {
; AVX-32-LABEL: test_buildvector_v4f64:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v4f64:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX-64-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <4 x double> undef, double %a0, i32 0
  %ins1 = insertelement <4 x double> %ins0, double %a1, i32 1
  %ins2 = insertelement <4 x double> %ins1, double %a2, i32 2
  %ins3 = insertelement <4 x double> %ins2, double %a3, i32 3
  ret <4 x double> %ins3
}

define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) {
; AVX-32-LABEL: test_buildvector_v8f32:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v8f32:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
; AVX-64-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <8 x float> undef, float %a0, i32 0
  %ins1 = insertelement <8 x float> %ins0, float %a1, i32 1
  %ins2 = insertelement <8 x float> %ins1, float %a2, i32 2
  %ins3 = insertelement <8 x float> %ins2, float %a3, i32 3
  %ins4 = insertelement <8 x float> %ins3, float %a4, i32 4
  %ins5 = insertelement <8 x float> %ins4, float %a5, i32 5
  %ins6 = insertelement <8 x float> %ins5, float %a6, i32 6
  %ins7 = insertelement <8 x float> %ins6, float %a7, i32 7
  ret <8 x float> %ins7
}

define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
; AVX-32-LABEL: test_buildvector_v4i64:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT:    retl
;
; AVX1-64-LABEL: test_buildvector_v4i64:
; AVX1-64:       # %bb.0:
; AVX1-64-NEXT:    vmovq %rcx, %xmm0
; AVX1-64-NEXT:    vmovq %rdx, %xmm1
; AVX1-64-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-64-NEXT:    vmovq %rsi, %xmm1
; AVX1-64-NEXT:    vmovq %rdi, %xmm2
; AVX1-64-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-64-NEXT:    retq
;
; AVX2-64-LABEL: test_buildvector_v4i64:
; AVX2-64:       # %bb.0:
; AVX2-64-NEXT:    vmovq %rcx, %xmm0
; AVX2-64-NEXT:    vmovq %rdx, %xmm1
; AVX2-64-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-64-NEXT:    vmovq %rsi, %xmm1
; AVX2-64-NEXT:    vmovq %rdi, %xmm2
; AVX2-64-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-64-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-64-NEXT:    retq
  %ins0 = insertelement <4 x i64> undef, i64 %a0, i32 0
  %ins1 = insertelement <4 x i64> %ins0, i64 %a1, i32 1
  %ins2 = insertelement <4 x i64> %ins1, i64 %a2, i32 2
  %ins3 = insertelement <4 x i64> %ins2, i64 %a3, i32 3
  ret <4 x i64> %ins3
}

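; Integer build vectors are assembled with vmovd/vpinsrd from the argument
; registers; AVX1 has to merge the halves with vinsertf128, while AVX2 can
; stay in the integer domain with vinserti128.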
define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) {
; AVX-32-LABEL: test_buildvector_v8i32:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT:    retl
;
; AVX1-64-LABEL: test_buildvector_v8i32:
; AVX1-64:       # %bb.0:
; AVX1-64-NEXT:    vmovd %edi, %xmm0
; AVX1-64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
; AVX1-64-NEXT:    vmovd %r8d, %xmm1
; AVX1-64-NEXT:    vpinsrd $1, %r9d, %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-64-NEXT:    retq
;
; AVX2-64-LABEL: test_buildvector_v8i32:
; AVX2-64:       # %bb.0:
; AVX2-64-NEXT:    vmovd %edi, %xmm0
; AVX2-64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
; AVX2-64-NEXT:    vmovd %r8d, %xmm1
; AVX2-64-NEXT:    vpinsrd $1, %r9d, %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-64-NEXT:    retq
  %ins0 = insertelement <8 x i32> undef, i32 %a0, i32 0
  %ins1 = insertelement <8 x i32> %ins0, i32 %a1, i32 1
  %ins2 = insertelement <8 x i32> %ins1, i32 %a2, i32 2
  %ins3 = insertelement <8 x i32> %ins2, i32 %a3, i32 3
  %ins4 = insertelement <8 x i32> %ins3, i32 %a4, i32 4
  %ins5 = insertelement <8 x i32> %ins4, i32 %a5, i32 5
  %ins6 = insertelement <8 x i32> %ins5, i32 %a6, i32 6
  %ins7 = insertelement <8 x i32> %ins6, i32 %a7, i32 7
  ret <8 x i32> %ins7
}

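; Only the first six i16 arguments arrive in integer registers on 64-bit
; targets; the remaining elements are inserted directly from the stack
; with vpinsrw.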
define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) {
; AVX1-32-LABEL: test_buildvector_v16i16:
; AVX1-32:       # %bb.0:
; AVX1-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-32-NEXT:    retl
;
; AVX1-64-LABEL: test_buildvector_v16i16:
; AVX1-64:       # %bb.0:
; AVX1-64-NEXT:    vmovd %edi, %xmm0
; AVX1-64-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-64-NEXT:    vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-64-NEXT:    retq
;
; AVX2-32-LABEL: test_buildvector_v16i16:
; AVX2-32:       # %bb.0:
; AVX2-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-32-NEXT:    retl
;
; AVX2-64-LABEL: test_buildvector_v16i16:
; AVX2-64:       # %bb.0:
; AVX2-64-NEXT:    vmovd %edi, %xmm0
; AVX2-64-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-64-NEXT:    vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-64-NEXT:    retq
  %ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0
  %ins1 = insertelement <16 x i16> %ins0, i16 %a1, i32 1
  %ins2 = insertelement <16 x i16> %ins1, i16 %a2, i32 2
  %ins3 = insertelement <16 x i16> %ins2, i16 %a3, i32 3
  %ins4 = insertelement <16 x i16> %ins3, i16 %a4, i32 4
  %ins5 = insertelement <16 x i16> %ins4, i16 %a5, i32 5
  %ins6 = insertelement <16 x i16> %ins5, i16 %a6, i32 6
  %ins7 = insertelement <16 x i16> %ins6, i16 %a7, i32 7
  %ins8 = insertelement <16 x i16> %ins7, i16 %a8, i32 8
  %ins9 = insertelement <16 x i16> %ins8, i16 %a9, i32 9
  %ins10 = insertelement <16 x i16> %ins9, i16 %a10, i32 10
  %ins11 = insertelement <16 x i16> %ins10, i16 %a11, i32 11
  %ins12 = insertelement <16 x i16> %ins11, i16 %a12, i32 12
  %ins13 = insertelement <16 x i16> %ins12, i16 %a13, i32 13
  %ins14 = insertelement <16 x i16> %ins13, i16 %a14, i32 14
  %ins15 = insertelement <16 x i16> %ins14, i16 %a15, i32 15
  ret <16 x i16> %ins15
}

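; i8 elements follow the same pattern with vpinsrb; on 32-bit targets every
; element is loaded from the stack.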
define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) {
; AVX1-32-LABEL: test_buildvector_v32i8:
; AVX1-32:       # %bb.0:
; AVX1-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX1-32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-32-NEXT:    retl
;
; AVX1-64-LABEL: test_buildvector_v32i8:
; AVX1-64:       # %bb.0:
; AVX1-64-NEXT:    vmovd %edi, %xmm0
; AVX1-64-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX1-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-64-NEXT:    vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX1-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-64-NEXT:    retq
;
; AVX2-32-LABEL: test_buildvector_v32i8:
; AVX2-32:       # %bb.0:
; AVX2-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX2-32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-32-NEXT:    retl
;
; AVX2-64-LABEL: test_buildvector_v32i8:
; AVX2-64:       # %bb.0:
; AVX2-64-NEXT:    vmovd %edi, %xmm0
; AVX2-64-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX2-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-64-NEXT:    vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX2-64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-64-NEXT:    retq
  %ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0
  %ins1 = insertelement <32 x i8> %ins0, i8 %a1, i32 1
  %ins2 = insertelement <32 x i8> %ins1, i8 %a2, i32 2
  %ins3 = insertelement <32 x i8> %ins2, i8 %a3, i32 3
  %ins4 = insertelement <32 x i8> %ins3, i8 %a4, i32 4
  %ins5 = insertelement <32 x i8> %ins4, i8 %a5, i32 5
  %ins6 = insertelement <32 x i8> %ins5, i8 %a6, i32 6
  %ins7 = insertelement <32 x i8> %ins6, i8 %a7, i32 7
  %ins8 = insertelement <32 x i8> %ins7, i8 %a8, i32 8
  %ins9 = insertelement <32 x i8> %ins8, i8 %a9, i32 9
  %ins10 = insertelement <32 x i8> %ins9, i8 %a10, i32 10
  %ins11 = insertelement <32 x i8> %ins10, i8 %a11, i32 11
  %ins12 = insertelement <32 x i8> %ins11, i8 %a12, i32 12
  %ins13 = insertelement <32 x i8> %ins12, i8 %a13, i32 13
  %ins14 = insertelement <32 x i8> %ins13, i8 %a14, i32 14
  %ins15 = insertelement <32 x i8> %ins14, i8 %a15, i32 15
  %ins16 = insertelement <32 x i8> %ins15, i8 %a16, i32 16
  %ins17 = insertelement <32 x i8> %ins16, i8 %a17, i32 17
  %ins18 = insertelement <32 x i8> %ins17, i8 %a18, i32 18
  %ins19 = insertelement <32 x i8> %ins18, i8 %a19, i32 19
  %ins20 = insertelement <32 x i8> %ins19, i8 %a20, i32 20
  %ins21 = insertelement <32 x i8> %ins20, i8 %a21, i32 21
  %ins22 = insertelement <32 x i8> %ins21, i8 %a22, i32 22
  %ins23 = insertelement <32 x i8> %ins22, i8 %a23, i32 23
  %ins24 = insertelement <32 x i8> %ins23, i8 %a24, i32 24
  %ins25 = insertelement <32 x i8> %ins24, i8 %a25, i32 25
  %ins26 = insertelement <32 x i8> %ins25, i8 %a26, i32 26
  %ins27 = insertelement <32 x i8> %ins26, i8 %a27, i32 27
  %ins28 = insertelement <32 x i8> %ins27, i8 %a28, i32 28
  %ins29 = insertelement <32 x i8> %ins28, i8 %a29, i32 29
  %ins30 = insertelement <32 x i8> %ins29, i8 %a30, i32 30
  %ins31 = insertelement <32 x i8> %ins30, i8 %a31, i32 31
  ret <32 x i8> %ins31
}

; PR30780

define <8 x i32> @test_buildvector_v8i32_splat_sext_i8(i8 %in) {
; AVX1-32-LABEL: test_buildvector_v8i32_splat_sext_i8:
; AVX1-32:       # %bb.0:
; AVX1-32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; AVX1-32-NEXT:    vmovd %eax, %xmm0
; AVX1-32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-32-NEXT:    retl
;
; AVX1-64-LABEL: test_buildvector_v8i32_splat_sext_i8:
; AVX1-64:       # %bb.0:
; AVX1-64-NEXT:    movsbl %dil, %eax
; AVX1-64-NEXT:    vmovd %eax, %xmm0
; AVX1-64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-64-NEXT:    retq
;
; AVX2-32-LABEL: test_buildvector_v8i32_splat_sext_i8:
; AVX2-32:       # %bb.0:
; AVX2-32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; AVX2-32-NEXT:    vmovd %eax, %xmm0
; AVX2-32-NEXT:    vpbroadcastd %xmm0, %ymm0
; AVX2-32-NEXT:    retl
;
; AVX2-64-LABEL: test_buildvector_v8i32_splat_sext_i8:
; AVX2-64:       # %bb.0:
; AVX2-64-NEXT:    movsbl %dil, %eax
; AVX2-64-NEXT:    vmovd %eax, %xmm0
; AVX2-64-NEXT:    vpbroadcastd %xmm0, %ymm0
; AVX2-64-NEXT:    retq
  %ext = sext i8 %in to i32
  %insert = insertelement <8 x i32> undef, i32 %ext, i32 0
  %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %splat
}

define <8 x i32> @test_buildvector_v8i32_splat_zext_i8(i8 %in) {
; AVX1-32-LABEL: test_buildvector_v8i32_splat_zext_i8:
; AVX1-32:       # %bb.0:
; AVX1-32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; AVX1-32-NEXT:    vmovd %eax, %xmm0
; AVX1-32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-32-NEXT:    retl
;
; AVX1-64-LABEL: test_buildvector_v8i32_splat_zext_i8:
; AVX1-64:       # %bb.0:
; AVX1-64-NEXT:    movzbl %dil, %eax
; AVX1-64-NEXT:    vmovd %eax, %xmm0
; AVX1-64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-64-NEXT:    retq
;
; AVX2-32-LABEL: test_buildvector_v8i32_splat_zext_i8:
; AVX2-32:       # %bb.0:
; AVX2-32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; AVX2-32-NEXT:    vmovd %eax, %xmm0
; AVX2-32-NEXT:    vpbroadcastd %xmm0, %ymm0
; AVX2-32-NEXT:    retl
;
; AVX2-64-LABEL: test_buildvector_v8i32_splat_zext_i8:
; AVX2-64:       # %bb.0:
; AVX2-64-NEXT:    movzbl %dil, %eax
; AVX2-64-NEXT:    vmovd %eax, %xmm0
; AVX2-64-NEXT:    vpbroadcastd %xmm0, %ymm0
; AVX2-64-NEXT:    retq
  %ext = zext i8 %in to i32
  %insert = insertelement <8 x i32> undef, i32 %ext, i32 0
  %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %splat
}