; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=SSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL,AVX512VLBW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+avx512vbmi | FileCheck %s --check-prefixes=AVX,AVX512VL,VLVBMI

define <2 x i64> @var_shuffle_v2i64(<2 x i64> %v, <2 x i64> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v2i64:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movq %xmm1, %rax
; SSE3-NEXT:    andl $1, %eax
; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE3-NEXT:    movq %xmm1, %rcx
; SSE3-NEXT:    andl $1, %ecx
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v2i64:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movq %xmm1, %rax
; SSSE3-NEXT:    andl $1, %eax
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSSE3-NEXT:    movq %xmm1, %rcx
; SSSE3-NEXT:    andl $1, %ecx
; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v2i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pxor %xmm2, %xmm2
; SSE41-NEXT:    pcmpeqq %xmm1, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,1,0,1]
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT:    movapd %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: var_shuffle_v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %index0 = extractelement <2 x i64> %indices, i32 0
  %index1 = extractelement <2 x i64> %indices, i32 1
  %v0 = extractelement <2 x i64> %v, i64 %index0
  %v1 = extractelement <2 x i64> %v, i64 %index1
  %ret0 = insertelement <2 x i64> undef, i64 %v0, i32 0
  %ret1 = insertelement <2 x i64> %ret0, i64 %v1, i32 1
  ret <2 x i64> %ret1
}

define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v4i32:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movd %xmm1, %eax
; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE3-NEXT:    movd %xmm2, %ecx
; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE3-NEXT:    movd %xmm2, %edx
; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE3-NEXT:    movd %xmm1, %esi
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    andl $3, %eax
; SSE3-NEXT:    andl $3, %ecx
; SSE3-NEXT:    andl $3, %edx
; SSE3-NEXT:    andl $3, %esi
; SSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE3-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE3-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v4i32:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [67372036,67372036,67372036,67372036]
; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSSE3-NEXT:    pmuludq %xmm2, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT:    pmuludq %xmm2, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v4i32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT:    pshufb %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: var_shuffle_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %index0 = extractelement <4 x i32> %indices, i32 0
  %index1 = extractelement <4 x i32> %indices, i32 1
  %index2 = extractelement <4 x i32> %indices, i32 2
  %index3 = extractelement <4 x i32> %indices, i32 3
  %v0 = extractelement <4 x i32> %v, i32 %index0
  %v1 = extractelement <4 x i32> %v, i32 %index1
  %v2 = extractelement <4 x i32> %v, i32 %index2
  %v3 = extractelement <4 x i32> %v, i32 %index3
  %ret0 = insertelement <4 x i32> undef, i32 %v0, i32 0
  %ret1 = insertelement <4 x i32> %ret0, i32 %v1, i32 1
  %ret2 = insertelement <4 x i32> %ret1, i32 %v2, i32 2
  %ret3 = insertelement <4 x i32> %ret2, i32 %v3, i32 3
  ret <4 x i32> %ret3
}

define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v8i16:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movd %xmm1, %r8d
; SSE3-NEXT:    pextrw $1, %xmm1, %r9d
; SSE3-NEXT:    pextrw $2, %xmm1, %r10d
; SSE3-NEXT:    pextrw $3, %xmm1, %esi
; SSE3-NEXT:    pextrw $4, %xmm1, %edi
; SSE3-NEXT:    pextrw $5, %xmm1, %eax
; SSE3-NEXT:    pextrw $6, %xmm1, %ecx
; SSE3-NEXT:    pextrw $7, %xmm1, %edx
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    andl $7, %r8d
; SSE3-NEXT:    andl $7, %r9d
; SSE3-NEXT:    andl $7, %r10d
; SSE3-NEXT:    andl $7, %esi
; SSE3-NEXT:    andl $7, %edi
; SSE3-NEXT:    andl $7, %eax
; SSE3-NEXT:    andl $7, %ecx
; SSE3-NEXT:    andl $7, %edx
; SSE3-NEXT:    movzwl -24(%rsp,%rdx,2), %edx
; SSE3-NEXT:    movd %edx, %xmm0
; SSE3-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
; SSE3-NEXT:    movd %ecx, %xmm1
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE3-NEXT:    movzwl -24(%rsp,%rax,2), %eax
; SSE3-NEXT:    movd %eax, %xmm0
; SSE3-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
; SSE3-NEXT:    movd %eax, %xmm2
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE3-NEXT:    movzwl -24(%rsp,%rsi,2), %eax
; SSE3-NEXT:    movd %eax, %xmm0
; SSE3-NEXT:    movzwl -24(%rsp,%r10,2), %eax
; SSE3-NEXT:    movd %eax, %xmm1
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE3-NEXT:    movzwl -24(%rsp,%r9,2), %eax
; SSE3-NEXT:    movd %eax, %xmm3
; SSE3-NEXT:    movzwl -24(%rsp,%r8,2), %eax
; SSE3-NEXT:    movd %eax, %xmm0
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v8i16:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    pmullw {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    paddw {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v8i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm1
; SSE41-NEXT:    paddw {{.*}}(%rip), %xmm1
; SSE41-NEXT:    pshufb %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVXNOVLBW-LABEL: var_shuffle_v8i16:
; AVXNOVLBW:       # %bb.0:
; AVXNOVLBW-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
; AVXNOVLBW-NEXT:    vpaddw {{.*}}(%rip), %xmm1, %xmm1
; AVXNOVLBW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVXNOVLBW-NEXT:    retq
;
; AVX512VL-LABEL: var_shuffle_v8i16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpermw %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT:    retq
  %index0 = extractelement <8 x i16> %indices, i32 0
  %index1 = extractelement <8 x i16> %indices, i32 1
  %index2 = extractelement <8 x i16> %indices, i32 2
  %index3 = extractelement <8 x i16> %indices, i32 3
  %index4 = extractelement <8 x i16> %indices, i32 4
  %index5 = extractelement <8 x i16> %indices, i32 5
  %index6 = extractelement <8 x i16> %indices, i32 6
  %index7 = extractelement <8 x i16> %indices, i32 7
  %v0 = extractelement <8 x i16> %v, i16 %index0
  %v1 = extractelement <8 x i16> %v, i16 %index1
  %v2 = extractelement <8 x i16> %v, i16 %index2
  %v3 = extractelement <8 x i16> %v, i16 %index3
  %v4 = extractelement <8 x i16> %v, i16 %index4
  %v5 = extractelement <8 x i16> %v, i16 %index5
  %v6 = extractelement <8 x i16> %v, i16 %index6
  %v7 = extractelement <8 x i16> %v, i16 %index7
  %ret0 = insertelement <8 x i16> undef, i16 %v0, i32 0
  %ret1 = insertelement <8 x i16> %ret0, i16 %v1, i32 1
  %ret2 = insertelement <8 x i16> %ret1, i16 %v2, i32 2
  %ret3 = insertelement <8 x i16> %ret2, i16 %v3, i32 3
  %ret4 = insertelement <8 x i16> %ret3, i16 %v4, i32 4
  %ret5 = insertelement <8 x i16> %ret4, i16 %v5, i32 5
  %ret6 = insertelement <8 x i16> %ret5, i16 %v6, i32 6
  %ret7 = insertelement <8 x i16> %ret6, i16 %v7, i32 7
  ret <8 x i16> %ret7
}

define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v16i8:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm8
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm15
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm9
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm3
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm10
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm7
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm11
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm6
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm12
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm5
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm13
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm4
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm14
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm1
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm2
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm0
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v16i8:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v16i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pshufb %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: var_shuffle_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %index0 = extractelement <16 x i8> %indices, i32 0
  %index1 = extractelement <16 x i8> %indices, i32 1
  %index2 = extractelement <16 x i8> %indices, i32 2
  %index3 = extractelement <16 x i8> %indices, i32 3
  %index4 = extractelement <16 x i8> %indices, i32 4
  %index5 = extractelement <16 x i8> %indices, i32 5
  %index6 = extractelement <16 x i8> %indices, i32 6
  %index7 = extractelement <16 x i8> %indices, i32 7
  %index8 = extractelement <16 x i8> %indices, i32 8
  %index9 = extractelement <16 x i8> %indices, i32 9
  %index10 = extractelement <16 x i8> %indices, i32 10
  %index11 = extractelement <16 x i8> %indices, i32 11
  %index12 = extractelement <16 x i8> %indices, i32 12
  %index13 = extractelement <16 x i8> %indices, i32 13
  %index14 = extractelement <16 x i8> %indices, i32 14
  %index15 = extractelement <16 x i8> %indices, i32 15
  %v0 = extractelement <16 x i8> %v, i8 %index0
  %v1 = extractelement <16 x i8> %v, i8 %index1
  %v2 = extractelement <16 x i8> %v, i8 %index2
  %v3 = extractelement <16 x i8> %v, i8 %index3
  %v4 = extractelement <16 x i8> %v, i8 %index4
  %v5 = extractelement <16 x i8> %v, i8 %index5
  %v6 = extractelement <16 x i8> %v, i8 %index6
  %v7 = extractelement <16 x i8> %v, i8 %index7
  %v8 = extractelement <16 x i8> %v, i8 %index8
  %v9 = extractelement <16 x i8> %v, i8 %index9
  %v10 = extractelement <16 x i8> %v, i8 %index10
  %v11 = extractelement <16 x i8> %v, i8 %index11
  %v12 = extractelement <16 x i8> %v, i8 %index12
  %v13 = extractelement <16 x i8> %v, i8 %index13
  %v14 = extractelement <16 x i8> %v, i8 %index14
  %v15 = extractelement <16 x i8> %v, i8 %index15
  %ret0 = insertelement <16 x i8> undef, i8 %v0, i32 0
  %ret1 = insertelement <16 x i8> %ret0, i8 %v1, i32 1
  %ret2 = insertelement <16 x i8> %ret1, i8 %v2, i32 2
  %ret3 = insertelement <16 x i8> %ret2, i8 %v3, i32 3
  %ret4 = insertelement <16 x i8> %ret3, i8 %v4, i32 4
  %ret5 = insertelement <16 x i8> %ret4, i8 %v5, i32 5
  %ret6 = insertelement <16 x i8> %ret5, i8 %v6, i32 6
  %ret7 = insertelement <16 x i8> %ret6, i8 %v7, i32 7
  %ret8 = insertelement <16 x i8> %ret7, i8 %v8, i32 8
  %ret9 = insertelement <16 x i8> %ret8, i8 %v9, i32 9
  %ret10 = insertelement <16 x i8> %ret9, i8 %v10, i32 10
  %ret11 = insertelement <16 x i8> %ret10, i8 %v11, i32 11
  %ret12 = insertelement <16 x i8> %ret11, i8 %v12, i32 12
  %ret13 = insertelement <16 x i8> %ret12, i8 %v13, i32 13
  %ret14 = insertelement <16 x i8> %ret13, i8 %v14, i32 14
  %ret15 = insertelement <16 x i8> %ret14, i8 %v15, i32 15
  ret <16 x i8> %ret15
}

define <2 x double> @var_shuffle_v2f64(<2 x double> %v, <2 x i64> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v2f64:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movq %xmm1, %rax
; SSE3-NEXT:    andl $1, %eax
; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE3-NEXT:    movq %xmm1, %rcx
; SSE3-NEXT:    andl $1, %ecx
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v2f64:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movq %xmm1, %rax
; SSSE3-NEXT:    andl $1, %eax
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSSE3-NEXT:    movq %xmm1, %rcx
; SSSE3-NEXT:    andl $1, %ecx
; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v2f64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    pxor %xmm0, %xmm0
; SSE41-NEXT:    pcmpeqq %xmm1, %xmm0
; SSE41-NEXT:    movddup {{.*#+}} xmm1 = xmm2[0,0]
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT:    movapd %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: var_shuffle_v2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %index0 = extractelement <2 x i64> %indices, i32 0
  %index1 = extractelement <2 x i64> %indices, i32 1
  %v0 = extractelement <2 x double> %v, i64 %index0
  %v1 = extractelement <2 x double> %v, i64 %index1
  %ret0 = insertelement <2 x double> undef, double %v0, i32 0
  %ret1 = insertelement <2 x double> %ret0, double %v1, i32 1
  ret <2 x double> %ret1
}

define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v4f32:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movd %xmm1, %eax
; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE3-NEXT:    movd %xmm2, %ecx
; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE3-NEXT:    movd %xmm2, %edx
; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE3-NEXT:    movd %xmm1, %esi
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    andl $3, %eax
; SSE3-NEXT:    andl $3, %ecx
; SSE3-NEXT:    andl $3, %edx
; SSE3-NEXT:    andl $3, %esi
; SSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE3-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE3-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v4f32:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [67372036,67372036,67372036,67372036]
; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSSE3-NEXT:    pmuludq %xmm2, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT:    pmuludq %xmm2, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v4f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT:    pshufb %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: var_shuffle_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %index0 = extractelement <4 x i32> %indices, i32 0
  %index1 = extractelement <4 x i32> %indices, i32 1
  %index2 = extractelement <4 x i32> %indices, i32 2
  %index3 = extractelement <4 x i32> %indices, i32 3
  %v0 = extractelement <4 x float> %v, i32 %index0
  %v1 = extractelement <4 x float> %v, i32 %index1
  %v2 = extractelement <4 x float> %v, i32 %index2
  %v3 = extractelement <4 x float> %v, i32 %index3
  %ret0 = insertelement <4 x float> undef, float %v0, i32 0
  %ret1 = insertelement <4 x float> %ret0, float %v1, i32 1
  %ret2 = insertelement <4 x float> %ret1, float %v2, i32 2
  %ret3 = insertelement <4 x float> %ret2, float %v3, i32 3
  ret <4 x float> %ret3
}

define <16 x i8> @var_shuffle_v16i8_from_v16i8_v32i8(<16 x i8> %v, <32 x i8> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v16i8_from_v16i8_v32i8:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm8
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm15
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm9
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm3
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm10
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm7
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm11
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm6
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm12
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm5
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm13
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm4
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm14
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm1
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm2
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    andl $15, %eax
; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm0
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v16i8_from_v16i8_v32i8:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v16i8_from_v16i8_v32i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pshufb %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: var_shuffle_v16i8_from_v16i8_v32i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %index0 = extractelement <32 x i8> %indices, i32 0
  %index1 = extractelement <32 x i8> %indices, i32 1
  %index2 = extractelement <32 x i8> %indices, i32 2
  %index3 = extractelement <32 x i8> %indices, i32 3
  %index4 = extractelement <32 x i8> %indices, i32 4
  %index5 = extractelement <32 x i8> %indices, i32 5
  %index6 = extractelement <32 x i8> %indices, i32 6
  %index7 = extractelement <32 x i8> %indices, i32 7
  %index8 = extractelement <32 x i8> %indices, i32 8
  %index9 = extractelement <32 x i8> %indices, i32 9
  %index10 = extractelement <32 x i8> %indices, i32 10
  %index11 = extractelement <32 x i8> %indices, i32 11
  %index12 = extractelement <32 x i8> %indices, i32 12
  %index13 = extractelement <32 x i8> %indices, i32 13
  %index14 = extractelement <32 x i8> %indices, i32 14
  %index15 = extractelement <32 x i8> %indices, i32 15
  %v0 = extractelement <16 x i8> %v, i8 %index0
  %v1 = extractelement <16 x i8> %v, i8 %index1
  %v2 = extractelement <16 x i8> %v, i8 %index2
  %v3 = extractelement <16 x i8> %v, i8 %index3
  %v4 = extractelement <16 x i8> %v, i8 %index4
  %v5 = extractelement <16 x i8> %v, i8 %index5
  %v6 = extractelement <16 x i8> %v, i8 %index6
  %v7 = extractelement <16 x i8> %v, i8 %index7
  %v8 = extractelement <16 x i8> %v, i8 %index8
  %v9 = extractelement <16 x i8> %v, i8 %index9
  %v10 = extractelement <16 x i8> %v, i8 %index10
  %v11 = extractelement <16 x i8> %v, i8 %index11
  %v12 = extractelement <16 x i8> %v, i8 %index12
  %v13 = extractelement <16 x i8> %v, i8 %index13
  %v14 = extractelement <16 x i8> %v, i8 %index14
  %v15 = extractelement <16 x i8> %v, i8 %index15
  %ret0 = insertelement <16 x i8> undef, i8 %v0, i32 0
  %ret1 = insertelement <16 x i8> %ret0, i8 %v1, i32 1
  %ret2 = insertelement <16 x i8> %ret1, i8 %v2, i32 2
  %ret3 = insertelement <16 x i8> %ret2, i8 %v3, i32 3
  %ret4 = insertelement <16 x i8> %ret3, i8 %v4, i32 4
  %ret5 = insertelement <16 x i8> %ret4, i8 %v5, i32 5
  %ret6 = insertelement <16 x i8> %ret5, i8 %v6, i32 6
  %ret7 = insertelement <16 x i8> %ret6, i8 %v7, i32 7
  %ret8 = insertelement <16 x i8> %ret7, i8 %v8, i32 8
  %ret9 = insertelement <16 x i8> %ret8, i8 %v9, i32 9
  %ret10 = insertelement <16 x i8> %ret9, i8 %v10, i32 10
  %ret11 = insertelement <16 x i8> %ret10, i8 %v11, i32 11
  %ret12 = insertelement <16 x i8> %ret11, i8 %v12, i32 12
  %ret13 = insertelement <16 x i8> %ret12, i8 %v13, i32 13
  %ret14 = insertelement <16 x i8> %ret13, i8 %v14, i32 14
  %ret15 = insertelement <16 x i8> %ret14, i8 %v15, i32 15
  ret <16 x i8> %ret15
}

define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %indices) nounwind {
; SSE3-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; SSE3:       # %bb.0:
; SSE3-NEXT:    pushq %rbp
; SSE3-NEXT:    pushq %r15
; SSE3-NEXT:    pushq %r14
; SSE3-NEXT:    pushq %r13
; SSE3-NEXT:    pushq %r12
; SSE3-NEXT:    pushq %rbx
; SSE3-NEXT:    subq $424, %rsp # imm = 0x1A8
; SSE3-NEXT:    movaps %xmm2, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r15d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r12d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r13d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, (%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
; SSE3-NEXT:    andl $31, %r8d
; SSE3-NEXT:    movzbl -96(%rsp,%r8), %esi
; SSE3-NEXT:    movd %esi, %xmm8
; SSE3-NEXT:    andl $31, %ebp
; SSE3-NEXT:    movzbl -64(%rsp,%rbp), %esi
; SSE3-NEXT:    movd %esi, %xmm15
; SSE3-NEXT:    andl $31, %edx
; SSE3-NEXT:    movzbl -32(%rsp,%rdx), %edx
; SSE3-NEXT:    movd %edx, %xmm9
; SSE3-NEXT:    andl $31, %ecx
; SSE3-NEXT:    movzbl (%rsp,%rcx), %ecx
; SSE3-NEXT:    movd %ecx, %xmm3
; SSE3-NEXT:    andl $31, %eax
; SSE3-NEXT:    movzbl 32(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm10
; SSE3-NEXT:    andl $31, %edi
; SSE3-NEXT:    movzbl 64(%rsp,%rdi), %eax
; SSE3-NEXT:    movd %eax, %xmm7
; SSE3-NEXT:    andl $31, %ebx
; SSE3-NEXT:    movzbl 96(%rsp,%rbx), %eax
; SSE3-NEXT:    movd %eax, %xmm11
; SSE3-NEXT:    andl $31, %r9d
; SSE3-NEXT:    movzbl 128(%rsp,%r9), %eax
; SSE3-NEXT:    movd %eax, %xmm6
; SSE3-NEXT:    andl $31, %r13d
; SSE3-NEXT:    movzbl 160(%rsp,%r13), %eax
; SSE3-NEXT:    movd %eax, %xmm12
; SSE3-NEXT:    andl $31, %r12d
; SSE3-NEXT:    movzbl 192(%rsp,%r12), %eax
; SSE3-NEXT:    movd %eax, %xmm5
; SSE3-NEXT:    andl $31, %r15d
; SSE3-NEXT:    movzbl 224(%rsp,%r15), %eax
; SSE3-NEXT:    movd %eax, %xmm13
; SSE3-NEXT:    andl $31, %r14d
; SSE3-NEXT:    movzbl 256(%rsp,%r14), %eax
; SSE3-NEXT:    movd %eax, %xmm4
; SSE3-NEXT:    andl $31, %r11d
; SSE3-NEXT:    movzbl 288(%rsp,%r11), %eax
; SSE3-NEXT:    movd %eax, %xmm14
; SSE3-NEXT:    andl $31, %r10d
; SSE3-NEXT:    movzbl 320(%rsp,%r10), %eax
; SSE3-NEXT:    movd %eax, %xmm1
; SSE3-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSE3-NEXT:    andl $31, %eax
; SSE3-NEXT:    movzbl 352(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm2
; SSE3-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSE3-NEXT:    andl $31, %eax
; SSE3-NEXT:    movzbl 384(%rsp,%rax), %eax
; SSE3-NEXT:    movd %eax, %xmm0
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE3-NEXT:    addq $424, %rsp # imm = 0x1A8
; SSE3-NEXT:    popq %rbx
; SSE3-NEXT:    popq %r12
; SSE3-NEXT:    popq %r13
; SSE3-NEXT:    popq %r14
; SSE3-NEXT:    popq %r15
; SSE3-NEXT:    popq %rbp
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    pushq %rbp
; SSSE3-NEXT:    pushq %r15
; SSSE3-NEXT:    pushq %r14
; SSSE3-NEXT:    pushq %r13
; SSSE3-NEXT:    pushq %r12
; SSSE3-NEXT:    pushq %rbx
; SSSE3-NEXT:    subq $424, %rsp # imm = 0x1A8
; SSSE3-NEXT:    movaps %xmm2, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r15d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r12d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r13d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, (%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
; SSSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
; SSSE3-NEXT:    andl $31, %r8d
; SSSE3-NEXT:    movzbl -96(%rsp,%r8), %esi
; SSSE3-NEXT:    movd %esi, %xmm8
; SSSE3-NEXT:    andl $31, %ebp
; SSSE3-NEXT:    movzbl -64(%rsp,%rbp), %esi
; SSSE3-NEXT:    movd %esi, %xmm15
; SSSE3-NEXT:    andl $31, %edx
; SSSE3-NEXT:    movzbl -32(%rsp,%rdx), %edx
; SSSE3-NEXT:    movd %edx, %xmm9
; SSSE3-NEXT:    andl $31, %ecx
; SSSE3-NEXT:    movzbl (%rsp,%rcx), %ecx
; SSSE3-NEXT:    movd %ecx, %xmm3
; SSSE3-NEXT:    andl $31, %eax
; SSSE3-NEXT:    movzbl 32(%rsp,%rax), %eax
; SSSE3-NEXT:    movd %eax, %xmm10
; SSSE3-NEXT:    andl $31, %edi
; SSSE3-NEXT:    movzbl 64(%rsp,%rdi), %eax
; SSSE3-NEXT:    movd %eax, %xmm7
; SSSE3-NEXT:    andl $31, %ebx
; SSSE3-NEXT:    movzbl 96(%rsp,%rbx), %eax
; SSSE3-NEXT:    movd %eax, %xmm11
; SSSE3-NEXT:    andl $31, %r9d
; SSSE3-NEXT:    movzbl 128(%rsp,%r9), %eax
; SSSE3-NEXT:    movd %eax, %xmm6
; SSSE3-NEXT:    andl $31, %r13d
; SSSE3-NEXT:    movzbl 160(%rsp,%r13), %eax
; SSSE3-NEXT:    movd %eax, %xmm12
; SSSE3-NEXT:    andl $31, %r12d
; SSSE3-NEXT:    movzbl 192(%rsp,%r12), %eax
; SSSE3-NEXT:    movd %eax, %xmm5
; SSSE3-NEXT:    andl $31, %r15d
; SSSE3-NEXT:    movzbl 224(%rsp,%r15), %eax
; SSSE3-NEXT:    movd %eax, %xmm13
; SSSE3-NEXT:    andl $31, %r14d
; SSSE3-NEXT:    movzbl 256(%rsp,%r14), %eax
; SSSE3-NEXT:    movd %eax, %xmm4
; SSSE3-NEXT:    andl $31, %r11d
; SSSE3-NEXT:    movzbl 288(%rsp,%r11), %eax
; SSSE3-NEXT:    movd %eax, %xmm14
; SSSE3-NEXT:    andl $31, %r10d
; SSSE3-NEXT:    movzbl 320(%rsp,%r10), %eax
; SSSE3-NEXT:    movd %eax, %xmm1
; SSSE3-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSSE3-NEXT:    andl $31, %eax
; SSSE3-NEXT:    movzbl 352(%rsp,%rax), %eax
; SSSE3-NEXT:    movd %eax, %xmm2
; SSSE3-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSSE3-NEXT:    andl $31, %eax
; SSSE3-NEXT:    movzbl 384(%rsp,%rax), %eax
; SSSE3-NEXT:    movd %eax, %xmm0
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSSE3-NEXT:    addq $424, %rsp # imm = 0x1A8
; SSSE3-NEXT:    popq %rbx
; SSSE3-NEXT:    popq %r12
; SSSE3-NEXT:    popq %r13
; SSSE3-NEXT:    popq %r14
; SSSE3-NEXT:    popq %r15
; SSSE3-NEXT:    popq %rbp
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    subq $392, %rsp # imm = 0x188
; SSE41-NEXT:    movd %xmm2, %eax
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, (%rsp)
; SSE41-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movzbl 352(%rsp,%rax), %eax
; SSE41-NEXT:    movd %eax, %xmm0
; SSE41-NEXT:    pextrb $1, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $1, 320(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $2, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $2, 288(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $3, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $3, 256(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $4, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $4, 224(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $5, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $5, 192(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $6, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $6, 160(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $7, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $7, 128(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $8, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $8, 96(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $9, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $9, 64(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $10, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $10, 32(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $11, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $11, (%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $12, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $12, -32(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $13, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $13, -64(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $14, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $14, -96(%rsp,%rax), %xmm0
; SSE41-NEXT:    pextrb $15, %xmm2, %eax
; SSE41-NEXT:    andl $31, %eax
; SSE41-NEXT:    pinsrb $15, -128(%rsp,%rax), %xmm0
; SSE41-NEXT:    addq $392, %rsp # imm = 0x188
; SSE41-NEXT:    retq
;
; XOP-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; XOP:       # %bb.0:
; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
; XOP-NEXT:    vpperm %xmm1, %xmm2, %xmm0, %xmm0
; XOP-NEXT:    vzeroupper
; XOP-NEXT:    retq
;
; AVX1-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpgtb {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
; AVX2-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpgtb {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX512-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
; AVX512-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpcmpgtb {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512VLBW-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; AVX512VLBW:       # %bb.0:
; AVX512VLBW-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
; AVX512VLBW-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX512VLBW-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
; AVX512VLBW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %k1
; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k1}
; AVX512VLBW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VLBW-NEXT:    vzeroupper
; AVX512VLBW-NEXT:    retq
;
; VLVBMI-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
; VLVBMI:       # %bb.0:
; VLVBMI-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
; VLVBMI-NEXT:    vpermb %ymm0, %ymm1, %ymm0
; VLVBMI-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; VLVBMI-NEXT:    vzeroupper
; VLVBMI-NEXT:    retq
  %index0 = extractelement <16 x i8> %indices, i32 0
  %index1 = extractelement <16 x i8> %indices, i32 1
  %index2 = extractelement <16 x i8> %indices, i32 2
  %index3 = extractelement <16 x i8> %indices, i32 3
  %index4 = extractelement <16 x i8> %indices, i32 4
  %index5 = extractelement <16 x i8> %indices, i32 5
  %index6 = extractelement <16 x i8> %indices, i32 6
  %index7 = extractelement <16 x i8> %indices, i32 7
  %index8 = extractelement <16 x i8> %indices, i32 8
  %index9 = extractelement <16 x i8> %indices, i32 9
  %index10 = extractelement <16 x i8> %indices, i32 10
  %index11 = extractelement <16 x i8> %indices, i32 11
  %index12 = extractelement <16 x i8> %indices, i32 12
  %index13 = extractelement <16 x i8> %indices, i32 13
  %index14 = extractelement <16 x i8> %indices, i32 14
  %index15 = extractelement <16 x i8> %indices, i32 15
  %v0 = extractelement <32 x i8> %v, i8 %index0
  %v1 = extractelement <32 x i8> %v, i8 %index1
  %v2 = extractelement <32 x i8> %v, i8 %index2
  %v3 = extractelement <32 x i8> %v, i8 %index3
  %v4 = extractelement <32 x i8> %v, i8 %index4
  %v5 = extractelement <32 x i8> %v, i8 %index5
  %v6 = extractelement <32 x i8> %v, i8 %index6
  %v7 = extractelement <32 x i8> %v, i8 %index7
  %v8 = extractelement <32 x i8> %v, i8 %index8
  %v9 = extractelement <32 x i8> %v, i8 %index9
  %v10 = extractelement <32 x i8> %v, i8 %index10
  %v11 = extractelement <32 x i8> %v, i8 %index11
  %v12 = extractelement <32 x i8> %v, i8 %index12
  %v13 = extractelement <32 x i8> %v, i8 %index13
  %v14 = extractelement <32 x i8> %v, i8 %index14
  %v15 = extractelement <32 x i8> %v, i8 %index15
  %ret0 = insertelement <16 x i8> undef, i8 %v0, i32 0
  %ret1 = insertelement <16 x i8> %ret0, i8 %v1, i32 1
  %ret2 = insertelement <16 x i8> %ret1, i8 %v2, i32 2
  %ret3 = insertelement <16 x i8> %ret2, i8 %v3, i32 3
  %ret4 = insertelement <16 x i8> %ret3, i8 %v4, i32 4
  %ret5 = insertelement <16 x i8> %ret4, i8 %v5, i32 5
  %ret6 = insertelement <16 x i8> %ret5, i8 %v6, i32 6
  %ret7 = insertelement <16 x i8> %ret6, i8 %v7, i32 7
  %ret8 = insertelement <16 x i8> %ret7, i8 %v8, i32 8
  %ret9 = insertelement <16 x i8> %ret8, i8 %v9, i32 9
  %ret10 = insertelement <16 x i8> %ret9, i8 %v10, i32 10
  %ret11 = insertelement <16 x i8> %ret10, i8 %v11, i32 11
  %ret12 = insertelement <16 x i8> %ret11, i8 %v12, i32 12
  %ret13 = insertelement <16 x i8> %ret12, i8 %v13, i32 13
  %ret14 = insertelement <16 x i8> %ret13, i8 %v14, i32 14
  %ret15 = insertelement <16 x i8> %ret14, i8 %v15, i32 15
  ret <16 x i8> %ret15
}