; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW

;
; Variable Shifts
;

define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: var_shift_v8i64:
; ALL:       ## BB#0:
; ALL-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = shl <8 x i64> %a, %b
  ret <8 x i64> %shift
}

define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
; ALL:       ## BB#0:
; ALL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = shl <16 x i32> %a, %b
  ret <16 x i32> %shift
}
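; AVX512F only provides 512-bit variable shifts at dword/qword granularity.
; Without AVX512BW, the v32i16 shift below is emulated by zero-unpacking the
; amounts and data into 32-bit lanes, shifting with vpsllvd and repacking with
; vpackusdw, and the v64i8 shift uses a vpblendvb ladder: the amount bits are
; moved into each byte's MSB (vpsllw $5) to select between copies shifted by
; 4, 2 and then 1.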
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
; AVX512DQ-NEXT:    vpsllvd %ymm5, %ymm6, %ymm5
; AVX512DQ-NEXT:    vpsrld $16, %ymm5, %ymm5
; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
; AVX512DQ-NEXT:    vpsllvd %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsrld $16, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
; AVX512DQ-NEXT:    vpsllvd %ymm2, %ymm5, %ymm2
; AVX512DQ-NEXT:    vpsrld $16, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
; AVX512DQ-NEXT:    vpsllvd %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsrld $16, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: var_shift_v32i16:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = shl <32 x i16> %a, %b
  ret <32 x i16> %shift
}

define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v64i8:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm4
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512DQ-NEXT:    vpand %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $2, %ymm0, %ymm4
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512DQ-NEXT:    vpand %ymm6, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $4, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsllw $5, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsllw $2, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpand %ymm6, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
  %shift = shl <64 x i8> %a, %b
  ret <64 x i8> %shift
}

;
; Uniform Variable Shifts
;

define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_shift_v8i64:
; ALL:       ## BB#0:
; ALL-NEXT:    vpsllq %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
  %shift = shl <8 x i64> %a, %splat
  ret <8 x i64> %shift
}

define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_shift_v16i32:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
; ALL-NEXT:    vpslld %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %shift = shl <16 x i32> %a, %splat
  ret <16 x i32> %shift
}

define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v32i16:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vmovd %xmm2, %eax
; AVX512DQ-NEXT:    movzwl %ax, %eax
; AVX512DQ-NEXT:    vmovd %eax, %xmm2
; AVX512DQ-NEXT:    vpsllw %xmm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw %xmm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vmovd %xmm1, %eax
; AVX512BW-NEXT:    movzwl %ax, %eax
; AVX512BW-NEXT:    vmovd %eax, %xmm1
; AVX512BW-NEXT:    vpsllw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %shift = shl <32 x i16> %a, %splat
  ret <32 x i16> %shift
}

define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vpbroadcastb %xmm2, %ymm2
; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm3
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $2, %ymm0, %ymm3
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512DQ-NEXT:    vpand %ymm5, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
; AVX512DQ-NEXT:    vpaddb %ymm6, %ymm6, %ymm7
; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $4, %ymm1, %ymm3
; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsllw $2, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
  %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = shl <64 x i8> %a, %splat
  ret <64 x i8> %shift
}

;
; Constant Shifts
;
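; A left shift by a constant is a multiply by a power of two, so without a
; native vpsllvw the v32i16 case below lowers to vpmullw against a constant
; pool of 2^n values, one multiply per ymm half.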
define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: constant_shift_v8i64:
; ALL:       ## BB#0:
; ALL-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = shl <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
  ret <8 x i64> %shift
}

define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
; ALL:       ## BB#0:
; ALL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = shl <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3>
  ret <16 x i32> %shift
}

define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX512DQ-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
  ret <32 x i16> %shift
}

define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm2
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; AVX512DQ-NEXT:    vpsllw $5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $2, %ymm0, %ymm2
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
; AVX512DQ-NEXT:    vpaddb %ymm6, %ymm6, %ymm7
; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $4, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsllw $2, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
  %shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <64 x i8> %shift
}

;
; Uniform Constant Shifts
;
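; Splatted constant amounts use the immediate shift forms. v64i8 has no byte
; shift, so vpsllw $3 is reused and a vpand with 248 (0xf8) clears the bits
; that were shifted across byte boundaries; with AVX512BW this is one shift
; and one vpandq on the full zmm register.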
define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
; ALL:       ## BB#0:
; ALL-NEXT:    vpsllq $7, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = shl <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
  ret <8 x i64> %shift
}

define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
; ALL:       ## BB#0:
; ALL-NEXT:    vpslld $5, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = shl <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  ret <16 x i32> %shift
}

define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vpsllw $3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $3, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vpsllw $3, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <32 x i16> %shift
}

define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
; AVX512DQ:       ## BB#0:
; AVX512DQ-NEXT:    vpsllw $3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsllw $3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vpsllw $3, %zmm0, %zmm0
; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = shl <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <64 x i8> %shift
}