; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlvbmi2-builtins.c

define <2 x i64> @test_mm_mask_compress_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_mask_compress_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpcompressw %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_compress_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressw %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <8 x i16>
  %1 = bitcast <2 x i64> %__S to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16> %0, <8 x i16> %1, i8 %__U)
  %3 = bitcast <8 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_mm_maskz_compress_epi16(i8 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_maskz_compress_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpcompressw %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_compress_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressw %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <8 x i16>
  %1 = tail call <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16> %0, <8 x i16> zeroinitializer, i8 %__U)
  %2 = bitcast <8 x i16> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <2 x i64> @test_mm_mask_compress_epi8(<2 x i64> %__S, i16 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_mask_compress_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpcompressb %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_compress_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressb %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <16 x i8>
  %1 = bitcast <2 x i64> %__S to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8> %0, <16 x i8> %1, i16 %__U)
  %3 = bitcast <16 x i8> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_mm_maskz_compress_epi8(i16 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_maskz_compress_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpcompressb %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_compress_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressb %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <16 x i8>
  %1 = tail call <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8> %0, <16 x i8> zeroinitializer, i16 %__U)
  %2 = bitcast <16 x i8> %1 to <2 x i64>
  ret <2 x i64> %2
}
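
; The store forms below compress the selected elements of %__D straight to
; memory. Going by the function names and the NOTE above, the C-level
; analogue would be along the lines of (illustrative, not taken from this
; file):
;   _mm_mask_compressstoreu_epi16(__P, __U, __D);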
define void @test_mm_mask_compressstoreu_epi16(i8* %__P, i8 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_mask_compressstoreu_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpcompressw %xmm0, (%ecx) {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_compressstoreu_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %esi, %k1
; X64-NEXT:    vpcompressw %xmm0, (%rdi) {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <8 x i16>
  %1 = bitcast i8* %__P to i16*
  %2 = bitcast i8 %__U to <8 x i1>
  tail call void @llvm.masked.compressstore.v8i16(<8 x i16> %0, i16* %1, <8 x i1> %2)
  ret void
}

define void @test_mm_mask_compressstoreu_epi8(i8* %__P, i16 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_mask_compressstoreu_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vpcompressb %xmm0, (%eax) {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_compressstoreu_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %esi, %k1
; X64-NEXT:    vpcompressb %xmm0, (%rdi) {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <16 x i8>
  %1 = bitcast i16 %__U to <16 x i1>
  tail call void @llvm.masked.compressstore.v16i8(<16 x i8> %0, i8* %__P, <16 x i1> %1)
  ret void
}
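
; The expand tests below are the inverse operation: VPEXPANDW/VPEXPANDB take
; consecutive low elements of the source and scatter them into the lanes
; selected by the mask, first register-to-register and then loading from
; memory (the expandloadu tests). Presumed C-level analogues, inferred from
; the function names:
;   _mm_mask_expand_epi16(__S, __U, __D);
;   _mm_mask_expandloadu_epi16(__S, __U, __P);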
define <2 x i64> @test_mm_mask_expand_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_mask_expand_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpexpandw %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_expand_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <8 x i16>
  %1 = bitcast <2 x i64> %__S to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16> %0, <8 x i16> %1, i8 %__U)
  %3 = bitcast <8 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_mm_maskz_expand_epi16(i8 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_maskz_expand_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpexpandw %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_expand_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <8 x i16>
  %1 = tail call <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16> %0, <8 x i16> zeroinitializer, i8 %__U)
  %2 = bitcast <8 x i16> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <2 x i64> @test_mm_mask_expand_epi8(<2 x i64> %__S, i16 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_mask_expand_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_expand_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <16 x i8>
  %1 = bitcast <2 x i64> %__S to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8> %0, <16 x i8> %1, i16 %__U)
  %3 = bitcast <16 x i8> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_mm_maskz_expand_epi8(i16 zeroext %__U, <2 x i64> %__D) {
; X86-LABEL: test_mm_maskz_expand_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_expand_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__D to <16 x i8>
  %1 = tail call <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8> %0, <16 x i8> zeroinitializer, i16 %__U)
  %2 = bitcast <16 x i8> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <2 x i64> @test_mm_mask_expandloadu_epi16(<2 x i64> %__S, i8 zeroext %__U, i8* readonly %__P) {
; X86-LABEL: test_mm_mask_expandloadu_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    kmovd %ecx, %k1
; X86-NEXT:    vpexpandw (%eax), %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_expandloadu_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw (%rsi), %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__S to <8 x i16>
  %1 = bitcast i8* %__P to i16*
  %2 = bitcast i8 %__U to <8 x i1>
  %3 = tail call <8 x i16> @llvm.masked.expandload.v8i16(i16* %1, <8 x i1> %2, <8 x i16> %0)
  %4 = bitcast <8 x i16> %3 to <2 x i64>
  ret <2 x i64> %4
}

define <2 x i64> @test_mm_maskz_expandloadu_epi16(i8 zeroext %__U, i8* readonly %__P) {
; X86-LABEL: test_mm_maskz_expandloadu_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    kmovd %ecx, %k1
; X86-NEXT:    vpexpandw (%eax), %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_expandloadu_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw (%rsi), %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast i8* %__P to i16*
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = tail call <8 x i16> @llvm.masked.expandload.v8i16(i16* %0, <8 x i1> %1, <8 x i16> zeroinitializer)
  %3 = bitcast <8 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_mm_mask_expandloadu_epi8(<2 x i64> %__S, i16 zeroext %__U, i8* readonly %__P) {
; X86-LABEL: test_mm_mask_expandloadu_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb (%eax), %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_expandloadu_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb (%rsi), %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__S to <16 x i8>
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = tail call <16 x i8> @llvm.masked.expandload.v16i8(i8* %__P, <16 x i1> %1, <16 x i8> %0)
  %3 = bitcast <16 x i8> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_mm_maskz_expandloadu_epi8(i16 zeroext %__U, i8* readonly %__P) {
; X86-LABEL: test_mm_maskz_expandloadu_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb (%eax), %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_expandloadu_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb (%rsi), %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast i16 %__U to <16 x i1>
  %1 = tail call <16 x i8> @llvm.masked.expandload.v16i8(i8* %__P, <16 x i1> %0, <16 x i8> zeroinitializer)
  %2 = bitcast <16 x i8> %1 to <2 x i64>
  ret <2 x i64> %2
}
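
; 256-bit (ymm) variants of the compress/expand tests follow. Only the vector
; width and the mask type change relative to the 128-bit tests above (i16
; masks for 16 x i16 vectors, i32 masks for 32 x i8 vectors).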
define <4 x i64> @test_mm256_mask_compress_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_mask_compress_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpcompressw %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_compress_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressw %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <16 x i16>
  %1 = bitcast <4 x i64> %__S to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16> %0, <16 x i16> %1, i16 %__U)
  %3 = bitcast <16 x i16> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <4 x i64> @test_mm256_maskz_compress_epi16(i16 zeroext %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_maskz_compress_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpcompressw %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_compress_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressw %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <16 x i16>
  %1 = tail call <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16> %0, <16 x i16> zeroinitializer, i16 %__U)
  %2 = bitcast <16 x i16> %1 to <4 x i64>
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_mask_compress_epi8(<4 x i64> %__S, i32 %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_mask_compress_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpcompressb %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_compress_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressb %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <32 x i8>
  %1 = bitcast <4 x i64> %__S to <32 x i8>
  %2 = tail call <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8> %0, <32 x i8> %1, i32 %__U)
  %3 = bitcast <32 x i8> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <4 x i64> @test_mm256_maskz_compress_epi8(i32 %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_maskz_compress_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpcompressb %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_compress_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpcompressb %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <32 x i8>
  %1 = tail call <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8> %0, <32 x i8> zeroinitializer, i32 %__U)
  %2 = bitcast <32 x i8> %1 to <4 x i64>
  ret <4 x i64> %2
}

define void @test_mm256_mask_compressstoreu_epi16(i8* %__P, i16 zeroext %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_mask_compressstoreu_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vpcompressw %ymm0, (%eax) {%k1}
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_compressstoreu_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %esi, %k1
; X64-NEXT:    vpcompressw %ymm0, (%rdi) {%k1}
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <16 x i16>
  %1 = bitcast i8* %__P to i16*
  %2 = bitcast i16 %__U to <16 x i1>
  tail call void @llvm.masked.compressstore.v16i16(<16 x i16> %0, i16* %1, <16 x i1> %2)
  ret void
}

define void @test_mm256_mask_compressstoreu_epi8(i8* %__P, i32 %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_mask_compressstoreu_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vpcompressb %ymm0, (%eax) {%k1}
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_compressstoreu_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %esi, %k1
; X64-NEXT:    vpcompressb %ymm0, (%rdi) {%k1}
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <32 x i8>
  %1 = bitcast i32 %__U to <32 x i1>
  tail call void @llvm.masked.compressstore.v32i8(<32 x i8> %0, i8* %__P, <32 x i1> %1)
  ret void
}
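
; 256-bit expand and expand-load tests. Note that, unlike these, the 256-bit
; compress-store tests above end in vzeroupper, since they return to the
; caller after writing ymm state.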
define <4 x i64> @test_mm256_mask_expand_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_mask_expand_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandw %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_expand_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <16 x i16>
  %1 = bitcast <4 x i64> %__S to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16> %0, <16 x i16> %1, i16 %__U)
  %3 = bitcast <16 x i16> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <4 x i64> @test_mm256_maskz_expand_epi16(i16 zeroext %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_maskz_expand_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandw %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_expand_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <16 x i16>
  %1 = tail call <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16> %0, <16 x i16> zeroinitializer, i16 %__U)
  %2 = bitcast <16 x i16> %1 to <4 x i64>
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_mask_expand_epi8(<4 x i64> %__S, i32 %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_mask_expand_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_expand_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <32 x i8>
  %1 = bitcast <4 x i64> %__S to <32 x i8>
  %2 = tail call <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8> %0, <32 x i8> %1, i32 %__U)
  %3 = bitcast <32 x i8> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <4 x i64> @test_mm256_maskz_expand_epi8(i32 %__U, <4 x i64> %__D) {
; X86-LABEL: test_mm256_maskz_expand_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_expand_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__D to <32 x i8>
  %1 = tail call <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8> %0, <32 x i8> zeroinitializer, i32 %__U)
  %2 = bitcast <32 x i8> %1 to <4 x i64>
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_mask_expandloadu_epi16(<4 x i64> %__S, i16 zeroext %__U, i8* readonly %__P) {
; X86-LABEL: test_mm256_mask_expandloadu_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandw (%eax), %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_expandloadu_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw (%rsi), %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__S to <16 x i16>
  %1 = bitcast i8* %__P to i16*
  %2 = bitcast i16 %__U to <16 x i1>
  %3 = tail call <16 x i16> @llvm.masked.expandload.v16i16(i16* %1, <16 x i1> %2, <16 x i16> %0)
  %4 = bitcast <16 x i16> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <4 x i64> @test_mm256_maskz_expandloadu_epi16(i16 zeroext %__U, i8* readonly %__P) {
; X86-LABEL: test_mm256_maskz_expandloadu_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandw (%eax), %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_expandloadu_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandw (%rsi), %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast i8* %__P to i16*
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = tail call <16 x i16> @llvm.masked.expandload.v16i16(i16* %0, <16 x i1> %1, <16 x i16> zeroinitializer)
  %3 = bitcast <16 x i16> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <4 x i64> @test_mm256_mask_expandloadu_epi8(<4 x i64> %__S, i32 %__U, i8* readonly %__P) {
; X86-LABEL: test_mm256_mask_expandloadu_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb (%eax), %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_expandloadu_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb (%rsi), %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__S to <32 x i8>
  %1 = bitcast i32 %__U to <32 x i1>
  %2 = tail call <32 x i8> @llvm.masked.expandload.v32i8(i8* %__P, <32 x i1> %1, <32 x i8> %0)
  %3 = bitcast <32 x i8> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <4 x i64> @test_mm256_maskz_expandloadu_epi8(i32 %__U, i8* readonly %__P) {
; X86-LABEL: test_mm256_maskz_expandloadu_epi8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpexpandb (%eax), %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_expandloadu_epi8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpexpandb (%rsi), %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast i32 %__U to <32 x i1>
  %1 = tail call <32 x i8> @llvm.masked.expandload.v32i8(i8* %__P, <32 x i1> %0, <32 x i8> zeroinitializer)
  %2 = bitcast <32 x i8> %1 to <4 x i64>
  ret <4 x i64> %2
}
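
; The remaining tests cover the VBMI2 funnel shifts. The shldi tests exercise
; the immediate left funnel shifts (VPSHLDQ/VPSHLDD/VPSHLDW): each destination
; element is the high part of the double-width concatenation of the two source
; elements shifted left by the immediate, which the instruction interprets
; modulo the element width (hence the deliberately oversized $127 and $63
; immediates alongside $31).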
define <4 x i64> @test_mm256_mask_shldi_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shldi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldq $127, %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shldi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldq $127, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 127)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__S
  ret <4 x i64> %2
}

declare <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64>, <4 x i64>, i32)

define <4 x i64> @test_mm256_maskz_shldi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shldi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_shldi_epi64(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldi_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldq $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31)
  ret <4 x i64> %0
}

define <2 x i64> @test_mm_mask_shldi_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shldi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldq $127, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shldi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldq $127, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 127)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__S
  ret <2 x i64> %2
}

declare <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64>, <2 x i64>, i32)

define <2 x i64> @test_mm_maskz_shldi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shldi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
  ret <2 x i64> %2
}

define <2 x i64> @test_mm_shldi_epi64(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldi_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldq $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31)
  ret <2 x i64> %0
}
define <4 x i64> @test_mm256_mask_shldi_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shldi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldd $127, %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shldi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldd $127, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <8 x i32>
  %1 = bitcast <4 x i64> %__B to <8 x i32>
  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 127)
  %3 = bitcast <4 x i64> %__S to <8 x i32>
  %4 = bitcast i8 %__U to <8 x i1>
  %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
  %6 = bitcast <8 x i32> %5 to <4 x i64>
  ret <4 x i64> %6
}

declare <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32>, <8 x i32>, i32)

define <4 x i64> @test_mm256_maskz_shldi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shldi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <8 x i32>
  %1 = bitcast <4 x i64> %__B to <8 x i32>
  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 63)
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
  %5 = bitcast <8 x i32> %4 to <4 x i64>
  ret <4 x i64> %5
}

define <4 x i64> @test_mm256_shldi_epi32(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldi_epi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldd $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <4 x i64> %__A to <8 x i32>
  %1 = bitcast <4 x i64> %__B to <8 x i32>
  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 31)
  %3 = bitcast <8 x i32> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <2 x i64> @test_mm_mask_shldi_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shldi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldd $127, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shldi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldd $127, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <4 x i32>
  %1 = bitcast <2 x i64> %__B to <4 x i32>
  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 127)
  %3 = bitcast <2 x i64> %__S to <4 x i32>
  %4 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %5 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> %3
  %6 = bitcast <4 x i32> %5 to <2 x i64>
  ret <2 x i64> %6
}

declare <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32>, <4 x i32>, i32)

define <2 x i64> @test_mm_maskz_shldi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shldi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <4 x i32>
  %1 = bitcast <2 x i64> %__B to <4 x i32>
  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 63)
  %3 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %4 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> zeroinitializer
  %5 = bitcast <4 x i32> %4 to <2 x i64>
  ret <2 x i64> %5
}

define <2 x i64> @test_mm_shldi_epi32(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldi_epi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldd $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <2 x i64> %__A to <4 x i32>
  %1 = bitcast <2 x i64> %__B to <4 x i32>
  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 31)
  %3 = bitcast <4 x i32> %2 to <2 x i64>
  ret <2 x i64> %3
}
define <4 x i64> @test_mm256_mask_shldi_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shldi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpshldw $127, %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shldi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldw $127, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <16 x i16>
  %1 = bitcast <4 x i64> %__B to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 127)
  %3 = bitcast <4 x i64> %__S to <16 x i16>
  %4 = bitcast i16 %__U to <16 x i1>
  %5 = select <16 x i1> %4, <16 x i16> %2, <16 x i16> %3
  %6 = bitcast <16 x i16> %5 to <4 x i64>
  ret <4 x i64> %6
}

declare <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16>, <16 x i16>, i32)

define <4 x i64> @test_mm256_maskz_shldi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpshldw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shldi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <16 x i16>
  %1 = bitcast <4 x i64> %__B to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 63)
  %3 = bitcast i16 %__U to <16 x i1>
  %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
  %5 = bitcast <16 x i16> %4 to <4 x i64>
  ret <4 x i64> %5
}

define <4 x i64> @test_mm256_shldi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldi_epi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldw $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <4 x i64> %__A to <16 x i16>
  %1 = bitcast <4 x i64> %__B to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 31)
  %3 = bitcast <16 x i16> %2 to <4 x i64>
  ret <4 x i64> %3
}
define <2 x i64> @test_mm_mask_shldi_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shldi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldw $127, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shldi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldw $127, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <8 x i16>
  %1 = bitcast <2 x i64> %__B to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 127)
  %3 = bitcast <2 x i64> %__S to <8 x i16>
  %4 = bitcast i8 %__U to <8 x i1>
  %5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
  %6 = bitcast <8 x i16> %5 to <2 x i64>
  ret <2 x i64> %6
}

declare <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16>, <8 x i16>, i32)

define <2 x i64> @test_mm_maskz_shldi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shldi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <8 x i16>
  %1 = bitcast <2 x i64> %__B to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 63)
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
  %5 = bitcast <8 x i16> %4 to <2 x i64>
  ret <2 x i64> %5
}

define <2 x i64> @test_mm_shldi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldi_epi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldw $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <2 x i64> %__A to <8 x i16>
  %1 = bitcast <2 x i64> %__B to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 31)
  %3 = bitcast <8 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}
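
; The shrdi tests are the right-shift counterparts (VPSHRDQ/VPSHRDD/VPSHRDW),
; structured exactly like the shldi tests above.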
define <4 x i64> @test_mm256_mask_shrdi_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shrdi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdq $127, %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shrdi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdq $127, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 127)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__S
  ret <4 x i64> %2
}

declare <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64>, <4 x i64>, i32)

define <4 x i64> @test_mm256_maskz_shrdi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shrdi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_shrdi_epi64(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shrdi_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdq $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31)
  ret <4 x i64> %0
}

define <2 x i64> @test_mm_mask_shrdi_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shrdi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdq $127, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shrdi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdq $127, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 127)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__S
  ret <2 x i64> %2
}

declare <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64>, <2 x i64>, i32)

define <2 x i64> @test_mm_maskz_shrdi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdi_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shrdi_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63)
  %1 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
  ret <2 x i64> %2
}

define <2 x i64> @test_mm_shrdi_epi64(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shrdi_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdq $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31)
  ret <2 x i64> %0
}
define <4 x i64> @test_mm256_mask_shrdi_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shrdi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdd $127, %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shrdi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdd $127, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <8 x i32>
  %1 = bitcast <4 x i64> %__B to <8 x i32>
  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 127)
  %3 = bitcast <4 x i64> %__S to <8 x i32>
  %4 = bitcast i8 %__U to <8 x i1>
  %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
  %6 = bitcast <8 x i32> %5 to <4 x i64>
  ret <4 x i64> %6
}

declare <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32>, <8 x i32>, i32)

define <4 x i64> @test_mm256_maskz_shrdi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shrdi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <8 x i32>
  %1 = bitcast <4 x i64> %__B to <8 x i32>
  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 63)
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
  %5 = bitcast <8 x i32> %4 to <4 x i64>
  ret <4 x i64> %5
}

define <4 x i64> @test_mm256_shrdi_epi32(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shrdi_epi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdd $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <4 x i64> %__A to <8 x i32>
  %1 = bitcast <4 x i64> %__B to <8 x i32>
  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 31)
  %3 = bitcast <8 x i32> %2 to <4 x i64>
  ret <4 x i64> %3
}

define <2 x i64> @test_mm_mask_shrdi_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shrdi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdd $127, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shrdi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdd $127, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <4 x i32>
  %1 = bitcast <2 x i64> %__B to <4 x i32>
  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 127)
  %3 = bitcast <2 x i64> %__S to <4 x i32>
  %4 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %5 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> %3
  %6 = bitcast <4 x i32> %5 to <2 x i64>
  ret <2 x i64> %6
}
declare <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32>, <4 x i32>, i32)

define <2 x i64> @test_mm_maskz_shrdi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdi_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shrdi_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <4 x i32>
  %1 = bitcast <2 x i64> %__B to <4 x i32>
  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 63)
  %3 = bitcast i8 %__U to <8 x i1>
  %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %4 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> zeroinitializer
  %5 = bitcast <4 x i32> %4 to <2 x i64>
  ret <2 x i64> %5
}

define <2 x i64> @test_mm_shrdi_epi32(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shrdi_epi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdd $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <2 x i64> %__A to <4 x i32>
  %1 = bitcast <2 x i64> %__B to <4 x i32>
  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 31)
  %3 = bitcast <4 x i32> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <4 x i64> @test_mm256_mask_shrdi_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shrdi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpshrdw $127, %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shrdi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdw $127, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <16 x i16>
  %1 = bitcast <4 x i64> %__B to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 127)
  %3 = bitcast <4 x i64> %__S to <16 x i16>
  %4 = bitcast i16 %__U to <16 x i1>
  %5 = select <16 x i1> %4, <16 x i16> %2, <16 x i16> %3
  %6 = bitcast <16 x i16> %5 to <4 x i64>
  ret <4 x i64> %6
}

declare <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16>, <16 x i16>, i32)

define <4 x i64> @test_mm256_maskz_shrdi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpshrdw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shrdi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__A to <16 x i16>
  %1 = bitcast <4 x i64> %__B to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 63)
  %3 = bitcast i16 %__U to <16 x i1>
  %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
  %5 = bitcast <16 x i16> %4 to <4 x i64>
  ret <4 x i64> %5
}

define <4 x i64> @test_mm256_shrdi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shrdi_epi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdw $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <4 x i64> %__A to <16 x i16>
  %1 = bitcast <4 x i64> %__B to <16 x i16>
  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 31)
  %3 = bitcast <16 x i16> %2 to <4 x i64>
  ret <4 x i64> %3
}
define <2 x i64> @test_mm_mask_shrdi_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shrdi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdw $127, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shrdi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdw $127, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <8 x i16>
  %1 = bitcast <2 x i64> %__B to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 127)
  %3 = bitcast <2 x i64> %__S to <8 x i16>
  %4 = bitcast i8 %__U to <8 x i1>
  %5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
  %6 = bitcast <8 x i16> %5 to <2 x i64>
  ret <2 x i64> %6
}

declare <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16>, <8 x i16>, i32)

define <2 x i64> @test_mm_maskz_shrdi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdi_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shrdi_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__A to <8 x i16>
  %1 = bitcast <2 x i64> %__B to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 63)
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
  %5 = bitcast <8 x i16> %4 to <2 x i64>
  ret <2 x i64> %5
}

define <2 x i64> @test_mm_shrdi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shrdi_epi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdw $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <2 x i64> %__A to <8 x i16>
  %1 = bitcast <2 x i64> %__B to <8 x i16>
  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 31)
  %3 = bitcast <8 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}
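
; The shldv tests cover the variable-count left funnel shifts
; (VPSHLDVQ/VPSHLDVD/VPSHLDVW): each lane's shift count comes from the
; corresponding lane of the third operand, and the first operand serves as
; both source and destination.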
define <4 x i64> @test_mm256_mask_shldv_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shldv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shldv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
  ret <4 x i64> %0
}

define <4 x i64> @test_mm256_maskz_shldv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shldv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
  ret <4 x i64> %0
}

define <4 x i64> @test_mm256_shldv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldv_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
  ret <4 x i64> %0
}

define <2 x i64> @test_mm_mask_shldv_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shldv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shldv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
  ret <2 x i64> %0
}

define <2 x i64> @test_mm_maskz_shldv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shldv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
  ret <2 x i64> %0
}

define <2 x i64> @test_mm_shldv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldv_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
  ret <2 x i64> %0
}
define <4 x i64> @test_mm256_mask_shldv_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shldv_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shldv_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__S to <8 x i32>
  %1 = bitcast <4 x i64> %__A to <8 x i32>
  %2 = bitcast <4 x i64> %__B to <8 x i32>
  %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
  %4 = bitcast <8 x i32> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <4 x i64> @test_mm256_maskz_shldv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldv_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shldv_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__S to <8 x i32>
  %1 = bitcast <4 x i64> %__A to <8 x i32>
  %2 = bitcast <4 x i64> %__B to <8 x i32>
  %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
  %4 = bitcast <8 x i32> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <4 x i64> @test_mm256_shldv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldv_epi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <4 x i64> %__S to <8 x i32>
  %1 = bitcast <4 x i64> %__A to <8 x i32>
  %2 = bitcast <4 x i64> %__B to <8 x i32>
  %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
  %4 = bitcast <8 x i32> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <2 x i64> @test_mm_mask_shldv_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shldv_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shldv_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__S to <4 x i32>
  %1 = bitcast <2 x i64> %__A to <4 x i32>
  %2 = bitcast <2 x i64> %__B to <4 x i32>
  %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
  %4 = bitcast <4 x i32> %3 to <2 x i64>
  ret <2 x i64> %4
}

define <2 x i64> @test_mm_maskz_shldv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldv_epi32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shldv_epi32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__S to <4 x i32>
  %1 = bitcast <2 x i64> %__A to <4 x i32>
  %2 = bitcast <2 x i64> %__B to <4 x i32>
  %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
  %4 = bitcast <4 x i32> %3 to <2 x i64>
  ret <2 x i64> %4
}

define <2 x i64> @test_mm_shldv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldv_epi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <2 x i64> %__S to <4 x i32>
  %1 = bitcast <2 x i64> %__A to <4 x i32>
  %2 = bitcast <2 x i64> %__B to <4 x i32>
  %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
  %4 = bitcast <4 x i32> %3 to <2 x i64>
  ret <2 x i64> %4
}
define <4 x i64> @test_mm256_mask_shldv_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shldv_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shldv_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__S to <16 x i16>
  %1 = bitcast <4 x i64> %__A to <16 x i16>
  %2 = bitcast <4 x i64> %__B to <16 x i16>
  %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
  %4 = bitcast <16 x i16> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <4 x i64> @test_mm256_maskz_shldv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldv_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shldv_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %__S to <16 x i16>
  %1 = bitcast <4 x i64> %__A to <16 x i16>
  %2 = bitcast <4 x i64> %__B to <16 x i16>
  %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
  %4 = bitcast <16 x i16> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <4 x i64> @test_mm256_shldv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldv_epi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <4 x i64> %__S to <16 x i16>
  %1 = bitcast <4 x i64> %__A to <16 x i16>
  %2 = bitcast <4 x i64> %__B to <16 x i16>
  %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
  %4 = bitcast <16 x i16> %3 to <4 x i64>
  ret <4 x i64> %4
}

define <2 x i64> @test_mm_mask_shldv_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shldv_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shldv_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__S to <8 x i16>
  %1 = bitcast <2 x i64> %__A to <8 x i16>
  %2 = bitcast <2 x i64> %__B to <8 x i16>
  %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
  %4 = bitcast <8 x i16> %3 to <2 x i64>
  ret <2 x i64> %4
}

define <2 x i64> @test_mm_maskz_shldv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldv_epi16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shldv_epi16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = bitcast <2 x i64> %__S to <8 x i16>
  %1 = bitcast <2 x i64> %__A to <8 x i16>
  %2 = bitcast <2 x i64> %__B to <8 x i16>
  %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
  %4 = bitcast <8 x i16> %3 to <2 x i64>
  ret <2 x i64> %4
}

define <2 x i64> @test_mm_shldv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldv_epi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = bitcast <2 x i64> %__S to <8 x i16>
  %1 = bitcast <2 x i64> %__A to <8 x i16>
  %2 = bitcast <2 x i64> %__B to <8 x i16>
  %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
  %4 = bitcast <8 x i16> %3 to <2 x i64>
  ret <2 x i64> %4
}
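
; The shrdv tests are the variable-count right funnel shifts
; (VPSHRDVQ/VPSHRDVD/VPSHRDVW), mirroring the shldv tests above.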
define <4 x i64> @test_mm256_mask_shrdv_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_mask_shrdv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_shrdv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
  ret <4 x i64> %0
}

define <4 x i64> @test_mm256_maskz_shrdv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm256_maskz_shrdv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
  ret <4 x i64> %0
}

define <4 x i64> @test_mm256_shrdv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shrdv_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
  ret <4 x i64> %0
}

define <2 x i64> @test_mm_mask_shrdv_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_mask_shrdv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_mask_shrdv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
  ret <2 x i64> %0
}

define <2 x i64> @test_mm_maskz_shrdv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdv_epi64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    kmovd %eax, %k1
; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_maskz_shrdv_epi64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1
; X64-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT:    retq
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
  ret <2 x i64> %0
}

define <2 x i64> @test_mm_shrdv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shrdv_epi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
  ret <2 x i64> %0
}
%entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovd %eax, %k1 ; X86-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z} ; X86-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_shrdv_epi32: ; X64: # %bb.0: # %entry ; X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z} ; X64-NEXT: retq entry: %0 = bitcast <4 x i64> %__S to <8 x i32> %1 = bitcast <4 x i64> %__A to <8 x i32> %2 = bitcast <4 x i64> %__B to <8 x i32> %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U) %4 = bitcast <8 x i32> %3 to <4 x i64> ret <4 x i64> %4 } define <4 x i64> @test_mm256_shrdv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) { ; CHECK-LABEL: test_mm256_shrdv_epi32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 ; CHECK-NEXT: ret{{[l|q]}} entry: %0 = bitcast <4 x i64> %__S to <8 x i32> %1 = bitcast <4 x i64> %__A to <8 x i32> %2 = bitcast <4 x i64> %__B to <8 x i32> %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1) %4 = bitcast <8 x i32> %3 to <4 x i64> ret <4 x i64> %4 } define <2 x i64> @test_mm_mask_shrdv_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) { ; X86-LABEL: test_mm_mask_shrdv_epi32: ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovd %eax, %k1 ; X86-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} ; X86-NEXT: retl ; ; X64-LABEL: test_mm_mask_shrdv_epi32: ; X64: # %bb.0: # %entry ; X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} ; X64-NEXT: retq entry: %0 = bitcast <2 x i64> %__S to <4 x i32> %1 = bitcast <2 x i64> %__A to <4 x i32> %2 = bitcast <2 x i64> %__B to <4 x i32> %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U) %4 = bitcast <4 x i32> %3 to <2 x i64> ret <2 x i64> %4 } define <2 x i64> @test_mm_maskz_shrdv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) { ; X86-LABEL: test_mm_maskz_shrdv_epi32: ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovd %eax, %k1 ; X86-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z} ; X86-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shrdv_epi32: ; X64: # %bb.0: # %entry ; X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z} ; X64-NEXT: retq entry: %0 = bitcast <2 x i64> %__S to <4 x i32> %1 = bitcast <2 x i64> %__A to <4 x i32> %2 = bitcast <2 x i64> %__B to <4 x i32> %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U) %4 = bitcast <4 x i32> %3 to <2 x i64> ret <2 x i64> %4 } define <2 x i64> @test_mm_shrdv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) { ; CHECK-LABEL: test_mm_shrdv_epi32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 ; CHECK-NEXT: ret{{[l|q]}} entry: %0 = bitcast <2 x i64> %__S to <4 x i32> %1 = bitcast <2 x i64> %__A to <4 x i32> %2 = bitcast <2 x i64> %__B to <4 x i32> %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1) %4 = bitcast <4 x i32> %3 to <2 x i64> ret <2 x i64> %4 } define <4 x i64> @test_mm256_mask_shrdv_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) { ; X86-LABEL: test_mm256_mask_shrdv_epi16: ; X86: # %bb.0: # %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ; X86-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} ; X86-NEXT: retl ; ; X64-LABEL: test_mm256_mask_shrdv_epi16: ; X64: # %bb.0: # %entry ; 
X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} ; X64-NEXT: retq entry: %0 = bitcast <4 x i64> %__S to <16 x i16> %1 = bitcast <4 x i64> %__A to <16 x i16> %2 = bitcast <4 x i64> %__B to <16 x i16> %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U) %4 = bitcast <16 x i16> %3 to <4 x i64> ret <4 x i64> %4 } define <4 x i64> @test_mm256_maskz_shrdv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) { ; X86-LABEL: test_mm256_maskz_shrdv_epi16: ; X86: # %bb.0: # %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ; X86-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z} ; X86-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_shrdv_epi16: ; X64: # %bb.0: # %entry ; X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z} ; X64-NEXT: retq entry: %0 = bitcast <4 x i64> %__S to <16 x i16> %1 = bitcast <4 x i64> %__A to <16 x i16> %2 = bitcast <4 x i64> %__B to <16 x i16> %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U) %4 = bitcast <16 x i16> %3 to <4 x i64> ret <4 x i64> %4 } define <4 x i64> @test_mm256_shrdv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) { ; CHECK-LABEL: test_mm256_shrdv_epi16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 ; CHECK-NEXT: ret{{[l|q]}} entry: %0 = bitcast <4 x i64> %__S to <16 x i16> %1 = bitcast <4 x i64> %__A to <16 x i16> %2 = bitcast <4 x i64> %__B to <16 x i16> %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1) %4 = bitcast <16 x i16> %3 to <4 x i64> ret <4 x i64> %4 } define <2 x i64> @test_mm_mask_shrdv_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) { ; X86-LABEL: test_mm_mask_shrdv_epi16: ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovd %eax, %k1 ; X86-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} ; X86-NEXT: retl ; ; X64-LABEL: test_mm_mask_shrdv_epi16: ; X64: # %bb.0: # %entry ; X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} ; X64-NEXT: retq entry: %0 = bitcast <2 x i64> %__S to <8 x i16> %1 = bitcast <2 x i64> %__A to <8 x i16> %2 = bitcast <2 x i64> %__B to <8 x i16> %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U) %4 = bitcast <8 x i16> %3 to <2 x i64> ret <2 x i64> %4 } define <2 x i64> @test_mm_maskz_shrdv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) { ; X86-LABEL: test_mm_maskz_shrdv_epi16: ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovd %eax, %k1 ; X86-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z} ; X86-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shrdv_epi16: ; X64: # %bb.0: # %entry ; X64-NEXT: kmovd %edi, %k1 ; X64-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z} ; X64-NEXT: retq entry: %0 = bitcast <2 x i64> %__S to <8 x i16> %1 = bitcast <2 x i64> %__A to <8 x i16> %2 = bitcast <2 x i64> %__B to <8 x i16> %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U) %4 = bitcast <8 x i16> %3 to <2 x i64> ret <2 x i64> %4 } define <2 x i64> @test_mm_shrdv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) { ; CHECK-LABEL: test_mm_shrdv_epi16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 ; CHECK-NEXT: ret{{[l|q]}} entry: %0 = bitcast <2 x i64> %__S to <8 x i16> %1 = bitcast <2 x i64> %__A to <8 x i16> %2 = 
bitcast <2 x i64> %__B to <8 x i16> %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1) %4 = bitcast <8 x i16> %3 to <2 x i64> ret <2 x i64> %4 } declare <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16>, <8 x i16>, i8) declare <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8>, <16 x i8>, i16) declare void @llvm.masked.compressstore.v8i16(<8 x i16>, i16*, <8 x i1>) declare void @llvm.masked.compressstore.v16i8(<16 x i8>, i8*, <16 x i1>) declare <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16>, <8 x i16>, i8) declare <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8>, <16 x i8>, i16) declare <8 x i16> @llvm.masked.expandload.v8i16(i16*, <8 x i1>, <8 x i16>) declare <16 x i8> @llvm.masked.expandload.v16i8(i8*, <16 x i1>, <16 x i8>) declare <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16>, <16 x i16>, i16) declare <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8>, <32 x i8>, i32) declare void @llvm.masked.compressstore.v16i16(<16 x i16>, i16*, <16 x i1>) declare void @llvm.masked.compressstore.v32i8(<32 x i8>, i8*, <32 x i1>) declare <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16>, <16 x i16>, i16) declare <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8>, <32 x i8>, i32) declare <16 x i16> @llvm.masked.expandload.v16i16(i16*, <16 x i1>, <16 x i16>) declare <32 x i8> @llvm.masked.expandload.v32i8(i8*, <32 x i1>, <32 x i8>) declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, 
<8 x i16>, <8 x i16>, i8) declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
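;
; A hedged aside, kept in comments so it is not part of the checked test: the
; unmasked vpshrdv tests above compute a per-element funnel shift right, which
; can equivalently be expressed with LLVM's generic funnel-shift intrinsic.
; The function and value names below are illustrative, not from this test;
; extracted on its own it is a valid module:
;
;   define <8 x i16> @shrdv_w_128_via_fshr(<8 x i16> %s, <8 x i16> %a, <8 x i16> %b) {
;     ; @llvm.fshr concatenates %a (upper half) with %s (lower half) into a
;     ; double-width value per element, shifts it right by %b modulo 16, and
;     ; keeps the low 16 bits: the same result the unmasked
;     ; @llvm.x86.avx512.mask.vpshrdv.w.128(%s, %a, %b, i8 -1) call produces.
;     %r = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %a, <8 x i16> %s, <8 x i16> %b)
;     ret <8 x i16> %r
;   }
;   declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)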