; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=avx512vl | FileCheck %s

; Checks llc lowering (x86-64, AVX512VL) of the AVX-512 masked-truncate
; intrinsics llvm.x86.avx512.mask.pmov.{qw,dw}.512 with an undef source,
; a constant pass-through vector, and a one-bit mask, where the result is
; bitcast to a differently-typed vector before being returned.
;
; NOTE(review): the call sites originally carried an attribute group
; reference "#3" that was never defined in the file; LLVM rejects
; references to undefined attribute groups, so the dangling "#3" tokens
; have been dropped. This does not change the generated code being checked.

define <2 x i64> @foo() {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
; CHECK-NEXT:    movb $1, %al
; CHECK-NEXT:    kmovw %eax, %k1
; CHECK-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; CHECK-NEXT:    vmovdqa32 %ymm1, %ymm1 {%k1} {z}
; CHECK-NEXT:    vpmovdw %ymm1, %xmm1
; CHECK-NEXT:    vpblendvb %xmm1, %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %1 = tail call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> undef, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, i8 1)
  %2 = bitcast <8 x i16> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i64> @goo() {
; CHECK-LABEL: goo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT:    movw $1, %ax
; CHECK-NEXT:    kmovw %eax, %k1
; CHECK-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; CHECK-NEXT:    vpmovdw %zmm1, %ymm1
; CHECK-NEXT:    vpblendvb %ymm1, %ymm0, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %1 = tail call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> undef, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, i16 1)
  %2 = bitcast <16 x i16> %1 to <4 x i64>
  ret <4 x i64> %2
}

declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)
declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16)