; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw < %s | FileCheck %s --check-prefix=AVX512BW

define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
; AVX512BW-LABEL: test_load_32f64:
; AVX512BW:       ## %bb.0:
; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT:    vpmovb2m %zmm0, %k1
; AVX512BW-NEXT:    vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT:    kshiftrw $8, %k1, %k2
; AVX512BW-NEXT:    vblendmpd 64(%rdi), %zmm2, %zmm1 {%k2}
; AVX512BW-NEXT:    kshiftrd $16, %k1, %k1
; AVX512BW-NEXT:    vblendmpd 128(%rdi), %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    kshiftrw $8, %k1, %k1
; AVX512BW-NEXT:    vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
; AVX512BW-NEXT:    retq
  %res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
  ret <32 x double> %res
}

define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64> %src0) {
; AVX512BW-LABEL: test_load_32i64:
; AVX512BW:       ## %bb.0:
; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT:    vpmovb2m %zmm0, %k1
; AVX512BW-NEXT:    vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT:    kshiftrw $8, %k1, %k2
; AVX512BW-NEXT:    vpblendmq 64(%rdi), %zmm2, %zmm1 {%k2}
; AVX512BW-NEXT:    kshiftrd $16, %k1, %k1
; AVX512BW-NEXT:    vpblendmq 128(%rdi), %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    kshiftrw $8, %k1, %k1
; AVX512BW-NEXT:    vpblendmq 192(%rdi), %zmm4, %zmm3 {%k1}
; AVX512BW-NEXT:    retq
  %res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask, <32 x i64> %src0)
  ret <32 x i64> %res
}

declare <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32, <32 x i1> %mask, <32 x i64> %src0)
declare <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32, <32 x i1> %mask, <32 x double> %src0)
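
; A note on the lowering pattern exercised above (explanatory annotation only;
; not part of the autogenerated assertions, and my reading of the generated
; code rather than anything stated in the test itself): the <32 x i1> mask
; arrives as 32 bytes in %ymm0 with only bit 0 of each byte defined, so the
; lowering shifts each byte left by 7 (vpsllw $7) and uses vpmovb2m to move
; the byte sign bits into a 32-bit predicate in %k1. Each 512-bit masked
; blend (vblendmpd/vpblendmq) consumes only 8 mask bits, so the 32-bit mask
; is carved into four octets: the first blend uses %k1 directly (bits 0-7),
; kshiftrw $8 exposes bits 8-15, kshiftrd $16 brings bits 16-31 down for the
; third blend, and a final kshiftrw $8 exposes bits 24-31 for the fourth.
; The blends merge the loaded elements with the %src0 pass-through values
; held in %zmm1-%zmm4, giving the masked.load semantics across all 2048 bits.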