; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw < %s | FileCheck %s --check-prefix=AVX512BW
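;
; These tests check that a 2048-bit masked load (<32 x double> / <32 x i64>) is
; legalized into four 512-bit masked loads: the v32i1 mask is materialized into
; %k1 via vpsllw+vpmovb2m, and the mask bits for each 8-element chunk are
; extracted with kshiftrd/kshiftrw.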

define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0)  {
; AVX512BW-LABEL: test_load_32f64:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT:    vpmovb2m %zmm0, %k1
; AVX512BW-NEXT:    vmovupd (%rdi), %zmm1 {%k1}
; AVX512BW-NEXT:    kshiftrd $16, %k1, %k2
; AVX512BW-NEXT:    vmovupd 128(%rdi), %zmm3 {%k2}
; AVX512BW-NEXT:    kshiftrw $8, %k1, %k1
; AVX512BW-NEXT:    vmovupd 64(%rdi), %zmm2 {%k1}
; AVX512BW-NEXT:    kshiftrw $8, %k2, %k1
; AVX512BW-NEXT:    vmovupd 192(%rdi), %zmm4 {%k1}
; AVX512BW-NEXT:    vmovaps %zmm1, %zmm0
; AVX512BW-NEXT:    vmovaps %zmm2, %zmm1
; AVX512BW-NEXT:    vmovaps %zmm3, %zmm2
; AVX512BW-NEXT:    vmovaps %zmm4, %zmm3
; AVX512BW-NEXT:    retq
  %res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
  ret <32 x double> %res
}

define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64> %src0)  {
; AVX512BW-LABEL: test_load_32i64:
; AVX512BW:       ## BB#0:
; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT:    vpmovb2m %zmm0, %k1
; AVX512BW-NEXT:    vmovdqu64 (%rdi), %zmm1 {%k1}
; AVX512BW-NEXT:    kshiftrd $16, %k1, %k2
; AVX512BW-NEXT:    vmovdqu64 128(%rdi), %zmm3 {%k2}
; AVX512BW-NEXT:    kshiftrw $8, %k1, %k1
; AVX512BW-NEXT:    vmovdqu64 64(%rdi), %zmm2 {%k1}
; AVX512BW-NEXT:    kshiftrw $8, %k2, %k1
; AVX512BW-NEXT:    vmovdqu64 192(%rdi), %zmm4 {%k1}
; AVX512BW-NEXT:    vmovaps %zmm1, %zmm0
; AVX512BW-NEXT:    vmovaps %zmm2, %zmm1
; AVX512BW-NEXT:    vmovaps %zmm3, %zmm2
; AVX512BW-NEXT:    vmovaps %zmm4, %zmm3
; AVX512BW-NEXT:    retq
  %res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask, <32 x i64> %src0)
  ret <32 x i64> %res
}

declare <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32, <32 x i1> %mask, <32 x i64> %src0)
declare <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32, <32 x i1> %mask, <32 x double> %src0)