; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -fast-isel-sink-local-values < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s

; ModuleID = 'mask_set.c'
source_filename = "mask_set.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare void @llvm.dbg.declare(metadata, metadata, metadata)

; Function Attrs: nounwind uwtable
declare i64 @calc_expected_mask_val(i8* %valp, i32 %el_size, i32 %length)
; Function Attrs: nounwind uwtable
declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, i8* %fname, i8* %input)

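; Check that at -O0 (FastISel) the i8 results of the mask-conversion
; intrinsics are zero-extended (movzbl/movzwl) before being passed to
; check_mask16, and that the mask register %k0 is spilled and reloaded
; around the intervening calls.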
; Function Attrs: nounwind uwtable
define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a, i8* %arraydecay, i8* %fname) {
; CHECK-LABEL: test_xmm:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    subq $56, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 64
; CHECK-NEXT:    vpmovw2m %xmm0, %k0
; CHECK-NEXT:    movl $2, %esi
; CHECK-NEXT:    movl $8, %eax
; CHECK-NEXT:    movq %rdx, %rdi
; CHECK-NEXT:    movq %rdx, {{[0-9]+}}(%rsp) ## 8-byte Spill
; CHECK-NEXT:    movl %eax, %edx
; CHECK-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT:    movq %rcx, {{[0-9]+}}(%rsp) ## 8-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
; CHECK-NEXT:    callq _calc_expected_mask_val
; CHECK-NEXT:    movl %eax, %edx
; CHECK-NEXT:    movw %dx, %r8w
; CHECK-NEXT:    movzwl %r8w, %esi
; CHECK-NEXT:    kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
; CHECK-NEXT:    kmovb %k0, %edi
; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload
; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rcx ## 8-byte Reload
; CHECK-NEXT:    callq _check_mask16
; CHECK-NEXT:    vmovaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
; CHECK-NEXT:    vpmovd2m %xmm0, %k0
; CHECK-NEXT:    kmovq %k0, %k1
; CHECK-NEXT:    kmovd %k0, %esi
; CHECK-NEXT:    movb %sil, %r9b
; CHECK-NEXT:    movzbl %r9b, %esi
; CHECK-NEXT:    movw %si, %r8w
; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rdi ## 8-byte Reload
; CHECK-NEXT:    movl $4, %esi
; CHECK-NEXT:    movl %esi, {{[0-9]+}}(%rsp) ## 4-byte Spill
; CHECK-NEXT:    movl {{[0-9]+}}(%rsp), %edx ## 4-byte Reload
; CHECK-NEXT:    movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill
; CHECK-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT:    movw %r8w, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT:    callq _calc_expected_mask_val
; CHECK-NEXT:    movw %ax, %r8w
; CHECK-NEXT:    movw {{[0-9]+}}(%rsp), %r10w ## 2-byte Reload
; CHECK-NEXT:    movzwl %r10w, %edi
; CHECK-NEXT:    movzwl %r8w, %esi
; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload
; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rcx ## 8-byte Reload
; CHECK-NEXT:    callq _check_mask16
; CHECK-NEXT:    movl %eax, (%rsp) ## 4-byte Spill
; CHECK-NEXT:    addq $56, %rsp
; CHECK-NEXT:    retq
  %d2 = bitcast <2 x i64> %a to <8 x i16>
  %m2 = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %d2)
  %conv7 = zext i8 %m2 to i16
  %call9 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 2, i32 8)
  %conv10 = trunc i64 %call9 to i16
  %call12 = call i32 @check_mask16(i16 zeroext %conv7, i16 zeroext %conv10, i8* %fname, i8* %arraydecay)
  %d3 = bitcast <2 x i64> %a to <4 x i32>
  %m3 = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %d3)
  %conv14 = zext i8 %m3 to i16
  %call16 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 4, i32 4)
  %conv17 = trunc i64 %call16 to i16
  %call19 = call i32 @check_mask16(i16 zeroext %conv14, i16 zeroext %conv17, i8* %fname, i8* %arraydecay)
  ret void
}

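; The cvtw2mask/cvtd2mask intrinsics below select to the AVX512 vpmovw2m and
; vpmovd2m instructions matched in the CHECK lines above; -mcpu=skx provides
; the required AVX512BW/AVX512DQ (+VL) features.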
; Function Attrs: nounwind readnone
declare i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16>)

; Function Attrs: nounwind readnone
declare i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32>)