; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
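; Each test ORs two vector compares around a call to @f, so the vXi1 mask result
; is live across the call; the checks below expect it to be spilled and reloaded
; with a kmov of the width matching the mask type.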

declare void @f()
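; <4 x i1>: spilled/reloaded as a 2-byte kmovw, rebuilt with vpmovm2d.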
define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_4i1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    vpcmpnleud %xmm1, %xmm0, %k0
; CHECK-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
; CHECK-NEXT:    korw %k1, %k0, %k0
; CHECK-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT:    callq _f
; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 ## 2-byte Reload
; CHECK-NEXT:    vpmovm2d %k0, %xmm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq

  %cmp_res = icmp ugt <4 x i32> %a, %b
  %cmp_res2 = icmp sgt <4 x i32> %a, %b
  call void @f()
  %res = or <4 x i1> %cmp_res, %cmp_res2
  ret <4 x i1> %res
}

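; <8 x i1>: combined with korb, still spilled/reloaded as a 2-byte kmovw, rebuilt with vpmovm2w.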
define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_8i1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    vpcmpnleud %ymm1, %ymm0, %k0
; CHECK-NEXT:    vpcmpgtd %ymm1, %ymm0, %k1
; CHECK-NEXT:    korb %k1, %k0, %k0
; CHECK-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq _f
; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 ## 2-byte Reload
; CHECK-NEXT:    vpmovm2w %k0, %xmm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq

  %cmp_res = icmp ugt <8 x i32> %a, %b
  %cmp_res2 = icmp sgt <8 x i32> %a, %b
  call void @f()
  %res = or <8 x i1> %cmp_res, %cmp_res2
  ret <8 x i1> %res
}

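; <16 x i1>: spilled/reloaded as a 2-byte kmovw, rebuilt with vpmovm2b.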
define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_16i1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
; CHECK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
; CHECK-NEXT:    korw %k1, %k0, %k0
; CHECK-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq _f
; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 ## 2-byte Reload
; CHECK-NEXT:    vpmovm2b %k0, %xmm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq
  %cmp_res = icmp ugt <16 x i32> %a, %b
  %cmp_res2 = icmp sgt <16 x i32> %a, %b
  call void @f()
  %res = or <16 x i1> %cmp_res, %cmp_res2
  ret <16 x i1> %res
}

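; <32 x i1>: spilled/reloaded as a 4-byte kmovd, rebuilt with vpmovm2b into %ymm0.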
define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
; CHECK-LABEL: test_32i1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    vpcmpnleuw %zmm1, %zmm0, %k0
; CHECK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
; CHECK-NEXT:    kord %k1, %k0, %k0
; CHECK-NEXT:    kmovd %k0, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq _f
; CHECK-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k0 ## 4-byte Reload
; CHECK-NEXT:    vpmovm2b %k0, %ymm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq
  %cmp_res = icmp ugt <32 x i16> %a, %b
  %cmp_res2 = icmp sgt <32 x i16> %a, %b
  call void @f()
  %res = or <32 x i1> %cmp_res, %cmp_res2
  ret <32 x i1> %res
}

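; <64 x i1>: spilled/reloaded as an 8-byte kmovq to (%rsp), rebuilt with vpmovm2b into %zmm0.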
define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
; CHECK-LABEL: test_64i1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    vpcmpnleub %zmm1, %zmm0, %k0
; CHECK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
; CHECK-NEXT:    korq %k1, %k0, %k0
; CHECK-NEXT:    kmovq %k0, (%rsp) ## 8-byte Spill
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq _f
; CHECK-NEXT:    kmovq (%rsp), %k0 ## 8-byte Reload
; CHECK-NEXT:    vpmovm2b %k0, %zmm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq

  %cmp_res = icmp ugt <64 x i8> %a, %b
  %cmp_res2 = icmp sgt <64 x i8> %a, %b
  call void @f()
  %res = or <64 x i1> %cmp_res, %cmp_res2
  ret <64 x i1> %res
}