; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512cd | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512cd | FileCheck %s --check-prefixes=CHECK,X64

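; Unmasked conflict detection: vpconflictd gives each lane a bitmask
; identifying which lower-indexed lanes hold the same value, so the intrinsic
; lowers to a single instruction.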
define <16 x i32> @test_conflict_d(<16 x i32> %a) {
; CHECK-LABEL: test_conflict_d:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpconflictd %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %a)
  ret <16 x i32> %1
}

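; Merge masking: the select of the intrinsic result against %b folds into the
; {%k1} write mask, with %b as the passthrough in %zmm1. On 32-bit targets the
; i16 mask argument arrives on the stack and is loaded directly with kmovw.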
define <16 x i32> @test_mask_conflict_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; X86-LABEL: test_mask_conflict_d:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpconflictd %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_mask_conflict_d:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictd %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
  %1 = call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %a)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %b
  ret <16 x i32> %3
}

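; Zeroing masking: selecting against zeroinitializer folds into the {z}
; modifier, so no passthrough register is needed and the result stays in %zmm0.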
define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
; X86-LABEL: test_maskz_conflict_d:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpconflictd %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_maskz_conflict_d:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictd %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
  %1 = call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %a)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
  ret <16 x i32> %3
}

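; The same three patterns for the 64-bit element form, vpconflictq.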
define <8 x i64> @test_conflict_q(<8 x i64> %a) {
; CHECK-LABEL: test_conflict_q:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpconflictq %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %a)
  ret <8 x i64> %1
}

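; With qword elements the mask is only i8, so the 32-bit side zero-extends the
; stack byte with movzbl before the kmovw (kmovb is only available with
; AVX512DQ, which this test does not enable).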
define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; X86-LABEL: test_mask_conflict_q:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpconflictq %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_mask_conflict_q:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictq %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
  %1 = call <8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %a)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %b
  ret <8 x i64> %3
}

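; Zeroing-masked form of vpconflictq.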
define <8 x i64> @test_maskz_conflict_q(<8 x i64> %a, i8 %mask) {
; X86-LABEL: test_maskz_conflict_q:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpconflictq %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
;
; X64-LABEL: test_maskz_conflict_q:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictq %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
  %1 = call <8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %a)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> zeroinitializer
  ret <8 x i64> %3
}

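; Leading-zero count: @llvm.ctlz is called with is_zero_poison=false, so a
; zero input must produce the element width. vplzcntd/q already define that
; case, so the generic intrinsic lowers to the instruction with no extra
; zero check.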
define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
; CHECK-LABEL: test_lzcnt_d:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vplzcntd %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
  ret <16 x i32> %1
}
declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) #0

define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
; CHECK-LABEL: test_lzcnt_q:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vplzcntq %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
  ret <8 x i64> %1
}
declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) #0

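; Merge-masked lzcnt, using the same select-into-write-mask pattern as the
; conflict tests above.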
define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; X86-LABEL: test_mask_lzcnt_d:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vplzcntd %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_mask_lzcnt_d:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vplzcntd %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
  %1 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %b
  ret <16 x i32> %3
}

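; Qword lzcnt with an i8 merge mask; the 32-bit side again goes through movzbl.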
define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; X86-LABEL: test_mask_lzcnt_q:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vplzcntq %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_mask_lzcnt_q:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vplzcntq %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
  %1 = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %b
  ret <8 x i64> %3
}

declare <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32>)
declare <8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64>)