; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -x86-asm-syntax=intel -mtriple=x86_64-linux-generic -march=x86-64 -mcpu=skylake-avx512 < %s | FileCheck %s

; Declarations of the @llvm.get.active.lane.mask overloads exercised below.
; Each takes a (base, trip-count) pair and yields an <N x i1> lane mask;
; the suffixes encode the mask width (v7i1..v64i1) and index type (i32/i64).
declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64, i64)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64, i64)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i32(i32, i32)

; Non-power-of-two mask: <7 x i1> is not a legal AVX-512 mask type, so the
; compare is done in k0 and the seven result bits are extracted one at a
; time (kshiftrb/kmovd), OR-merged in a GPR, and stored through the return
; pointer (rdi, copied to rax).
define <7 x i1> @create_mask7(i64 %0) {
; CHECK-LABEL: create_mask7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    mov rax, rdi
; CHECK-NEXT:    vpbroadcastq zmm0, rsi
; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kshiftrb k1, k0, 6
; CHECK-NEXT:    kmovd r8d, k1
; CHECK-NEXT:    kshiftrb k1, k0, 5
; CHECK-NEXT:    kmovd r9d, k1
; CHECK-NEXT:    kshiftrb k1, k0, 4
; CHECK-NEXT:    kmovd r10d, k1
; CHECK-NEXT:    kshiftrb k1, k0, 3
; CHECK-NEXT:    kmovd edi, k1
; CHECK-NEXT:    kshiftrb k1, k0, 2
; CHECK-NEXT:    kmovd ecx, k1
; CHECK-NEXT:    kshiftrb k1, k0, 1
; CHECK-NEXT:    kmovd edx, k1
; CHECK-NEXT:    kmovd esi, k0
; CHECK-NEXT:    and sil, 1
; CHECK-NEXT:    and dl, 1
; CHECK-NEXT:    add dl, dl
; CHECK-NEXT:    or dl, sil
; CHECK-NEXT:    and cl, 1
; CHECK-NEXT:    shl cl, 2
; CHECK-NEXT:    or cl, dl
; CHECK-NEXT:    and dil, 1
; CHECK-NEXT:    shl dil, 3
; CHECK-NEXT:    or dil, cl
; CHECK-NEXT:    and r10b, 1
; CHECK-NEXT:    shl r10b, 4
; CHECK-NEXT:    or r10b, dil
; CHECK-NEXT:    and r9b, 1
; CHECK-NEXT:    shl r9b, 5
; CHECK-NEXT:    or r9b, r10b
; CHECK-NEXT:    shl r8b, 6
; CHECK-NEXT:    or r8b, r9b
; CHECK-NEXT:    and r8b, 127
; CHECK-NEXT:    mov byte ptr [rax], r8b
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret
  %2 = call <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64 0, i64 %0)
  ret <7 x i1> %2
}

; 16 lanes with an i64 trip count: two 8-wide i64 compares are joined with
; kunpckbw and materialized as a byte vector in xmm0 via vpmovm2b.
define <16 x i1> @create_mask16(i64 %0) {
; CHECK-LABEL: create_mask16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpbroadcastq zmm0, rdi
; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k0, k1, k0
; CHECK-NEXT:    vpmovm2b xmm0, k0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret
  %2 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 0, i64 %0)
  ret <16 x i1> %2
}

; 32 lanes with an i64 trip count: four 8-wide i64 compares merged
; pairwise (kunpckbw) then into a 32-bit mask (kunpckwd), expanded to ymm0.
define <32 x i1> @create_mask32(i64 %0) {
; CHECK-LABEL: create_mask32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpbroadcastq zmm0, rdi
; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k0, k1, k0
; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k1, k1, k2
; CHECK-NEXT:    kunpckwd k0, k1, k0
; CHECK-NEXT:    vpmovm2b ymm0, k0
; CHECK-NEXT:    ret
  %2 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 0, i64 %0)
  ret <32 x i1> %2
}

; 64 lanes with an i64 trip count: eight 8-wide i64 compares merged through
; the full kunpckbw -> kunpckwd -> kunpckdq tree into a 64-bit mask in k0,
; then expanded to a byte vector in zmm0.
define <64 x i1> @create_mask64(i64 %0) {
; CHECK-LABEL: create_mask64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpbroadcastq zmm0, rdi
; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k0, k1, k0
; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k1, k1, k2
; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckwd k0, k1, k0
; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k1, k1, k2
; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckbw k2, k3, k2
; CHECK-NEXT:    kunpckwd k1, k2, k1
; CHECK-NEXT:    kunpckdq k0, k1, k0
; CHECK-NEXT:    vpmovm2b zmm0, k0
; CHECK-NEXT:    ret
  %2 = call <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64 0, i64 %0)
  ret <64 x i1> %2
}

; 16 lanes with an i32 trip count: a single 16-wide i32 compare
; (vpcmpnleud) produces the whole mask — no kunpck merging needed.
define <16 x i1> @create_mask16_i32(i32 %0) {
; CHECK-LABEL: create_mask16_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpbroadcastd zmm0, edi
; CHECK-NEXT:    vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpmovm2b xmm0, k0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret
  %2 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 0, i32 %0)
  ret <16 x i1> %2
}

; 64 lanes with an i32 trip count: four 16-wide i32 compares merged with
; kunpckwd then kunpckdq — half as many compares as the i64 variant above.
define <64 x i1> @create_mask64_i32(i32 %0) {
; CHECK-LABEL: create_mask64_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpbroadcastd zmm0, edi
; CHECK-NEXT:    vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckwd k0, k1, k0
; CHECK-NEXT:    vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
; CHECK-NEXT:    kunpckwd k1, k1, k2
; CHECK-NEXT:    kunpckdq k0, k1, k0
; CHECK-NEXT:    vpmovm2b zmm0, k0
; CHECK-NEXT:    ret
  %2 = call <64 x i1> @llvm.get.active.lane.mask.v64i1.i32(i32 0, i32 %0)
  ret <64 x i1> %2
}
