• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Bitcasts between 512-bit vector types. Return the original type since
2// no instruction is needed for the conversion
3let Predicates = [HasAVX512] in {
4  def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
5  def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
6  def : Pat<(v8f64  (bitconvert (v8i64 VR512:$src))),  (v8f64 VR512:$src)>;
7  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
8  def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))),  (v16f32 VR512:$src)>;
9  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))),  (v16f32 VR512:$src)>;
10  def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
11  def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
12  def : Pat<(v8i64  (bitconvert (v8f64 VR512:$src))),  (v8i64 VR512:$src)>;
13  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
14  def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))),  (v16i32 VR512:$src)>;
15  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))),  (v16i32 VR512:$src)>;
  // NOTE: a second v8f64 <- v8i64 pattern (formerly record 16 here) was an
  // exact duplicate of record 6 above and has been removed; it contributed
  // nothing to instruction selection.
17
18  // 128-bit bitcasts on the EVEX-encoded XMM registers (VR128X): same
18  // no-op treatment as the 512-bit cases above.
18  def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
19  def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
20  def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
21  def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
22  def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
23  def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
24  def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
25  def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
26  def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
27  def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
28  def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
29  def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
30  def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
31  def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
32  def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
33  def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
34  def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
35  def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
36  def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
37  def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
38  def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
39  def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
40  def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
41  def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
42  def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
43  def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
44  def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
45  def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
46  def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
47  def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
48
49// Bitcasts between 256-bit vector types. Return the original type since
50// no instruction is needed for the conversion
51  def : Pat<(v4f64  (bitconvert (v8f32 VR256X:$src))),  (v4f64 VR256X:$src)>;
52  def : Pat<(v4f64  (bitconvert (v8i32 VR256X:$src))),  (v4f64 VR256X:$src)>;
53  def : Pat<(v4f64  (bitconvert (v4i64 VR256X:$src))),  (v4f64 VR256X:$src)>;
54  def : Pat<(v4f64  (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
55  def : Pat<(v4f64  (bitconvert (v32i8 VR256X:$src))),  (v4f64 VR256X:$src)>;
56  def : Pat<(v8f32  (bitconvert (v8i32 VR256X:$src))),  (v8f32 VR256X:$src)>;
57  def : Pat<(v8f32  (bitconvert (v4i64 VR256X:$src))),  (v8f32 VR256X:$src)>;
58  def : Pat<(v8f32  (bitconvert (v4f64 VR256X:$src))),  (v8f32 VR256X:$src)>;
59  def : Pat<(v8f32  (bitconvert (v32i8 VR256X:$src))),  (v8f32 VR256X:$src)>;
60  def : Pat<(v8f32  (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
61  def : Pat<(v4i64  (bitconvert (v8f32 VR256X:$src))),  (v4i64 VR256X:$src)>;
62  def : Pat<(v4i64  (bitconvert (v8i32 VR256X:$src))),  (v4i64 VR256X:$src)>;
63  def : Pat<(v4i64  (bitconvert (v4f64 VR256X:$src))),  (v4i64 VR256X:$src)>;
64  def : Pat<(v4i64  (bitconvert (v32i8 VR256X:$src))),  (v4i64 VR256X:$src)>;
65  def : Pat<(v4i64  (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
66  def : Pat<(v32i8  (bitconvert (v4f64 VR256X:$src))),  (v32i8 VR256X:$src)>;
67  def : Pat<(v32i8  (bitconvert (v4i64 VR256X:$src))),  (v32i8 VR256X:$src)>;
68  def : Pat<(v32i8  (bitconvert (v8f32 VR256X:$src))),  (v32i8 VR256X:$src)>;
69  def : Pat<(v32i8  (bitconvert (v8i32 VR256X:$src))),  (v32i8 VR256X:$src)>;
70  def : Pat<(v32i8  (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
71  def : Pat<(v8i32  (bitconvert (v32i8 VR256X:$src))),  (v8i32 VR256X:$src)>;
72  def : Pat<(v8i32  (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
73  def : Pat<(v8i32  (bitconvert (v8f32 VR256X:$src))),  (v8i32 VR256X:$src)>;
74  def : Pat<(v8i32  (bitconvert (v4i64 VR256X:$src))),  (v8i32 VR256X:$src)>;
75  def : Pat<(v8i32  (bitconvert (v4f64 VR256X:$src))),  (v8i32 VR256X:$src)>;
76  def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))),  (v16i16 VR256X:$src)>;
77  def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))),  (v16i16 VR256X:$src)>;
78  def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))),  (v16i16 VR256X:$src)>;
79  def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))),  (v16i16 VR256X:$src)>;
80  def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))),  (v16i16 VR256X:$src)>;
81}
82
83//===----------------------------------------------------------------------===//
84// AVX-512 - VECTOR INSERT
85//
86// -- 32x4 form --
// VINSERTF32x4: insert a 128-bit (4 x f32) chunk into a ZMM register at the
// lane selected by the i8 immediate. Patterns are supplied separately below,
// so the defs themselves carry no ISel pattern ([]).
87let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
88def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
89          (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
90          "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
91          []>, EVEX_4V, EVEX_V512;
// Memory form: $src2 is loaded; EVEX_CD8<32, CD8VT4> sets the disp8
// compression tuple (four 32-bit elements).
92let mayLoad = 1 in
93def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
94          (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
95          "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
96          []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
97}
98
99// -- 64x4 fp form --
// VINSERTF64x4: insert a 256-bit (4 x f64) chunk; VEX_W selects the 64-bit
// element form of opcode 0x1a.
100let neverHasSideEffects = 1, ExeDomain = SSEPackedDouble in {
101def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
102          (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
103          "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
104          []>, EVEX_4V, EVEX_V512, VEX_W;
105let mayLoad = 1 in
// NOTE(review): the memory operand is i256mem although this is the FP form
// (the 32x4 FP form above uses f128mem) — confirm whether f256mem was meant.
106def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
107          (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
108          "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
109          []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
110}
111// -- 32x4 integer form --
// VINSERTI32x4: integer counterpart of VINSERTF32x4 (opcode 0x38); no
// ExeDomain is set for the integer forms.
112let neverHasSideEffects = 1 in {
113def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
114          (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
115          "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
116          []>, EVEX_4V, EVEX_V512;
117let mayLoad = 1 in
118def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
119          (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
120          "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
121          []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
122
123}
124
// VINSERTI64x4: integer counterpart of VINSERTF64x4 (opcode 0x3a, VEX_W).
125let neverHasSideEffects = 1 in {
126// -- 64x4 form --
127def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
128          (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
129          "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
130          []>, EVEX_4V, EVEX_V512, VEX_W;
131let mayLoad = 1 in
132def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
133          (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
134          "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
135          []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
136}
137
// Select 128-bit register subvector inserts to VINSERT*32x4rr.  The v8f64 and
// v8i64 cases also use the 32x4 instructions: the inserted bit pattern is
// identical, so this is presumably a deliberate reuse (no 64x2 insert is
// defined in this file) — TODO confirm.
138def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2),
139           (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
140                        (INSERT_get_vinsert128_imm VR512:$ins))>;
141def : Pat<(vinsert128_insert:$ins (v8f64  VR512:$src1), (v2f64 VR128X:$src2),
142           (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
143                        (INSERT_get_vinsert128_imm VR512:$ins))>;
144def : Pat<(vinsert128_insert:$ins (v8i64  VR512:$src1), (v2i64 VR128X:$src2),
145           (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
146                        (INSERT_get_vinsert128_imm VR512:$ins))>;
147def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
148           (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
149                        (INSERT_get_vinsert128_imm VR512:$ins))>;
150
// Folded-load variants of the 128-bit inserts above.  The v16i32 case matches
// a v4i32-bitcasted v2i64 load, the canonical form for integer vector loads.
151def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
152           (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
153                        (INSERT_get_vinsert128_imm VR512:$ins))>;
154def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
155	                (bc_v4i32 (loadv2i64 addr:$src2)),
156           (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
157                        (INSERT_get_vinsert128_imm VR512:$ins))>;
158def : Pat<(vinsert128_insert:$ins (v8f64  VR512:$src1), (loadv2f64 addr:$src2),
159           (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
160                        (INSERT_get_vinsert128_imm VR512:$ins))>;
161def : Pat<(vinsert128_insert:$ins (v8i64  VR512:$src1), (loadv2i64 addr:$src2),
162           (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
163                        (INSERT_get_vinsert128_imm VR512:$ins))>;
164
// Select 256-bit register subvector inserts to VINSERT*64x4rr.
// FIX: the v8i64 and v16i32 patterns below previously matched
// vinsert128_insert while taking 256-bit subvectors and using
// INSERT_get_vinsert256_imm — an inconsistent (and wrong) fragment/imm
// pairing.  All four 256-bit cases must use vinsert256_insert.
165def : Pat<(vinsert256_insert:$ins (v16f32  VR512:$src1), (v8f32 VR256X:$src2),
166           (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
167                        (INSERT_get_vinsert256_imm VR512:$ins))>;
168def : Pat<(vinsert256_insert:$ins (v8f64  VR512:$src1), (v4f64 VR256X:$src2),
169           (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
170                        (INSERT_get_vinsert256_imm VR512:$ins))>;
171def : Pat<(vinsert256_insert:$ins (v8i64  VR512:$src1), (v4i64 VR256X:$src2),
172           (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
173                        (INSERT_get_vinsert256_imm VR512:$ins))>;
174def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
175           (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
176                        (INSERT_get_vinsert256_imm VR512:$ins))>;
177
// Folded-load variants of the 256-bit inserts; the v16i32 case matches a
// v8i32-bitcasted v4i64 load.
178def : Pat<(vinsert256_insert:$ins (v16f32  VR512:$src1), (loadv8f32 addr:$src2),
179           (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
180                        (INSERT_get_vinsert256_imm VR512:$ins))>;
181def : Pat<(vinsert256_insert:$ins (v8f64  VR512:$src1), (loadv4f64 addr:$src2),
182           (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
183                        (INSERT_get_vinsert256_imm VR512:$ins))>;
184def : Pat<(vinsert256_insert:$ins (v8i64  VR512:$src1), (loadv4i64 addr:$src2),
185           (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
186                        (INSERT_get_vinsert256_imm VR512:$ins))>;
187def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
188	                (bc_v8i32 (loadv4i64 addr:$src2)),
189           (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
190                        (INSERT_get_vinsert256_imm VR512:$ins))>;
191
192// vinsertps - insert f32 to XMM
// EVEX-encoded vinsertps ("{z}" suffix distinguishes it from the VEX form in
// the asm string).  Unlike the x4/x4 inserts above, these carry ISel patterns
// directly (X86insrtps).
193def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
194      (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
195      "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
196      [(set VR128X:$dst, (X86insrtps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
197      EVEX_4V;
// Memory form: the loaded f32 is widened via scalar_to_vector before the
// X86insrtps node.
198def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
199      (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
200      "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
201      [(set VR128X:$dst, (X86insrtps VR128X:$src1,
202                          (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
203                          imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
204
205//===----------------------------------------------------------------------===//
206// AVX-512 VECTOR EXTRACT
207//---
// FP extracts of a 128-bit (32x4) or 256-bit (64x4) chunk from a ZMM
// register, selected by the i8 immediate.  Patterns are supplied separately.
208let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
209// -- 32x4 form --
210def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
211          (ins VR512:$src1, i8imm:$src2),
212          "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
213          []>, EVEX, EVEX_V512;
// FIX: mark the store form mayStore explicitly.  neverHasSideEffects = 1 is
// set on the enclosing let and there is no pattern from which the store could
// be inferred; the 64x4 form below already does this.
let mayStore = 1 in
214def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
215          (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
216          "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
217          []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
218
219// -- 64x4 form --
220def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
221          (ins VR512:$src1, i8imm:$src2),
222          "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
223          []>, EVEX, EVEX_V512, VEX_W;
224let mayStore = 1 in
225def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
226          (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
227          "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
228          []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
229}
230
// Integer counterparts of the FP extracts above (opcodes 0x39 / 0x3b).
231let neverHasSideEffects = 1 in {
232// -- 32x4 form --
233def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
234          (ins VR512:$src1, i8imm:$src2),
235          "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
236          []>, EVEX, EVEX_V512;
// FIX: mark the store form mayStore explicitly, consistent with the 64x4
// form below (neverHasSideEffects would otherwise hide the store).
let mayStore = 1 in
237def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
238          (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
239          "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
240          []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
241
242// -- 64x4 form --
243def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
244          (ins VR512:$src1, i8imm:$src2),
245          "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
246          []>, EVEX, EVEX_V512, VEX_W;
247let mayStore = 1 in
248def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
249          (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
250          "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
251          []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
252}
253
// Select 128-bit subvector extracts.
254def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
255          (v4f32 (VEXTRACTF32x4rr VR512:$src1,
256                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
257
// FIX: the v4i32 pattern was missing the value type on VR512:$src1 (VR512
// carries several types, so the untyped pattern is ambiguous) and used the FP
// opcode; give it the v16i32 type and the integer instruction, consistent
// with the v8i64 pattern below.
258def : Pat<(vextract128_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
259          (v4i32 (VEXTRACTI32x4rr VR512:$src1,
260                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
261
262def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
263          (v2f64 (VEXTRACTF32x4rr VR512:$src1,
264                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
265
266def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
267          (v2i64 (VEXTRACTI32x4rr VR512:$src1,
268                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
269
270
// Select 256-bit subvector extracts: FP types to VEXTRACTF64x4rr, integer
// types to VEXTRACTI64x4rr.
271def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
272          (v8f32 (VEXTRACTF64x4rr VR512:$src1,
273                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
274
275def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
276          (v8i32 (VEXTRACTI64x4rr VR512:$src1,
277                    (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
278
279def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
280          (v4f64 (VEXTRACTF64x4rr VR512:$src1,
281                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
282
283def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
284          (v4i64 (VEXTRACTI64x4rr VR512:$src1,
285                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
286
287// A 256-bit subvector extract from the first 512-bit vector position
288// is a subregister copy that needs no instruction.
289def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
290          (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
291def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
292          (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
293def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
294          (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
295def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
296          (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
297
298// zmm -> xmm
// Likewise, a 128-bit extract at index 0 is just the sub_xmm subregister.
299def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
300          (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
301def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
302          (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
303def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
304          (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
305def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
306          (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
307
308
309// A 128-bit subvector insert to the first 512-bit vector position
310// is a subregister copy that needs no instruction.
// xmm -> zmm goes through two nested INSERT_SUBREGs (xmm into ymm into zmm);
// the IMPLICIT_DEFs supply the undefined upper lanes.
311def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
312          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
313          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
314          sub_ymm)>;
315def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
316          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
317          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
318          sub_ymm)>;
319def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
320          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
321          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
322          sub_ymm)>;
323def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
324          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
325          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
326          sub_ymm)>;
327
// ymm -> zmm needs only a single sub_ymm insert.
328def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
329          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
330def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
331          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
332def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
333          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
334def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
335          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
336
337// vextractps - extract 32 bits from XMM
// The source is viewed as v4i32 (bc_v4i32 of the v4f32 input) so extractelt
// yields an i32 for GR32 / the i32 store.
338def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
339      (ins VR128X:$src1, u32u8imm:$src2),
340      "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
341      [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
342      EVEX;
343
344def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
345      (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
346      "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
347      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
348                          addr:$dst)]>, EVEX;
349
350//===---------------------------------------------------------------------===//
351// AVX-512 BROADCAST
352//---
// Register and memory forms of an FP broadcast.  No ISel patterns here; the
// Pat<> records after the defm instantiations do the selection.
353multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
354                         RegisterClass DestRC,
355                         RegisterClass SrcRC, X86MemOperand x86memop> {
356  def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
357         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
358         []>, EVEX;
359  def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
360        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),[]>, EVEX;
361}
// vbroadcastss/sd to ZMM ("{z}" asm suffix distinguishes the EVEX form).
362let ExeDomain = SSEPackedSingle in {
363  defm VBROADCASTSSZ  : avx512_fp_broadcast<0x18, "vbroadcastss{z}", VR512,
364                                       VR128X, f32mem>,
365                                       EVEX_V512, EVEX_CD8<32, CD8VT1>;
366}
367
368let ExeDomain = SSEPackedDouble in {
369  defm VBROADCASTSDZ  : avx512_fp_broadcast<0x19, "vbroadcastsd{z}", VR512,
370                                       VR128X, f64mem>,
371                                       EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
372}
373
// Broadcast of a scalar load straight from memory.
374def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
375          (VBROADCASTSSZrm addr:$src)>;
376def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
377          (VBROADCASTSDZrm addr:$src)>;
378
// Broadcast from a GPR into a ZMM register; Zkrr is the zero-masked form
// (EVEX_KZ, "{z}" in the asm string) taking a mask register operand.
379multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
380                          RegisterClass SrcRC, RegisterClass KRC> {
381  def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
382                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
383                   []>, EVEX, EVEX_V512;
384  def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
385                   (ins KRC:$mask, SrcRC:$src),
386                   !strconcat(OpcodeStr,
387                        "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
388                   []>, EVEX, EVEX_V512, EVEX_KZ;
389}
390
// Both use opcode 0x7C; VEX_W distinguishes the 64-bit (q) form.
391defm VPBROADCASTDr  : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
392defm VPBROADCASTQr  : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
393                                            VEX_W;
394
// Lower a mask zero-extension to a zero-masked broadcast of the constant 1.
395def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
396           (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
397
398def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
399           (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
400
401def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
402        (VPBROADCASTDrZrr GR32:$src)>;
403def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
404        (VPBROADCASTQrZrr GR64:$src)>;
405
// Integer broadcast from an XMM register or memory, with plain (rr/rm) and
// zero-masked (krr/krm, EVEX_KZ + X86VBroadcastm) variants.  These forms do
// carry ISel patterns, unlike avx512_fp_broadcast.
406multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
407                          X86MemOperand x86memop, PatFrag ld_frag,
408                          RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
409                          RegisterClass KRC> {
410  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
411                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
412                  [(set DstRC:$dst,
413                    (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
414  def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
415                                                         VR128X:$src),
416                    !strconcat(OpcodeStr,
417                    "\t{$src, ${dst}{${mask}}{z}|${dst}{${mask}}{z}, $src}"),
418                    [(set DstRC:$dst,
419                      (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
420                    EVEX, EVEX_KZ;
421  def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
422                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
423                  [(set DstRC:$dst,
424                    (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
425  def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
426                                                         x86memop:$src),
427                  !strconcat(OpcodeStr,
428                      "\t{$src, ${dst}{${mask}}{z}|${dst}{${mask}}{z}, $src}"),
429                  [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
430                                     (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
431}
432
433defm VPBROADCASTDZ  : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
434                      loadi32, VR512, v16i32, v4i32, VK16WM>,
435                      EVEX_V512, EVEX_CD8<32, CD8VT1>;
436defm VPBROADCASTQZ  : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
437                      loadi64, VR512, v8i64, v2i64, VK8WM>,  EVEX_V512, VEX_W,
438                      EVEX_CD8<64, CD8VT1>;
439
// FP broadcast from the low element of an XMM register.
440def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
441          (VBROADCASTSSZrr VR128X:$src)>;
442def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
443          (VBROADCASTSDZrr VR128X:$src)>;
444
445// Provide fallback in case the load node that is used in the patterns above
446// is used by additional users, which prevents the pattern selection.
// The scalar FR32X/FR64X value is re-viewed as an XMM register for the
// register-form broadcast.
447def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
448          (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
449def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
450          (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
451
452
// A 256-bit masked broadcast has no dedicated instruction here: widen the
// v8i1 mask to VK16WM, do the 512-bit zero-masked broadcast-load, and take
// the low YMM half.
453let Predicates = [HasAVX512] in {
454def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
455           (EXTRACT_SUBREG
456              (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
457                       addr:$src)), sub_ymm)>;
458}
459//===----------------------------------------------------------------------===//
460// AVX-512 BROADCAST MASK TO VECTOR REGISTER
461//---
462
// Broadcast a mask register into a vector register (vpbroadcastmw2d/mb2q).
// NOTE(review): the form is MRMDestReg although the mask is the source
// operand — confirm this matches the intended ModRM encoding.
463multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
464                       RegisterClass DstRC, RegisterClass KRC,
465                       ValueType OpVT, ValueType SrcVT> {
466def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
467                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
468                  []>, EVEX;
469}
470
471defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
472                                             VK16, v16i32, v16i1>, EVEX_V512;
473defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
474                                            VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
475
476// Mask register copy, including
477// - copy between mask registers
478// - load/store mask registers
479// - copy from GPR to mask register and vice versa
480//
// kk = mask<-mask, km = mask<-memory (with a load pattern), mk = memory<-mask.
481multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
482                         string OpcodeStr, RegisterClass KRC,
483                         ValueType vt, X86MemOperand x86memop> {
484  let neverHasSideEffects = 1 in {
485    def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
486               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
487    let mayLoad = 1 in
488    def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
489               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
490               [(set KRC:$dst, (vt (load addr:$src)))]>;
491    let mayStore = 1 in
492    def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
493               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
494  }
495}
496
// GPR <-> mask register moves: kr = mask<-GPR, rk = GPR<-mask.  No patterns;
// selection uses the bitconvert Pat<> records below.
497multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
498                             string OpcodeStr,
499                             RegisterClass KRC, RegisterClass GRC> {
500  let neverHasSideEffects = 1 in {
501    def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
502               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
503    def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
504               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
505  }
506}
507
// kmovw: both defms share the KMOVW prefix; suffixes (kk/km/mk vs kr/rk) keep
// the generated instruction names distinct.
508let Predicates = [HasAVX512] in {
509  defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
510               VEX, TB;
511  defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
512               VEX, TB;
513}
514
515let Predicates = [HasAVX512] in {
516  // GR16 from/to 16-bit mask
  // KMOVW operates on GR32, so the GR16 value is widened (SUBREG_TO_REG) on
  // the way in and narrowed (EXTRACT_SUBREG sub_16bit) on the way out.
517  def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
518            (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
519  def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
520            (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
521
522  // Store kreg in memory
523  def : Pat<(store (v16i1 VK16:$src), addr:$dst),
524            (KMOVWmk addr:$dst, VK16:$src)>;
525
  // v8i1 stores are promoted to VK16 and stored with the 16-bit kmovw.
526  def : Pat<(store (v8i1 VK8:$src), addr:$dst),
527            (KMOVWmk addr:$dst, (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16)))>;
528}
529// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
530let Predicates = [HasAVX512] in {
531  // GR from/to 8-bit mask without native support
  // Route i8 <-> v8i1 through KMOVW on VK16, reclassifying with
  // COPY_TO_REGCLASS and trimming with sub_8bit.
532  def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
533            (COPY_TO_REGCLASS
534              (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
535              VK8)>;
536  def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
537            (EXTRACT_SUBREG
538              (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
539              sub_8bit)>;
540}
541
542// Mask unary operation
543// - KNOT
// One reg-reg unary mask instruction whose result is (OpNode src).
544multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
545                         RegisterClass KRC, SDPatternOperator OpNode> {
546  let Predicates = [HasAVX512] in
547    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
548               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
549               [(set KRC:$dst, (OpNode KRC:$src))]>;
550}
551
// Instantiates the 16-bit ("w") variant of a mask unary op.
552multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
553                               SDPatternOperator OpNode> {
554  defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
555                          VEX, TB;
556}
557
558defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
559
// xor with all-ones is logically NOT; select it to knotw.  The v8i1 case is
// promoted to VK16 around the 16-bit instruction.
560def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
561def : Pat<(xor VK8:$src1,  (v8i1 immAllOnesV)),
562          (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
563
564// With AVX-512, 8-bit mask is promoted to 16-bit mask.
565def : Pat<(not VK8:$src),
566          (COPY_TO_REGCLASS
567            (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
568
569// Mask binary operation
570// - KADD, KAND, KANDN, KOR, KXNOR, KXOR
// One reg-reg-reg binary mask instruction whose result is
// (OpNode src1, src2).
571multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
572                           RegisterClass KRC, SDPatternOperator OpNode> {
573  let Predicates = [HasAVX512] in
574    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
575               !strconcat(OpcodeStr,
576                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
577               [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
578}
579
// Instantiates the 16-bit ("w") variant of a mask binary op.
580multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
581                             SDPatternOperator OpNode> {
582  defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
583                           VEX_4V, VEX_L, TB;
584}
585
// Pattern fragments for the composite mask ops: andn = ~a & b,
// xnor = ~(a ^ b).
586def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
587def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
588
589let isCommutable = 1 in {
590  defm KADD  : avx512_mask_binop_w<0x4a, "kadd",  add>;
591  defm KAND  : avx512_mask_binop_w<0x41, "kand",  and>;
592  let isCommutable = 0 in
593  defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
594  defm KOR   : avx512_mask_binop_w<0x45, "kor",   or>;
595  defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>;
596  defm KXOR  : avx512_mask_binop_w<0x47, "kxor",  xor>;
597}
598
// Map each x86 mask intrinsic (int_x86_<name>_v16i1) directly onto the
// corresponding <InstName>Wrr instruction defined above. The !cast pulls
// the intrinsic and instruction records by their concatenated names.
multiclass avx512_mask_binop_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
                VK16:$src1, VK16:$src2),
              (!cast<Instruction>(InstName##"Wrr") VK16:$src1, VK16:$src2)>;
}

defm : avx512_mask_binop_int<"kadd",  "KADD">;
defm : avx512_mask_binop_int<"kand",  "KAND">;
defm : avx512_mask_binop_int<"kandn", "KANDN">;
defm : avx512_mask_binop_int<"kor",   "KOR">;
defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
defm : avx512_mask_binop_int<"kxor",  "KXOR">;
// With AVX-512, 8-bit mask is promoted to 16-bit mask.
// Lower v8i1 logic ops through the 16-bit instruction: widen both operands
// to VK16, execute the W-form instruction, then narrow the result to VK8.
multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
  let Predicates = [HasAVX512] in
    def : Pat<(OpNode VK8:$src1, VK8:$src2),
              (COPY_TO_REGCLASS
                (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
                      (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
}

defm : avx512_binop_pat<and,  KANDWrr>;
defm : avx512_binop_pat<andn, KANDNWrr>;
defm : avx512_binop_pat<or,   KORWrr>;
defm : avx512_binop_pat<xnor, KXNORWrr>;
defm : avx512_binop_pat<xor,  KXORWrr>;
626
// Mask unpacking
// KRC2 x KRC2 -> KRC1 (different source/destination mask widths). No DAG
// pattern is attached here; selection goes through the intrinsic patterns
// below.
multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
                           RegisterClass KRC1, RegisterClass KRC2> {
  let Predicates = [HasAVX512] in
    def rr : I<opc, MRMSrcReg, (outs KRC1:$dst), (ins KRC2:$src1, KRC2:$src2),
               !strconcat(OpcodeStr,
                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
}

// "bw" variant: two VK8 sources producing a VK16 result. Encoding: VEX_4V
// three-operand form, VEX.L set, 66 prefix (OpSize), 0F opcode map (TB).
multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
  defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16, VK8>,
                            VEX_4V, VEX_L, OpSize, TB;
}

// KUNPCKBW (opcode 0x4b).
defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
642
// Map the kunpck intrinsic onto KUNPCKBWrr.
// NOTE(review): the intrinsic name is built with the "_v16i1" (result-type)
// suffix while the operands are VK8 — looks intentional since the result is
// 16 bits wide, but confirm against the intrinsic's declared signature.
multiclass avx512_mask_unpck_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
                VK8:$src1, VK8:$src2),
              (!cast<Instruction>(InstName##"BWrr") VK8:$src1, VK8:$src2)>;
}

defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
// Mask bit testing
// Tests two mask registers and writes only EFLAGS: note the empty (outs)
// list and Defs = [EFLAGS]; the pattern sets EFLAGS from OpNode.
multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                            SDNode OpNode> {
  let Predicates = [HasAVX512], Defs = [EFLAGS] in
    def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
               [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
}
659
// Word-size (VK16) instantiation: "w" mnemonic suffix, VEX-encoded in the
// 0F opcode map (TB).
multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
                            VEX, TB;
}

// KORTESTW (0x98) and KTESTW (0x99), selected via the target-specific
// X86kortest/X86ktest flag-producing DAG nodes.
defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
defm KTEST   : avx512_mask_testop_w<0x99, "ktest", X86ktest>;
667
// Mask shift
// Shift a mask register by an 8-bit immediate: $dst = OpNode($src, imm).
// Ii8 carries the trailing imm8 in the encoding.
multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                             SDNode OpNode> {
  let Predicates = [HasAVX512] in
    def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
                 !strconcat(OpcodeStr,
                            "\t{$imm, $src, $dst|$dst, $src, $imm}"),
                            [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
}
677
// Word-size shift: "w" suffix, VEX-encoded with 66 prefix (OpSize) in the
// 0F3A opcode map (TA) with VEX.W set.
// NOTE(review): opc2 is accepted but unused in this multiclass — presumably
// reserved for additional mask-width variants; confirm before removing.
multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
                               SDNode OpNode> {
  defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
                             VEX, OpSize, TA, VEX_W;
}

// KSHIFTLW (0x32) / KSHIFTRW (0x30), selected for plain shl/srl on masks.
defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", shl>;
defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", srl>;
686
// Mask setting all 0s or 1s
// Pseudo-instruction that materializes an all-zeros or all-ones mask
// constant. Marked rematerializable and as-cheap-as-a-move so the register
// allocator can re-emit it instead of spilling/reloading.
multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
  let Predicates = [HasAVX512] in
    let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
      def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
                     [(set KRC:$dst, (VT Val))]>;
}
694
// Instantiate the constant pseudo for both mask widths: B = v8i1 in VK8,
// W = v16i1 in VK16.
multiclass avx512_mask_setop_w<PatFrag Val> {
  defm B : avx512_mask_setop<VK8,  v8i1, Val>;
  defm W : avx512_mask_setop<VK16, v16i1, Val>;
}

// KSET0{B,W} = all-zeros mask, KSET1{B,W} = all-ones mask.
defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
702
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
// v8i1 constants reuse the 16-bit KSET pseudos and narrow the result.
let Predicates = [HasAVX512] in {
  def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
  def : Pat<(v8i1 immAllOnesV),  (COPY_TO_REGCLASS (KSET1W), VK8)>;
}
// Extracting the low 8 bits of a 16-bit mask is just a register-class copy.
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
          (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;

// Widening v8i1 into undefined upper bits is likewise only a copy.
def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
          (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;

// Extracting the high 8 bits needs a logical right shift by 8 (KSHIFTRW)
// before narrowing to VK8.
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
716