//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx  : PatFrag<(ops node:$in), (x86mmx  (bitconvert node:$in))>;
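
// Illustration only: fragments compose with predicates in the same way the
// SSE fragments later in this file wrap 'load'.  A hypothetical 8-byte
// aligned MMX load fragment (a sketch, not used by any instruction here)
// would look like:
def load_mmx_aligned_example : PatFrag<(ops node:$ptr),
                                       (x86mmx (load node:$ptr)), [{
  // The predicate runs on the matched load node N.
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;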

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;

// Commutative and Associative FMIN and FMAX.
def X86fminc    : SDNode<"X86ISD::FMINC", SDTFPBinOp,
    [SDNPCommutative, SDNPAssociative]>;
def X86fmaxc    : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
    [SDNPCommutative, SDNPAssociative]>;

def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86",SDTFPToIntOp>;
def X86fhadd   : SDNode<"X86ISD::FHADD",     SDTFPBinOp>;
def X86fhsub   : SDNode<"X86ISD::FHSUB",     SDTFPBinOp>;
def X86hadd    : SDNode<"X86ISD::HADD",      SDTIntBinOp>;
def X86hsub    : SDNode<"X86ISD::HSUB",      SDTIntBinOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86cmpss   : SDNode<"X86ISD::FSETCCss",    SDTX86Cmpss>;
def X86cmpsd   : SDNode<"X86ISD::FSETCCsd",    SDTX86Cmpsd>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86andnp   : SDNode<"X86ISD::ANDNP",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psign   : SDNode<"X86ISD::PSIGN",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;

def X86vzmovly  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                      SDTCisOpSmallerThanOp<1, 0> ]>>;

def X86vsmovl  : SDNode<"X86ISD::VSEXT_MOVL",
                 SDTypeProfile<1, 1,
                 [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;

def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def X86vfpext  : SDNode<"X86ISD::VFPEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>]>>;

def X86vshldq  : SDNode<"X86ISD::VSHLDQ",    SDTIntShiftOp>;
def X86vshrdq  : SDNode<"X86ISD::VSRLDQ",    SDTIntShiftOp>;
def X86cmpp    : SDNode<"X86ISD::CMPP",      SDTX86VFCMP>;
def X86pcmpeq  : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt  : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;

def X86vshl    : SDNode<"X86ISD::VSHL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsrl    : SDNode<"X86ISD::VSRL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsra    : SDNode<"X86ISD::VSRA",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;

def X86vshli   : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli   : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai   : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;

def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                      SDTCisSameAs<1,2>]>>;

// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                SDTCisSameAs<0,2>]>;

def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                 SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                 SDTCisSameAs<0,2>, SDTCisInt<3>]>;

def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                             SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;

def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                           SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;

def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;
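
// Illustration only: once lowering produces one of the target shuffle nodes
// above, instruction selection matches it with an ordinary pattern.  A
// register-form PSHUFD match has roughly this shape (VR128 and PSHUFDri come
// from other .td files and are shown here as assumptions):
//
//   def : Pat<(v4i32 (X86PShufd VR128:$src, (i8 imm:$imm))),
//             (PSHUFDri VR128:$src, imm:$imm)>;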

def X86Shufp : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;

def X86VPermilp  : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>;
def X86VPermv    : SDNode<"X86ISD::VPERMV",   SDTShuff2Op>;
def X86VPermi    : SDNode<"X86ISD::VPERMI",   SDTShuff2OpI>;

def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;

def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;

def X86Blendpw   : SDNode<"X86ISD::BLENDPW",   SDTBlend>;
def X86Blendps   : SDNode<"X86ISD::BLENDPS",   SDTBlend>;
def X86Blendpd   : SDNode<"X86ISD::BLENDPD",   SDTBlend>;
def X86Fmadd     : SDNode<"X86ISD::FMADD",     SDTFma>;
def X86Fnmadd    : SDNode<"X86ISD::FNMADD",    SDTFma>;
def X86Fmsub     : SDNode<"X86ISD::FMSUB",     SDTFma>;
def X86Fnmsub    : SDNode<"X86ISD::FNMSUB",    SDTFma>;
def X86Fmaddsub  : SDNode<"X86ISD::FMADDSUB",  SDTFma>;
def X86Fmsubadd  : SDNode<"X86ISD::FMSUBADD",  SDTFma>;

def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
                                         SDTCisVT<4, i8>]>;
def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, i32>,
                                         SDTCisVT<4, v16i8>, SDTCisVT<5, i32>,
                                         SDTCisVT<6, i8>]>;

def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
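
// Illustration only: these complex patterns pair with the ssmem/sdmem
// operands above.  A scalar-intrinsic instruction elsewhere in the backend
// typically takes (ins ssmem:$src) and matches a pattern of roughly this
// shape (the intrinsic name is one plausible example, not taken from this
// file):
//
//   [(set GR32:$dst, (int_x86_sse_cvtss2si sse_load_f32:$src))]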

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32    : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64    : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv2i64    : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32    : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64    : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64    : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
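
// Illustration only: because 128-bit integer vector loads are retyped as
// v2i64, patterns on narrower integer element types match loadv2i64 through
// a bitconvert fragment (see bc_v4i32 below), roughly (PADDDrm is assumed
// from the instruction definitions and shown only as a sketch):
//
//   def : Pat<(v4i32 (add VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
//             (PADDDrm VR128:$src1, addr:$src2)>;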

// 128-/256-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;

// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;
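
// Illustration only: the aligned fragments are what let MOVAPS-style patterns
// refuse under-aligned memory, roughly (MOVAPSrm is assumed from the
// instruction definitions):
//
//   def : Pat<(v4f32 (alignedloadv4f32 addr:$src)),
//             (MOVAPSrm addr:$src)>;
//
// Unaligned loads instead fall through to MOVUPS-style patterns.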

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.  If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return    Subtarget->hasVectorUAMem()
         || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
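
// Illustration only: memop (and the typed memopv* fragments below) is the
// fragment folded into the memory forms of most SSE arithmetic patterns,
// roughly (ADDPSrm is assumed from the instruction definitions):
//
//   def : Pat<(v4f32 (fadd VR128:$src1, (memopv4f32 addr:$src2))),
//             (ADDPSrm VR128:$src1, addr:$src2)>;
//
// so whether an under-aligned load may be folded is decided entirely by the
// predicate above.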

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;

// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// 256-bit memop pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx  : PatFrag<(ops node:$ptr), (x86mmx  (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                    (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
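
// Illustration only: the non-temporal fragments are matched by MOVNT-style
// patterns so that only stores carrying the non-temporal hint select the
// streaming instructions, roughly (MOVNTPSmr is assumed from the instruction
// definitions):
//
//   def : Pat<(alignednontemporalstore (v4f32 VR128:$src), addr:$dst),
//             (MOVNTPSmr addr:$dst, VR128:$src)>;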

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v32i8 : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64 : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
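
// Illustration only: a bc_ fragment fixes the result type of a bitconvert, so
// it is usually wrapped around one of the promoted integer load fragments
// above.  As a sketch, a named combination (not used by any pattern here)
// could be written as:
def bc_memopv4i32_example : PatFrag<(ops node:$ptr),
                                    (v4i32 (bitconvert (memopv2i64 node:$ptr)))>;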

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;


def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm  : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
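
// Illustration only: an SDNodeXForm is applied on the output side of a
// pattern, so an intrinsic whose immediate counts bits can select an
// instruction whose immediate counts bytes, roughly (the intrinsic and
// PSLLDQri are assumed from other .td files):
//
//   def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
//             (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;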

// EXTRACT_get_vextractf128_imm xform function: convert extract_subvector index
// to VEXTRACTF128 imm.
def EXTRACT_get_vextractf128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACTF128Immediate(N));
}]>;

// INSERT_get_vinsertf128_imm xform function: convert insert_subvector index to
// VINSERTF128 imm.
def INSERT_get_vinsertf128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERTF128Immediate(N));
}]>;

def vextractf128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                   (extract_subvector node:$bigvec,
                                                      node:$index), [{
  return X86::isVEXTRACTF128Index(N);
}], EXTRACT_get_vextractf128_imm>;

def vinsertf128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                      node:$index),
                                 (insert_subvector node:$bigvec, node:$smallvec,
                                                   node:$index), [{
  return X86::isVINSERTF128Index(N);
}], INSERT_get_vinsertf128_imm>;
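
// Illustration only: these fragments carry an operand transform, so when a
// pattern names the matched fragment the xform converts the subvector index
// into the instruction's lane immediate, roughly (VR256 and VEXTRACTF128rr
// are assumed from other .td files):
//
//   def : Pat<(vextractf128_extract:$ext (v8f32 VR256:$src1), (i32 imm)),
//             (v4f32 (VEXTRACTF128rr VR256:$src1,
//                       (EXTRACT_get_vextractf128_imm VR256:$ext)))>;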