//===- HexagonInstrInfoVector.td - Hexagon Vector Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Hexagon Vector instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

def V2I1:  PatLeaf<(v2i1  PredRegs:$R)>;
def V4I1:  PatLeaf<(v4i1  PredRegs:$R)>;
def V8I1:  PatLeaf<(v8i1  PredRegs:$R)>;
def V4I8:  PatLeaf<(v4i8  IntRegs:$R)>;
def V2I16: PatLeaf<(v2i16 IntRegs:$R)>;
def V8I8:  PatLeaf<(v8i8  DoubleRegs:$R)>;
def V4I16: PatLeaf<(v4i16 DoubleRegs:$R)>;
def V2I32: PatLeaf<(v2i32 DoubleRegs:$R)>;


multiclass bitconvert_32<ValueType a, ValueType b> {
  def : Pat <(b (bitconvert (a IntRegs:$src))),
             (b IntRegs:$src)>;
  def : Pat <(a (bitconvert (b IntRegs:$src))),
             (a IntRegs:$src)>;
}

multiclass bitconvert_64<ValueType a, ValueType b> {
  def : Pat <(b (bitconvert (a DoubleRegs:$src))),
             (b DoubleRegs:$src)>;
  def : Pat <(a (bitconvert (b DoubleRegs:$src))),
             (a DoubleRegs:$src)>;
}

// Bitconvert between vector types and integer types of the same width.
defm : bitconvert_32<v4i8,  i32>;
defm : bitconvert_32<v2i16, i32>;
defm : bitconvert_64<v8i8,  i64>;
defm : bitconvert_64<v4i16, i64>;
defm : bitconvert_64<v2i32, i64>;
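
// For reference (illustrative only), bitconvert_32<v4i8, i32> above expands
// to roughly:
//   def : Pat <(i32 (bitconvert (v4i8 IntRegs:$src))), (i32 IntRegs:$src)>;
//   def : Pat <(v4i8 (bitconvert (i32 IntRegs:$src))), (v4i8 IntRegs:$src)>;
// i.e. a bitcast between a vector and an integer of the same width is a no-op
// on the register contents.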

// Vector shift support. Vector shifts in Hexagon are represented differently
// from LLVM's generic form.
// LLVM assumes that a vector shift has the form
//   <VT> = SHL/SRA/SRL <VT> by <VT>
// i.e. a per-element shift amount, while Hexagon shifts every element of the
// vector by a single immediate or i32 amount:
//   <VT> = SHL/SRA/SRL <VT> by <imm/i32>
// As a result, special care is needed to guarantee correctness and
// performance.
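//
// For example (illustrative only), an IR shift by a splatted constant such as
//   %r = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
// must be selected to a single S2_asr_i_vh with immediate #3 rather than to a
// per-element shift; the classes and patterns below provide that mapping.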
class vshift_v4i16<SDNode Op, string Str, bits<3>MajOp, bits<3>MinOp>
  : S_2OpInstImm<Str, MajOp, MinOp, u4Imm,
      [(set (v4i16 DoubleRegs:$dst),
            (Op (v4i16 DoubleRegs:$src1), u4ImmPred:$src2))]> {
  bits<4> src2;
  let Inst{11-8} = src2;
}

class vshift_v2i32<SDNode Op, string Str, bits<3>MajOp, bits<3>MinOp>
  : S_2OpInstImm<Str, MajOp, MinOp, u5Imm,
      [(set (v2i32 DoubleRegs:$dst),
            (Op (v2i32 DoubleRegs:$src1), u5ImmPred:$src2))]> {
  bits<5> src2;
  let Inst{12-8} = src2;
}

def : Pat<(v2i16 (add (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
          (A2_svaddh IntRegs:$src1, IntRegs:$src2)>;

def : Pat<(v2i16 (sub (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
          (A2_svsubh IntRegs:$src1, IntRegs:$src2)>;

def S2_asr_i_vw : vshift_v2i32<sra, "vasrw", 0b010, 0b000>;
def S2_lsr_i_vw : vshift_v2i32<srl, "vlsrw", 0b010, 0b001>;
def S2_asl_i_vw : vshift_v2i32<shl, "vaslw", 0b010, 0b010>;

def S2_asr_i_vh : vshift_v4i16<sra, "vasrh", 0b100, 0b000>;
def S2_lsr_i_vh : vshift_v4i16<srl, "vlsrh", 0b100, 0b001>;
def S2_asl_i_vh : vshift_v4i16<shl, "vaslh", 0b100, 0b010>;


def HexagonVSPLATB: SDNode<"HexagonISD::VSPLATB", SDTUnaryOp>;
def HexagonVSPLATH: SDNode<"HexagonISD::VSPLATH", SDTUnaryOp>;

// Replicate the low 8 bits of the 32-bit source register into each of the
// four bytes of the 32-bit destination register.
def: Pat<(v4i8  (HexagonVSPLATB I32:$Rs)), (S2_vsplatrb I32:$Rs)>;

// Replicate the low 16 bits of the 32-bit source register into each of the
// four halfwords of the 64-bit destination register.
def: Pat<(v4i16 (HexagonVSPLATH I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
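
// Illustrative examples: if the low byte of $Rs is 0xAB, HexagonVSPLATB yields
// 0xABABABAB; if the low halfword is 0x1234, HexagonVSPLATH yields
// 0x1234123412341234 in the 64-bit destination.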


class VArith_pat <InstHexagon MI, SDNode Op, PatFrag Type>
  : Pat <(Op Type:$Rss, Type:$Rtt),
         (MI Type:$Rss, Type:$Rtt)>;

def: VArith_pat <A2_vaddub, add, V8I8>;
def: VArith_pat <A2_vaddh,  add, V4I16>;
def: VArith_pat <A2_vaddw,  add, V2I32>;
def: VArith_pat <A2_vsubub, sub, V8I8>;
def: VArith_pat <A2_vsubh,  sub, V4I16>;
def: VArith_pat <A2_vsubw,  sub, V2I32>;

def: VArith_pat <A2_and,    and, V2I16>;
def: VArith_pat <A2_xor,    xor, V2I16>;
def: VArith_pat <A2_or,     or,  V2I16>;

def: VArith_pat <A2_andp,   and, V8I8>;
def: VArith_pat <A2_andp,   and, V4I16>;
def: VArith_pat <A2_andp,   and, V2I32>;
def: VArith_pat <A2_orp,    or,  V8I8>;
def: VArith_pat <A2_orp,    or,  V4I16>;
def: VArith_pat <A2_orp,    or,  V2I32>;
def: VArith_pat <A2_xorp,   xor, V8I8>;
def: VArith_pat <A2_xorp,   xor, V4I16>;
def: VArith_pat <A2_xorp,   xor, V2I32>;

def: Pat<(v2i32 (sra V2I32:$b, (i64 (HexagonCOMBINE (i32 u5ImmPred:$c),
                                                    (i32 u5ImmPred:$c))))),
         (S2_asr_i_vw V2I32:$b, imm:$c)>;
def: Pat<(v2i32 (srl V2I32:$b, (i64 (HexagonCOMBINE (i32 u5ImmPred:$c),
                                                    (i32 u5ImmPred:$c))))),
         (S2_lsr_i_vw V2I32:$b, imm:$c)>;
def: Pat<(v2i32 (shl V2I32:$b, (i64 (HexagonCOMBINE (i32 u5ImmPred:$c),
                                                    (i32 u5ImmPred:$c))))),
         (S2_asl_i_vw V2I32:$b, imm:$c)>;

def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
         (S2_asr_i_vh V4I16:$b, imm:$c)>;
def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
         (S2_lsr_i_vh V4I16:$b, imm:$c)>;
def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
         (S2_asl_i_vh V4I16:$b, imm:$c)>;


def SDTHexagon_v2i32_v2i32_i32 : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisVT<0, v2i32>, SDTCisInt<2>]>;
def SDTHexagon_v4i16_v4i16_i32 : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisVT<0, v4i16>, SDTCisInt<2>]>;

def HexagonVSRAW: SDNode<"HexagonISD::VSRAW", SDTHexagon_v2i32_v2i32_i32>;
def HexagonVSRAH: SDNode<"HexagonISD::VSRAH", SDTHexagon_v4i16_v4i16_i32>;
def HexagonVSRLW: SDNode<"HexagonISD::VSRLW", SDTHexagon_v2i32_v2i32_i32>;
def HexagonVSRLH: SDNode<"HexagonISD::VSRLH", SDTHexagon_v4i16_v4i16_i32>;
def HexagonVSHLW: SDNode<"HexagonISD::VSHLW", SDTHexagon_v2i32_v2i32_i32>;
def HexagonVSHLH: SDNode<"HexagonISD::VSHLH", SDTHexagon_v4i16_v4i16_i32>;

def: Pat<(v2i32 (HexagonVSRAW V2I32:$Rs, u5ImmPred:$u5)),
         (S2_asr_i_vw V2I32:$Rs, imm:$u5)>;
def: Pat<(v4i16 (HexagonVSRAH V4I16:$Rs, u4ImmPred:$u4)),
         (S2_asr_i_vh V4I16:$Rs, imm:$u4)>;
def: Pat<(v2i32 (HexagonVSRLW V2I32:$Rs, u5ImmPred:$u5)),
         (S2_lsr_i_vw V2I32:$Rs, imm:$u5)>;
def: Pat<(v4i16 (HexagonVSRLH V4I16:$Rs, u4ImmPred:$u4)),
         (S2_lsr_i_vh V4I16:$Rs, imm:$u4)>;
def: Pat<(v2i32 (HexagonVSHLW V2I32:$Rs, u5ImmPred:$u5)),
         (S2_asl_i_vw V2I32:$Rs, imm:$u5)>;
def: Pat<(v4i16 (HexagonVSHLH V4I16:$Rs, u4ImmPred:$u4)),
         (S2_asl_i_vh V4I16:$Rs, imm:$u4)>;

// Vector shift words by register
def S2_asr_r_vw : T_S3op_shiftVect < "vasrw", 0b00, 0b00>;
def S2_lsr_r_vw : T_S3op_shiftVect < "vlsrw", 0b00, 0b01>;
def S2_asl_r_vw : T_S3op_shiftVect < "vaslw", 0b00, 0b10>;
def S2_lsl_r_vw : T_S3op_shiftVect < "vlslw", 0b00, 0b11>;

// Vector shift halfwords by register
def S2_asr_r_vh : T_S3op_shiftVect < "vasrh", 0b01, 0b00>;
def S2_lsr_r_vh : T_S3op_shiftVect < "vlsrh", 0b01, 0b01>;
def S2_asl_r_vh : T_S3op_shiftVect < "vaslh", 0b01, 0b10>;
def S2_lsl_r_vh : T_S3op_shiftVect < "vlslh", 0b01, 0b11>;

class vshift_rr_pat<InstHexagon MI, SDNode Op, PatFrag Value>
  : Pat <(Op Value:$Rs, I32:$Rt),
         (MI Value:$Rs, I32:$Rt)>;

def: vshift_rr_pat <S2_asr_r_vw, HexagonVSRAW, V2I32>;
def: vshift_rr_pat <S2_asr_r_vh, HexagonVSRAH, V4I16>;
def: vshift_rr_pat <S2_lsr_r_vw, HexagonVSRLW, V2I32>;
def: vshift_rr_pat <S2_lsr_r_vh, HexagonVSRLH, V4I16>;
def: vshift_rr_pat <S2_asl_r_vw, HexagonVSHLW, V2I32>;
def: vshift_rr_pat <S2_asl_r_vh, HexagonVSHLH, V4I16>;


def SDTHexagonVecCompare_v8i8 : SDTypeProfile<1, 2,
  [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v8i8>]>;
def SDTHexagonVecCompare_v4i16 : SDTypeProfile<1, 2,
  [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v4i16>]>;
def SDTHexagonVecCompare_v2i32 : SDTypeProfile<1, 2,
  [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v2i32>]>;

def HexagonVCMPBEQ:  SDNode<"HexagonISD::VCMPBEQ",  SDTHexagonVecCompare_v8i8>;
def HexagonVCMPBGT:  SDNode<"HexagonISD::VCMPBGT",  SDTHexagonVecCompare_v8i8>;
def HexagonVCMPBGTU: SDNode<"HexagonISD::VCMPBGTU", SDTHexagonVecCompare_v8i8>;
def HexagonVCMPHEQ:  SDNode<"HexagonISD::VCMPHEQ",  SDTHexagonVecCompare_v4i16>;
def HexagonVCMPHGT:  SDNode<"HexagonISD::VCMPHGT",  SDTHexagonVecCompare_v4i16>;
def HexagonVCMPHGTU: SDNode<"HexagonISD::VCMPHGTU", SDTHexagonVecCompare_v4i16>;
def HexagonVCMPWEQ:  SDNode<"HexagonISD::VCMPWEQ",  SDTHexagonVecCompare_v2i32>;
def HexagonVCMPWGT:  SDNode<"HexagonISD::VCMPWGT",  SDTHexagonVecCompare_v2i32>;
def HexagonVCMPWGTU: SDNode<"HexagonISD::VCMPWGTU", SDTHexagonVecCompare_v2i32>;


class vcmp_i1_pat<InstHexagon MI, SDNode Op, PatFrag Value>
  : Pat <(i1 (Op Value:$Rs, Value:$Rt)),
         (MI Value:$Rs, Value:$Rt)>;

def: vcmp_i1_pat<A2_vcmpbeq,  HexagonVCMPBEQ,  V8I8>;
def: vcmp_i1_pat<A4_vcmpbgt,  HexagonVCMPBGT,  V8I8>;
def: vcmp_i1_pat<A2_vcmpbgtu, HexagonVCMPBGTU, V8I8>;

def: vcmp_i1_pat<A2_vcmpheq,  HexagonVCMPHEQ,  V4I16>;
def: vcmp_i1_pat<A2_vcmphgt,  HexagonVCMPHGT,  V4I16>;
def: vcmp_i1_pat<A2_vcmphgtu, HexagonVCMPHGTU, V4I16>;

def: vcmp_i1_pat<A2_vcmpweq,  HexagonVCMPWEQ,  V2I32>;
def: vcmp_i1_pat<A2_vcmpwgt,  HexagonVCMPWGT,  V2I32>;
def: vcmp_i1_pat<A2_vcmpwgtu, HexagonVCMPWGTU, V2I32>;


class vcmp_vi1_pat<InstHexagon MI, PatFrag Op, PatFrag InVal, ValueType OutTy>
  : Pat <(OutTy (Op InVal:$Rs, InVal:$Rt)),
         (MI InVal:$Rs, InVal:$Rt)>;

def: vcmp_vi1_pat<A2_vcmpweq,  seteq,  V2I32, v2i1>;
def: vcmp_vi1_pat<A2_vcmpwgt,  setgt,  V2I32, v2i1>;
def: vcmp_vi1_pat<A2_vcmpwgtu, setugt, V2I32, v2i1>;

def: vcmp_vi1_pat<A2_vcmpheq,  seteq,  V4I16, v4i1>;
def: vcmp_vi1_pat<A2_vcmphgt,  setgt,  V4I16, v4i1>;
def: vcmp_vi1_pat<A2_vcmphgtu, setugt, V4I16, v4i1>;


// Hexagon does not have a vector multiply with C semantics.
// Instead, generate a pseudo instruction that gets expanded into two
// scalar MPYI instructions.
// This is expanded by ExpandPostRAPseudos.
let isPseudo = 1 in
def VMULW : PseudoM<(outs DoubleRegs:$Rd),
      (ins DoubleRegs:$Rs, DoubleRegs:$Rt),
      ".error \"Should never try to emit VMULW\"",
      [(set V2I32:$Rd, (mul V2I32:$Rs, V2I32:$Rt))]>;

let isPseudo = 1 in
def VMULW_ACC : PseudoM<(outs DoubleRegs:$Rd),
      (ins DoubleRegs:$Rx, DoubleRegs:$Rs, DoubleRegs:$Rt),
      ".error \"Should never try to emit VMULW_ACC\"",
      [(set V2I32:$Rd, (add V2I32:$Rx, (mul V2I32:$Rs, V2I32:$Rt)))],
      "$Rd = $Rx">;
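
// Illustrative expansion (performed post-RA, not by these patterns): a
// selected VMULW roughly becomes
//   Rd.w[0] = mpyi(Rs.w[0], Rt.w[0])
//   Rd.w[1] = mpyi(Rs.w[1], Rt.w[1])
// and VMULW_ACC is handled analogously, with the result tied to the
// accumulator $Rx.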

// Add two v4i8 vectors: Hexagon has no instruction for this, so use the
// 64-bit v8i8 add and keep only the low half of the result.
def: Pat<(v4i8 (add (v4i8 IntRegs:$Rs), (v4i8 IntRegs:$Rt))),
         (LoReg (A2_vaddub (Zext64 $Rs), (Zext64 $Rt)))>;

// Subtract two v4i8 vectors: Hexagon has no instruction for this, so use the
// 64-bit v8i8 subtract and keep only the low half of the result.
def: Pat<(v4i8 (sub (v4i8 IntRegs:$Rs), (v4i8 IntRegs:$Rt))),
         (LoReg (A2_vsubub (Zext64 $Rs), (Zext64 $Rt)))>;
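
// Sketch of the widening above (assuming Zext64 zero-extends i32 to i64 and
// LoReg extracts the low 32-bit subregister): byte lanes are independent, so
// zero-padding the upper four bytes does not disturb the four original lanes,
// and the low word of the 64-bit result holds the v4i8 sum or difference.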

//
// No 32-bit vector mux.
//
def: Pat<(v4i8 (select I1:$Pu, V4I8:$Rs, V4I8:$Rt)),
         (LoReg (C2_vmux I1:$Pu, (Zext64 $Rs), (Zext64 $Rt)))>;
def: Pat<(v2i16 (select I1:$Pu, V2I16:$Rs, V2I16:$Rt)),
         (LoReg (C2_vmux I1:$Pu, (Zext64 $Rs), (Zext64 $Rt)))>;

//
// 64-bit vector mux.
//
def: Pat<(v8i8 (vselect V8I1:$Pu, V8I8:$Rs, V8I8:$Rt)),
         (C2_vmux V8I1:$Pu, V8I8:$Rs, V8I8:$Rt)>;
def: Pat<(v4i16 (vselect V4I1:$Pu, V4I16:$Rs, V4I16:$Rt)),
         (C2_vmux V4I1:$Pu, V4I16:$Rs, V4I16:$Rt)>;
def: Pat<(v2i32 (vselect V2I1:$Pu, V2I32:$Rs, V2I32:$Rt)),
         (C2_vmux V2I1:$Pu, V2I32:$Rs, V2I32:$Rt)>;

//
// No 32-bit vector compare.
//
def: Pat<(i1 (seteq V4I8:$Rs, V4I8:$Rt)),
         (A2_vcmpbeq (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setgt V4I8:$Rs, V4I8:$Rt)),
         (A4_vcmpbgt (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setugt V4I8:$Rs, V4I8:$Rt)),
         (A2_vcmpbgtu (Zext64 $Rs), (Zext64 $Rt))>;

def: Pat<(i1 (seteq V2I16:$Rs, V2I16:$Rt)),
         (A2_vcmpheq (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setgt V2I16:$Rs, V2I16:$Rt)),
         (A2_vcmphgt (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setugt V2I16:$Rs, V2I16:$Rt)),
         (A2_vcmphgtu (Zext64 $Rs), (Zext64 $Rt))>;


class InvertCmp_pat<InstHexagon InvMI, PatFrag CmpOp, PatFrag Value,
                    ValueType CmpTy>
  : Pat<(CmpTy (CmpOp Value:$Rs, Value:$Rt)),
        (InvMI Value:$Rt, Value:$Rs)>;

// Map from a compare operation to the corresponding instruction with the
// order of operands reversed, e.g. x < y --> cmp.gt(y,x).
def: InvertCmp_pat<A4_vcmpbgt,  setlt,  V8I8,  i1>;
def: InvertCmp_pat<A4_vcmpbgt,  setlt,  V8I8,  v8i1>;
def: InvertCmp_pat<A2_vcmphgt,  setlt,  V4I16, i1>;
def: InvertCmp_pat<A2_vcmphgt,  setlt,  V4I16, v4i1>;
def: InvertCmp_pat<A2_vcmpwgt,  setlt,  V2I32, i1>;
def: InvertCmp_pat<A2_vcmpwgt,  setlt,  V2I32, v2i1>;

def: InvertCmp_pat<A2_vcmpbgtu, setult, V8I8,  i1>;
def: InvertCmp_pat<A2_vcmpbgtu, setult, V8I8,  v8i1>;
def: InvertCmp_pat<A2_vcmphgtu, setult, V4I16, i1>;
def: InvertCmp_pat<A2_vcmphgtu, setult, V4I16, v4i1>;
def: InvertCmp_pat<A2_vcmpwgtu, setult, V2I32, i1>;
def: InvertCmp_pat<A2_vcmpwgtu, setult, V2I32, v2i1>;

// Map from vcmpne(Rss) -> !vcmpew(Rss).
// rs != rt -> !(rs == rt).
def: Pat<(v2i1 (setne V2I32:$Rs, V2I32:$Rt)),
         (C2_not (v2i1 (A2_vcmpweq V2I32:$Rs, V2I32:$Rt)))>;


// Truncate: from vector B copy all 'E'ven 'B'yte elements:
// A[0] = B[0];  A[1] = B[2];  A[2] = B[4];  A[3] = B[6];
def: Pat<(v4i8 (trunc V4I16:$Rs)),
         (S2_vtrunehb V4I16:$Rs)>;

// Truncate: from vector B copy all 'O'dd 'B'yte elements:
// A[0] = B[1];  A[1] = B[3];  A[2] = B[5];  A[3] = B[7];
// S2_vtrunohb

// Truncate: from vectors B and C copy all 'E'ven 'H'alf-word elements:
// A[0] = B[0];  A[1] = B[2];  A[2] = C[0];  A[3] = C[2];
// S2_vtruneh

def: Pat<(v2i16 (trunc V2I32:$Rs)),
         (LoReg (S2_packhl (HiReg $Rs), (LoReg $Rs)))>;


def HexagonVSXTBH : SDNode<"HexagonISD::VSXTBH", SDTUnaryOp>;
def HexagonVSXTBW : SDNode<"HexagonISD::VSXTBW", SDTUnaryOp>;

def: Pat<(i64 (HexagonVSXTBH I32:$Rs)), (S2_vsxtbh I32:$Rs)>;
def: Pat<(i64 (HexagonVSXTBW I32:$Rs)), (S2_vsxthw I32:$Rs)>;

def: Pat<(v4i16 (zext   V4I8:$Rs)),  (S2_vzxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (zext   V2I16:$Rs)), (S2_vzxthw V2I16:$Rs)>;
def: Pat<(v4i16 (anyext V4I8:$Rs)),  (S2_vzxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (anyext V2I16:$Rs)), (S2_vzxthw V2I16:$Rs)>;
def: Pat<(v4i16 (sext   V4I8:$Rs)),  (S2_vsxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (sext   V2I16:$Rs)), (S2_vsxthw V2I16:$Rs)>;

// Sign extends a v2i8 into a v2i32.
def: Pat<(v2i32 (sext_inreg V2I32:$Rs, v2i8)),
         (A2_combinew (A2_sxtb (HiReg $Rs)), (A2_sxtb (LoReg $Rs)))>;

// Sign extends a v2i16 into a v2i32.
def: Pat<(v2i32 (sext_inreg V2I32:$Rs, v2i16)),
         (A2_combinew (A2_sxth (HiReg $Rs)), (A2_sxth (LoReg $Rs)))>;


// vmpyh (below) multiplies two v2i16 vectors and returns a v2i32. It uses the
// saturating multiply, since Hexagon does not provide a non-saturating vector
// multiply, and saturation cannot affect a result that has twice the
// precision of the operands.

// Multiply two v2i16 vectors: Hexagon has no multiply with C semantics for
// this type, so the pattern below uses the halfword multiply vmpyh, which
// takes two v2i16 and returns a v2i32. The result is then truncated back to
// v2i16, which reproduces C's unsigned wrap-around semantics.
def vmpyh: OutPatFrag<(ops node:$Rs, node:$Rt),
                      (M2_vmpy2s_s0 (i32 $Rs), (i32 $Rt))>;

def: Pat<(v2i16 (mul V2I16:$Rs, V2I16:$Rt)),
         (LoReg (S2_vtrunewh (v2i32 (A2_combineii 0, 0)),
                             (v2i32 (vmpyh V2I16:$Rs, V2I16:$Rt))))>;
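
// Sketch of the selection above (assuming S2_vtrunewh packs the even, i.e.
// low, halfword of each word of its operands): vmpyh produces the two full
// 32-bit products, S2_vtrunewh keeps their low halfwords (the products modulo
// 2^16), and LoReg extracts the low register, so the final v2i16 matches C's
// wrap-around multiply; the zero pair from A2_combineii only fills the
// discarded high half.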

// Multiplies two v4i16 vectors.
def: Pat<(v4i16 (mul V4I16:$Rs, V4I16:$Rt)),
         (S2_vtrunewh (vmpyh (HiReg $Rs), (HiReg $Rt)),
                      (vmpyh (LoReg $Rs), (LoReg $Rt)))>;

def VMPYB_no_V5: OutPatFrag<(ops node:$Rs, node:$Rt),
  (S2_vtrunewh (vmpyh (HiReg (S2_vsxtbh $Rs)), (HiReg (S2_vsxtbh $Rt))),
               (vmpyh (LoReg (S2_vsxtbh $Rs)), (LoReg (S2_vsxtbh $Rt))))>;
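
// Sketch of the pre-V5 byte multiply above: each v4i8 operand is sign-extended
// to v4i16 (S2_vsxtbh), the high and low halfword pairs are multiplied with
// vmpyh, and S2_vtrunewh gathers the truncated 16-bit products; callers then
// narrow those halfwords back to bytes with S2_vtrunehb.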

// Multiplies two v4i8 vectors.
def: Pat<(v4i8 (mul V4I8:$Rs, V4I8:$Rt)),
         (S2_vtrunehb (M5_vmpybsu V4I8:$Rs, V4I8:$Rt))>,
     Requires<[HasV5T]>;

def: Pat<(v4i8 (mul V4I8:$Rs, V4I8:$Rt)),
         (S2_vtrunehb (VMPYB_no_V5 V4I8:$Rs, V4I8:$Rt))>;

// Multiplies two v8i8 vectors.
def: Pat<(v8i8 (mul V8I8:$Rs, V8I8:$Rt)),
         (A2_combinew (S2_vtrunehb (M5_vmpybsu (HiReg $Rs), (HiReg $Rt))),
                      (S2_vtrunehb (M5_vmpybsu (LoReg $Rs), (LoReg $Rt))))>,
     Requires<[HasV5T]>;

def: Pat<(v8i8 (mul V8I8:$Rs, V8I8:$Rt)),
         (A2_combinew (S2_vtrunehb (VMPYB_no_V5 (HiReg $Rs), (HiReg $Rt))),
                      (S2_vtrunehb (VMPYB_no_V5 (LoReg $Rs), (LoReg $Rt))))>;


class shuffler<SDNode Op, string Str>
  : SInst<(outs DoubleRegs:$a), (ins DoubleRegs:$b, DoubleRegs:$c),
      "$a = " # Str # "($b, $c)",
      [(set (i64 DoubleRegs:$a),
            (i64 (Op (i64 DoubleRegs:$b), (i64 DoubleRegs:$c))))],
      "", S_3op_tc_1_SLOT23>;

def SDTHexagonBinOp64 : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>]>;

def HexagonSHUFFEB: SDNode<"HexagonISD::SHUFFEB", SDTHexagonBinOp64>;
def HexagonSHUFFEH: SDNode<"HexagonISD::SHUFFEH", SDTHexagonBinOp64>;
def HexagonSHUFFOB: SDNode<"HexagonISD::SHUFFOB", SDTHexagonBinOp64>;
def HexagonSHUFFOH: SDNode<"HexagonISD::SHUFFOH", SDTHexagonBinOp64>;

class ShufflePat<InstHexagon MI, SDNode Op>
  : Pat<(i64 (Op DoubleRegs:$src1, DoubleRegs:$src2)),
        (i64 (MI DoubleRegs:$src1, DoubleRegs:$src2))>;

// Shuffles even bytes for i=0..3: A[2*i].b = C[2*i].b; A[2*i+1].b = B[2*i].b
def: ShufflePat<S2_shuffeb, HexagonSHUFFEB>;

// Shuffles odd bytes for i=0..3: A[2*i].b = C[2*i+1].b; A[2*i+1].b = B[2*i+1].b
def: ShufflePat<S2_shuffob, HexagonSHUFFOB>;

// Shuffles even half for i=0,1: A[2*i].h = C[2*i].h; A[2*i+1].h = B[2*i].h
def: ShufflePat<S2_shuffeh, HexagonSHUFFEH>;

// Shuffles odd half for i=0,1: A[2*i].h = C[2*i+1].h; A[2*i+1].h = B[2*i+1].h
def: ShufflePat<S2_shuffoh, HexagonSHUFFOH>;


// Truncated store from v4i16 to v4i8.
def truncstorev4i8: PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr),
    [{ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4i8; }]>;

// Truncated store from v2i32 to v2i16.
def truncstorev2i16: PatFrag<(ops node:$val, node:$ptr),
                             (truncstore node:$val, node:$ptr),
    [{ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i16; }]>;

def: Pat<(truncstorev2i16 V2I32:$Rs, I32:$Rt),
         (S2_storeri_io I32:$Rt, 0, (LoReg (S2_packhl (HiReg $Rs),
                                                      (LoReg $Rs))))>;

def: Pat<(truncstorev4i8 V4I16:$Rs, I32:$Rt),
         (S2_storeri_io I32:$Rt, 0, (S2_vtrunehb V4I16:$Rs))>;
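
// Illustrative note: truncstorev4i8 / truncstorev2i16 match store nodes whose
// memory type is v4i8 / v2i16 (a v4i16 or v2i32 value being stored truncated);
// the value is narrowed in a register with S2_vtrunehb or S2_packhl and then
// stored as a single 32-bit word.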


// Zero- and sign-extending loads from v2i8 (into v2i16 or v2i32).
def zextloadv2i8: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i8; }]>;

def sextloadv2i8: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i8; }]>;

def: Pat<(v2i16 (zextloadv2i8 I32:$Rs)),
         (LoReg (v4i16 (S2_vzxtbh (L2_loadruh_io I32:$Rs, 0))))>;

def: Pat<(v2i16 (sextloadv2i8 I32:$Rs)),
         (LoReg (v4i16 (S2_vsxtbh (L2_loadrh_io I32:$Rs, 0))))>;

def: Pat<(v2i32 (zextloadv2i8 I32:$Rs)),
         (S2_vzxthw (LoReg (v4i16 (S2_vzxtbh (L2_loadruh_io I32:$Rs, 0)))))>;

def: Pat<(v2i32 (sextloadv2i8 I32:$Rs)),
         (S2_vsxthw (LoReg (v4i16 (S2_vsxtbh (L2_loadrh_io I32:$Rs, 0)))))>;
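
// Sketch of the extending loads above: the two bytes are loaded with a single
// scalar halfword load (L2_loadruh_io / L2_loadrh_io), widened byte-to-halfword
// with S2_vzxtbh / S2_vsxtbh, and for the v2i32 forms widened again
// halfword-to-word with S2_vzxthw / S2_vsxthw.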