//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace llvm {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
        : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
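
// Editorial note: under the base ARM calling conventions the first four
// argument words travel in r0-r3 and the remainder spill to the stack, so
// e.g. f(int a, int b, int c, int d, int e) passes a..d in r0-r3 and e at [sp].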

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}
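
// Editorial sketch of the promotion above: for VT = v8i8 with
// PromotedBitwiseVT = v2i32, an AND of two v8i8 values is bitcast to v2i32,
// performed there, and bitcast back; the bit pattern is unchanged, so one
// 64-bit vector AND serves every integer element width.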

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
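
      // Editorial note: the condition passed to setCmpLibcallCC is used to
      // test the libcall's integer result against zero. __eqsf2vfp returns
      // nonzero when its operands compare equal, so OEQ_F32 is true iff
      // (result != 0), i.e. SETNE; only O_F32 inverts the sense (SETEQ)
      // because it shares __unordsf2vfp with UO_F32.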

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32,  "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
    setLibcallName(RTLIB::O_F32,   "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64,   "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY,  "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET,  "__aeabi_memset");
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }
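
  // Editorial note: a divmod helper yields both results in one call;
  // compiler-rt declares it approximately as
  //   int __divmodsi4(int a, int b, int *rem); // returns a/b, stores a%b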

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }
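
  // Editorial note: the four indexed flavors map onto ARM writeback
  // addressing, e.g. in ARM assembly:
  //   ldr r0, [r1, #4]!   @ pre-indexed:  load from r1+4, then r1 += 4
  //   ldr r0, [r1], #4    @ post-indexed: load from r1, then r1 += 4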

  // i64 operation support.
  setOperationAction(ISD::MUL,     MVT::i64, Expand);
  setOperationAction(ISD::MULHU,   MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL,       MVT::i64, Custom);
  setOperationAction(ISD::SRA,       MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC,    MVT::i32, Custom);
    setOperationAction(ISD::ADDE,    MVT::i32, Custom);
    setOperationAction(ISD::SUBC,    MVT::i32, Custom);
    setOperationAction(ISD::SUBE,    MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV,  MVT::i32, Expand);
    setOperationAction(ISD::UDIV,  MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
  setOperationAction(ISD::EXCEPTIONADDR,      MVT::i32,   Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as the representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN:    return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::ADDC:          return "ARMISD::ADDC";
  case ARMISD::ADDE:          return "ARMISD::ADDE";
  case ARMISD::SUBC:          return "ARMISD::SUBC";
  case ARMISD::SUBE:          return "ARMISD::SUBE";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::VTBL1:         return "ARMISD::VTBL1";
  case ARMISD::VTBL2:         return "ARMISD::VTBL2";
  case ARMISD::VMULLs:        return "ARMISD::VMULLs";
  case ARMISD::VMULLu:        return "ARMISD::VMULLu";
  case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX:          return "ARMISD::FMAX";
  case ARMISD::FMIN:          return "ARMISD::FMIN";
  case ARMISD::BFI:           return "ARMISD::BFI";
  case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
  case ARMISD::VBSL:          return "ARMISD::VBSL";
  case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
  }
}

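// Editorial note: for vector types the result below is an integer vector of
// the same shape, e.g. a setcc on v4f32 yields a v4i32 all-ones/all-zeros
// lane mask, matching the ZeroOrNegativeOneBooleanContent setting in the
// constructor and the masks NEON comparisons such as VCEQ produce.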
EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}
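
// Editorial note (an inference, not from the original source): these bounds
// track the immediate offset range of the load/store encodings used to
// address a global, roughly the 12-bit offset (0..4095) of ARM-mode LDR/STR
// versus the much smaller Thumb1 immediate range.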

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
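
// Editorial note: HI/HS/LO/LS are the unsigned counterparts of GT/GE/LT/LE;
// both families are evaluated from the same CPSR flags set by CMP, but test
// different flag combinations.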

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
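
// Editorial note: some FP conditions need two ARM predicates. E.g. SETONE
// (ordered and not equal) has no single ARM condition, so it maps to MI with
// CondCode2 = GT; callers that see CondCode2 != ARMCC::AL emit a second
// conditionally-executed instruction to cover the other half.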

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFnForNode(CallingConv::ID CC,bool Return,bool isVarArg) const1065 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1066                                                  bool Return,
1067                                                  bool isVarArg) const {
1068   switch (CC) {
1069   default:
1070     llvm_unreachable("Unsupported calling convention");
1071   case CallingConv::Fast:
1072     if (Subtarget->hasVFP2() && !isVarArg) {
1073       if (!Subtarget->isAAPCS_ABI())
1074         return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1075       // For AAPCS ABI targets, just use VFP variant of the calling convention.
1076       return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1077     }
1078     // Fallthrough
1079   case CallingConv::C: {
1080     // Use target triple & subtarget features to do actual dispatch.
1081     if (!Subtarget->isAAPCS_ABI())
1082       return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1083     else if (Subtarget->hasVFP2() &&
1084              FloatABIType == FloatABI::Hard && !isVarArg)
1085       return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1086     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1087   }
1088   case CallingConv::ARM_AAPCS_VFP:
1089     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1090   case CallingConv::ARM_AAPCS:
1091     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1092   case CallingConv::ARM_APCS:
1093     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1094   case CallingConv::GHC:
1095     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1096   }
1097 }

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return */ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
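
// For illustration, the DAG built above for an f64 returned under a
// soft-float ABI looks roughly like (node names schematic):
//
//   Lo:  i32 = CopyFromReg r0
//   Hi:  i32 = CopyFromReg r1
//   Val: f64 = ARMISD::VMOVDRR Lo, Hi
//
// and a v2f64 result repeats the pattern for the next register pair, with
// two INSERT_VECTOR_ELT nodes stitching the halves back into one vector.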

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
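
// For illustration, the outgoing direction of the f64 split: a single node,
//
//   fmrrd: i32,i32 = ARMISD::VMOVRRD Arg
//
// whose first result feeds VA's register (say r0) and whose second result
// feeds NextVA's register (r1), or, when the GPRs ran out mid-argument, a
// stack slot written through LowerMemOpCallTo.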

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Disable tail calls if they're not supported.
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                                                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return */ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments.
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into
    // pieces.
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      if (CCInfo.isFirstByValRegValid()) {
        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
        unsigned int i, j;
        for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }
        offset = ARM::R4 - CCInfo.getFirstByValReg();
        CCInfo.clearFirstByValReg();
      }

      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
      SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
                                StkPtrOff);
      SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
      SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
                                         MVT::i32);
      // TODO: Disable AlwaysInline when it becomes possible
      //       to emit a nested call sequence.
      MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                                          Flags.getByValAlign(),
                                          /*isVolatile=*/false,
                                          /*AlwaysInline=*/true,
                                          MachinePointerInfo(0),
                                          MachinePointerInfo(0)));

    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers, so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert(getTargetMachine().getRelocationModel() == Reloc::Static &&
           "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address.
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register.
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address.
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 0);
      // Get the address of the callee into a register.
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      // On ELF targets for PIC code, direct calls should go through the PLT.
      unsigned OpFlags = 0;
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      unsigned OpFlags = 0;
      // On ELF targets for PIC code, direct calls should go through the PLT.
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
    }
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    CallOpc = (isDirect || Subtarget->hasV5TOps())
      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
      : ARMISD::CALL_NOLINK;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall)
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}
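
// For illustration, the overall shape of the DAG this function produces for
// a simple non-tail call (schematic, registers hypothetical):
//
//   ch = callseq_start(NumBytes)
//   ch = CopyToReg ch, r0, arg0           // glued copies for register args
//   ch = ARMISD::CALL ch, callee, r0, ...
//   ch = callseq_end(NumBytes)
//   ... CopyFromReg nodes added by LowerCallResult for the return value
//
// A tail call instead ends in a single ARMISD::TC_RETURN node, and the
// sibcall case skips the callseq bracket entirely.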

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");
  if ((!State->isFirstByValRegValid()) &&
      (ARM::R0 <= reg) && (reg <= ARM::R3)) {
    State->setFirstByValReg(reg);
    // At a call site, a byval parameter that is split between
    // registers and memory needs its size truncated here.  In a
    // function prologue, such byval parameters are reassembled in
    // memory, and are not truncated.
    if (State->getCallOrPrologue() == Call) {
      unsigned excess = 4 * (ARM::R4 - reg);
      assert(size >= excess && "expected larger existing stack allocation");
      size -= excess;
    }
  }
  // Confiscate any remaining parameter registers to preclude their
  // assignment to subsequent parameters.
  while (State->AllocateReg(GPRArgRegs, 4))
    ;
}
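
// Worked example (hypothetical): a 16-byte byval aggregate arrives when r0
// and r1 are already taken. AllocateReg returns r2, so r2/r3 carry the first
// 8 bytes; at a call site the size is cut by excess = 4 * (R4 - R2) = 8, so
// only the remaining 8 bytes go to the stack, and the loop above has already
// confiscated r3 so no later parameter can claim it.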

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const ARMInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Do not sibcall optimize vararg calls unless the call site is not passing
  // any arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
  // LR.  This means if we need to reload LR, it takes an extra instruction,
  // which outweighs the value of the tail call; but here we don't know yet
  // whether LR is going to be used.  Probably the right approach is to
  // generate the tail call here and turn it back into CALL/RET in
  // emitEpilogue if LR is used.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters.  We don't currently do this
  // case.
  if (Subtarget->isThumb1Only())
    return false;

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

    SmallVector<CCValAssign, 16> RVLocs2;
    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
                      getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
    CCInfo.AnalyzeCallOperands(Outs,
                               CCAssignFnForNode(CalleeCC, false, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();

      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const ARMInstrInfo *TII =
        ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations.  The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }
  }

  return true;
}
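
// For illustration, a call this predicate accepts as a sibcall on ARM or
// Thumb2 (hypothetical IR):
//
//   define i32 @caller(i32 %x) {
//     %r = tail call i32 @callee(i32 %x)   ; same CC, argument fits in r0
//     ret i32 %r
//   }
//
// A variadic call site that passes arguments, an sret mismatch, or any
// Thumb1 caller is rejected by the early exits above.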

SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
                                               isVarArg));

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(1), Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
  }

  SDValue result;
  if (Flag.getNode())
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else // Return Void
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);

  return result;
}
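
// For illustration, returning an f64 under a soft-float ABI yields roughly
// (schematic):
//
//   t:  i32,i32 = ARMISD::VMOVRRD Arg
//   ch = CopyToReg ch, r0, t
//   ch = CopyToReg ch, r1, t:1
//   ARMISD::RET_FLAG ch, glue
//
// which the selector turns into the fmrrd-plus-return sequence the
// "Legalize ret f64 -> ret 2 x i32" comment above alludes to.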

bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  unsigned NumCopies = 0;
  SDNode* Copies[2];
  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg) {
    Copies[NumCopies++] = Use;
  } else if (Use->getOpcode() == ARMISD::VMOVRRD) {
    // f64 returned in a pair of GPRs.
    for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies[UI.getUse().getResNo()] = *UI;
      ++NumCopies;
    }
  } else if (Use->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Use->hasNUsesOfValue(1, 0))
      return false;
    Use = *Use->use_begin();
    if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0))
      return false;
    Copies[NumCopies++] = Use;
  } else {
    return false;
  }

  if (NumCopies != 1 && NumCopies != 2)
    return false;

  bool HasRet = false;
  for (unsigned i = 0; i < NumCopies; ++i) {
    SDNode *Copy = Copies[i];
    for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() == ISD::CopyToReg) {
        SDNode *Use = *UI;
        if (Use == Copies[0] || Use == Copies[1])
          continue;
        return false;
      }
      if (UI->getOpcode() != ARMISD::RET_FLAG)
        return false;
      HasRet = true;
    }
  }

  return HasRet;
}

bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!EnableARMTailCalls)
    return false;

  if (!CI->isTailCall())
    return false;

  return !Subtarget->isThumb1Only();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterparts wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOVi.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  EVT PtrVT = Op.getValueType();
  // FIXME: there is no actual debug info here.
  DebugLoc dl = Op.getDebugLoc();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}
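
// For illustration, the rewrite LowerConstantPool performs (schematic):
//
//   before: t1: i32 = ConstantPool<cst>      // Select(t1) would just be t1
//   after:  t2: i32 = ARMISD::Wrapper TargetConstantPool<cst>
//
// giving instruction selection a target node it can fold into a pc-relative
// constant-pool load.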

unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  DebugLoc DL = Op.getDebugLoc();
  EVT PtrVT = getPointerTy();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
                                      ARMCP::CPBlockAddress, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
                               MachinePointerInfo::getConstantPool(),
                               false, false, 0);
  if (RelocM == Reloc::Static)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}
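
// The constant-pool + PIC_ADD idiom used above (and in several lowerings
// below) computes, roughly:
//
//   tmp  = load(constant pool entry)   // entry holds Addr - (LPC_n + PCAdj)
//   addr = tmp + pc_at_LPC_n           // ARMISD::PIC_ADD
//
// PCAdj is 8 in ARM mode and 4 in Thumb mode because that is how far ahead
// of the reading instruction the pc points.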

// Lower ISD::GlobalTLSAddress using the "general dynamic" model.
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  DebugLoc dl = GA->getDebugLoc();
  EVT PtrVT = getPointerTy();
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV =
    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
                         MachinePointerInfo::getConstantPool(),
                         false, false, 0);
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // Call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);
  // FIXME: is there useful debug info available here?
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()),
                false, false, false, false,
                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
  return CallResult.first;
}
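
// For illustration (hypothetical IR), given
//
//   @x = thread_local global i32 0
//
// the general-dynamic lowering above materializes the address of a TLSGD
// constant-pool descriptor for @x via the PIC_ADD idiom and passes it to
// __tls_get_addr, whose return value (in r0) is the address of @x for the
// current thread.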

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const {
  const GlobalValue *GV = GA->getGlobal();
  DebugLoc dl = GA->getDebugLoc();
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy();
  // Get the Thread Pointer.
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (GV->isDeclaration()) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, 0);
  } else {
    // Local exec model.
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
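
// For illustration, both paths above reduce to
//
//   &x = ThreadPointer + Offset
//
// where initial-exec loads Offset through the GOT (the GOTTPOFF entry, one
// extra load via the PIC_ADD idiom) and local-exec reads the constant TPOFF
// straight out of the constant pool.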

SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  // TODO: implement the "local dynamic" model.
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS model;
  // otherwise use the "Local Exec" TLS model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG);
  else
    return LowerToTLSExecModels(GA, DAG);
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (RelocM == Reloc::PIC_) {
    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV,
                                      UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                 CPAddr,
                                 MachinePointerInfo::getConstantPool(),
                                 false, false, 0);
    SDValue Chain = Result.getValue(1);
    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
    if (!UseGOTOFF)
      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
                           MachinePointerInfo::getGOT(), false, false, 0);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via a
  // movw/movt pair. This is always cheaper.
  if (Subtarget->useMovt()) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                       MachinePointerInfo::getConstantPool(),
                       false, false, 0);
  }
}
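
// For illustration, the movw/movt path above becomes a two-instruction
// materialization such as
//
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym
//
// instead of a pc-relative constant-pool load; the sketch assumes a direct
// (non-GOT) reference.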

SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // FIXME: Enable this for static codegen when tool issues are fixed.
  if (Subtarget->useMovt() && RelocM != Reloc::Static) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    if (RelocM == Reloc::Static)
      return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                         DAG.getTargetGlobalAddress(GV, dl, PtrVT));

    unsigned Wrapper = (RelocM == Reloc::PIC_)
      ? ARMISD::WrapperPIC : ARMISD::WrapperDYN;
    SDValue Result = DAG.getNode(Wrapper, dl, PtrVT,
                                 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
    if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
      Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                           MachinePointerInfo::getGOT(), false, false, 0);
    return Result;
  }

  unsigned ARMPCLabelIndex = 0;
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
  } else {
    ARMPCLabelIndex = AFI->createPICLabelUId();
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue,
                                      PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);

  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                               MachinePointerInfo::getConstantPool(),
                               false, false, 0);
  SDValue Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
    Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
                         false, false, 0);

  return Result;
}

SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(Subtarget->isTargetELF() &&
         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
                                  ARMPCLabelIndex, PCAdj);
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                               MachinePointerInfo::getConstantPool(),
                               false, false, 0);
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG)
  const {
  DebugLoc dl = Op.getDebugLoc();
  return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  SDValue Val = DAG.getConstant(0, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, MVT::i32));
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc dl = Op.getDebugLoc();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_thread_pointer: {
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy();
    DebugLoc dl = Op.getDebugLoc();
    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
    SDValue CPAddr;
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                  MachinePointerInfo::getConstantPool(),
                  false, false, 0);

    if (RelocM == Reloc::PIC_) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  }
}

static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *Subtarget) {
  DebugLoc dl = Op.getDebugLoc();
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 CPUs can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, MVT::i32));
  }

  SDValue Op5 = Op.getOperand(5);
  bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
  unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);

  ARM_MB::MemBOpt DMBOpt;
  if (isDeviceBarrier)
    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
  else
    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
  return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(DMBOpt, MVT::i32));
}
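
// Summary of the mapping above, in terms of the emitted barrier:
//
//   device barrier, store-only -> dmb st
//   device barrier, full       -> dmb sy
//   normal barrier, store-only -> dmb ishst
//   normal barrier, full       -> dmb ish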
2301 
2302 
LowerATOMIC_FENCE(SDValue Op,SelectionDAG & DAG,const ARMSubtarget * Subtarget)2303 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
2304                                  const ARMSubtarget *Subtarget) {
2305   // FIXME: handle "fence singlethread" more efficiently.
2306   DebugLoc dl = Op.getDebugLoc();
2307   if (!Subtarget->hasDataBarrier()) {
2308     // Some ARMv6 CPUs can support data barriers with an mcr instruction.
2309     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
2310     // here.
2311     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
2312            "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
2313     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2314                        DAG.getConstant(0, MVT::i32));
2315   }
2316 
2317   return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2318                      DAG.getConstant(ARM_MB::ISH, MVT::i32));
2319 }
2320 
2321 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2322                              const ARMSubtarget *Subtarget) {
2323   // ARM pre-v5TE and Thumb1 do not have preload instructions.
2324   if (!(Subtarget->isThumb2() ||
2325         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2326     // Just preserve the chain.
2327     return Op.getOperand(0);
2328 
2329   DebugLoc dl = Op.getDebugLoc();
2330   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2331   if (!isRead &&
2332       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
2333     // ARMv7 with MP extension has PLDW.
2334     return Op.getOperand(0);
2335 
2336   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2337   if (Subtarget->isThumb()) {
2338     // Invert the bits.
2339     isRead = ~isRead & 1;
2340     isData = ~isData & 1;
2341   }
2342 
2343   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
2344                      Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
2345                      DAG.getConstant(isData, MVT::i32));
2346 }
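
// Illustrative sketch of the operand decoding above: llvm.prefetch encodes
// rw as 0 for read and 1 for write, so isRead is the inverted bit; the cache
// type operand is 1 for data and 0 for instruction; and the Thumb2 preload
// encodings expect the opposite polarity of both bits.
static inline void decodePrefetchBitsSketch(unsigned RW, unsigned CacheTy,
                                            bool IsThumb, unsigned &isRead,
                                            unsigned &isData) {
  isRead = ~RW & 1;
  isData = CacheTy & 1;
  if (IsThumb) {
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }
}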
2347 
2348 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
2349   MachineFunction &MF = DAG.getMachineFunction();
2350   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
2351 
2352   // vastart just stores the address of the VarArgsFrameIndex slot into the
2353   // memory location argument.
2354   DebugLoc dl = Op.getDebugLoc();
2355   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2356   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2357   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2358   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2359                       MachinePointerInfo(SV), false, false, 0);
2360 }
2361 
2362 SDValue
2363 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2364                                         SDValue &Root, SelectionDAG &DAG,
2365                                         DebugLoc dl) const {
2366   MachineFunction &MF = DAG.getMachineFunction();
2367   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2368 
2369   TargetRegisterClass *RC;
2370   if (AFI->isThumb1OnlyFunction())
2371     RC = ARM::tGPRRegisterClass;
2372   else
2373     RC = ARM::GPRRegisterClass;
2374 
2375   // Transform the arguments stored in physical registers into virtual ones.
2376   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2377   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2378 
2379   SDValue ArgValue2;
2380   if (NextVA.isMemLoc()) {
2381     MachineFrameInfo *MFI = MF.getFrameInfo();
2382     int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2383 
2384     // Create load node to retrieve arguments from the stack.
2385     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2386     ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
2387                             MachinePointerInfo::getFixedStack(FI),
2388                             false, false, 0);
2389   } else {
2390     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2391     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2392   }
2393 
2394   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
2395 }
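
// Illustrative sketch of what the final VMOVDRR above amounts to, assuming
// the little-endian AAPCS layout where the first GPR holds the low word of
// the f64 (__builtin_memcpy is the GCC/Clang builtin, used here to avoid an
// extra include):
static inline double f64FromRegPairSketch(unsigned Lo, unsigned Hi) {
  unsigned long long Bits = ((unsigned long long)Hi << 32) | Lo;
  double D;
  __builtin_memcpy(&D, &Bits, sizeof(D));
  return D;
}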
2396 
2397 void
2398 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
2399                                   unsigned &VARegSize, unsigned &VARegSaveSize)
2400   const {
2401   unsigned NumGPRs;
2402   if (CCInfo.isFirstByValRegValid())
2403     NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
2404   else {
2405     unsigned firstUnalloced;
2406     firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
2407                                                 sizeof(GPRArgRegs) /
2408                                                 sizeof(GPRArgRegs[0]));
2409     NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
2410   }
2411 
2412   unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
2413   VARegSize = NumGPRs * 4;
2414   VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
2415 }
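
// Illustrative sketch of the save-area padding above: the byte size of the
// remaining argument registers is rounded up to the stack alignment (a power
// of two) with the usual mask trick, e.g. 12 bytes at 8-byte alignment pads
// to 16.
static inline unsigned roundUpToStackAlignSketch(unsigned Size,
                                                 unsigned Align) {
  return (Size + Align - 1) & ~(Align - 1);
}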
2416 
2417 // The remaining GPRs hold either the beginning of variable-argument
2418 // data, or the beginning of an aggregate passed by value (usually
2419 // byval).  Either way, we allocate stack slots adjacent to the data
2420 // provided by our caller, and store the unallocated registers there.
2421 // If this is a variadic function, the va_list pointer will begin with
2422 // these values; otherwise, this reassembles a (byval) structure that
2423 // was split between registers and memory.
2424 void
2425 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
2426                                         DebugLoc dl, SDValue &Chain,
2427                                         unsigned ArgOffset) const {
2428   MachineFunction &MF = DAG.getMachineFunction();
2429   MachineFrameInfo *MFI = MF.getFrameInfo();
2430   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2431   unsigned firstRegToSaveIndex;
2432   if (CCInfo.isFirstByValRegValid())
2433     firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0;
2434   else {
2435     firstRegToSaveIndex = CCInfo.getFirstUnallocated
2436       (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
2437   }
2438 
2439   unsigned VARegSize, VARegSaveSize;
2440   computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
2441   if (VARegSaveSize) {
2442     // If this function is vararg, store any remaining integer argument regs
2443     // to their spots on the stack so that they may be loaded by dereferencing
2444     // the result of va_arg.
2445     AFI->setVarArgsRegSaveSize(VARegSaveSize);
2446     AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize,
2447                                                      ArgOffset + VARegSaveSize
2448                                                      - VARegSize,
2449                                                      false));
2450     SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
2451                                     getPointerTy());
2452 
2453     SmallVector<SDValue, 4> MemOps;
2454     for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
2455       TargetRegisterClass *RC;
2456       if (AFI->isThumb1OnlyFunction())
2457         RC = ARM::tGPRRegisterClass;
2458       else
2459         RC = ARM::GPRRegisterClass;
2460 
2461       unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
2462       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2463       SDValue Store =
2464         DAG.getStore(Val.getValue(1), dl, Val, FIN,
2465                  MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
2466                      false, false, 0);
2467       MemOps.push_back(Store);
2468       FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2469                         DAG.getConstant(4, getPointerTy()));
2470     }
2471     if (!MemOps.empty())
2472       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2473                           &MemOps[0], MemOps.size());
2474   } else
2475     // This will point to the next argument passed via stack.
2476     AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
2477 }
2478 
2479 SDValue
2480 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
2481                                         CallingConv::ID CallConv, bool isVarArg,
2482                                         const SmallVectorImpl<ISD::InputArg>
2483                                           &Ins,
2484                                         DebugLoc dl, SelectionDAG &DAG,
2485                                         SmallVectorImpl<SDValue> &InVals)
2486                                           const {
2487   MachineFunction &MF = DAG.getMachineFunction();
2488   MachineFrameInfo *MFI = MF.getFrameInfo();
2489 
2490   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2491 
2492   // Assign locations to all of the incoming arguments.
2493   SmallVector<CCValAssign, 16> ArgLocs;
2494   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2495                     getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
2496   CCInfo.AnalyzeFormalArguments(Ins,
2497                                 CCAssignFnForNode(CallConv, /* Return*/ false,
2498                                                   isVarArg));
2499 
2500   SmallVector<SDValue, 16> ArgValues;
2501   int lastInsIndex = -1;
2502 
2503   SDValue ArgValue;
2504   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2505     CCValAssign &VA = ArgLocs[i];
2506 
2507     // Arguments stored in registers.
2508     if (VA.isRegLoc()) {
2509       EVT RegVT = VA.getLocVT();
2510 
2511       if (VA.needsCustom()) {
2512         // f64 and vector types are split up into multiple registers or
2513         // combinations of registers and stack slots.
2514         if (VA.getLocVT() == MVT::v2f64) {
2515           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
2516                                                    Chain, DAG, dl);
2517           VA = ArgLocs[++i]; // skip ahead to next loc
2518           SDValue ArgValue2;
2519           if (VA.isMemLoc()) {
2520             int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
2521             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2522             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
2523                                     MachinePointerInfo::getFixedStack(FI),
2524                                     false, false, 0);
2525           } else {
2526             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
2527                                              Chain, DAG, dl);
2528           }
2529           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2530           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
2531                                  ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
2532           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
2533                                  ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
2534         } else
2535           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
2536 
2537       } else {
2538         TargetRegisterClass *RC;
2539 
2540         if (RegVT == MVT::f32)
2541           RC = ARM::SPRRegisterClass;
2542         else if (RegVT == MVT::f64)
2543           RC = ARM::DPRRegisterClass;
2544         else if (RegVT == MVT::v2f64)
2545           RC = ARM::QPRRegisterClass;
2546         else if (RegVT == MVT::i32)
2547           RC = (AFI->isThumb1OnlyFunction() ?
2548                 ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
2549         else
2550           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
2551 
2552         // Transform the arguments in physical registers into virtual ones.
2553         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2554         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2555       }
2556 
2557       // If this is an 8- or 16-bit value, it is really passed promoted
2558       // to 32 bits.  Insert an assert[sz]ext to capture this, then
2559       // truncate to the right size.
2560       switch (VA.getLocInfo()) {
2561       default: llvm_unreachable("Unknown loc info!");
2562       case CCValAssign::Full: break;
2563       case CCValAssign::BCvt:
2564         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2565         break;
2566       case CCValAssign::SExt:
2567         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2568                                DAG.getValueType(VA.getValVT()));
2569         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2570         break;
2571       case CCValAssign::ZExt:
2572         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2573                                DAG.getValueType(VA.getValVT()));
2574         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2575         break;
2576       }
2577 
2578       InVals.push_back(ArgValue);
2579 
2580     } else { // VA.isRegLoc()
2581 
2582       // sanity check
2583       assert(VA.isMemLoc());
2584       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
2585 
2586       int index = ArgLocs[i].getValNo();
2587 
2588       // Some Ins[] entries become multiple ArgLoc[] entries.
2589       // Process them only once.
2590       if (index != lastInsIndex)
2591         {
2592           ISD::ArgFlagsTy Flags = Ins[index].Flags;
2593           // FIXME: For now, all byval parameter objects are marked mutable.
2594           // This can be changed with more analysis.
2595           // In case of tail call optimization, mark all arguments mutable,
2596           // since they could be overwritten by the lowering of arguments in
2597           // case of a tail call.
2598           if (Flags.isByVal()) {
2599             unsigned VARegSize, VARegSaveSize;
2600             computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
2601             VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0);
2602             unsigned Bytes = Flags.getByValSize() - VARegSize;
2603             if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2604             int FI = MFI->CreateFixedObject(Bytes,
2605                                             VA.getLocMemOffset(), false);
2606             InVals.push_back(DAG.getFrameIndex(FI, getPointerTy()));
2607           } else {
2608             int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
2609                                             VA.getLocMemOffset(), true);
2610 
2611             // Create load nodes to retrieve arguments from the stack.
2612             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2613             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
2614                                          MachinePointerInfo::getFixedStack(FI),
2615                                          false, false, 0));
2616           }
2617           lastInsIndex = index;
2618         }
2619     }
2620   }
2621 
2622   // varargs
2623   if (isVarArg)
2624     VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset());
2625 
2626   return Chain;
2627 }
2628 
2629 /// isFloatingPointZero - Return true if this is +0.0.
2630 static bool isFloatingPointZero(SDValue Op) {
2631   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
2632     return CFP->getValueAPF().isPosZero();
2633   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
2634     // Maybe this has already been legalized into the constant pool?
2635     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
2636       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
2637       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
2638         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
2639           return CFP->getValueAPF().isPosZero();
2640     }
2641   }
2642   return false;
2643 }
2644 
2645 /// Returns an appropriate ARM CMP (cmp) and the corresponding condition
2646 /// code for the given operands.
2647 SDValue
2648 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2649                              SDValue &ARMcc, SelectionDAG &DAG,
2650                              DebugLoc dl) const {
2651   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
2652     unsigned C = RHSC->getZExtValue();
2653     if (!isLegalICmpImmediate(C)) {
2654       // Constant does not fit, try adjusting it by one?
2655       switch (CC) {
2656       default: break;
2657       case ISD::SETLT:
2658       case ISD::SETGE:
2659         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
2660           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2661           RHS = DAG.getConstant(C-1, MVT::i32);
2662         }
2663         break;
2664       case ISD::SETULT:
2665       case ISD::SETUGE:
2666         if (C != 0 && isLegalICmpImmediate(C-1)) {
2667           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2668           RHS = DAG.getConstant(C-1, MVT::i32);
2669         }
2670         break;
2671       case ISD::SETLE:
2672       case ISD::SETGT:
2673         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
2674           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2675           RHS = DAG.getConstant(C+1, MVT::i32);
2676         }
2677         break;
2678       case ISD::SETULE:
2679       case ISD::SETUGT:
2680         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
2681           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2682           RHS = DAG.getConstant(C+1, MVT::i32);
2683         }
2684         break;
2685       }
2686     }
2687   }
2688 
2689   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2690   ARMISD::NodeType CompareType;
2691   switch (CondCode) {
2692   default:
2693     CompareType = ARMISD::CMP;
2694     break;
2695   case ARMCC::EQ:
2696   case ARMCC::NE:
2697     // Uses only Z Flag
2698     CompareType = ARMISD::CMPZ;
2699     break;
2700   }
2701   ARMcc = DAG.getConstant(CondCode, MVT::i32);
2702   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
2703 }
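
// The rewrite above rests on these identities over 32-bit operands; stated
// compactly (the guards are exactly the C != ... checks in the code):
//   x <   C  ==  x <=  C-1   (signed,   C != 0x80000000)
//   x >=  C  ==  x >   C-1   (signed,   C != 0x80000000)
//   x <u  C  ==  x <=u C-1   (unsigned, C != 0)
//   x <=  C  ==  x <   C+1   (signed,   C != 0x7fffffff)
//   x <=u C  ==  x <u  C+1   (unsigned, C != 0xffffffff)
// A one-case illustrative sketch:
static inline bool sltViaSleSketch(int X, int C) {
  return X <= C - 1; // equals (X < C) whenever C != INT_MIN
}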
2704 
2705 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2706 SDValue
2707 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
2708                              DebugLoc dl) const {
2709   SDValue Cmp;
2710   if (!isFloatingPointZero(RHS))
2711     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
2712   else
2713     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
2714   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
2715 }
2716 
2717 /// duplicateCmp - Glue values can have only one use, so this function
2718 /// duplicates a comparison node.
2719 SDValue
2720 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
2721   unsigned Opc = Cmp.getOpcode();
2722   DebugLoc DL = Cmp.getDebugLoc();
2723   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
2724     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
2725 
2726   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
2727   Cmp = Cmp.getOperand(0);
2728   Opc = Cmp.getOpcode();
2729   if (Opc == ARMISD::CMPFP)
2730     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
2731   else {
2732     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
2733     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
2734   }
2735   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
2736 }
2737 
2738 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2739   SDValue Cond = Op.getOperand(0);
2740   SDValue SelectTrue = Op.getOperand(1);
2741   SDValue SelectFalse = Op.getOperand(2);
2742   DebugLoc dl = Op.getDebugLoc();
2743 
2744   // Convert:
2745   //
2746   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
2747   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
2748   //
2749   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
2750     const ConstantSDNode *CMOVTrue =
2751       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
2752     const ConstantSDNode *CMOVFalse =
2753       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2754 
2755     if (CMOVTrue && CMOVFalse) {
2756       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
2757       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
2758 
2759       SDValue True;
2760       SDValue False;
2761       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
2762         True = SelectTrue;
2763         False = SelectFalse;
2764       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
2765         True = SelectFalse;
2766         False = SelectTrue;
2767       }
2768 
2769       if (True.getNode() && False.getNode()) {
2770         EVT VT = Op.getValueType();
2771         SDValue ARMcc = Cond.getOperand(2);
2772         SDValue CCR = Cond.getOperand(3);
2773         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
2774         assert(True.getValueType() == VT);
2775         return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
2776       }
2777     }
2778   }
2779 
2780   return DAG.getSelectCC(dl, Cond,
2781                          DAG.getConstant(0, Cond.getValueType()),
2782                          SelectTrue, SelectFalse, ISD::SETNE);
2783 }
2784 
2785 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2786   EVT VT = Op.getValueType();
2787   SDValue LHS = Op.getOperand(0);
2788   SDValue RHS = Op.getOperand(1);
2789   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2790   SDValue TrueVal = Op.getOperand(2);
2791   SDValue FalseVal = Op.getOperand(3);
2792   DebugLoc dl = Op.getDebugLoc();
2793 
2794   if (LHS.getValueType() == MVT::i32) {
2795     SDValue ARMcc;
2796     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2797     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2798     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp);
2799   }
2800 
2801   ARMCC::CondCodes CondCode, CondCode2;
2802   FPCCToARMCC(CC, CondCode, CondCode2);
2803 
2804   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2805   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2806   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2807   SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
2808                                ARMcc, CCR, Cmp);
2809   if (CondCode2 != ARMCC::AL) {
2810     SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
2811     // FIXME: Needs another CMP because flag can have but one use.
2812     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
2813     Result = DAG.getNode(ARMISD::CMOV, dl, VT,
2814                          Result, TrueVal, ARMcc2, CCR, Cmp2);
2815   }
2816   return Result;
2817 }
2818 
2819 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
2820 /// to morph to an integer compare sequence.
2821 static bool canChangeToInt(SDValue Op, bool &SeenZero,
2822                            const ARMSubtarget *Subtarget) {
2823   SDNode *N = Op.getNode();
2824   if (!N->hasOneUse())
2825     // Otherwise it requires moving the value from fp to integer registers.
2826     return false;
2827   if (!N->getNumValues())
2828     return false;
2829   EVT VT = Op.getValueType();
2830   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
2831     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
2832     // vmrs are very slow, e.g. cortex-a8.
2833     return false;
2834 
2835   if (isFloatingPointZero(Op)) {
2836     SeenZero = true;
2837     return true;
2838   }
2839   return ISD::isNormalLoad(N);
2840 }
2841 
2842 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
2843   if (isFloatingPointZero(Op))
2844     return DAG.getConstant(0, MVT::i32);
2845 
2846   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
2847     return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2848                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
2849                        Ld->isVolatile(), Ld->isNonTemporal(),
2850                        Ld->getAlignment());
2851 
2852   llvm_unreachable("Unknown VFP cmp argument!");
2853 }
2854 
2855 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
2856                            SDValue &RetVal1, SDValue &RetVal2) {
2857   if (isFloatingPointZero(Op)) {
2858     RetVal1 = DAG.getConstant(0, MVT::i32);
2859     RetVal2 = DAG.getConstant(0, MVT::i32);
2860     return;
2861   }
2862 
2863   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
2864     SDValue Ptr = Ld->getBasePtr();
2865     RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2866                           Ld->getChain(), Ptr,
2867                           Ld->getPointerInfo(),
2868                           Ld->isVolatile(), Ld->isNonTemporal(),
2869                           Ld->getAlignment());
2870 
2871     EVT PtrType = Ptr.getValueType();
2872     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
2873     SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
2874                                  PtrType, Ptr, DAG.getConstant(4, PtrType));
2875     RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2876                           Ld->getChain(), NewPtr,
2877                           Ld->getPointerInfo().getWithOffset(4),
2878                           Ld->isVolatile(), Ld->isNonTemporal(),
2879                           NewAlign);
2880     return;
2881   }
2882 
2883   llvm_unreachable("Unknown VFP cmp argument!");
2884 }
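
// Illustrative sketch of the expansion above on a little-endian target: an
// f64 in memory is two adjacent i32 words, so the second load at offset 4
// picks up the high half (__builtin_memcpy is the GCC/Clang builtin):
static inline void splitF64Sketch(double D, unsigned &Lo, unsigned &Hi) {
  unsigned long long Bits;
  __builtin_memcpy(&Bits, &D, sizeof(Bits));
  Lo = (unsigned)Bits;
  Hi = (unsigned)(Bits >> 32);
}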
2885 
2886 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
2887 /// f32 and even f64 comparisons to integer ones.
2888 SDValue
2889 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
2890   SDValue Chain = Op.getOperand(0);
2891   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2892   SDValue LHS = Op.getOperand(2);
2893   SDValue RHS = Op.getOperand(3);
2894   SDValue Dest = Op.getOperand(4);
2895   DebugLoc dl = Op.getDebugLoc();
2896 
2897   bool SeenZero = false;
2898   if (canChangeToInt(LHS, SeenZero, Subtarget) &&
2899       canChangeToInt(RHS, SeenZero, Subtarget) &&
2900       // If one of the operands is zero, it's safe to ignore the NaN case since
2901       // we only care about equality comparisons.
2902       (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
2903     // If unsafe fp math optimization is enabled and there are no other uses of
2904     // the CMP operands, and the condition code is EQ or NE, we can optimize it
2905     // to an integer comparison.
2906     if (CC == ISD::SETOEQ)
2907       CC = ISD::SETEQ;
2908     else if (CC == ISD::SETUNE)
2909       CC = ISD::SETNE;
2910 
2911     SDValue ARMcc;
2912     if (LHS.getValueType() == MVT::f32) {
2913       LHS = bitcastf32Toi32(LHS, DAG);
2914       RHS = bitcastf32Toi32(RHS, DAG);
2915       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2916       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2917       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2918                          Chain, Dest, ARMcc, CCR, Cmp);
2919     }
2920 
2921     SDValue LHS1, LHS2;
2922     SDValue RHS1, RHS2;
2923     expandf64Toi32(LHS, DAG, LHS1, LHS2);
2924     expandf64Toi32(RHS, DAG, RHS1, RHS2);
2925     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2926     ARMcc = DAG.getConstant(CondCode, MVT::i32);
2927     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2928     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
2929     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
2930   }
2931 
2932   return SDValue();
2933 }
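
// Illustrative sketch of why the transformation is sound: for two f32 values
// that are neither NaN (NaN != NaN would be lost) nor oppositely-signed
// zeros (+0.0 == -0.0 despite different bit patterns, which is why a zero
// operand is routed through the constant-zero path above), equality of
// values coincides with equality of bit patterns:
static inline bool f32EqViaBitsSketch(float A, float B) {
  unsigned IA, IB;
  __builtin_memcpy(&IA, &A, sizeof(IA));
  __builtin_memcpy(&IB, &B, sizeof(IB));
  return IA == IB;
}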
2934 
2935 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2936   SDValue Chain = Op.getOperand(0);
2937   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2938   SDValue LHS = Op.getOperand(2);
2939   SDValue RHS = Op.getOperand(3);
2940   SDValue Dest = Op.getOperand(4);
2941   DebugLoc dl = Op.getDebugLoc();
2942 
2943   if (LHS.getValueType() == MVT::i32) {
2944     SDValue ARMcc;
2945     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2946     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2947     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2948                        Chain, Dest, ARMcc, CCR, Cmp);
2949   }
2950 
2951   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
2952 
2953   if (UnsafeFPMath &&
2954       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
2955        CC == ISD::SETNE || CC == ISD::SETUNE)) {
2956     SDValue Result = OptimizeVFPBrcond(Op, DAG);
2957     if (Result.getNode())
2958       return Result;
2959   }
2960 
2961   ARMCC::CondCodes CondCode, CondCode2;
2962   FPCCToARMCC(CC, CondCode, CondCode2);
2963 
2964   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2965   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2966   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2967   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2968   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
2969   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2970   if (CondCode2 != ARMCC::AL) {
2971     ARMcc = DAG.getConstant(CondCode2, MVT::i32);
2972     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
2973     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2974   }
2975   return Res;
2976 }
2977 
2978 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
2979   SDValue Chain = Op.getOperand(0);
2980   SDValue Table = Op.getOperand(1);
2981   SDValue Index = Op.getOperand(2);
2982   DebugLoc dl = Op.getDebugLoc();
2983 
2984   EVT PTy = getPointerTy();
2985   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
2986   ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
2987   SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
2988   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
2989   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
2990   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
2991   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
2992   if (Subtarget->isThumb2()) {
2993     // Thumb2 uses a two-level jump. That is, it jumps into the jump table
2994     // which does another jump to the destination. This also makes it easier
2995     // to translate it to TBB / TBH later.
2996     // FIXME: This might not work if the function is extremely large.
2997     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
2998                        Addr, Op.getOperand(2), JTI, UId);
2999   }
3000   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
3001     Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
3002                        MachinePointerInfo::getJumpTable(),
3003                        false, false, 0);
3004     Chain = Addr.getValue(1);
3005     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
3006     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
3007   } else {
3008     Addr = DAG.getLoad(PTy, dl, Chain, Addr,
3009                        MachinePointerInfo::getJumpTable(), false, false, 0);
3010     Chain = Addr.getValue(1);
3011     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
3012   }
3013 }
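
// Illustrative sketch of the two non-Thumb2 paths above: the entry is loaded
// from TableAddr + Index*4; with static relocation the loaded word is the
// absolute destination, while under PIC it is stored table-relative, so the
// table address is added back after the load.
static inline unsigned long long jtDestSketch(unsigned long long TableAddr,
                                              unsigned long long LoadedEntry,
                                              bool IsPIC) {
  return IsPIC ? TableAddr + LoadedEntry : LoadedEntry;
}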
3014 
3015 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
3016   DebugLoc dl = Op.getDebugLoc();
3017   unsigned Opc;
3018 
3019   switch (Op.getOpcode()) {
3020   default:
3021     llvm_unreachable("Invalid opcode!");
3022   case ISD::FP_TO_SINT:
3023     Opc = ARMISD::FTOSI;
3024     break;
3025   case ISD::FP_TO_UINT:
3026     Opc = ARMISD::FTOUI;
3027     break;
3028   }
3029   Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
3030   return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
3031 }
3032 
3033 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
3034   EVT VT = Op.getValueType();
3035   DebugLoc dl = Op.getDebugLoc();
3036 
3037   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
3038          "Invalid type for custom lowering!");
3039   if (VT != MVT::v4f32)
3040     return DAG.UnrollVectorOp(Op.getNode());
3041 
3042   unsigned CastOpc;
3043   unsigned Opc;
3044   switch (Op.getOpcode()) {
3045   default:
3046     llvm_unreachable("Invalid opcode!");
3047   case ISD::SINT_TO_FP:
3048     CastOpc = ISD::SIGN_EXTEND;
3049     Opc = ISD::SINT_TO_FP;
3050     break;
3051   case ISD::UINT_TO_FP:
3052     CastOpc = ISD::ZERO_EXTEND;
3053     Opc = ISD::UINT_TO_FP;
3054     break;
3055   }
3056 
3057   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
3058   return DAG.getNode(Opc, dl, VT, Op);
3059 }
3060 
3061 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
3062   EVT VT = Op.getValueType();
3063   if (VT.isVector())
3064     return LowerVectorINT_TO_FP(Op, DAG);
3065 
3066   DebugLoc dl = Op.getDebugLoc();
3067   unsigned Opc;
3068 
3069   switch (Op.getOpcode()) {
3070   default:
3071     llvm_unreachable("Invalid opcode!");
3072   case ISD::SINT_TO_FP:
3073     Opc = ARMISD::SITOF;
3074     break;
3075   case ISD::UINT_TO_FP:
3076     Opc = ARMISD::UITOF;
3077     break;
3078   }
3079 
3080   Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
3081   return DAG.getNode(Opc, dl, VT, Op);
3082 }
3083 
3084 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
3085   // Implement fcopysign with a fabs and a conditional fneg.
3086   SDValue Tmp0 = Op.getOperand(0);
3087   SDValue Tmp1 = Op.getOperand(1);
3088   DebugLoc dl = Op.getDebugLoc();
3089   EVT VT = Op.getValueType();
3090   EVT SrcVT = Tmp1.getValueType();
3091   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
3092     Tmp0.getOpcode() == ARMISD::VMOVDRR;
3093   bool UseNEON = !InGPR && Subtarget->hasNEON();
3094 
3095   if (UseNEON) {
3096     // Use VBSL to copy the sign bit.
3097     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
3098     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
3099                                DAG.getTargetConstant(EncodedVal, MVT::i32));
3100     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
3101     if (VT == MVT::f64)
3102       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
3103                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
3104                          DAG.getConstant(32, MVT::i32));
3105     else /*if (VT == MVT::f32)*/
3106       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
3107     if (SrcVT == MVT::f32) {
3108       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
3109       if (VT == MVT::f64)
3110         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
3111                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
3112                            DAG.getConstant(32, MVT::i32));
3113     } else if (VT == MVT::f32)
3114       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
3115                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
3116                          DAG.getConstant(32, MVT::i32));
3117     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
3118     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
3119 
3120     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
3121                                             MVT::i32);
3122     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
3123     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
3124                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
3125 
3126     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
3127                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
3128                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
3129     if (VT == MVT::f32) {
3130       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
3131       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
3132                         DAG.getConstant(0, MVT::i32));
3133     } else {
3134       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
3135     }
3136 
3137     return Res;
3138   }
3139 
3140   // Bitcast operand 1 to i32.
3141   if (SrcVT == MVT::f64)
3142     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
3143                        &Tmp1, 1).getValue(1);
3144   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
3145 
3146   // Or in the signbit with integer operations.
3147   SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
3148   SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
3149   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
3150   if (VT == MVT::f32) {
3151     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
3152                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
3153     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
3154                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
3155   }
3156 
3157   // f64: Or the high part with signbit and then combine two parts.
3158   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
3159                      &Tmp0, 1);
3160   SDValue Lo = Tmp0.getValue(0);
3161   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
3162   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
3163   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
3164 }
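
// Illustrative sketch of the integer path above for f32: clear the sign bit
// of the magnitude, keep only the sign bit of the sign source, and OR the
// two together (__builtin_memcpy is the GCC/Clang builtin):
static inline float copySignF32Sketch(float Mag, float Sgn) {
  unsigned M, S;
  __builtin_memcpy(&M, &Mag, sizeof(M));
  __builtin_memcpy(&S, &Sgn, sizeof(S));
  unsigned R = (M & 0x7fffffff) | (S & 0x80000000);
  float Out;
  __builtin_memcpy(&Out, &R, sizeof(Out));
  return Out;
}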
3165 
3166 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
3167   MachineFunction &MF = DAG.getMachineFunction();
3168   MachineFrameInfo *MFI = MF.getFrameInfo();
3169   MFI->setReturnAddressIsTaken(true);
3170 
3171   EVT VT = Op.getValueType();
3172   DebugLoc dl = Op.getDebugLoc();
3173   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3174   if (Depth) {
3175     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
3176     SDValue Offset = DAG.getConstant(4, MVT::i32);
3177     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
3178                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
3179                        MachinePointerInfo(), false, false, 0);
3180   }
3181 
3182   // Return LR, which contains the return address. Mark it an implicit live-in.
3183   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
3184   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
3185 }
3186 
3187 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
3188   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3189   MFI->setFrameAddressIsTaken(true);
3190 
3191   EVT VT = Op.getValueType();
3192   DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
3193   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3194   unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
3195     ? ARM::R7 : ARM::R11;
3196   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
3197   while (Depth--)
3198     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
3199                             MachinePointerInfo(),
3200                             false, false, 0);
3201   return FrameAddr;
3202 }
3203 
3204 /// ExpandBITCAST - If the target supports VFP, this function is called to
3205 /// expand a bit convert where either the source or destination type is i64 to
3206 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
3207 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
3208 /// vectors), since the legalizer won't know what to do with that.
3209 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
3210   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3211   DebugLoc dl = N->getDebugLoc();
3212   SDValue Op = N->getOperand(0);
3213 
3214   // This function is only supposed to be called for i64 types, either as the
3215   // source or destination of the bit convert.
3216   EVT SrcVT = Op.getValueType();
3217   EVT DstVT = N->getValueType(0);
3218   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
3219          "ExpandBITCAST called for non-i64 type");
3220 
3221   // Turn i64->f64 into VMOVDRR.
3222   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
3223     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
3224                              DAG.getConstant(0, MVT::i32));
3225     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
3226                              DAG.getConstant(1, MVT::i32));
3227     return DAG.getNode(ISD::BITCAST, dl, DstVT,
3228                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
3229   }
3230 
3231   // Turn f64->i64 into VMOVRRD.
3232   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
3233     SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
3234                               DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
3235     // Merge the pieces into a single i64 value.
3236     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
3237   }
3238 
3239   return SDValue();
3240 }
3241 
3242 /// getZeroVector - Returns a vector of specified type with all zero elements.
3243 /// Zero vectors are used to represent vector negation and in those cases
3244 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
3245 /// not support i64 elements, so sometimes the zero vectors will need to be
3246 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
3247 /// zero vector.
3248 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
3249   assert(VT.isVector() && "Expected a vector type");
3250   // The canonical modified immediate encoding of a zero vector is....0!
3251   SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
3252   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
3253   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
3254   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3255 }
3256 
3257 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
3258 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
3259 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
3260                                                 SelectionDAG &DAG) const {
3261   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
3262   EVT VT = Op.getValueType();
3263   unsigned VTBits = VT.getSizeInBits();
3264   DebugLoc dl = Op.getDebugLoc();
3265   SDValue ShOpLo = Op.getOperand(0);
3266   SDValue ShOpHi = Op.getOperand(1);
3267   SDValue ShAmt  = Op.getOperand(2);
3268   SDValue ARMcc;
3269   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
3270 
3271   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
3272 
3273   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
3274                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
3275   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
3276   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
3277                                    DAG.getConstant(VTBits, MVT::i32));
3278   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
3279   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3280   SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
3281 
3282   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3283   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
3284                           ARMcc, DAG, dl);
3285   SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
3286   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
3287                            CCR, Cmp);
3288 
3289   SDValue Ops[2] = { Lo, Hi };
3290   return DAG.getMergeValues(Ops, 2, dl);
3291 }
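
// Illustrative sketch of the SRL_PARTS case above for 0 < Amt < 64: the
// select on ExtraShAmt >= 0 becomes the branch, the CMOV's false value is
// the small-shift OR, and its true value is the big-shift path.
static inline unsigned long long srl64PartsSketch(unsigned Lo, unsigned Hi,
                                                  unsigned Amt) {
  unsigned NewLo, NewHi;
  if (Amt < 32) {
    NewLo = (Lo >> Amt) | (Hi << (32 - Amt));
    NewHi = Hi >> Amt;
  } else {
    NewLo = Hi >> (Amt - 32); // ExtraShAmt = Amt - 32
    NewHi = 0;
  }
  return ((unsigned long long)NewHi << 32) | NewLo;
}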
3292 
3293 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
3294 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
3295 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
3296                                                SelectionDAG &DAG) const {
3297   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
3298   EVT VT = Op.getValueType();
3299   unsigned VTBits = VT.getSizeInBits();
3300   DebugLoc dl = Op.getDebugLoc();
3301   SDValue ShOpLo = Op.getOperand(0);
3302   SDValue ShOpHi = Op.getOperand(1);
3303   SDValue ShAmt  = Op.getOperand(2);
3304   SDValue ARMcc;
3305 
3306   assert(Op.getOpcode() == ISD::SHL_PARTS);
3307   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
3308                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
3309   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
3310   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
3311                                    DAG.getConstant(VTBits, MVT::i32));
3312   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
3313   SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
3314 
3315   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3316   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3317   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
3318                           ARMcc, DAG, dl);
3319   SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
3320   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
3321                            CCR, Cmp);
3322 
3323   SDValue Ops[2] = { Lo, Hi };
3324   return DAG.getMergeValues(Ops, 2, dl);
3325 }
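
// The mirror-image sketch for SHL_PARTS, again for 0 < Amt < 64:
static inline unsigned long long shl64PartsSketch(unsigned Lo, unsigned Hi,
                                                  unsigned Amt) {
  unsigned NewLo, NewHi;
  if (Amt < 32) {
    NewLo = Lo << Amt;
    NewHi = (Hi << Amt) | (Lo >> (32 - Amt));
  } else {
    NewLo = 0;
    NewHi = Lo << (Amt - 32); // ExtraShAmt = Amt - 32
  }
  return ((unsigned long long)NewHi << 32) | NewLo;
}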
3326 
3327 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
3328                                             SelectionDAG &DAG) const {
3329   // The rounding mode is in bits 23:22 of the FPSCR.
3330   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
3331   // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
3332   // so that the shift + and get folded into a bitfield extract.
3333   DebugLoc dl = Op.getDebugLoc();
3334   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
3335                               DAG.getConstant(Intrinsic::arm_get_fpscr,
3336                                               MVT::i32));
3337   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
3338                                   DAG.getConstant(1U << 22, MVT::i32));
3339   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
3340                               DAG.getConstant(22, MVT::i32));
3341   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
3342                      DAG.getConstant(3, MVT::i32));
3343 }
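
// The remapping above in isolation: adding 1 << 22 before extracting bits
// 23:22 realizes the 0->1, 1->2, 2->3, 3->0 mapping in one bitfield extract
// (the wrap from 3 to 0 is the carry out of the two kept bits):
static inline unsigned fltRoundsFromFPSCRSketch(unsigned FPSCR) {
  return ((FPSCR + (1u << 22)) >> 22) & 3;
}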
3344 
3345 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
3346                          const ARMSubtarget *ST) {
3347   EVT VT = N->getValueType(0);
3348   DebugLoc dl = N->getDebugLoc();
3349 
3350   if (!ST->hasV6T2Ops())
3351     return SDValue();
3352 
3353   SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
3354   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
3355 }
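
// Illustrative sketch of the identity used above, cttz(x) == ctlz(rbit(x)),
// for nonzero x (__builtin_clz is the GCC/Clang builtin and is undefined
// for 0, whereas the ARM CLZ instruction returns 32):
static inline unsigned rbit32Sketch(unsigned V) {
  unsigned R = 0;
  for (int i = 0; i < 32; ++i) { R = (R << 1) | (V & 1); V >>= 1; }
  return R;
}
static inline unsigned cttz32Sketch(unsigned V) {
  return __builtin_clz(rbit32Sketch(V));
}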
3356 
3357 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
3358                           const ARMSubtarget *ST) {
3359   EVT VT = N->getValueType(0);
3360   DebugLoc dl = N->getDebugLoc();
3361 
3362   if (!VT.isVector())
3363     return SDValue();
3364 
3365   // Lower vector shifts on NEON to use VSHL.
3366   assert(ST->hasNEON() && "unexpected vector shift");
3367 
3368   // Left shifts translate directly to the vshiftu intrinsic.
3369   if (N->getOpcode() == ISD::SHL)
3370     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3371                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
3372                        N->getOperand(0), N->getOperand(1));
3373 
3374   assert((N->getOpcode() == ISD::SRA ||
3375           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
3376 
3377   // NEON uses the same intrinsics for both left and right shifts.  For
3378   // right shifts, the shift amounts are negative, so negate the vector of
3379   // shift amounts.
3380   EVT ShiftVT = N->getOperand(1).getValueType();
3381   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
3382                                      getZeroVector(ShiftVT, DAG, dl),
3383                                      N->getOperand(1));
3384   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
3385                              Intrinsic::arm_neon_vshifts :
3386                              Intrinsic::arm_neon_vshiftu);
3387   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3388                      DAG.getConstant(vshiftInt, MVT::i32),
3389                      N->getOperand(0), NegatedCount);
3390 }
3391 
3392 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
3393                                 const ARMSubtarget *ST) {
3394   EVT VT = N->getValueType(0);
3395   DebugLoc dl = N->getDebugLoc();
3396 
3397   // We can get here for a node like i32 = ISD::SHL i32, i64
3398   if (VT != MVT::i64)
3399     return SDValue();
3400 
3401   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
3402          "Unknown shift to lower!");
3403 
3404   // We only lower SRA, SRL of 1 here, all others use generic lowering.
3405   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
3406       cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
3407     return SDValue();
3408 
3409   // If we are in thumb mode, we don't have RRX.
3410   if (ST->isThumb1Only()) return SDValue();
3411 
3412   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
3413   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
3414                            DAG.getConstant(0, MVT::i32));
3415   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
3416                            DAG.getConstant(1, MVT::i32));
3417 
3418   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
3419   // captures the result into a carry flag.
3420   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
3421   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1);
3422 
3423   // The low part is an ARMISD::RRX operand, which shifts the carry in.
3424   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
3425 
3426   // Merge the pieces into a single i64 value.
3427   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
3428 }
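
// Illustrative sketch of the SRL-by-1 case above: the high word's shift
// produces a carry from its low bit (SRL_FLAG), and RRX rotates that carry
// into bit 31 of the low word (SRA_FLAG differs only in sign-filling Hi):
static inline unsigned long long lsr64By1Sketch(unsigned Lo, unsigned Hi) {
  unsigned Carry = Hi & 1;
  unsigned NewHi = Hi >> 1;
  unsigned NewLo = (Lo >> 1) | (Carry << 31);
  return ((unsigned long long)NewHi << 32) | NewLo;
}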
3429 
3430 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
3431   SDValue TmpOp0, TmpOp1;
3432   bool Invert = false;
3433   bool Swap = false;
3434   unsigned Opc = 0;
3435 
3436   SDValue Op0 = Op.getOperand(0);
3437   SDValue Op1 = Op.getOperand(1);
3438   SDValue CC = Op.getOperand(2);
3439   EVT VT = Op.getValueType();
3440   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
3441   DebugLoc dl = Op.getDebugLoc();
3442 
3443   if (Op.getOperand(1).getValueType().isFloatingPoint()) {
3444     switch (SetCCOpcode) {
3445     default: llvm_unreachable("Illegal FP comparison"); break;
3446     case ISD::SETUNE:
3447     case ISD::SETNE:  Invert = true; // Fallthrough
3448     case ISD::SETOEQ:
3449     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
3450     case ISD::SETOLT:
3451     case ISD::SETLT: Swap = true; // Fallthrough
3452     case ISD::SETOGT:
3453     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
3454     case ISD::SETOLE:
3455     case ISD::SETLE:  Swap = true; // Fallthrough
3456     case ISD::SETOGE:
3457     case ISD::SETGE: Opc = ARMISD::VCGE; break;
3458     case ISD::SETUGE: Swap = true; // Fallthrough
3459     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
3460     case ISD::SETUGT: Swap = true; // Fallthrough
3461     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
3462     case ISD::SETUEQ: Invert = true; // Fallthrough
3463     case ISD::SETONE:
3464       // Expand this to (OLT | OGT).
3465       TmpOp0 = Op0;
3466       TmpOp1 = Op1;
3467       Opc = ISD::OR;
3468       Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3469       Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
3470       break;
3471     case ISD::SETUO: Invert = true; // Fallthrough
3472     case ISD::SETO:
3473       // Expand this to (OLT | OGE).
3474       TmpOp0 = Op0;
3475       TmpOp1 = Op1;
3476       Opc = ISD::OR;
3477       Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3478       Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
3479       break;
3480     }
3481   } else {
3482     // Integer comparisons.
3483     switch (SetCCOpcode) {
3484     default: llvm_unreachable("Illegal integer comparison"); break;
3485     case ISD::SETNE:  Invert = true;
3486     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
3487     case ISD::SETLT:  Swap = true;
3488     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
3489     case ISD::SETLE:  Swap = true;
3490     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
3491     case ISD::SETULT: Swap = true;
3492     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
3493     case ISD::SETULE: Swap = true;
3494     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
3495     }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {
      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  // If one of the operands is a constant vector zero, attempt to fold the
  // comparison to a specialized compare-against-zero form.
  SDValue SingleOp;
  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
    SingleOp = Op0;
  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    if (Opc == ARMISD::VCGE)
      Opc = ARMISD::VCLEZ;
    else if (Opc == ARMISD::VCGT)
      Opc = ARMISD::VCLTZ;
    SingleOp = Op1;
  }

  SDValue Result;
  if (SingleOp.getNode()) {
    switch (Opc) {
    case ARMISD::VCEQ:
      Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
    case ARMISD::VCGE:
      Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
    case ARMISD::VCLEZ:
      Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
    case ARMISD::VCGT:
      Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
    case ARMISD::VCLTZ:
      Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
    default:
      Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
    }
  } else {
    Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
  }

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

/// isNEONModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON instruction with a "modified immediate"
/// operand (e.g., VMOV).  If so, return the encoded value.
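///
/// Illustrative example (derived from the 16-bit case below): a v4i16
/// splat of 0x00ab matches "Value = 0x00nn", so it is encoded with
/// OpCmode = 0x8 (Op=0, Cmode=1000) and Imm = 0xab.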
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 EVT &VT, bool is128Bits, NEONModImmType type) {
  unsigned OpCmode, Imm;

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8.  However, NEON modified
  // immediate instructions other than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK.  Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
      OpCmode = 0xa;
      Imm = SplatBits >> 8;
      break;
    }
    return SDValue();

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn: Op=x, Cmode=000x.
      OpCmode = 0;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00: Op=x, Cmode=001x.
      OpCmode = 0x2;
      Imm = SplatBits >> 8;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000: Op=x, Cmode=010x.
      OpCmode = 0x4;
      Imm = SplatBits >> 16;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000: Op=x, Cmode=011x.
      OpCmode = 0x6;
      Imm = SplatBits >> 24;
      break;
    }

    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
    if (type == OtherModImm) return SDValue();

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff: Op=x, Cmode=1100.
      OpCmode = 0xc;
      Imm = SplatBits >> 8;
      SplatBits |= 0xff;
      break;
    }

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff: Op=x, Cmode=1101.
      OpCmode = 0xd;
      Imm = SplatBits >> 16;
      SplatBits |= 0xffff;
      break;
    }

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    return SDValue();

  case 64: {
    if (type != VMOVModImm)
      return SDValue();
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return SDValue();
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }
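    // Worked example (illustrative): the splat 0x00ff00ff00ff00ff sets bytes
    // 0, 2, 4 and 6, so the loop above yields Imm = 0b01010101 (0x55).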
    // Op=1, Cmode=1110.
    OpCmode = 0x1e;
    SplatBits = Val;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }

  default:
    llvm_unreachable("unexpected size for isNEONModifiedImm");
    return SDValue();
  }

  unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
  return DAG.getTargetConstant(EncodedVal, MVT::i32);
}

static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
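  // For example, with NumElts == 4 the mask <5, 6, 7, 0> wraps past the end
  // of the concatenated sources, so it is a VEXT of the swapped operands with
  // an immediate of 1 once the adjustment below has been applied.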
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
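/// For example, <3, 2, 1, 0, 7, 6, 5, 4> on v8i8 is a VREV32 mask: each
/// 32-bit block of four bytes has its elements reversed.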
static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) {
  // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
  // range, then 0 is placed into the resulting vector. So pretty much any mask
  // of 8 elements can work here.
  return VT == MVT::v8i8 && M.size() == 8;
}

static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
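  // For v4i16, e.g., the two VTRN results correspond to the masks
  // <0, 4, 2, 6> (WhichResult == 0) and <1, 5, 3, 7> (WhichResult == 1).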
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
      return false;
  }
  return true;
}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
      return false;
  }
  return true;
}

static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
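  // For v4i16, e.g., the two VUZP results correspond to the masks
  // <0, 2, 4, 6> (WhichResult == 0) and <1, 3, 5, 7> (WhichResult == 1).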
  for (unsigned i = 0; i != NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != 2 * i + WhichResult)
      return false;
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned Half = VT.getVectorNumElements() / 2;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned j = 0; j != 2; ++j) {
    unsigned Idx = WhichResult;
    for (unsigned i = 0; i != Half; ++i) {
      int MIdx = M[i + j * Half];
      if (MIdx >= 0 && (unsigned) MIdx != Idx)
        return false;
      Idx += 2;
    }
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
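  // For v4i16, e.g., the two VZIP results correspond to the masks
  // <0, 4, 1, 5> (WhichResult == 0) and <2, 6, 3, 7> (WhichResult == 1).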
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
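// For example, 0x00ab0000 is a single ARM-mode MOV (an 8-bit value rotated
// right by an even amount), whereas the Thumb1 check below only accepts a
// plain 8-bit immediate or its bitwise complement.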
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, DebugLoc dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, MVT::i32);
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, MVT::i32);
  }
  return SDValue();
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, VmovVT, VT.is128BitVector(),
                                      VMOVModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isNEONModifiedImm(NegatedImm,
                              SplatUndef.getZExtValue(), SplatBitSize,
                              DAG, VmovVT, VT.is128BitVector(),
                              VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool isConstant = true;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    if (!Value.getNode())
      Value = V;
    else if (V != Value)
      usesOnlyOneValue = false;
  }

  if (!Value.getNode())
    return DAG.getUNDEF(VT);

  if (isOnlyLowElement)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();

  // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
  // i32 and try again.
  if (usesOnlyOneValue && EltSize <= 32) {
    if (!isConstant)
      return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector<SDValue, 8> Ops;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
      SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
    if (Val.getNode())
      return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
  if (NumElts >= 4) {
    SDValue shuffle = ReconstructShuffle(Op, DAG);
    if (shuffle != SDValue())
      return shuffle;
  }

  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT,
                              &Ops[0], NumElts);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  return SDValue();
}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
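// For example, a v4i16 built from lanes 2..5 of a single v8i16 source can be
// modelled as a VEXT of that source's two halves followed by a shuffle.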
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 2> SourceVecs;
  SmallVector<unsigned, 2> MinElts;
  SmallVector<unsigned, 2> MaxElts;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
      // A shuffle can only come from building a vector from various
      // elements of other vectors.
      return SDValue();
    } else if (V.getOperand(0).getValueType().getVectorElementType() !=
               VT.getVectorElementType()) {
      // This code doesn't know how to handle shuffles where the vector
      // element types do not match (this happens because type legalization
      // promotes the return type of EXTRACT_VECTOR_ELT).
      // FIXME: It might be appropriate to extend this code to handle
      // mismatched types.
      return SDValue();
    }

    // Record this extraction against the appropriate vector if possible...
    SDValue SourceVec = V.getOperand(0);
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    bool FoundSource = false;
    for (unsigned j = 0; j < SourceVecs.size(); ++j) {
      if (SourceVecs[j] == SourceVec) {
        if (MinElts[j] > EltNo)
          MinElts[j] = EltNo;
        if (MaxElts[j] < EltNo)
          MaxElts[j] = EltNo;
        FoundSource = true;
        break;
      }
    }

    // Or record a new source if not...
    if (!FoundSource) {
      SourceVecs.push_back(SourceVec);
      MinElts.push_back(EltNo);
      MaxElts.push_back(EltNo);
    }
  }

  // Currently only do something sane when at most two source vectors are
  // involved.
  if (SourceVecs.size() > 2)
    return SDValue();

  SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
  int VEXTOffsets[2] = {0, 0};

  // This loop extracts the usage patterns of the source vectors
  // and prepares appropriate SDValues for a shuffle if possible.
  for (unsigned i = 0; i < SourceVecs.size(); ++i) {
    if (SourceVecs[i].getValueType() == VT) {
      // No VEXT necessary.
      ShuffleSrcs[i] = SourceVecs[i];
      VEXTOffsets[i] = 0;
      continue;
    } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
      // It probably isn't worth padding out a smaller vector just to
      // break it down again in a shuffle.
      return SDValue();
    }

    // Since only 64-bit and 128-bit vectors are legal on ARM and
    // we've eliminated the other cases...
    assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
           "unexpected vector sizes in ReconstructShuffle");

    if (MaxElts[i] - MinElts[i] >= NumElts) {
      // Span too large for a VEXT to cope.
      return SDValue();
    }

    if (MinElts[i] >= NumElts) {
      // The extraction can just take the second half.
      VEXTOffsets[i] = NumElts;
      ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                   SourceVecs[i],
                                   DAG.getIntPtrConstant(NumElts));
    } else if (MaxElts[i] < NumElts) {
      // The extraction can just take the first half.
      VEXTOffsets[i] = 0;
      ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                   SourceVecs[i],
                                   DAG.getIntPtrConstant(0));
    } else {
      // An actual VEXT is needed.
      VEXTOffsets[i] = MinElts[i];
      SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                     SourceVecs[i],
                                     DAG.getIntPtrConstant(0));
      SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                     SourceVecs[i],
                                     DAG.getIntPtrConstant(NumElts));
      ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
                                   DAG.getConstant(VEXTOffsets[i], MVT::i32));
    }
  }

  SmallVector<int, 8> Mask;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.getOpcode() == ISD::UNDEF) {
      Mask.push_back(-1);
      continue;
    }

    SDValue ExtractVec = Entry.getOperand(0);
    int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
                                          .getOperand(1))->getSExtValue();
    if (ExtractVec == SourceVecs[0]) {
      Mask.push_back(ExtractElt - VEXTOffsets[0]);
    } else {
      Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
    }
  }

  // Final check before we try to produce nonsense...
  if (isShuffleMaskLegal(Mask, VT))
    return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
                                &Mask[0]);

  return SDValue();
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
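    // Each index is a base-9 digit (0-7 for a concrete lane, 8 for undef);
    // e.g., the mask <0, 1, 2, 3> maps to entry 0*729 + 1*81 + 2*9 + 3.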
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  bool ReverseVEXT;
  unsigned Imm, WhichResult;

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  return (EltSize >= 32 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isVREVMask(M, VT, 64) ||
          isVREVMask(M, VT, 32) ||
          isVREVMask(M, VT, 16) ||
          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
          isVTBLMask(M, VT) ||
          isVTRNMask(M, VT, WhichResult) ||
          isVUZPMask(M, VT, WhichResult) ||
          isVZIPMask(M, VT, WhichResult) ||
          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
          isVZIP_v_undef_Mask(M, VT, WhichResult));
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      DebugLoc dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
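  // PFEntry layout: bits 31-30 give the cost, bits 29-26 the operation, and
  // the two 13-bit fields identify the table entries used to build each
  // operand recursively.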

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VREV,
    OP_VDUP0,
    OP_VDUP1,
    OP_VDUP2,
    OP_VDUP3,
    OP_VEXT1,
    OP_VEXT2,
    OP_VEXT3,
    OP_VUZPL, // VUZP, left result
    OP_VUZPR, // VUZP, right result
    OP_VZIPL, // VZIP, left result
    OP_VZIPR, // VZIP, right result
    OP_VTRNL, // VTRN, left result
    OP_VTRNR  // VTRN, right result
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       SmallVectorImpl<int> &ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc DL = Op.getDebugLoc();

  SmallVector<SDValue, 8> VTBLMask;
  for (SmallVectorImpl<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));

  if (V2.getNode()->getOpcode() == ISD::UNDEF)
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
                                   &VTBLMask[0], 8));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
                                 &VTBLMask[0], 8));
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  SmallVector<int, 8> ShuffleMask;

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection.  This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same type so that they get CSEd properly.
  SVN->getMask(ShuffleMask);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize <= 32) {
    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
      int Lane = SVN->getSplatIndex();
      // If this is an undef splat, generate it via a plain VDUP, if possible.
      if (Lane == -1) Lane = 0;

      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, MVT::i32));
    }

    bool ReverseVEXT;
    unsigned Imm;
    if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
    unsigned WhichResult;
    if (isVTRNMask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                         V1, V2).getValue(WhichResult);
    if (isVUZPMask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                         V1, V2).getValue(WhichResult);
    if (isVZIPMask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                         V1, V2).getValue(WhichResult);

    if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                         V1, V1).getValue(WhichResult);
    if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                         V1, V1).getValue(WhichResult);
    if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                         V1, V1).getValue(WhichResult);
  }

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (ShuffleMask[i] < 0)
        Ops.push_back(DAG.getUNDEF(EltVT));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
                                                  MVT::i32)));
    }
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT,
                              &Ops[0], NumElts);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  if (VT == MVT::v8i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  return SDValue();
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  // EXTRACT_VECTOR_ELT is legal only for immediate indices.
  SDValue Lane = Op.getOperand(1);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  if (Op.getValueType() == MVT::i32 &&
      Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
    DebugLoc dl = Op.getDebugLoc();
    return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  }

  return Op;
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  DebugLoc dl = Op.getDebugLoc();
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  if (Op0.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
                      DAG.getIntPtrConstant(0));
  if (Op1.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
                      DAG.getIntPtrConstant(1));
  return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
}

/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
/// element has been zero/sign-extended, depending on the isSigned parameter,
/// from an integer type half its size.
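/// For example, the v4i16 constant <0, 1, 2, 3> qualifies either way: every
/// element's high half is just a sign/zero fill of its low i8 half.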
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {
  // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    if (BVN->getValueType(0) != MVT::v4i32 ||
        BVN->getOpcode() != ISD::BUILD_VECTOR)
      return false;
    unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
    unsigned HiElt = 1 - LoElt;
    ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
    ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
    ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
    ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
    if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
      return false;
    if (isSigned) {
      if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
          Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
        return true;
    } else {
      if (Hi0->isNullValue() && Hi1->isNullValue())
        return true;
    }
    return false;
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
      unsigned HalfSize = EltSize / 2;
      if (isSigned) {
        int64_t SExtVal = C->getSExtValue();
        if ((SExtVal >> HalfSize) != (SExtVal >> EltSize))
          return false;
      } else {
        if ((C->getZExtValue() >> HalfSize) != 0)
          return false;
      }
      continue;
    }
    return false;
  }

  return true;
}

/// isSignExtended - Check if a node is a vector value that is sign-extended
/// or a constant BUILD_VECTOR with sign-extended elements.
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, true))
    return true;
  return false;
}

/// isZeroExtended - Check if a node is a vector value that is zero-extended
/// or a constant BUILD_VECTOR with zero-extended elements.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, false))
    return true;
  return false;
}

/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending
/// load, or BUILD_VECTOR with extended elements, return the unextended value.
static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
    return N->getOperand(0);
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(),
                       LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
                       LD->isNonTemporal(), LD->getAlignment());
  // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
  // have been legalized as a BITCAST from v4i32.
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32,
                       BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
                     MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
}

static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
      isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
      isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
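  // For example, (mul (sext v4i16:a), (sext v4i16:b)) : v4i32 becomes a
  // single VMULL.S16 rather than two lengthening moves plus a full-width
  // multiply.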
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C).
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this.  It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  DebugLoc DL = Op.getDebugLoc();
  SDValue Op0;
  SDValue Op1 = SkipExtension(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtension(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //   vmull q0, d4, d6
  //   vmlal q0, d5, d6
  // is faster than
  //   vaddl q0, d4, d5
  //   vmovl q1, d6
  //   vmul  q0, q0, q1
  SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

static SDValue
LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) {
  // Convert to float.
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                  DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any Newton steps.  This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  Y = DAG.getConstant(0xb000, MVT::i32);
  Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
  return X;
}

static SDValue
LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) {
  SDValue N2;
  // Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and one refinement step.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single Newton step.  This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(0x89, MVT::i32);
  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::SDIV");

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
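    // Sketch of the v8i8 strategy: widen both operands to v8i16, split each
    // into v4i16 halves, divide the halves with the v4i8-range helper above,
    // then concatenate and truncate back to v8i8.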
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0));

    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
    return N0;
  }
  return LowerSDIV_v4i16(N0, N1, dl, DAG);
}

static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
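    // As in LowerSDIV above: widen to v8i16, divide the two v4i16 halves,
    // then narrow back to v8i8, here with a saturating VQMOVN.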
4807     N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
4808     N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
4809 
4810     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4811                      DAG.getIntPtrConstant(4));
4812     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4813                      DAG.getIntPtrConstant(4));
4814     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4815                      DAG.getIntPtrConstant(0));
4816     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4817                      DAG.getIntPtrConstant(0));
4818 
4819     N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
4820     N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
4821 
4822     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
4823     N0 = LowerCONCAT_VECTORS(N0, DAG);
4824 
4825     N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
4826                      DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
4827                      N0);
4828     return N0;
4829   }
4830 
4831   // v4i16 sdiv ... Convert to float.
4832   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
4833   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
4834   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
4835   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
4836   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4837   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4838 
4839   // Use reciprocal estimate and two refinement steps.
4840   // float4 recip = vrecpeq_f32(yf);
4841   // recip *= vrecpsq_f32(yf, recip);
4842   // recip *= vrecpsq_f32(yf, recip);
4843   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4844                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
4845   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4846                    DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4847                    BN1, N2);
4848   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4849   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4850                    DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4851                    BN1, N2);
4852   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4853   // Simply multiplying by the reciprocal estimate can leave us a few ulps
4854   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
4855   // and that it will never cause us to return an answer too large).
4856   // float4 result = as_float4(as_int4(xf*recip) + 2);
4857   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4858   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4859   N1 = DAG.getConstant(2, MVT::i32);
4860   N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4861   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4862   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4863   // Convert back to integer and return.
4864   // return vmovn_u32(vcvt_s32_f32(result));
4865   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4866   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4867   return N0;
4868 }
4869 
4870 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
4871   EVT VT = Op.getNode()->getValueType(0);
4872   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
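       // Each of these nodes produces two results: the arithmetic value and a
       // second i32 value modelling the carry/borrow flags, which ADDE/SUBE
       // consume as their extra third operand.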
4873 
4874   unsigned Opc;
4875   bool ExtraOp = false;
4876   switch (Op.getOpcode()) {
4877   default: llvm_unreachable("Invalid code");
4878   case ISD::ADDC: Opc = ARMISD::ADDC; break;
4879   case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
4880   case ISD::SUBC: Opc = ARMISD::SUBC; break;
4881   case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
4882   }
4883 
4884   if (!ExtraOp)
4885     return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4886                        Op.getOperand(1));
4887   return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4888                      Op.getOperand(1), Op.getOperand(2));
4889 }
4890 
4891 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
4892   // Monotonic load/store is legal for all targets
4893   if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
4894     return Op;
4895 
4896   // Acquire/Release load/store is not legal for targets without a
4897   // dmb or equivalent available.
4898   return SDValue();
4899 }
4900 
4901 
4902 static void
4903 ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
4904                     SelectionDAG &DAG, unsigned NewOp) {
4905   DebugLoc dl = Node->getDebugLoc();
4906   assert(Node->getValueType(0) == MVT::i64 &&
4907           "Only know how to expand i64 atomics");
4908 
4909   SmallVector<SDValue, 6> Ops;
4910   Ops.push_back(Node->getOperand(0)); // Chain
4911   Ops.push_back(Node->getOperand(1)); // Ptr
4912   // Low part of Val1
4913   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4914                             Node->getOperand(2), DAG.getIntPtrConstant(0)));
4915   // High part of Val1
4916   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4917                             Node->getOperand(2), DAG.getIntPtrConstant(1)));
4918   if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
4919     // Low part of Val2
4920     Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4921                               Node->getOperand(3), DAG.getIntPtrConstant(0)));
4922     // High part of Val2
4923     Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4924                               Node->getOperand(3), DAG.getIntPtrConstant(1)));
4925   }
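       // The memory intrinsic node yields the 64-bit result as two i32 halves
       // plus a chain; BUILD_PAIR below reassembles the halves into an i64.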
4926   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4927   SDValue Result =
4928     DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
4929                             cast<MemSDNode>(Node)->getMemOperand());
4930   SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
4931   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
4932   Results.push_back(Result.getValue(2));
4933 }
4934 
4935 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4936   switch (Op.getOpcode()) {
4937   default: llvm_unreachable("Don't know how to custom lower this!");
4938   case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
4939   case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
4940   case ISD::GlobalAddress:
4941     return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
4942       LowerGlobalAddressELF(Op, DAG);
4943   case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
4944   case ISD::SELECT:        return LowerSELECT(Op, DAG);
4945   case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
4946   case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
4947   case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
4948   case ISD::VASTART:       return LowerVASTART(Op, DAG);
4949   case ISD::MEMBARRIER:    return LowerMEMBARRIER(Op, DAG, Subtarget);
4950   case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
4951   case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
4952   case ISD::SINT_TO_FP:
4953   case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
4954   case ISD::FP_TO_SINT:
4955   case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
4956   case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
4957   case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
4958   case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
4959   case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
4960   case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
4961   case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
4962   case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
4963   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
4964                                                                Subtarget);
4965   case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG);
4966   case ISD::SHL:
4967   case ISD::SRL:
4968   case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
4969   case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
4970   case ISD::SRL_PARTS:
4971   case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
4972   case ISD::CTTZ:          return LowerCTTZ(Op.getNode(), DAG, Subtarget);
4973   case ISD::SETCC:         return LowerVSETCC(Op, DAG);
4974   case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
4975   case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
4976   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
4977   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
4978   case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
4979   case ISD::MUL:           return LowerMUL(Op, DAG);
4980   case ISD::SDIV:          return LowerSDIV(Op, DAG);
4981   case ISD::UDIV:          return LowerUDIV(Op, DAG);
4982   case ISD::ADDC:
4983   case ISD::ADDE:
4984   case ISD::SUBC:
4985   case ISD::SUBE:          return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
4986   case ISD::ATOMIC_LOAD:
4987   case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
4988   }
4989   return SDValue();
4990 }
4991 
4992 /// ReplaceNodeResults - Replace the results of a node with an illegal result
4993 /// type with new values built out of custom code.
4994 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
4995                                            SmallVectorImpl<SDValue>&Results,
4996                                            SelectionDAG &DAG) const {
4997   SDValue Res;
4998   switch (N->getOpcode()) {
4999   default:
5000     llvm_unreachable("Don't know how to custom expand this!");
5001     break;
5002   case ISD::BITCAST:
5003     Res = ExpandBITCAST(N, DAG);
5004     break;
5005   case ISD::SRL:
5006   case ISD::SRA:
5007     Res = Expand64BitShift(N, DAG, Subtarget);
5008     break;
5009   case ISD::ATOMIC_LOAD_ADD:
5010     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG);
5011     return;
5012   case ISD::ATOMIC_LOAD_AND:
5013     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG);
5014     return;
5015   case ISD::ATOMIC_LOAD_NAND:
5016     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG);
5017     return;
5018   case ISD::ATOMIC_LOAD_OR:
5019     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG);
5020     return;
5021   case ISD::ATOMIC_LOAD_SUB:
5022     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG);
5023     return;
5024   case ISD::ATOMIC_LOAD_XOR:
5025     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG);
5026     return;
5027   case ISD::ATOMIC_SWAP:
5028     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG);
5029     return;
5030   case ISD::ATOMIC_CMP_SWAP:
5031     ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG);
5032     return;
5033   }
5034   if (Res.getNode())
5035     Results.push_back(Res);
5036 }
5037 
5038 //===----------------------------------------------------------------------===//
5039 //                           ARM Scheduler Hooks
5040 //===----------------------------------------------------------------------===//
5041 
5042 MachineBasicBlock *
5043 ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
5044                                      MachineBasicBlock *BB,
5045                                      unsigned Size) const {
5046   unsigned dest    = MI->getOperand(0).getReg();
5047   unsigned ptr     = MI->getOperand(1).getReg();
5048   unsigned oldval  = MI->getOperand(2).getReg();
5049   unsigned newval  = MI->getOperand(3).getReg();
5050   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5051   DebugLoc dl = MI->getDebugLoc();
5052   bool isThumb2 = Subtarget->isThumb2();
5053 
5054   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5055   unsigned scratch =
5056     MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass
5057                                        : ARM::GPRRegisterClass);
5058 
5059   if (isThumb2) {
5060     MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
5061     MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass);
5062     MRI.constrainRegClass(newval, ARM::rGPRRegisterClass);
5063   }
5064 
5065   unsigned ldrOpc, strOpc;
5066   switch (Size) {
5067   default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
5068   case 1:
5069     ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
5070     strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
5071     break;
5072   case 2:
5073     ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
5074     strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
5075     break;
5076   case 4:
5077     ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
5078     strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
5079     break;
5080   }
5081 
5082   MachineFunction *MF = BB->getParent();
5083   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5084   MachineFunction::iterator It = BB;
5085   ++It; // insert the new blocks after the current block
5086 
5087   MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
5088   MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
5089   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5090   MF->insert(It, loop1MBB);
5091   MF->insert(It, loop2MBB);
5092   MF->insert(It, exitMBB);
5093 
5094   // Transfer the remainder of BB and its successor edges to exitMBB.
5095   exitMBB->splice(exitMBB->begin(), BB,
5096                   llvm::next(MachineBasicBlock::iterator(MI)),
5097                   BB->end());
5098   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5099 
5100   //  thisMBB:
5101   //   ...
5102   //   fallthrough --> loop1MBB
5103   BB->addSuccessor(loop1MBB);
5104 
5105   // loop1MBB:
5106   //   ldrex dest, [ptr]
5107   //   cmp dest, oldval
5108   //   bne exitMBB
5109   BB = loop1MBB;
5110   MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
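       // The Thumb2 exclusive-access encodings take an explicit immediate
       // offset operand, so a zero offset is appended for t2LDREX/t2STREX only.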
5111   if (ldrOpc == ARM::t2LDREX)
5112     MIB.addImm(0);
5113   AddDefaultPred(MIB);
5114   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
5115                  .addReg(dest).addReg(oldval));
5116   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5117     .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5118   BB->addSuccessor(loop2MBB);
5119   BB->addSuccessor(exitMBB);
5120 
5121   // loop2MBB:
5122   //   strex scratch, newval, [ptr]
5123   //   cmp scratch, #0
5124   //   bne loop1MBB
5125   BB = loop2MBB;
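       // strex writes 0 to scratch when the store-exclusive succeeds and 1
       // when the reservation was lost, so the cmp/bne retries from the load.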
5126   MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr);
5127   if (strOpc == ARM::t2STREX)
5128     MIB.addImm(0);
5129   AddDefaultPred(MIB);
5130   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5131                  .addReg(scratch).addImm(0));
5132   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5133     .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5134   BB->addSuccessor(loop1MBB);
5135   BB->addSuccessor(exitMBB);
5136 
5137   //  exitMBB:
5138   //   ...
5139   BB = exitMBB;
5140 
5141   MI->eraseFromParent();   // The instruction is gone now.
5142 
5143   return BB;
5144 }
5145 
5146 MachineBasicBlock *
5147 ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
5148                                     unsigned Size, unsigned BinOpcode) const {
5149   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
5150   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5151 
5152   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5153   MachineFunction *MF = BB->getParent();
5154   MachineFunction::iterator It = BB;
5155   ++It;
5156 
5157   unsigned dest = MI->getOperand(0).getReg();
5158   unsigned ptr = MI->getOperand(1).getReg();
5159   unsigned incr = MI->getOperand(2).getReg();
5160   DebugLoc dl = MI->getDebugLoc();
5161   bool isThumb2 = Subtarget->isThumb2();
5162 
5163   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5164   if (isThumb2) {
5165     MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
5166     MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
5167   }
5168 
5169   unsigned ldrOpc, strOpc;
5170   switch (Size) {
5171   default: llvm_unreachable("unsupported size for AtomicBinary!");
5172   case 1:
5173     ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
5174     strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
5175     break;
5176   case 2:
5177     ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
5178     strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
5179     break;
5180   case 4:
5181     ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
5182     strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
5183     break;
5184   }
5185 
5186   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5187   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5188   MF->insert(It, loopMBB);
5189   MF->insert(It, exitMBB);
5190 
5191   // Transfer the remainder of BB and its successor edges to exitMBB.
5192   exitMBB->splice(exitMBB->begin(), BB,
5193                   llvm::next(MachineBasicBlock::iterator(MI)),
5194                   BB->end());
5195   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5196 
5197   TargetRegisterClass *TRC =
5198     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5199   unsigned scratch = MRI.createVirtualRegister(TRC);
5200   unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
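       // For ATOMIC_SWAP (BinOpcode == 0) the incoming value is stored as-is,
       // so scratch2 simply aliases incr and no ALU op is emitted in the loop.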
5201 
5202   //  thisMBB:
5203   //   ...
5204   //   fallthrough --> loopMBB
5205   BB->addSuccessor(loopMBB);
5206 
5207   //  loopMBB:
5208   //   ldrex dest, ptr
5209   //   <binop> scratch2, dest, incr
5210   //   strex scratch, scratch2, ptr
5211   //   cmp scratch, #0
5212   //   bne- loopMBB
5213   //   fallthrough --> exitMBB
5214   BB = loopMBB;
5215   MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
5216   if (ldrOpc == ARM::t2LDREX)
5217     MIB.addImm(0);
5218   AddDefaultPred(MIB);
5219   if (BinOpcode) {
5220     // Operand order needs to go the other way for NAND.
5221     if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
5222       AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
5223                      addReg(incr).addReg(dest)).addReg(0);
5224     else
5225       AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
5226                      addReg(dest).addReg(incr)).addReg(0);
5227   }
5228 
5229   MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
5230   if (strOpc == ARM::t2STREX)
5231     MIB.addImm(0);
5232   AddDefaultPred(MIB);
5233   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5234                  .addReg(scratch).addImm(0));
5235   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5236     .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5237 
5238   BB->addSuccessor(loopMBB);
5239   BB->addSuccessor(exitMBB);
5240 
5241   //  exitMBB:
5242   //   ...
5243   BB = exitMBB;
5244 
5245   MI->eraseFromParent();   // The instruction is gone now.
5246 
5247   return BB;
5248 }
5249 
5250 MachineBasicBlock *
5251 ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
5252                                           MachineBasicBlock *BB,
5253                                           unsigned Size,
5254                                           bool signExtend,
5255                                           ARMCC::CondCodes Cond) const {
5256   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5257 
5258   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5259   MachineFunction *MF = BB->getParent();
5260   MachineFunction::iterator It = BB;
5261   ++It;
5262 
5263   unsigned dest = MI->getOperand(0).getReg();
5264   unsigned ptr = MI->getOperand(1).getReg();
5265   unsigned incr = MI->getOperand(2).getReg();
5266   unsigned oldval = dest;
5267   DebugLoc dl = MI->getDebugLoc();
5268   bool isThumb2 = Subtarget->isThumb2();
5269 
5270   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5271   if (isThumb2) {
5272     MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
5273     MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
5274   }
5275 
5276   unsigned ldrOpc, strOpc, extendOpc;
5277   switch (Size) {
5278   default: llvm_unreachable("unsupported size for AtomicBinaryMinMax!");
5279   case 1:
5280     ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
5281     strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
5282     extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
5283     break;
5284   case 2:
5285     ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
5286     strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
5287     extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
5288     break;
5289   case 4:
5290     ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
5291     strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
5292     extendOpc = 0;
5293     break;
5294   }
5295 
5296   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5297   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5298   MF->insert(It, loopMBB);
5299   MF->insert(It, exitMBB);
5300 
5301   // Transfer the remainder of BB and its successor edges to exitMBB.
5302   exitMBB->splice(exitMBB->begin(), BB,
5303                   llvm::next(MachineBasicBlock::iterator(MI)),
5304                   BB->end());
5305   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5306 
5307   TargetRegisterClass *TRC =
5308     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5309   unsigned scratch = MRI.createVirtualRegister(TRC);
5310   unsigned scratch2 = MRI.createVirtualRegister(TRC);
5311 
5312   //  thisMBB:
5313   //   ...
5314   //   fallthrough --> loopMBB
5315   BB->addSuccessor(loopMBB);
5316 
5317   //  loopMBB:
5318   //   ldrex dest, ptr
5319   //   (sign extend dest, if required)
5320   //   cmp dest, incr
5321   //   cmov.cond scratch2, dest, incr
5322   //   strex scratch, scratch2, ptr
5323   //   cmp scratch, #0
5324   //   bne- loopMBB
5325   //   fallthrough --> exitMBB
5326   BB = loopMBB;
5327   MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
5328   if (ldrOpc == ARM::t2LDREX)
5329     MIB.addImm(0);
5330   AddDefaultPred(MIB);
5331 
5332   // Sign extend the value, if necessary.
5333   if (signExtend && extendOpc) {
5334     oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass);
5335     AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval)
5336                      .addReg(dest)
5337                      .addImm(0));
5338   }
5339 
5340   // Build compare and cmov instructions.
5341   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
5342                  .addReg(oldval).addReg(incr));
5343   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2)
5344          .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR);
5345 
5346   MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
5347   if (strOpc == ARM::t2STREX)
5348     MIB.addImm(0);
5349   AddDefaultPred(MIB);
5350   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5351                  .addReg(scratch).addImm(0));
5352   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5353     .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5354 
5355   BB->addSuccessor(loopMBB);
5356   BB->addSuccessor(exitMBB);
5357 
5358   //  exitMBB:
5359   //   ...
5360   BB = exitMBB;
5361 
5362   MI->eraseFromParent();   // The instruction is gone now.
5363 
5364   return BB;
5365 }
5366 
5367 MachineBasicBlock *
5368 ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
5369                                       unsigned Op1, unsigned Op2,
5370                                       bool NeedsCarry, bool IsCmpxchg) const {
5371   // This also handles ATOMIC_SWAP, indicated by Op1==0.
5372   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5373 
5374   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5375   MachineFunction *MF = BB->getParent();
5376   MachineFunction::iterator It = BB;
5377   ++It;
5378 
5379   unsigned destlo = MI->getOperand(0).getReg();
5380   unsigned desthi = MI->getOperand(1).getReg();
5381   unsigned ptr = MI->getOperand(2).getReg();
5382   unsigned vallo = MI->getOperand(3).getReg();
5383   unsigned valhi = MI->getOperand(4).getReg();
5384   DebugLoc dl = MI->getDebugLoc();
5385   bool isThumb2 = Subtarget->isThumb2();
5386 
5387   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5388   if (isThumb2) {
5389     MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass);
5390     MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass);
5391     MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
5392   }
5393 
5394   unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD;
5395   unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD;
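       // 64-bit atomics use the doubleword exclusives ldrexd/strexd, which
       // read and write a register pair; the pairs are pinned to R2/R3 and
       // R0/R1 below.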
5396 
5397   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5398   MachineBasicBlock *contBB = 0, *cont2BB = 0;
5399   if (IsCmpxchg) {
5400     contBB = MF->CreateMachineBasicBlock(LLVM_BB);
5401     cont2BB = MF->CreateMachineBasicBlock(LLVM_BB);
5402   }
5403   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5404   MF->insert(It, loopMBB);
5405   if (IsCmpxchg) {
5406     MF->insert(It, contBB);
5407     MF->insert(It, cont2BB);
5408   }
5409   MF->insert(It, exitMBB);
5410 
5411   // Transfer the remainder of BB and its successor edges to exitMBB.
5412   exitMBB->splice(exitMBB->begin(), BB,
5413                   llvm::next(MachineBasicBlock::iterator(MI)),
5414                   BB->end());
5415   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5416 
5417   TargetRegisterClass *TRC =
5418     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5419   unsigned storesuccess = MRI.createVirtualRegister(TRC);
5420 
5421   //  thisMBB:
5422   //   ...
5423   //   fallthrough --> loopMBB
5424   BB->addSuccessor(loopMBB);
5425 
5426   //  loopMBB:
5427   //   ldrexd r2, r3, ptr
5428   //   <binopa> r0, r2, incr
5429   //   <binopb> r1, r3, incr
5430   //   strexd storesuccess, r0, r1, ptr
5431   //   cmp storesuccess, #0
5432   //   bne- loopMBB
5433   //   fallthrough --> exitMBB
5434   //
5435   // Note that the registers are explicitly specified because there is no
5436   // way to force the register allocator to allocate a register pair.
5437   //
5438   // FIXME: The hardcoded registers are not necessary for Thumb2, but we
5439   // need to properly enforce the restriction that the two output registers
5440   // for ldrexd must be different.
5441   BB = loopMBB;
5442   // Load
5443   AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
5444                  .addReg(ARM::R2, RegState::Define)
5445                  .addReg(ARM::R3, RegState::Define).addReg(ptr));
5446   // Copy r2/r3 into dest.  (This copy will normally be coalesced.)
5447   BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2);
5448   BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3);
5449 
5450   if (IsCmpxchg) {
5451     // Add early exit
5452     for (unsigned i = 0; i < 2; i++) {
5453       AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr :
5454                                                          ARM::CMPrr))
5455                      .addReg(i == 0 ? destlo : desthi)
5456                      .addReg(i == 0 ? vallo : valhi));
5457       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5458         .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5459       BB->addSuccessor(exitMBB);
5460       BB->addSuccessor(i == 0 ? contBB : cont2BB);
5461       BB = (i == 0 ? contBB : cont2BB);
5462     }
5463 
5464     // Copy to physregs for strexd
5465     unsigned setlo = MI->getOperand(5).getReg();
5466     unsigned sethi = MI->getOperand(6).getReg();
5467     BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo);
5468     BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi);
5469   } else if (Op1) {
5470     // Perform binary operation
5471     AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0)
5472                    .addReg(destlo).addReg(vallo))
5473         .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry));
5474     AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1)
5475                    .addReg(desthi).addReg(valhi)).addReg(0);
5476   } else {
5477     // Copy to physregs for strexd
5478     BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo);
5479     BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi);
5480   }
5481 
5482   // Store
5483   AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess)
5484                  .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr));
5485   // Cmp+jump
5486   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5487                  .addReg(storesuccess).addImm(0));
5488   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5489     .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5490 
5491   BB->addSuccessor(loopMBB);
5492   BB->addSuccessor(exitMBB);
5493 
5494   //  exitMBB:
5495   //   ...
5496   BB = exitMBB;
5497 
5498   MI->eraseFromParent();   // The instruction is gone now.
5499 
5500   return BB;
5501 }
5502 
5503 /// EmitBasePointerRecalculation - For functions using a base pointer, we
5504 /// rematerialize it (via the frame pointer).
5505 void ARMTargetLowering::
5506 EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB,
5507                              MachineBasicBlock *DispatchBB) const {
5508   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5509   const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
5510   MachineFunction &MF = *MI->getParent()->getParent();
5511   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
5512   const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
5513 
5514   if (!RI.hasBasePointer(MF)) return;
5515 
5516   MachineBasicBlock::iterator MBBI = MI;
5517 
5518   int32_t NumBytes = AFI->getFramePtrSpillOffset();
5519   unsigned FramePtr = RI.getFrameRegister(MF);
5520   assert(MF.getTarget().getFrameLowering()->hasFP(MF) &&
5521          "Base pointer without frame pointer?");
5522 
5523   if (AFI->isThumb2Function())
5524     llvm::emitT2RegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
5525                                  FramePtr, -NumBytes, ARMCC::AL, 0, *AII);
5526   else if (AFI->isThumbFunction())
5527     llvm::emitThumbRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
5528                                     FramePtr, -NumBytes, *AII, RI);
5529   else
5530     llvm::emitARMRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
5531                                   FramePtr, -NumBytes, ARMCC::AL, 0, *AII);
5532 
5533   if (!RI.needsStackRealignment(MF)) return;
5534 
5535   // If there's dynamic realignment, adjust for it.
5536   MachineFrameInfo *MFI = MF.getFrameInfo();
5537   unsigned MaxAlign = MFI->getMaxAlignment();
5538   assert(!AFI->isThumb1OnlyFunction());
5539 
5540   // Emit bic r6, r6, #(MaxAlign - 1) to round down to the required alignment.
5541   unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri;
5542   AddDefaultCC(
5543     AddDefaultPred(
5544       BuildMI(*MBB, MBBI, MI->getDebugLoc(), TII->get(bicOpc), ARM::R6)
5545       .addReg(ARM::R6, RegState::Kill)
5546       .addImm(MaxAlign - 1)));
5547 }
5548 
5549 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
5550 /// registers the function context.
5551 void ARMTargetLowering::
5552 SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
5553                        MachineBasicBlock *DispatchBB, int FI) const {
5554   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5555   DebugLoc dl = MI->getDebugLoc();
5556   MachineFunction *MF = MBB->getParent();
5557   MachineRegisterInfo *MRI = &MF->getRegInfo();
5558   MachineConstantPool *MCP = MF->getConstantPool();
5559   ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
5560   const Function *F = MF->getFunction();
5561 
5562   bool isThumb = Subtarget->isThumb();
5563   bool isThumb2 = Subtarget->isThumb2();
5564 
5565   unsigned PCLabelId = AFI->createPICLabelUId();
5566   unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
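       // Reading the PC yields the instruction address plus 8 in ARM state and
       // plus 4 in Thumb state, so the PC-relative offset is biased to match.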
5567   ARMConstantPoolValue *CPV =
5568     ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
5569   unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
5570 
5571   const TargetRegisterClass *TRC =
5572     isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5573 
5574   // Grab constant pool and fixed stack memory operands.
5575   MachineMemOperand *CPMMO =
5576     MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(),
5577                              MachineMemOperand::MOLoad, 4, 4);
5578 
5579   MachineMemOperand *FIMMOSt =
5580     MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
5581                              MachineMemOperand::MOStore, 4, 4);
5582 
5583   EmitBasePointerRecalculation(MI, MBB, DispatchBB);
5584 
5585   // Load the address of the dispatch MBB into the jump buffer.
5586   if (isThumb2) {
5587     // Incoming value: jbuf
5588     //   ldr.n  r5, LCPI1_1
5589     //   orr    r5, r5, #1
5590     //   add    r5, pc
5591     //   str    r5, [$jbuf, #+4] ; &jbuf[1]
5592     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5593     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
5594                    .addConstantPoolIndex(CPI)
5595                    .addMemOperand(CPMMO));
5596     // Set the low bit because of thumb mode.
5597     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5598     AddDefaultCC(
5599       AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
5600                      .addReg(NewVReg1, RegState::Kill)
5601                      .addImm(0x01)));
5602     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5603     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
5604       .addReg(NewVReg2, RegState::Kill)
5605       .addImm(PCLabelId);
5606     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
5607                    .addReg(NewVReg3, RegState::Kill)
5608                    .addFrameIndex(FI)
5609                    .addImm(36)  // &jbuf[1] :: pc
5610                    .addMemOperand(FIMMOSt));
5611   } else if (isThumb) {
5612     // Incoming value: jbuf
5613     //   ldr.n  r1, LCPI1_4
5614     //   add    r1, pc
5615     //   mov    r2, #1
5616     //   orrs   r1, r2
5617     //   add    r2, $jbuf, #+4 ; &jbuf[1]
5618     //   str    r1, [r2]
5619     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5620     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
5621                    .addConstantPoolIndex(CPI)
5622                    .addMemOperand(CPMMO));
5623     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5624     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
5625       .addReg(NewVReg1, RegState::Kill)
5626       .addImm(PCLabelId);
5627     // Set the low bit because of thumb mode.
5628     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5629     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
5630                    .addReg(ARM::CPSR, RegState::Define)
5631                    .addImm(1));
5632     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5633     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
5634                    .addReg(ARM::CPSR, RegState::Define)
5635                    .addReg(NewVReg2, RegState::Kill)
5636                    .addReg(NewVReg3, RegState::Kill));
5637     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
5638     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5)
5639                    .addFrameIndex(FI)
5640                    .addImm(36)); // &jbuf[1] :: pc
5641     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
5642                    .addReg(NewVReg4, RegState::Kill)
5643                    .addReg(NewVReg5, RegState::Kill)
5644                    .addImm(0)
5645                    .addMemOperand(FIMMOSt));
5646   } else {
5647     // Incoming value: jbuf
5648     //   ldr  r1, LCPI1_1
5649     //   add  r1, pc, r1
5650     //   str  r1, [$jbuf, #+4] ; &jbuf[1]
5651     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5652     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12),  NewVReg1)
5653                    .addConstantPoolIndex(CPI)
5654                    .addImm(0)
5655                    .addMemOperand(CPMMO));
5656     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5657     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
5658                    .addReg(NewVReg1, RegState::Kill)
5659                    .addImm(PCLabelId));
5660     AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
5661                    .addReg(NewVReg2, RegState::Kill)
5662                    .addFrameIndex(FI)
5663                    .addImm(36)  // &jbuf[1] :: pc
5664                    .addMemOperand(FIMMOSt));
5665   }
5666 }
5667 
5668 MachineBasicBlock *ARMTargetLowering::
5669 EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
5670   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5671   DebugLoc dl = MI->getDebugLoc();
5672   MachineFunction *MF = MBB->getParent();
5673   MachineRegisterInfo *MRI = &MF->getRegInfo();
5674   ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
5675   MachineFrameInfo *MFI = MF->getFrameInfo();
5676   int FI = MFI->getFunctionContextIndex();
5677 
5678   const TargetRegisterClass *TRC =
5679     Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5680 
5681   // Get a mapping of the call site numbers to all of the landing pads they're
5682   // associated with.
5683   DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad;
5684   unsigned MaxCSNum = 0;
5685   MachineModuleInfo &MMI = MF->getMMI();
5686   for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; ++BB) {
5687     if (!BB->isLandingPad()) continue;
5688 
5689     // FIXME: We should assert that the EH_LABEL is the first MI in the landing
5690     // pad.
5691     for (MachineBasicBlock::iterator
5692            II = BB->begin(), IE = BB->end(); II != IE; ++II) {
5693       if (!II->isEHLabel()) continue;
5694 
5695       MCSymbol *Sym = II->getOperand(0).getMCSymbol();
5696       if (!MMI.hasCallSiteLandingPad(Sym)) continue;
5697 
5698       SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym);
5699       for (SmallVectorImpl<unsigned>::iterator
5700              CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
5701            CSI != CSE; ++CSI) {
5702         CallSiteNumToLPad[*CSI].push_back(BB);
5703         MaxCSNum = std::max(MaxCSNum, *CSI);
5704       }
5705       break;
5706     }
5707   }
5708 
5709   // Get an ordered list of the machine basic blocks for the jump table.
5710   std::vector<MachineBasicBlock*> LPadList;
5711   SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs;
5712   LPadList.reserve(CallSiteNumToLPad.size());
5713   for (unsigned I = 1; I <= MaxCSNum; ++I) {
5714     SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
5715     for (SmallVectorImpl<MachineBasicBlock*>::iterator
5716            II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
5717       LPadList.push_back(*II);
5718       InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
5719     }
5720   }
5721 
5722   assert(!LPadList.empty() &&
5723          "No landing pad destinations for the dispatch jump table!");
5724 
5725   // Create the jump table and associated information.
5726   MachineJumpTableInfo *JTI =
5727     MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
5728   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
5729   unsigned UId = AFI->createJumpTableUId();
5730 
5731   // Create the MBBs for the dispatch code.
5732 
5733   // Shove the dispatch's address into the return slot in the function context.
5734   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
5735   DispatchBB->setIsLandingPad();
5736 
5737   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
5738   BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP));
5739   DispatchBB->addSuccessor(TrapBB);
5740 
5741   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
5742   DispatchBB->addSuccessor(DispContBB);
5743 
5744   // Insert and renumber MBBs.
5745   MachineBasicBlock *Last = &MF->back();
5746   MF->insert(MF->end(), DispatchBB);
5747   MF->insert(MF->end(), DispContBB);
5748   MF->insert(MF->end(), TrapBB);
5749   MF->RenumberBlocks(Last);
5750 
5751   // Insert code into the entry block that creates and registers the function
5752   // context.
5753   SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
5754 
5755   MachineMemOperand *FIMMOLd =
5756     MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
5757                              MachineMemOperand::MOLoad |
5758                              MachineMemOperand::MOVolatile, 4, 4);
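       // Each variant below reloads the call-site index that the runtime
       // stored into the function context, bounds-checks it against the
       // landing-pad list (branching to the trap block when out of range),
       // and then indexes the jump table to reach the selected landing pad.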
5759 
5760   if (Subtarget->isThumb2()) {
5761     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5762     AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
5763                    .addFrameIndex(FI)
5764                    .addImm(4)
5765                    .addMemOperand(FIMMOLd));
5766     AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
5767                    .addReg(NewVReg1)
5768                    .addImm(LPadList.size()));
5769     BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
5770       .addMBB(TrapBB)
5771       .addImm(ARMCC::HI)
5772       .addReg(ARM::CPSR);
5773 
5774     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5775     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg2)
5776                    .addJumpTableIndex(MJTI)
5777                    .addImm(UId));
5778 
5779     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5780     AddDefaultCC(
5781       AddDefaultPred(
5782         BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg3)
5783         .addReg(NewVReg2, RegState::Kill)
5784         .addReg(NewVReg1)
5785         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
5786 
5787     BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
5788       .addReg(NewVReg3, RegState::Kill)
5789       .addReg(NewVReg1)
5790       .addJumpTableIndex(MJTI)
5791       .addImm(UId);
5792   } else if (Subtarget->isThumb()) {
5793     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5794     AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
5795                    .addFrameIndex(FI)
5796                    .addImm(1)
5797                    .addMemOperand(FIMMOLd));
5798 
5799     AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
5800                    .addReg(NewVReg1)
5801                    .addImm(LPadList.size()));
5802     BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
5803       .addMBB(TrapBB)
5804       .addImm(ARMCC::HI)
5805       .addReg(ARM::CPSR);
5806 
5807     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5808     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
5809                    .addReg(ARM::CPSR, RegState::Define)
5810                    .addReg(NewVReg1)
5811                    .addImm(2));
5812 
5813     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5814     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
5815                    .addJumpTableIndex(MJTI)
5816                    .addImm(UId));
5817 
5818     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5819     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
5820                    .addReg(ARM::CPSR, RegState::Define)
5821                    .addReg(NewVReg2, RegState::Kill)
5822                    .addReg(NewVReg3));
5823 
5824     MachineMemOperand *JTMMOLd =
5825       MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(),
5826                                MachineMemOperand::MOLoad, 4, 4);
5827 
5828     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
5829     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
5830                    .addReg(NewVReg4, RegState::Kill)
5831                    .addImm(0)
5832                    .addMemOperand(JTMMOLd));
5833 
5834     unsigned NewVReg6 = MRI->createVirtualRegister(TRC);
5835     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
5836                    .addReg(ARM::CPSR, RegState::Define)
5837                    .addReg(NewVReg5, RegState::Kill)
5838                    .addReg(NewVReg3));
5839 
5840     BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
5841       .addReg(NewVReg6, RegState::Kill)
5842       .addJumpTableIndex(MJTI)
5843       .addImm(UId);
5844   } else {
5845     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5846     AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
5847                    .addFrameIndex(FI)
5848                    .addImm(4)
5849                    .addMemOperand(FIMMOLd));
5850     AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
5851                    .addReg(NewVReg1)
5852                    .addImm(LPadList.size()));
5853     BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
5854       .addMBB(TrapBB)
5855       .addImm(ARMCC::HI)
5856       .addReg(ARM::CPSR);
5857 
5858     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5859     AddDefaultCC(
5860       AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg2)
5861                      .addReg(NewVReg1)
5862                      .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
5863     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5864     AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg3)
5865                    .addJumpTableIndex(MJTI)
5866                    .addImm(UId));
5867 
5868     MachineMemOperand *JTMMOLd =
5869       MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(),
5870                                MachineMemOperand::MOLoad, 4, 4);
5871     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5872     AddDefaultPred(
5873       BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg4)
5874       .addReg(NewVReg2, RegState::Kill)
5875       .addReg(NewVReg3)
5876       .addImm(0)
5877       .addMemOperand(JTMMOLd));
5878 
5879     BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
5880       .addReg(NewVReg4, RegState::Kill)
5881       .addReg(NewVReg3)
5882       .addJumpTableIndex(MJTI)
5883       .addImm(UId);
5884   }
5885 
5886   // Add the jump table entries as successors to the MBB.
5887   MachineBasicBlock *PrevMBB = 0;
5888   for (std::vector<MachineBasicBlock*>::iterator
5889          I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
5890     MachineBasicBlock *CurMBB = *I;
5891     if (PrevMBB != CurMBB)
5892       DispContBB->addSuccessor(CurMBB);
5893     PrevMBB = CurMBB;
5894   }
5895 
5896   const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
5897   const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
5898   const unsigned *SavedRegs = RI.getCalleeSavedRegs(MF);
5899   for (SmallPtrSet<MachineBasicBlock*, 64>::iterator
5900          I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) {
5901     MachineBasicBlock *BB = *I;
5902 
5903     // Remove the landing pad successor from the invoke block and replace it
5904     // with the new dispatch block.
5905     for (MachineBasicBlock::succ_iterator
5906            SI = BB->succ_begin(), SE = BB->succ_end(); SI != SE; ++SI) {
5907       MachineBasicBlock *SMBB = *SI;
5908       if (SMBB->isLandingPad()) {
5909         BB->removeSuccessor(SMBB);
5910         SMBB->setIsLandingPad(false);
5911       }
5912     }
5913 
5914     BB->addSuccessor(DispatchBB);
5915 
5916     // Find the invoke call and mark all of the callee-saved registers as
5917     // 'implicit defined' so that they are spilled. This prevents later
5918     // passes from moving instructions above the EH dispatch block, where
5919     // they would never be executed.
5920     for (MachineBasicBlock::reverse_iterator
5921            II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
5922       if (!II->getDesc().isCall()) continue;
5923 
5924       DenseMap<unsigned, bool> DefRegs;
5925       for (MachineInstr::mop_iterator
5926              OI = II->operands_begin(), OE = II->operands_end();
5927            OI != OE; ++OI) {
5928         if (!OI->isReg()) continue;
5929         DefRegs[OI->getReg()] = true;
5930       }
5931 
5932       MachineInstrBuilder MIB(&*II);
5933 
5934       for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
5935         if (!TRC->contains(SavedRegs[i])) continue;
5936         if (!DefRegs[SavedRegs[i]])
5937           MIB.addReg(SavedRegs[i], RegState::ImplicitDefine | RegState::Dead);
5938       }
5939 
5940       break;
5941     }
5942   }
5943 
5944   // The instruction is gone now.
5945   MI->eraseFromParent();
5946 
5947   return MBB;
5948 }
5949 
5950 static
5951 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
5952   for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
5953        E = MBB->succ_end(); I != E; ++I)
5954     if (*I != Succ)
5955       return *I;
5956   llvm_unreachable("Expecting a BB with two successors!");
5957 }
5958 
5959 MachineBasicBlock *
5960 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
5961                                                MachineBasicBlock *BB) const {
5962   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5963   DebugLoc dl = MI->getDebugLoc();
5964   bool isThumb2 = Subtarget->isThumb2();
5965   switch (MI->getOpcode()) {
5966   default: {
5967     MI->dump();
5968     llvm_unreachable("Unexpected instr type to insert");
5969   }
5970   // The Thumb2 pre-indexed stores have the same MI operands; they are just
5971   // defined differently in the .td files from the isel patterns, so they
5972   // need pseudos.
5973   case ARM::t2STR_preidx:
5974     MI->setDesc(TII->get(ARM::t2STR_PRE));
5975     return BB;
5976   case ARM::t2STRB_preidx:
5977     MI->setDesc(TII->get(ARM::t2STRB_PRE));
5978     return BB;
5979   case ARM::t2STRH_preidx:
5980     MI->setDesc(TII->get(ARM::t2STRH_PRE));
5981     return BB;
5982 
5983   case ARM::STRi_preidx:
5984   case ARM::STRBi_preidx: {
5985     unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
5986       ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
5987     // Decode the offset.
5988     unsigned Offset = MI->getOperand(4).getImm();
5989     bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
5990     Offset = ARM_AM::getAM2Offset(Offset);
5991     if (isSub)
5992       Offset = -Offset;
5993 
5994     MachineMemOperand *MMO = *MI->memoperands_begin();
5995     BuildMI(*BB, MI, dl, TII->get(NewOpc))
5996       .addOperand(MI->getOperand(0))  // Rn_wb
5997       .addOperand(MI->getOperand(1))  // Rt
5998       .addOperand(MI->getOperand(2))  // Rn
5999       .addImm(Offset)                 // offset (skip GPR==zero_reg)
6000       .addOperand(MI->getOperand(5))  // pred
6001       .addOperand(MI->getOperand(6))
6002       .addMemOperand(MMO);
6003     MI->eraseFromParent();
6004     return BB;
6005   }
6006   case ARM::STRr_preidx:
6007   case ARM::STRBr_preidx:
6008   case ARM::STRH_preidx: {
6009     unsigned NewOpc;
6010     switch (MI->getOpcode()) {
6011     default: llvm_unreachable("unexpected opcode!");
6012     case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
6013     case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
6014     case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
6015     }
6016     MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
6017     for (unsigned i = 0; i < MI->getNumOperands(); ++i)
6018       MIB.addOperand(MI->getOperand(i));
6019     MI->eraseFromParent();
6020     return BB;
6021   }
6022   case ARM::ATOMIC_LOAD_ADD_I8:
6023      return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
6024   case ARM::ATOMIC_LOAD_ADD_I16:
6025      return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
6026   case ARM::ATOMIC_LOAD_ADD_I32:
6027      return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
6028 
6029   case ARM::ATOMIC_LOAD_AND_I8:
6030      return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6031   case ARM::ATOMIC_LOAD_AND_I16:
6032      return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6033   case ARM::ATOMIC_LOAD_AND_I32:
6034      return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6035 
6036   case ARM::ATOMIC_LOAD_OR_I8:
6037      return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6038   case ARM::ATOMIC_LOAD_OR_I16:
6039      return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6040   case ARM::ATOMIC_LOAD_OR_I32:
6041      return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6042 
6043   case ARM::ATOMIC_LOAD_XOR_I8:
6044      return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6045   case ARM::ATOMIC_LOAD_XOR_I16:
6046      return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6047   case ARM::ATOMIC_LOAD_XOR_I32:
6048      return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6049 
6050   case ARM::ATOMIC_LOAD_NAND_I8:
6051      return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
6052   case ARM::ATOMIC_LOAD_NAND_I16:
6053      return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
6054   case ARM::ATOMIC_LOAD_NAND_I32:
6055      return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
6056 
6057   case ARM::ATOMIC_LOAD_SUB_I8:
6058      return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
6059   case ARM::ATOMIC_LOAD_SUB_I16:
6060      return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
6061   case ARM::ATOMIC_LOAD_SUB_I32:
6062      return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
6063 
6064   case ARM::ATOMIC_LOAD_MIN_I8:
6065      return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT);
6066   case ARM::ATOMIC_LOAD_MIN_I16:
6067      return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT);
6068   case ARM::ATOMIC_LOAD_MIN_I32:
6069      return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT);
6070 
6071   case ARM::ATOMIC_LOAD_MAX_I8:
6072      return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT);
6073   case ARM::ATOMIC_LOAD_MAX_I16:
6074      return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT);
6075   case ARM::ATOMIC_LOAD_MAX_I32:
6076      return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT);
6077 
6078   case ARM::ATOMIC_LOAD_UMIN_I8:
6079      return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO);
6080   case ARM::ATOMIC_LOAD_UMIN_I16:
6081      return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO);
6082   case ARM::ATOMIC_LOAD_UMIN_I32:
6083      return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO);
6084 
6085   case ARM::ATOMIC_LOAD_UMAX_I8:
6086      return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI);
6087   case ARM::ATOMIC_LOAD_UMAX_I16:
6088      return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI);
6089   case ARM::ATOMIC_LOAD_UMAX_I32:
6090      return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI);
6091 
6092   case ARM::ATOMIC_SWAP_I8:  return EmitAtomicBinary(MI, BB, 1, 0);
6093   case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
6094   case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
6095 
6096   case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(MI, BB, 1);
6097   case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
6098   case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
6099 
6100 
6101   case ARM::ATOMADD6432:
6102     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr,
6103                               isThumb2 ? ARM::t2ADCrr : ARM::ADCrr,
6104                               /*NeedsCarry*/ true);
6105   case ARM::ATOMSUB6432:
6106     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
6107                               isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
6108                               /*NeedsCarry*/ true);
6109   case ARM::ATOMOR6432:
6110     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr,
6111                               isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6112   case ARM::ATOMXOR6432:
6113     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr,
6114                               isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6115   case ARM::ATOMAND6432:
6116     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
6117                               isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6118   case ARM::ATOMSWAP6432:
6119     return EmitAtomicBinary64(MI, BB, 0, 0, false);
6120   case ARM::ATOMCMPXCHG6432:
6121     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
6122                               isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
6123                               /*NeedsCarry*/ false, /*IsCmpxchg*/true);
6124 
6125   case ARM::tMOVCCr_pseudo: {
6126     // To "insert" a SELECT_CC instruction, we actually have to insert the
6127     // diamond control-flow pattern.  The incoming instruction knows the
6128     // destination vreg to set, the condition code register to branch on, the
6129     // true/false values to select between, and a branch opcode to use.
6130     const BasicBlock *LLVM_BB = BB->getBasicBlock();
6131     MachineFunction::iterator It = BB;
6132     ++It;
6133 
6134     //  thisMBB:
6135     //  ...
6136     //   TrueVal = ...
6137     //   cmpTY ccX, r1, r2
6138     //   bCC copy1MBB
6139     //   fallthrough --> copy0MBB
6140     MachineBasicBlock *thisMBB  = BB;
6141     MachineFunction *F = BB->getParent();
6142     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
6143     MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
6144     F->insert(It, copy0MBB);
6145     F->insert(It, sinkMBB);
6146 
6147     // Transfer the remainder of BB and its successor edges to sinkMBB.
6148     sinkMBB->splice(sinkMBB->begin(), BB,
6149                     llvm::next(MachineBasicBlock::iterator(MI)),
6150                     BB->end());
6151     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
6152 
6153     BB->addSuccessor(copy0MBB);
6154     BB->addSuccessor(sinkMBB);
6155 
6156     BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
6157       .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
6158 
6159     //  copy0MBB:
6160     //   %FalseValue = ...
6161     //   # fallthrough to sinkMBB
6162     BB = copy0MBB;
6163 
6164     // Update machine-CFG edges
6165     BB->addSuccessor(sinkMBB);
6166 
6167     //  sinkMBB:
6168     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
6169     //  ...
6170     BB = sinkMBB;
6171     BuildMI(*BB, BB->begin(), dl,
6172             TII->get(ARM::PHI), MI->getOperand(0).getReg())
6173       .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
6174       .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
6175 
6176     MI->eraseFromParent();   // The pseudo instruction is gone now.
6177     return BB;
6178   }
6179 
6180   case ARM::BCCi64:
6181   case ARM::BCCZi64: {
6182     // If there is an unconditional branch to the other successor, remove it.
6183     BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
6184 
6185     // Compare both halves of the double-word comparison separately for
6186     // equality.
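    // For example, for a 64-bit compare against zero this emits
    //   cmp   LHS1, #0
    //   cmpeq LHS2, #0
    // where the second compare runs only if the first set EQ, so EQ is set
    // at the end iff both halves are zero.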
6187     bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
6188 
6189     unsigned LHS1 = MI->getOperand(1).getReg();
6190     unsigned LHS2 = MI->getOperand(2).getReg();
6191     if (RHSisZero) {
6192       AddDefaultPred(BuildMI(BB, dl,
6193                              TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
6194                      .addReg(LHS1).addImm(0));
6195       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
6196         .addReg(LHS2).addImm(0)
6197         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
6198     } else {
6199       unsigned RHS1 = MI->getOperand(3).getReg();
6200       unsigned RHS2 = MI->getOperand(4).getReg();
6201       AddDefaultPred(BuildMI(BB, dl,
6202                              TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
6203                      .addReg(LHS1).addReg(RHS1));
6204       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
6205         .addReg(LHS2).addReg(RHS2)
6206         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
6207     }
6208 
6209     MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB();
6210     MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
6211     if (MI->getOperand(0).getImm() == ARMCC::NE)
6212       std::swap(destMBB, exitMBB);
6213 
6214     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6215       .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
6216     if (isThumb2)
6217       AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB));
6218     else
6219       BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
6220 
6221     MI->eraseFromParent();   // The pseudo instruction is gone now.
6222     return BB;
6223   }
6224 
6225   case ARM::ABS:
6226   case ARM::t2ABS: {
6227     // To insert an ABS instruction, we have to insert the
6228     // diamond control-flow pattern.  The incoming instruction knows only
6229     // the source vreg to test against 0 and the destination vreg to set;
6230     // the flag-setting move, the conditional branch, and the reverse
6231     // subtraction are all created here.
6232     // It transforms
6233     //     V1 = ABS V0
6234     // into
6235     //     V2 = MOVS V0
6236     //     BCC                      (branch to SinkBB if V0 >= 0)
6237     //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
6238     //     SinkBB: V1 = PHI(V2, V3)
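    // For example, with V0 = -5 the MOVS sets the N flag, so the branch to
    // SinkBB (taken only when V0 >= 0) is not taken; RSBBB then computes
    // V3 = 0 - (-5) = 5 and the PHI selects V3.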
6239     const BasicBlock *LLVM_BB = BB->getBasicBlock();
6240     MachineFunction::iterator BBI = BB;
6241     ++BBI;
6242     MachineFunction *Fn = BB->getParent();
6243     MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
6244     MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
6245     Fn->insert(BBI, RSBBB);
6246     Fn->insert(BBI, SinkBB);
6247 
6248     unsigned ABSSrcReg = MI->getOperand(1).getReg();
6249     unsigned ABSDstReg = MI->getOperand(0).getReg();
6250     bool isThumb2 = Subtarget->isThumb2();
6251     MachineRegisterInfo &MRI = Fn->getRegInfo();
6252     // In Thumb mode, S must not be specified if the source register is the
6253     // SP or PC, or if the destination is the SP, so restrict the register class.
6254     unsigned NewMovDstReg = MRI.createVirtualRegister(
6255       isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass);
6256     unsigned NewRsbDstReg = MRI.createVirtualRegister(
6257       isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass);
6258 
6259     // Transfer the remainder of BB and its successor edges to SinkBB.
6260     SinkBB->splice(SinkBB->begin(), BB,
6261       llvm::next(MachineBasicBlock::iterator(MI)),
6262       BB->end());
6263     SinkBB->transferSuccessorsAndUpdatePHIs(BB);
6264 
6265     BB->addSuccessor(RSBBB);
6266     BB->addSuccessor(SinkBB);
6267 
6268     // RSBBB falls through to SinkBB
6269     RSBBB->addSuccessor(SinkBB);
6270 
6271     // insert a movs at the end of BB
6272     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVr : ARM::MOVr),
6273       NewMovDstReg)
6274       .addReg(ABSSrcReg, RegState::Kill)
6275       .addImm((unsigned)ARMCC::AL).addReg(0)
6276       .addReg(ARM::CPSR, RegState::Define);
6277 
6278     // insert a bcc with opposite CC to ARMCC::MI at the end of BB
6279     BuildMI(BB, dl,
6280       TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
6281       .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
6282 
6283     // insert rsbri in RSBBB
6284     // Note: BCC and rsbri will be converted into predicated rsbmi by the
6285     // if-conversion pass.
6286     BuildMI(*RSBBB, RSBBB->begin(), dl,
6287       TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
6288       .addReg(NewMovDstReg, RegState::Kill)
6289       .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
6290 
6291     // insert PHI in SinkBB,
6292     // reuse ABSDstReg so that uses of the ABS instruction are unchanged
6293     BuildMI(*SinkBB, SinkBB->begin(), dl,
6294       TII->get(ARM::PHI), ABSDstReg)
6295       .addReg(NewRsbDstReg).addMBB(RSBBB)
6296       .addReg(NewMovDstReg).addMBB(BB);
6297 
6298     // remove ABS instruction
6299     MI->eraseFromParent();
6300 
6301     // return last added BB
6302     return SinkBB;
6303   }
6304   }
6305 }
6306 
6307 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
6308                                                       SDNode *Node) const {
6309   const MCInstrDesc &MCID = MI->getDesc();
6310   if (!MCID.hasPostISelHook()) {
6311     assert(!convertAddSubFlagsOpcode(MI->getOpcode()) &&
6312            "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
6313     return;
6314   }
6315 
6316   // Adjust instructions that potentially set the 's' bit after isel, i.e.
6317   // ADC, SBC, RSB, RSC. Coming out of isel, they have an implicit CPSR def,
6318   // but the optional operand is still set to noreg. If needed, set the
6319   // optional operand's register to CPSR, and remove the redundant implicit def.
6320   //
6321   // e.g. ADCS (...opt:%noreg, CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
6322 
6323   // Rename pseudo opcodes.
6324   unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
6325   if (NewOpc) {
6326     const ARMBaseInstrInfo *TII =
6327       static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo());
6328     MI->setDesc(TII->get(NewOpc));
6329   }
6330   unsigned ccOutIdx = MCID.getNumOperands() - 1;
6331 
6332   // Any ARM instruction that sets the 's' bit should specify an optional
6333   // "cc_out" operand in the last operand position.
6334   if (!MCID.hasOptionalDef() || !MCID.OpInfo[ccOutIdx].isOptionalDef()) {
6335     assert(!NewOpc && "Optional cc_out operand required");
6336     return;
6337   }
6338   // Look for an implicit def of CPSR added by the MachineInstr ctor. Remove
6339   // it since we already have an optional CPSR def.
6340   bool definesCPSR = false;
6341   bool deadCPSR = false;
6342   for (unsigned i = MCID.getNumOperands(), e = MI->getNumOperands();
6343        i != e; ++i) {
6344     const MachineOperand &MO = MI->getOperand(i);
6345     if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
6346       definesCPSR = true;
6347       if (MO.isDead())
6348         deadCPSR = true;
6349       MI->RemoveOperand(i);
6350       break;
6351     }
6352   }
6353   if (!definesCPSR) {
6354     assert(!NewOpc && "Optional cc_out operand required");
6355     return;
6356   }
6357   assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
6358   if (deadCPSR) {
6359     assert(!MI->getOperand(ccOutIdx).getReg() &&
6360            "expect uninitialized optional cc_out operand");
6361     return;
6362   }
6363 
6364   // If this instruction was defined with an optional CPSR def and its dag node
6365   // had a live implicit CPSR def, then activate the optional CPSR def.
6366   MachineOperand &MO = MI->getOperand(ccOutIdx);
6367   MO.setReg(ARM::CPSR);
6368   MO.setIsDef(true);
6369 }
6370 
6371 //===----------------------------------------------------------------------===//
6372 //                           ARM Optimization Hooks
6373 //===----------------------------------------------------------------------===//
6374 
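/// combineSelectAndUse - Fold a binary operation into a select when one arm
/// of the select is a zero constant, e.g.
///   (add (select cc, 0, c), x) -> (select cc, x, (add x, c))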
6375 static
6376 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6377                             TargetLowering::DAGCombinerInfo &DCI) {
6378   SelectionDAG &DAG = DCI.DAG;
6379   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6380   EVT VT = N->getValueType(0);
6381   unsigned Opc = N->getOpcode();
6382   bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
6383   SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
6384   SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
6385   ISD::CondCode CC = ISD::SETCC_INVALID;
6386 
6387   if (isSlctCC) {
6388     CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
6389   } else {
6390     SDValue CCOp = Slct.getOperand(0);
6391     if (CCOp.getOpcode() == ISD::SETCC)
6392       CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
6393   }
6394 
6395   bool DoXform = false;
6396   bool InvCC = false;
6397   assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
6398           "Bad input!");
6399 
6400   if (LHS.getOpcode() == ISD::Constant &&
6401       cast<ConstantSDNode>(LHS)->isNullValue()) {
6402     DoXform = true;
6403   } else if (CC != ISD::SETCC_INVALID &&
6404              RHS.getOpcode() == ISD::Constant &&
6405              cast<ConstantSDNode>(RHS)->isNullValue()) {
6406     std::swap(LHS, RHS);
6407     SDValue Op0 = Slct.getOperand(0);
6408     EVT OpVT = isSlctCC ? Op0.getValueType() :
6409                           Op0.getOperand(0).getValueType();
6410     bool isInt = OpVT.isInteger();
6411     CC = ISD::getSetCCInverse(CC, isInt);
6412 
6413     if (!TLI.isCondCodeLegal(CC, OpVT))
6414       return SDValue();         // Inverse operator isn't legal.
6415 
6416     DoXform = true;
6417     InvCC = true;
6418   }
6419 
6420   if (DoXform) {
6421     SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
6422     if (isSlctCC)
6423       return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
6424                              Slct.getOperand(0), Slct.getOperand(1), CC);
6425     SDValue CCOp = Slct.getOperand(0);
6426     if (InvCC)
6427       CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
6428                           CCOp.getOperand(0), CCOp.getOperand(1), CC);
6429     return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
6430                        CCOp, OtherOp, Result);
6431   }
6432   return SDValue();
6433 }
6434 
6435 // AddCombineToVPADDL - For pairwise add on NEON, use the vpaddl instruction
6436 // (only after legalization).
6437 static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
6438                                  TargetLowering::DAGCombinerInfo &DCI,
6439                                  const ARMSubtarget *Subtarget) {
6440 
6441   // Only perform the optimization after legalization, and if NEON is
6442   // available. We also expect both operands to be BUILD_VECTORs.
6443   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
6444       || N0.getOpcode() != ISD::BUILD_VECTOR
6445       || N1.getOpcode() != ISD::BUILD_VECTOR)
6446     return SDValue();
6447 
6448   // Check output type; VPADDL operand elements can only be 8, 16, or 32 bits.
6449   EVT VT = N->getValueType(0);
6450   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
6451     return SDValue();
6452 
6453   // Check that the vector operands are of the right form.
6454   // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
6455   // operands, where N is the size of the formed vector.
6456   // Each EXTRACT_VECTOR should have the same input vector and an odd or even
6457   // index such that we have a pairwise add pattern.
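  // For example, a <2 x i32> add matches when
  //   N0 = <extract(V, 0), extract(V, 2)>
  //   N1 = <extract(V, 1), extract(V, 3)>
  // since <V[0]+V[1], V[2]+V[3]> is vpaddl(V), truncated back down from the
  // widened result type below.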
6458 
6459   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
6460   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
6461     return SDValue();
6462   SDValue Vec = N0->getOperand(0)->getOperand(0);
6463   SDNode *V = Vec.getNode();
6464   unsigned nextIndex = 0;
6465 
6466   // For each operand of the ADD (both are BUILD_VECTORs),
6467   // check that each of its operands is an EXTRACT_VECTOR with
6468   // the same input vector and the appropriate index.
6469   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
6470     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
6471         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
6472 
6473       SDValue ExtVec0 = N0->getOperand(i);
6474       SDValue ExtVec1 = N1->getOperand(i);
6475 
6476       // First operand is the vector; verify it's the same.
6477       if (V != ExtVec0->getOperand(0).getNode() ||
6478           V != ExtVec1->getOperand(0).getNode())
6479         return SDValue();
6480 
6481       // Second is the constant; verify it's correct.
6482       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
6483       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
6484 
6485       // The constants should be the even indices in N0 and the odd ones in N1.
6486       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
6487           || C1->getZExtValue() != nextIndex+1)
6488         return SDValue();
6489 
6490       // Increment index.
6491       nextIndex+=2;
6492     } else
6493       return SDValue();
6494   }
6495 
6496   // Create VPADDL node.
6497   SelectionDAG &DAG = DCI.DAG;
6498   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6499 
6500   // Build operand list.
6501   SmallVector<SDValue, 8> Ops;
6502   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
6503                                 TLI.getPointerTy()));
6504 
6505   // Input is the vector.
6506   Ops.push_back(Vec);
6507 
6508   // Get the widened vector type for the VPADDL result.
6509   MVT widenType;
6510   unsigned numElem = VT.getVectorNumElements();
6511   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
6512     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
6513     case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
6514     case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
6515     default:
6516       assert(0 && "Invalid vector element type for padd optimization.");
6517   }
6518 
6519   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
6520                             widenType, &Ops[0], Ops.size());
6521   return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
6522 }
6523 
6524 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
6525 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
6526 /// called with the default operands, and if that fails, with commuted
6527 /// operands.
6528 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
6529                                           TargetLowering::DAGCombinerInfo &DCI,
6530                                           const ARMSubtarget *Subtarget){
6531 
6532   // Attempt to create vpaddl for this add.
6533   SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
6534   if (Result.getNode())
6535     return Result;
6536 
6537   // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
6538   if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
6539     SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
6540     if (Result.getNode()) return Result;
6541   }
6542   return SDValue();
6543 }
6544 
6545 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
6546 ///
6547 static SDValue PerformADDCombine(SDNode *N,
6548                                  TargetLowering::DAGCombinerInfo &DCI,
6549                                  const ARMSubtarget *Subtarget) {
6550   SDValue N0 = N->getOperand(0);
6551   SDValue N1 = N->getOperand(1);
6552 
6553   // First try with the default operand order.
6554   SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget);
6555   if (Result.getNode())
6556     return Result;
6557 
6558   // If that didn't work, try again with the operands commuted.
6559   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
6560 }
6561 
6562 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
6563 ///
6564 static SDValue PerformSUBCombine(SDNode *N,
6565                                  TargetLowering::DAGCombinerInfo &DCI) {
6566   SDValue N0 = N->getOperand(0);
6567   SDValue N1 = N->getOperand(1);
6568 
6569   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
6570   if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
6571     SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
6572     if (Result.getNode()) return Result;
6573   }
6574 
6575   return SDValue();
6576 }
6577 
6578 /// PerformVMULCombine
6579 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
6580 /// special multiplier accumulator forwarding.
6581 ///   vmul d3, d0, d2
6582 ///   vmla d3, d1, d2
6583 /// is faster than
6584 ///   vadd d3, d0, d1
6585 ///   vmul d3, d3, d2
6586 static SDValue PerformVMULCombine(SDNode *N,
6587                                   TargetLowering::DAGCombinerInfo &DCI,
6588                                   const ARMSubtarget *Subtarget) {
6589   if (!Subtarget->hasVMLxForwarding())
6590     return SDValue();
6591 
6592   SelectionDAG &DAG = DCI.DAG;
6593   SDValue N0 = N->getOperand(0);
6594   SDValue N1 = N->getOperand(1);
6595   unsigned Opcode = N0.getOpcode();
6596   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
6597       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
6598     Opcode = N1.getOpcode();
6599     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
6600         Opcode != ISD::FADD && Opcode != ISD::FSUB)
6601       return SDValue();
6602     std::swap(N0, N1);
6603   }
6604 
6605   EVT VT = N->getValueType(0);
6606   DebugLoc DL = N->getDebugLoc();
6607   SDValue N00 = N0->getOperand(0);
6608   SDValue N01 = N0->getOperand(1);
6609   return DAG.getNode(Opcode, DL, VT,
6610                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
6611                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
6612 }
6613 
6614 static SDValue PerformMULCombine(SDNode *N,
6615                                  TargetLowering::DAGCombinerInfo &DCI,
6616                                  const ARMSubtarget *Subtarget) {
6617   SelectionDAG &DAG = DCI.DAG;
6618 
6619   if (Subtarget->isThumb1Only())
6620     return SDValue();
6621 
6622   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
6623     return SDValue();
6624 
6625   EVT VT = N->getValueType(0);
6626   if (VT.is64BitVector() || VT.is128BitVector())
6627     return PerformVMULCombine(N, DCI, Subtarget);
6628   if (VT != MVT::i32)
6629     return SDValue();
6630 
6631   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6632   if (!C)
6633     return SDValue();
6634 
6635   uint64_t MulAmt = C->getZExtValue();
6636   unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
6637   ShiftAmt = ShiftAmt & (32 - 1);
6638   SDValue V = N->getOperand(0);
6639   DebugLoc DL = N->getDebugLoc();
6640 
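  // For example, mul x, 10: the trailing zero is peeled off first
  // (10 = 5 << 1), and 5 = 2^2 + 1, so the final result is ((x << 2) + x) << 1.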
6641   SDValue Res;
6642   MulAmt >>= ShiftAmt;
6643   if (isPowerOf2_32(MulAmt - 1)) {
6644     // (mul x, 2^N + 1) => (add (shl x, N), x)
6645     Res = DAG.getNode(ISD::ADD, DL, VT,
6646                       V, DAG.getNode(ISD::SHL, DL, VT,
6647                                      V, DAG.getConstant(Log2_32(MulAmt-1),
6648                                                         MVT::i32)));
6649   } else if (isPowerOf2_32(MulAmt + 1)) {
6650     // (mul x, 2^N - 1) => (sub (shl x, N), x)
6651     Res = DAG.getNode(ISD::SUB, DL, VT,
6652                       DAG.getNode(ISD::SHL, DL, VT,
6653                                   V, DAG.getConstant(Log2_32(MulAmt+1),
6654                                                      MVT::i32)),
6655                                                      V);
6656   } else
6657     return SDValue();
6658 
6659   if (ShiftAmt != 0)
6660     Res = DAG.getNode(ISD::SHL, DL, VT, Res,
6661                       DAG.getConstant(ShiftAmt, MVT::i32));
6662 
6663   // Do not add new nodes to DAG combiner worklist.
6664   DCI.CombineTo(N, Res, false);
6665   return SDValue();
6666 }
6667 
6668 static SDValue PerformANDCombine(SDNode *N,
6669                                 TargetLowering::DAGCombinerInfo &DCI) {
6670 
6671   // Attempt to use immediate-form VBIC
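  // e.g., (and v, splat(0xffffff00)) becomes (VBICIMM v, 0xff), since the
  // complement of the splat value is encodable as a NEON modified immediate.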
6672   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
6673   DebugLoc dl = N->getDebugLoc();
6674   EVT VT = N->getValueType(0);
6675   SelectionDAG &DAG = DCI.DAG;
6676 
6677   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
6678     return SDValue();
6679 
6680   APInt SplatBits, SplatUndef;
6681   unsigned SplatBitSize;
6682   bool HasAnyUndefs;
6683   if (BVN &&
6684       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6685     if (SplatBitSize <= 64) {
6686       EVT VbicVT;
6687       SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
6688                                       SplatUndef.getZExtValue(), SplatBitSize,
6689                                       DAG, VbicVT, VT.is128BitVector(),
6690                                       OtherModImm);
6691       if (Val.getNode()) {
6692         SDValue Input =
6693           DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
6694         SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
6695         return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
6696       }
6697     }
6698   }
6699 
6700   return SDValue();
6701 }
6702 
6703 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
6704 static SDValue PerformORCombine(SDNode *N,
6705                                 TargetLowering::DAGCombinerInfo &DCI,
6706                                 const ARMSubtarget *Subtarget) {
6707   // Attempt to use immediate-form VORR
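  // e.g., (or v, splat(0x000000ff)) becomes (VORRIMM v, 0xff).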
6708   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
6709   DebugLoc dl = N->getDebugLoc();
6710   EVT VT = N->getValueType(0);
6711   SelectionDAG &DAG = DCI.DAG;
6712 
6713   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
6714     return SDValue();
6715 
6716   APInt SplatBits, SplatUndef;
6717   unsigned SplatBitSize;
6718   bool HasAnyUndefs;
6719   if (BVN && Subtarget->hasNEON() &&
6720       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6721     if (SplatBitSize <= 64) {
6722       EVT VorrVT;
6723       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
6724                                       SplatUndef.getZExtValue(), SplatBitSize,
6725                                       DAG, VorrVT, VT.is128BitVector(),
6726                                       OtherModImm);
6727       if (Val.getNode()) {
6728         SDValue Input =
6729           DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
6730         SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
6731         return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
6732       }
6733     }
6734   }
6735 
6736   SDValue N0 = N->getOperand(0);
6737   if (N0.getOpcode() != ISD::AND)
6738     return SDValue();
6739   SDValue N1 = N->getOperand(1);
6740 
6741   // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
6742   if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
6743       DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
6744     APInt SplatUndef;
6745     unsigned SplatBitSize;
6746     bool HasAnyUndefs;
6747 
6748     BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
6749     APInt SplatBits0;
6750     if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
6751                                   HasAnyUndefs) && !HasAnyUndefs) {
6752       BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
6753       APInt SplatBits1;
6754       if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
6755                                     HasAnyUndefs) && !HasAnyUndefs &&
6756           SplatBits0 == ~SplatBits1) {
6757         // Canonicalize the vector type to make instruction selection simpler.
6758         EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
6759         SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
6760                                      N0->getOperand(1), N0->getOperand(0),
6761                                      N1->getOperand(0));
6762         return DAG.getNode(ISD::BITCAST, dl, VT, Result);
6763       }
6764     }
6765   }
6766 
6767   // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
6768   // reasonable.
6769 
6770   // BFI is only available on V6T2+
6771   if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
6772     return SDValue();
6773 
6774   DebugLoc DL = N->getDebugLoc();
6775   // 1) or (and A, mask), val => ARMbfi A, val, mask
6776   //      iff (val & mask) == val
6777   //
6778   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
6779   //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
6780   //          && mask == ~mask2
6781   //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
6782   //          && ~mask == mask2
6783   //  (i.e., copy a bitfield value into another bitfield of the same width)
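  // For example (case 1):
  //   or (and A, 0xffff00ff), 0x2400 => ARMbfi A, 0x24, 0xffff00ff
  // which inserts the value 0x24 into bits 8-15 of A.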
6784 
6785   if (VT != MVT::i32)
6786     return SDValue();
6787 
6788   SDValue N00 = N0.getOperand(0);
6789 
6790   // The value and the mask need to be constants so we can verify this is
6791   // actually a bitfield set. If the mask is 0xffff, we can do better
6792   // via a movt instruction, so don't use BFI in that case.
6793   SDValue MaskOp = N0.getOperand(1);
6794   ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
6795   if (!MaskC)
6796     return SDValue();
6797   unsigned Mask = MaskC->getZExtValue();
6798   if (Mask == 0xffff)
6799     return SDValue();
6800   SDValue Res;
6801   // Case (1): or (and A, mask), val => ARMbfi A, val, mask
6802   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
6803   if (N1C) {
6804     unsigned Val = N1C->getZExtValue();
6805     if ((Val & ~Mask) != Val)
6806       return SDValue();
6807 
6808     if (ARM::isBitFieldInvertedMask(Mask)) {
6809       Val >>= CountTrailingZeros_32(~Mask);
6810 
6811       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
6812                         DAG.getConstant(Val, MVT::i32),
6813                         DAG.getConstant(Mask, MVT::i32));
6814 
6815       // Do not add new nodes to DAG combiner worklist.
6816       DCI.CombineTo(N, Res, false);
6817       return SDValue();
6818     }
6819   } else if (N1.getOpcode() == ISD::AND) {
6820     // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
6821     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
6822     if (!N11C)
6823       return SDValue();
6824     unsigned Mask2 = N11C->getZExtValue();
6825 
6826     // Mask and ~Mask2 (or the reverse) must be equal for the BFI pattern
6827     // to match as-is.
6828     if (ARM::isBitFieldInvertedMask(Mask) &&
6829         (Mask == ~Mask2)) {
6830       // The pack halfword instruction works better for masks that fit it,
6831       // so use that when it's available.
6832       if (Subtarget->hasT2ExtractPack() &&
6833           (Mask == 0xffff || Mask == 0xffff0000))
6834         return SDValue();
6835       // 2a
6836       unsigned amt = CountTrailingZeros_32(Mask2);
6837       Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
6838                         DAG.getConstant(amt, MVT::i32));
6839       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
6840                         DAG.getConstant(Mask, MVT::i32));
6841       // Do not add new nodes to DAG combiner worklist.
6842       DCI.CombineTo(N, Res, false);
6843       return SDValue();
6844     } else if (ARM::isBitFieldInvertedMask(~Mask) &&
6845                (~Mask == Mask2)) {
6846       // The pack halfword instruction works better for masks that fit it,
6847       // so use that when it's available.
6848       if (Subtarget->hasT2ExtractPack() &&
6849           (Mask2 == 0xffff || Mask2 == 0xffff0000))
6850         return SDValue();
6851       // 2b
6852       unsigned lsb = CountTrailingZeros_32(Mask);
6853       Res = DAG.getNode(ISD::SRL, DL, VT, N00,
6854                         DAG.getConstant(lsb, MVT::i32));
6855       Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
6856                         DAG.getConstant(Mask2, MVT::i32));
6857       // Do not add new nodes to DAG combiner worklist.
6858       DCI.CombineTo(N, Res, false);
6859       return SDValue();
6860     }
6861   }
6862 
6863   if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
6864       N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
6865       ARM::isBitFieldInvertedMask(~Mask)) {
6866     // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
6867     // where lsb(mask) == #shamt and masked bits of B are known zero.
6868     SDValue ShAmt = N00.getOperand(1);
6869     unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
6870     unsigned LSB = CountTrailingZeros_32(Mask);
6871     if (ShAmtC != LSB)
6872       return SDValue();
6873 
6874     Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
6875                       DAG.getConstant(~Mask, MVT::i32));
6876 
6877     // Do not add new nodes to DAG combiner worklist.
6878     DCI.CombineTo(N, Res, false);
6879   }
6880 
6881   return SDValue();
6882 }
6883 
6884 /// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
6885 /// the bits being cleared by the AND are not demanded by the BFI.
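/// For example, with Mask2 = 0xffff00ff the inserted field is bits 8-15, so
/// only the low 8 bits of B are demanded and (and B, 0xff) can be dropped.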
6886 static SDValue PerformBFICombine(SDNode *N,
6887                                  TargetLowering::DAGCombinerInfo &DCI) {
6888   SDValue N1 = N->getOperand(1);
6889   if (N1.getOpcode() == ISD::AND) {
6890     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
6891     if (!N11C)
6892       return SDValue();
6893     unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
6894     unsigned LSB = CountTrailingZeros_32(~InvMask);
6895     unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB;
6896     unsigned Mask = (1 << Width)-1;
6897     unsigned Mask2 = N11C->getZExtValue();
6898     if ((Mask & (~Mask2)) == 0)
6899       return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0),
6900                              N->getOperand(0), N1.getOperand(0),
6901                              N->getOperand(2));
6902   }
6903   return SDValue();
6904 }
6905 
6906 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
6907 /// ARMISD::VMOVRRD.
6908 static SDValue PerformVMOVRRDCombine(SDNode *N,
6909                                      TargetLowering::DAGCombinerInfo &DCI) {
6910   // vmovrrd(vmovdrr x, y) -> x,y
6911   SDValue InDouble = N->getOperand(0);
6912   if (InDouble.getOpcode() == ARMISD::VMOVDRR)
6913     return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
6914 
6915   // vmovrrd(load f64) -> (load i32), (load i32)
6916   SDNode *InNode = InDouble.getNode();
6917   if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
6918       InNode->getValueType(0) == MVT::f64 &&
6919       InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
6920       !cast<LoadSDNode>(InNode)->isVolatile()) {
6921     // TODO: Should this be done for non-FrameIndex operands?
6922     LoadSDNode *LD = cast<LoadSDNode>(InNode);
6923 
6924     SelectionDAG &DAG = DCI.DAG;
6925     DebugLoc DL = LD->getDebugLoc();
6926     SDValue BasePtr = LD->getBasePtr();
6927     SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr,
6928                                  LD->getPointerInfo(), LD->isVolatile(),
6929                                  LD->isNonTemporal(), LD->getAlignment());
6930 
6931     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
6932                                     DAG.getConstant(4, MVT::i32));
6933     SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
6934                                  LD->getPointerInfo(), LD->isVolatile(),
6935                                  LD->isNonTemporal(),
6936                                  std::min(4U, LD->getAlignment() / 2));
6937 
6938     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
6939     SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
6940     DCI.RemoveFromWorklist(LD);
6941     DAG.DeleteNode(LD);
6942     return Result;
6943   }
6944 
6945   return SDValue();
6946 }
6947 
6948 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
6949 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
6950 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
6951   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
6952   SDValue Op0 = N->getOperand(0);
6953   SDValue Op1 = N->getOperand(1);
6954   if (Op0.getOpcode() == ISD::BITCAST)
6955     Op0 = Op0.getOperand(0);
6956   if (Op1.getOpcode() == ISD::BITCAST)
6957     Op1 = Op1.getOperand(0);
6958   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
6959       Op0.getNode() == Op1.getNode() &&
6960       Op0.getResNo() == 0 && Op1.getResNo() == 1)
6961     return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
6962                        N->getValueType(0), Op0.getOperand(0));
6963   return SDValue();
6964 }
6965 
6966 /// PerformSTORECombine - Target-specific dag combine xforms for
6967 /// ISD::STORE.
6968 static SDValue PerformSTORECombine(SDNode *N,
6969                                    TargetLowering::DAGCombinerInfo &DCI) {
6970   // Bitcast an i64 store extracted from a vector to f64.
6971   // Otherwise, the i64 value will be legalized to a pair of i32 values.
6972   StoreSDNode *St = cast<StoreSDNode>(N);
6973   SDValue StVal = St->getValue();
6974   if (!ISD::isNormalStore(St) || St->isVolatile())
6975     return SDValue();
6976 
6977   if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
6978       StVal.getNode()->hasOneUse() && !St->isVolatile()) {
6979     SelectionDAG  &DAG = DCI.DAG;
6980     DebugLoc DL = St->getDebugLoc();
6981     SDValue BasePtr = St->getBasePtr();
6982     SDValue NewST1 = DAG.getStore(St->getChain(), DL,
6983                                   StVal.getNode()->getOperand(0), BasePtr,
6984                                   St->getPointerInfo(), St->isVolatile(),
6985                                   St->isNonTemporal(), St->getAlignment());
6986 
6987     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
6988                                     DAG.getConstant(4, MVT::i32));
6989     return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1),
6990                         OffsetPtr, St->getPointerInfo(), St->isVolatile(),
6991                         St->isNonTemporal(),
6992                         std::min(4U, St->getAlignment() / 2));
6993   }
6994 
6995   if (StVal.getValueType() != MVT::i64 ||
6996       StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
6997     return SDValue();
6998 
6999   SelectionDAG &DAG = DCI.DAG;
7000   DebugLoc dl = StVal.getDebugLoc();
7001   SDValue IntVec = StVal.getOperand(0);
7002   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
7003                                  IntVec.getValueType().getVectorNumElements());
7004   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
7005   SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
7006                                Vec, StVal.getOperand(1));
7007   dl = N->getDebugLoc();
7008   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
7009   // Make the DAGCombiner fold the bitcasts.
7010   DCI.AddToWorklist(Vec.getNode());
7011   DCI.AddToWorklist(ExtElt.getNode());
7012   DCI.AddToWorklist(V.getNode());
7013   return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
7014                       St->getPointerInfo(), St->isVolatile(),
7015                       St->isNonTemporal(), St->getAlignment(),
7016                       St->getTBAAInfo());
7017 }
7018 
7019 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
7020 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
7021 /// i64 vector to have f64 elements, since the value can then be loaded
7022 /// directly into a VFP register.
7023 static bool hasNormalLoadOperand(SDNode *N) {
7024   unsigned NumElts = N->getValueType(0).getVectorNumElements();
7025   for (unsigned i = 0; i < NumElts; ++i) {
7026     SDNode *Elt = N->getOperand(i).getNode();
7027     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
7028       return true;
7029   }
7030   return false;
7031 }
7032 
7033 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
7034 /// ISD::BUILD_VECTOR.
7035 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
7036                                           TargetLowering::DAGCombinerInfo &DCI){
7037   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
7038   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
7039   // into a pair of GPRs, which is fine when the value is used as a scalar,
7040   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
7041   SelectionDAG &DAG = DCI.DAG;
7042   if (N->getNumOperands() == 2) {
7043     SDValue RV = PerformVMOVDRRCombine(N, DAG);
7044     if (RV.getNode())
7045       return RV;
7046   }
7047 
7048   // Load i64 elements as f64 values so that type legalization does not split
7049   // them up into i32 values.
7050   EVT VT = N->getValueType(0);
7051   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
7052     return SDValue();
7053   DebugLoc dl = N->getDebugLoc();
7054   SmallVector<SDValue, 8> Ops;
7055   unsigned NumElts = VT.getVectorNumElements();
7056   for (unsigned i = 0; i < NumElts; ++i) {
7057     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
7058     Ops.push_back(V);
7059     // Make the DAGCombiner fold the bitcast.
7060     DCI.AddToWorklist(V.getNode());
7061   }
7062   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
7063   SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts);
7064   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
7065 }
7066 
7067 /// PerformInsertEltCombine - Target-specific dag combine xforms for
7068 /// ISD::INSERT_VECTOR_ELT.
7069 static SDValue PerformInsertEltCombine(SDNode *N,
7070                                        TargetLowering::DAGCombinerInfo &DCI) {
7071   // Bitcast an i64 load inserted into a vector to f64.
7072   // Otherwise, the i64 value will be legalized to a pair of i32 values.
7073   EVT VT = N->getValueType(0);
7074   SDNode *Elt = N->getOperand(1).getNode();
7075   if (VT.getVectorElementType() != MVT::i64 ||
7076       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
7077     return SDValue();
7078 
7079   SelectionDAG &DAG = DCI.DAG;
7080   DebugLoc dl = N->getDebugLoc();
7081   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
7082                                  VT.getVectorNumElements());
7083   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
7084   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
7085   // Make the DAGCombiner fold the bitcasts.
7086   DCI.AddToWorklist(Vec.getNode());
7087   DCI.AddToWorklist(V.getNode());
7088   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
7089                                Vec, V, N->getOperand(2));
7090   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
7091 }
7092 
7093 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
7094 /// ISD::VECTOR_SHUFFLE.
7095 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
7096   // The LLVM shufflevector instruction does not require the shuffle mask
7097   // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
7098   // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
7099   // operands do not match the mask length, they are extended by concatenating
7100   // them with undef vectors.  That is probably the right thing for other
7101   // targets, but for NEON it is better to concatenate two double-register
7102   // size vector operands into a single quad-register size vector.  Do that
7103   // transformation here:
7104   //   shuffle(concat(v1, undef), concat(v2, undef)) ->
7105   //   shuffle(concat(v1, v2), undef)
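  //   For example, with two-element v1 and v2, a mask of <0, 4, 1, 5> over
  //   the original concats becomes <0, 2, 1, 3> over concat(v1, v2).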
7106   SDValue Op0 = N->getOperand(0);
7107   SDValue Op1 = N->getOperand(1);
7108   if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
7109       Op1.getOpcode() != ISD::CONCAT_VECTORS ||
7110       Op0.getNumOperands() != 2 ||
7111       Op1.getNumOperands() != 2)
7112     return SDValue();
7113   SDValue Concat0Op1 = Op0.getOperand(1);
7114   SDValue Concat1Op1 = Op1.getOperand(1);
7115   if (Concat0Op1.getOpcode() != ISD::UNDEF ||
7116       Concat1Op1.getOpcode() != ISD::UNDEF)
7117     return SDValue();
7118   // Skip the transformation if any of the types are illegal.
7119   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7120   EVT VT = N->getValueType(0);
7121   if (!TLI.isTypeLegal(VT) ||
7122       !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
7123       !TLI.isTypeLegal(Concat1Op1.getValueType()))
7124     return SDValue();
7125 
7126   SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
7127                                   Op0.getOperand(0), Op1.getOperand(0));
7128   // Translate the shuffle mask.
7129   SmallVector<int, 16> NewMask;
7130   unsigned NumElts = VT.getVectorNumElements();
7131   unsigned HalfElts = NumElts/2;
7132   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
7133   for (unsigned n = 0; n < NumElts; ++n) {
7134     int MaskElt = SVN->getMaskElt(n);
7135     int NewElt = -1;
7136     if (MaskElt < (int)HalfElts)
7137       NewElt = MaskElt;
7138     else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
7139       NewElt = HalfElts + MaskElt - NumElts;
7140     NewMask.push_back(NewElt);
7141   }
7142   return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
7143                               DAG.getUNDEF(VT), NewMask.data());
7144 }
7145 
7146 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and
7147 /// NEON load/store intrinsics to merge base address updates.
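/// For example, a 128-bit vld1 whose address is subsequently incremented by
/// 16 (the access size in bytes) becomes a single post-incrementing VLD1_UPD.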
7148 static SDValue CombineBaseUpdate(SDNode *N,
7149                                  TargetLowering::DAGCombinerInfo &DCI) {
7150   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
7151     return SDValue();
7152 
7153   SelectionDAG &DAG = DCI.DAG;
7154   bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
7155                       N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
7156   unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
7157   SDValue Addr = N->getOperand(AddrOpIdx);
7158 
7159   // Search for a use of the address operand that is an increment.
7160   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
7161          UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
7162     SDNode *User = *UI;
7163     if (User->getOpcode() != ISD::ADD ||
7164         UI.getUse().getResNo() != Addr.getResNo())
7165       continue;
7166 
7167     // Check that the add is independent of the load/store.  Otherwise, folding
7168     // it would create a cycle.
7169     if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
7170       continue;
7171 
7172     // Find the new opcode for the updating load/store.
7173     bool isLoad = true;
7174     bool isLaneOp = false;
7175     unsigned NewOpc = 0;
7176     unsigned NumVecs = 0;
7177     if (isIntrinsic) {
7178       unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
7179       switch (IntNo) {
7180       default: assert(0 && "unexpected intrinsic for Neon base update");
7181       case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
7182         NumVecs = 1; break;
7183       case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
7184         NumVecs = 2; break;
7185       case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
7186         NumVecs = 3; break;
7187       case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
7188         NumVecs = 4; break;
7189       case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
7190         NumVecs = 2; isLaneOp = true; break;
7191       case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
7192         NumVecs = 3; isLaneOp = true; break;
7193       case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
7194         NumVecs = 4; isLaneOp = true; break;
7195       case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
7196         NumVecs = 1; isLoad = false; break;
7197       case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
7198         NumVecs = 2; isLoad = false; break;
7199       case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
7200         NumVecs = 3; isLoad = false; break;
7201       case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
7202         NumVecs = 4; isLoad = false; break;
7203       case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
7204         NumVecs = 2; isLoad = false; isLaneOp = true; break;
7205       case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
7206         NumVecs = 3; isLoad = false; isLaneOp = true; break;
7207       case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
7208         NumVecs = 4; isLoad = false; isLaneOp = true; break;
7209       }
7210     } else {
7211       isLaneOp = true;
7212       switch (N->getOpcode()) {
7213       default: assert(0 && "unexpected opcode for Neon base update");
7214       case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
7215       case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
7216       case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
7217       }
7218     }
7219 
7220     // Find the size of memory referenced by the load/store.
7221     EVT VecTy;
7222     if (isLoad)
7223       VecTy = N->getValueType(0);
7224     else
7225       VecTy = N->getOperand(AddrOpIdx+1).getValueType();
7226     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
7227     if (isLaneOp)
7228       NumBytes /= VecTy.getVectorNumElements();
7229 
7230     // If the increment is a constant, it must match the memory ref size.
7231     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
7232     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
7233       uint64_t IncVal = CInc->getZExtValue();
7234       if (IncVal != NumBytes)
7235         continue;
7236     } else if (NumBytes >= 3 * 16) {
7237       // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
7238       // separate instructions that make it harder to use a non-constant update.
7239       continue;
7240     }
7241 
7242     // Create the new updating load/store node.
7243     EVT Tys[6];
7244     unsigned NumResultVecs = (isLoad ? NumVecs : 0);
7245     unsigned n;
7246     for (n = 0; n < NumResultVecs; ++n)
7247       Tys[n] = VecTy;
7248     Tys[n++] = MVT::i32;
7249     Tys[n] = MVT::Other;
7250     SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2);
7251     SmallVector<SDValue, 8> Ops;
7252     Ops.push_back(N->getOperand(0)); // incoming chain
7253     Ops.push_back(N->getOperand(AddrOpIdx));
7254     Ops.push_back(Inc);
7255     for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
7256       Ops.push_back(N->getOperand(i));
7257     }
7258     MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
7259     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys,
7260                                            Ops.data(), Ops.size(),
7261                                            MemInt->getMemoryVT(),
7262                                            MemInt->getMemOperand());
7263 
7264     // Update the uses.
7265     std::vector<SDValue> NewResults;
7266     for (unsigned i = 0; i < NumResultVecs; ++i) {
7267       NewResults.push_back(SDValue(UpdN.getNode(), i));
7268     }
7269     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
7270     DCI.CombineTo(N, NewResults);
7271     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
7272 
7273     break;
7274   }
7275   return SDValue();
7276 }
7277 
7278 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
7279 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
7280 /// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
7281 /// return true.
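/// For example, a vld2lane whose two results are each used only by VDUPLANEs
/// of the loaded lane becomes a vld2dup, which loads two elements and
/// replicates each across all lanes of its vector.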
7282 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
7283   SelectionDAG &DAG = DCI.DAG;
7284   EVT VT = N->getValueType(0);
7285   // vldN-dup instructions only support 64-bit vectors for N > 1.
7286   if (!VT.is64BitVector())
7287     return false;
7288 
7289   // Check if the VDUPLANE operand is a vldN-dup intrinsic.
7290   SDNode *VLD = N->getOperand(0).getNode();
7291   if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
7292     return false;
7293   unsigned NumVecs = 0;
7294   unsigned NewOpc = 0;
7295   unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
7296   if (IntNo == Intrinsic::arm_neon_vld2lane) {
7297     NumVecs = 2;
7298     NewOpc = ARMISD::VLD2DUP;
7299   } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
7300     NumVecs = 3;
7301     NewOpc = ARMISD::VLD3DUP;
7302   } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
7303     NumVecs = 4;
7304     NewOpc = ARMISD::VLD4DUP;
7305   } else {
7306     return false;
7307   }
7308 
7309   // First check that all the vldN-lane uses are VDUPLANEs and that the lane
7310   // numbers match the load.
7311   unsigned VLDLaneNo =
7312     cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
7313   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
7314        UI != UE; ++UI) {
7315     // Ignore uses of the chain result.
7316     if (UI.getUse().getResNo() == NumVecs)
7317       continue;
7318     SDNode *User = *UI;
7319     if (User->getOpcode() != ARMISD::VDUPLANE ||
7320         VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
7321       return false;
7322   }
7323 
7324   // Create the vldN-dup node.
7325   EVT Tys[5];
7326   unsigned n;
7327   for (n = 0; n < NumVecs; ++n)
7328     Tys[n] = VT;
7329   Tys[n] = MVT::Other;
7330   SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1);
7331   SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
7332   MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
7333   SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys,
7334                                            Ops, 2, VLDMemInt->getMemoryVT(),
7335                                            VLDMemInt->getMemOperand());
7336 
7337   // Update the uses.
7338   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
7339        UI != UE; ++UI) {
7340     unsigned ResNo = UI.getUse().getResNo();
7341     // Ignore uses of the chain result.
7342     if (ResNo == NumVecs)
7343       continue;
7344     SDNode *User = *UI;
7345     DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
7346   }
7347 
7348   // Now the vldN-lane intrinsic is dead except for its chain result.
7349   // Update uses of the chain.
7350   std::vector<SDValue> VLDDupResults;
7351   for (unsigned n = 0; n < NumVecs; ++n)
7352     VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
7353   VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
7354   DCI.CombineTo(VLD, VLDDupResults);
7355 
7356   return true;
7357 }
7358 
7359 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
7360 /// ARMISD::VDUPLANE.
7361 static SDValue PerformVDUPLANECombine(SDNode *N,
7362                                       TargetLowering::DAGCombinerInfo &DCI) {
7363   SDValue Op = N->getOperand(0);
7364 
7365   // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
7366   // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
7367   if (CombineVLDDUP(N, DCI))
7368     return SDValue(N, 0);
7369 
7370   // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
7371   // redundant.  Ignore bit_converts for now; element sizes are checked below.
7372   while (Op.getOpcode() == ISD::BITCAST)
7373     Op = Op.getOperand(0);
7374   if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
7375     return SDValue();
7376 
7377   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
7378   unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
7379   // The canonical VMOV for a zero vector uses a 32-bit element size.
7380   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7381   unsigned EltBits;
7382   if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
7383     EltSize = 8;
7384   EVT VT = N->getValueType(0);
7385   if (EltSize > VT.getVectorElementType().getSizeInBits())
7386     return SDValue();
7387 
7388   return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
7389 }
7390 
7391 // isConstVecPow2 - Return true if each vector element is a power of 2, all
7392 // elements are the same constant, C, and Log2(C) ranges from 1 to 32.
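// For example, <float 8.0, float 8.0, float 8.0, float 8.0> qualifies with
// C = 8 and Log2(C) = 3.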
7393 static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C)
7394 {
7395   integerPart cN;
7396   integerPart c0 = 0;
7397   for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements();
7398        I != E; I++) {
7399     ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I));
7400     if (!C)
7401       return false;
7402 
7403     bool isExact;
7404     APFloat APF = C->getValueAPF();
7405     if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact)
7406         != APFloat::opOK || !isExact)
7407       return false;
7408 
7409     c0 = (I == 0) ? cN : c0;
7410     if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32)
7411       return false;
7412   }
7413   C = c0;
7414   return true;
7415 }
7416 
7417 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
7418 /// can replace combinations of VMUL and VCVT (floating-point to integer)
7419 /// when the VMUL has a constant operand that is a power of 2.
7420 ///
7421 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
7422 ///  vmul.f32        d16, d17, d16
7423 ///  vcvt.s32.f32    d16, d16
7424 /// becomes:
7425 ///  vcvt.s32.f32    d16, d16, #3
7426 static SDValue PerformVCVTCombine(SDNode *N,
7427                                   TargetLowering::DAGCombinerInfo &DCI,
7428                                   const ARMSubtarget *Subtarget) {
7429   SelectionDAG &DAG = DCI.DAG;
7430   SDValue Op = N->getOperand(0);
7431 
7432   if (!Subtarget->hasNEON() || !Op.getValueType().isVector() ||
7433       Op.getOpcode() != ISD::FMUL)
7434     return SDValue();
7435 
7436   uint64_t C;
7437   SDValue N0 = Op->getOperand(0);
7438   SDValue ConstVec = Op->getOperand(1);
7439   bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
7440 
7441   if (ConstVec.getOpcode() != ISD::BUILD_VECTOR ||
7442       !isConstVecPow2(ConstVec, isSigned, C))
7443     return SDValue();
7444 
7445   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
7446     Intrinsic::arm_neon_vcvtfp2fxu;
7447   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
7448                      N->getValueType(0),
7449                      DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
7450                      DAG.getConstant(Log2_64(C), MVT::i32));
7451 }
7452 
7453 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
7454 /// can replace combinations of VCVT (integer to floating-point) and VDIV
7455 /// when the VDIV has a constant operand that is a power of 2.
7456 ///
7457 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
7458 ///  vcvt.f32.s32    d16, d16
7459 ///  vdiv.f32        d16, d17, d16
7460 /// becomes:
7461 ///  vcvt.f32.s32    d16, d16, #3
7462 static SDValue PerformVDIVCombine(SDNode *N,
7463                                   TargetLowering::DAGCombinerInfo &DCI,
7464                                   const ARMSubtarget *Subtarget) {
7465   SelectionDAG &DAG = DCI.DAG;
7466   SDValue Op = N->getOperand(0);
7467   unsigned OpOpcode = Op.getNode()->getOpcode();
7468 
7469   if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() ||
7470       (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
7471     return SDValue();
7472 
7473   uint64_t C;
7474   SDValue ConstVec = N->getOperand(1);
7475   bool isSigned = OpOpcode == ISD::SINT_TO_FP;
7476 
7477   if (ConstVec.getOpcode() != ISD::BUILD_VECTOR ||
7478       !isConstVecPow2(ConstVec, isSigned, C))
7479     return SDValue();
7480 
7481   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
7482     Intrinsic::arm_neon_vcvtfxu2fp;
7483   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
7484                      Op.getValueType(),
7485                      DAG.getConstant(IntrinsicOpcode, MVT::i32),
7486                      Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32));
7487 }
7488 
7489 /// getVShiftImm - Check if this is a valid build_vector for the immediate
7490 /// operand of a vector shift operation, where all the elements of the
7491 /// build_vector must have the same constant integer value.
7492 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
7493   // Ignore bit_converts.
7494   while (Op.getOpcode() == ISD::BITCAST)
7495     Op = Op.getOperand(0);
7496   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
7497   APInt SplatBits, SplatUndef;
7498   unsigned SplatBitSize;
7499   bool HasAnyUndefs;
7500   if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
7501                                       HasAnyUndefs, ElementBits) ||
7502       SplatBitSize > ElementBits)
7503     return false;
7504   Cnt = SplatBits.getSExtValue();
7505   return true;
7506 }
7507 
7508 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
7509 /// operand of a vector shift left operation.  That value must be in the range:
7510 ///   0 <= Value < ElementBits for a left shift; or
7511 ///   0 <= Value <= ElementBits for a long left shift.
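/// For example, with v4i16 operands an ordinary left shift accepts counts
/// 0..15, while a long (vshll-style) shift also accepts 16.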
7512 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
7513   assert(VT.isVector() && "vector shift count is not a vector type");
7514   unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
7515   if (! getVShiftImm(Op, ElementBits, Cnt))
7516     return false;
7517   return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
7518 }
7519 
7520 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
7521 /// operand of a vector shift right operation.  For a shift opcode, the value
7522 /// is positive, but for an intrinsic the value count must be negative. The
7523 /// absolute value must be in the range:
7524 ///   1 <= |Value| <= ElementBits for a right shift; or
7525 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
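/// For example, with v4i16 operands a right shift accepts counts 1..16,
/// while a narrowing right shift accepts only 1..8.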
7526 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
7527                          int64_t &Cnt) {
7528   assert(VT.isVector() && "vector shift count is not a vector type");
7529   unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
7530   if (! getVShiftImm(Op, ElementBits, Cnt))
7531     return false;
7532   if (isIntrinsic)
7533     Cnt = -Cnt;
7534   return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
7535 }
7536 
7537 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
7538 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
7539   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7540   switch (IntNo) {
7541   default:
7542     // Don't do anything for most intrinsics.
7543     break;
7544 
7545   // Vector shifts: check for immediate versions and lower them.
7546   // Note: This is done during DAG combining instead of DAG legalizing because
7547   // the build_vectors for 64-bit vector element shift counts are generally
7548   // not legal, and it is hard to see their values after they get legalized to
7549   // loads from a constant pool.
7550   case Intrinsic::arm_neon_vshifts:
7551   case Intrinsic::arm_neon_vshiftu:
7552   case Intrinsic::arm_neon_vshiftls:
7553   case Intrinsic::arm_neon_vshiftlu:
7554   case Intrinsic::arm_neon_vshiftn:
7555   case Intrinsic::arm_neon_vrshifts:
7556   case Intrinsic::arm_neon_vrshiftu:
7557   case Intrinsic::arm_neon_vrshiftn:
7558   case Intrinsic::arm_neon_vqshifts:
7559   case Intrinsic::arm_neon_vqshiftu:
7560   case Intrinsic::arm_neon_vqshiftsu:
7561   case Intrinsic::arm_neon_vqshiftns:
7562   case Intrinsic::arm_neon_vqshiftnu:
7563   case Intrinsic::arm_neon_vqshiftnsu:
7564   case Intrinsic::arm_neon_vqrshiftns:
7565   case Intrinsic::arm_neon_vqrshiftnu:
7566   case Intrinsic::arm_neon_vqrshiftnsu: {
7567     EVT VT = N->getOperand(1).getValueType();
7568     int64_t Cnt;
7569     unsigned VShiftOpc = 0;
7570 
7571     switch (IntNo) {
7572     case Intrinsic::arm_neon_vshifts:
7573     case Intrinsic::arm_neon_vshiftu:
7574       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
7575         VShiftOpc = ARMISD::VSHL;
7576         break;
7577       }
7578       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
7579         VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
7580                      ARMISD::VSHRs : ARMISD::VSHRu);
7581         break;
7582       }
7583       return SDValue();
7584 
7585     case Intrinsic::arm_neon_vshiftls:
7586     case Intrinsic::arm_neon_vshiftlu:
7587       if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
7588         break;
7589       llvm_unreachable("invalid shift count for vshll intrinsic");
7590 
7591     case Intrinsic::arm_neon_vrshifts:
7592     case Intrinsic::arm_neon_vrshiftu:
7593       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
7594         break;
7595       return SDValue();
7596 
7597     case Intrinsic::arm_neon_vqshifts:
7598     case Intrinsic::arm_neon_vqshiftu:
7599       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
7600         break;
7601       return SDValue();
7602 
7603     case Intrinsic::arm_neon_vqshiftsu:
7604       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
7605         break;
7606       llvm_unreachable("invalid shift count for vqshlu intrinsic");
7607 
7608     case Intrinsic::arm_neon_vshiftn:
7609     case Intrinsic::arm_neon_vrshiftn:
7610     case Intrinsic::arm_neon_vqshiftns:
7611     case Intrinsic::arm_neon_vqshiftnu:
7612     case Intrinsic::arm_neon_vqshiftnsu:
7613     case Intrinsic::arm_neon_vqrshiftns:
7614     case Intrinsic::arm_neon_vqrshiftnu:
7615     case Intrinsic::arm_neon_vqrshiftnsu:
7616       // Narrowing shifts require an immediate right shift.
7617       if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
7618         break;
7619       llvm_unreachable("invalid shift count for narrowing vector shift "
7620                        "intrinsic");
7621 
7622     default:
7623       llvm_unreachable("unhandled vector shift");
7624     }
7625 
7626     switch (IntNo) {
7627     case Intrinsic::arm_neon_vshifts:
7628     case Intrinsic::arm_neon_vshiftu:
7629       // Opcode already set above.
7630       break;
7631     case Intrinsic::arm_neon_vshiftls:
7632     case Intrinsic::arm_neon_vshiftlu:
7633       if (Cnt == VT.getVectorElementType().getSizeInBits())
7634         VShiftOpc = ARMISD::VSHLLi;
7635       else
7636         VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
7637                      ARMISD::VSHLLs : ARMISD::VSHLLu);
7638       break;
7639     case Intrinsic::arm_neon_vshiftn:
7640       VShiftOpc = ARMISD::VSHRN; break;
7641     case Intrinsic::arm_neon_vrshifts:
7642       VShiftOpc = ARMISD::VRSHRs; break;
7643     case Intrinsic::arm_neon_vrshiftu:
7644       VShiftOpc = ARMISD::VRSHRu; break;
7645     case Intrinsic::arm_neon_vrshiftn:
7646       VShiftOpc = ARMISD::VRSHRN; break;
7647     case Intrinsic::arm_neon_vqshifts:
7648       VShiftOpc = ARMISD::VQSHLs; break;
7649     case Intrinsic::arm_neon_vqshiftu:
7650       VShiftOpc = ARMISD::VQSHLu; break;
7651     case Intrinsic::arm_neon_vqshiftsu:
7652       VShiftOpc = ARMISD::VQSHLsu; break;
7653     case Intrinsic::arm_neon_vqshiftns:
7654       VShiftOpc = ARMISD::VQSHRNs; break;
7655     case Intrinsic::arm_neon_vqshiftnu:
7656       VShiftOpc = ARMISD::VQSHRNu; break;
7657     case Intrinsic::arm_neon_vqshiftnsu:
7658       VShiftOpc = ARMISD::VQSHRNsu; break;
7659     case Intrinsic::arm_neon_vqrshiftns:
7660       VShiftOpc = ARMISD::VQRSHRNs; break;
7661     case Intrinsic::arm_neon_vqrshiftnu:
7662       VShiftOpc = ARMISD::VQRSHRNu; break;
7663     case Intrinsic::arm_neon_vqrshiftnsu:
7664       VShiftOpc = ARMISD::VQRSHRNsu; break;
7665     }
7666 
7667     return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
7668                        N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
7669   }
7670 
7671   case Intrinsic::arm_neon_vshiftins: {
7672     EVT VT = N->getOperand(1).getValueType();
7673     int64_t Cnt;
7674     unsigned VShiftOpc = 0;
7675 
7676     if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
7677       VShiftOpc = ARMISD::VSLI;
7678     else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
7679       VShiftOpc = ARMISD::VSRI;
7680     else {
7681       llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
7682     }
7683 
7684     return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
7685                        N->getOperand(1), N->getOperand(2),
7686                        DAG.getConstant(Cnt, MVT::i32));
7687   }
7688 
7689   case Intrinsic::arm_neon_vqrshifts:
7690   case Intrinsic::arm_neon_vqrshiftu:
7691     // No immediate versions of these to check for.
7692     break;
7693   }
7694 
7695   return SDValue();
7696 }
7697 
7698 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
7699 /// lowers them.  As with the vector shift intrinsics, this is done during DAG
7700 /// combining instead of DAG legalizing because the build_vectors for 64-bit
7701 /// vector element shift counts are generally not legal, and it is hard to see
7702 /// their values after they get legalized to loads from a constant pool.
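/// For illustration, (shl <4 x i32> %x, <3, 3, 3, 3>) becomes an
/// ARMISD::VSHL with immediate 3, matching NEON's vshl.i32 immediate form.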
7703 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
7704                                    const ARMSubtarget *ST) {
7705   EVT VT = N->getValueType(0);
7706 
7707   // Nothing to be done for scalar shifts.
7708   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7709   if (!VT.isVector() || !TLI.isTypeLegal(VT))
7710     return SDValue();
7711 
7712   assert(ST->hasNEON() && "unexpected vector shift");
7713   int64_t Cnt;
7714 
7715   switch (N->getOpcode()) {
7716   default: llvm_unreachable("unexpected shift opcode");
7717 
7718   case ISD::SHL:
7719     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
7720       return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
7721                          DAG.getConstant(Cnt, MVT::i32));
7722     break;
7723 
7724   case ISD::SRA:
7725   case ISD::SRL:
7726     if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
7727       unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
7728                             ARMISD::VSHRs : ARMISD::VSHRu);
7729       return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
7730                          DAG.getConstant(Cnt, MVT::i32));
7731     }
7732   }
7733   return SDValue();
7734 }
7735 
7736 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
7737 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
7738 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
7739                                     const ARMSubtarget *ST) {
7740   SDValue N0 = N->getOperand(0);
7741 
7742   // Check for sign- and zero-extensions of vector extract operations of 8-
7743   // and 16-bit vector elements.  NEON supports these directly.  They are
7744   // handled during DAG combining because type legalization will promote them
7745   // to 32-bit types and it is messy to recognize the operations after that.
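  // For example (illustrative): sign-extending the i8 element 3 of a v8i8
  // vector to i32 becomes ARMISD::VGETLANEs, i.e. a "vmov.s8 r0, d16[3]"
  // style lane move rather than an extract plus a separate sext.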
7746   if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
7747     SDValue Vec = N0.getOperand(0);
7748     SDValue Lane = N0.getOperand(1);
7749     EVT VT = N->getValueType(0);
7750     EVT EltVT = N0.getValueType();
7751     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7752 
7753     if (VT == MVT::i32 &&
7754         (EltVT == MVT::i8 || EltVT == MVT::i16) &&
7755         TLI.isTypeLegal(Vec.getValueType()) &&
7756         isa<ConstantSDNode>(Lane)) {
7757 
7758       unsigned Opc = 0;
7759       switch (N->getOpcode()) {
7760       default: llvm_unreachable("unexpected opcode");
7761       case ISD::SIGN_EXTEND:
7762         Opc = ARMISD::VGETLANEs;
7763         break;
7764       case ISD::ZERO_EXTEND:
7765       case ISD::ANY_EXTEND:
7766         Opc = ARMISD::VGETLANEu;
7767         break;
7768       }
7769       return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
7770     }
7771   }
7772 
7773   return SDValue();
7774 }
7775 
7776 /// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
7777 /// to match f32 max/min patterns to use NEON vmax/vmin instructions.
7778 static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
7779                                        const ARMSubtarget *ST) {
7780   // If the target supports NEON, try to use vmax/vmin instructions for f32
7781   // selects like "x < y ? x : y".  Unless the NoNaNsFPMath option is set,
7782   // be careful about NaNs:  NEON's vmax/vmin return NaN if either operand is
7783   // a NaN; only do the transformation when it matches that behavior.
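  // For example (illustrative), "x < y ? x : y" on f32, with the relevant
  // operand known non-NaN, can be selected as a single vmin.f32 instead of
  // a compare followed by a conditional move.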
7784 
7785   // For now only do this when using NEON for FP operations; if using VFP, it
7786   // is not obvious that the benefit outweighs the cost of switching to the
7787   // NEON pipeline.
7788   if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
7789       N->getValueType(0) != MVT::f32)
7790     return SDValue();
7791 
7792   SDValue CondLHS = N->getOperand(0);
7793   SDValue CondRHS = N->getOperand(1);
7794   SDValue LHS = N->getOperand(2);
7795   SDValue RHS = N->getOperand(3);
7796   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
7797 
7798   unsigned Opcode = 0;
7799   bool IsReversed;
7800   if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
7801     IsReversed = false; // x CC y ? x : y
7802   } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
7803     IsReversed = true;  // x CC y ? y : x
7804   } else {
7805     return SDValue();
7806   }
7807 
7808   bool IsUnordered;
7809   switch (CC) {
7810   default: break;
7811   case ISD::SETOLT:
7812   case ISD::SETOLE:
7813   case ISD::SETLT:
7814   case ISD::SETLE:
7815   case ISD::SETULT:
7816   case ISD::SETULE:
7817     // If LHS is NaN, an ordered comparison will be false and the result will
7818     // be the RHS, but vmin(NaN, RHS) = NaN.  Avoid this by checking that LHS
7819     // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
7820     IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
7821     if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
7822       break;
7823     // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
7824     // will return -0, so vmin can only be used for unsafe math or if one of
7825     // the operands is known to be nonzero.
7826     if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
7827         !UnsafeFPMath &&
7828         !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
7829       break;
7830     Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
7831     break;
7832 
7833   case ISD::SETOGT:
7834   case ISD::SETOGE:
7835   case ISD::SETGT:
7836   case ISD::SETGE:
7837   case ISD::SETUGT:
7838   case ISD::SETUGE:
7839     // If LHS is NaN, an ordered comparison will be false and the result will
7840     // be the RHS, but vmax(NaN, RHS) = NaN.  Avoid this by checking that LHS
7841     // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
7842     IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
7843     if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
7844       break;
7845     // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
7846     // will return +0, so vmax can only be used for unsafe math or if one of
7847     // the operands is known to be nonzero.
7848     if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
7849         !UnsafeFPMath &&
7850         !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
7851       break;
7852     Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
7853     break;
7854   }
7855 
7856   if (!Opcode)
7857     return SDValue();
7858   return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
7859 }
7860 
7861 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
7862 SDValue
7863 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
7864   SDValue Cmp = N->getOperand(4);
7865   if (Cmp.getOpcode() != ARMISD::CMPZ)
7866     // Only looking at EQ and NE cases.
7867     return SDValue();
7868 
7869   EVT VT = N->getValueType(0);
7870   DebugLoc dl = N->getDebugLoc();
7871   SDValue LHS = Cmp.getOperand(0);
7872   SDValue RHS = Cmp.getOperand(1);
7873   SDValue FalseVal = N->getOperand(0);
7874   SDValue TrueVal = N->getOperand(1);
7875   SDValue ARMcc = N->getOperand(2);
7876   ARMCC::CondCodes CC =
7877     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
7878 
7879   // Simplify
7880   //   mov     r1, r0
7881   //   cmp     r1, x
7882   //   mov     r0, y
7883   //   moveq   r0, x
7884   // to
7885   //   cmp     r0, x
7886   //   movne   r0, y
7887   //
7888   //   mov     r1, r0
7889   //   cmp     r1, x
7890   //   mov     r0, x
7891   //   movne   r0, y
7892   // to
7893   //   cmp     r0, x
7894   //   movne   r0, y
7895   /// FIXME: Turn this into a target neutral optimization?
7896   SDValue Res;
7897   if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
7898     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
7899                       N->getOperand(3), Cmp);
7900   } else if (CC == ARMCC::EQ && TrueVal == RHS) {
7901     SDValue ARMcc;
7902     SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
7903     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
7904                       N->getOperand(3), NewCmp);
7905   }
7906 
7907   if (Res.getNode()) {
7908     APInt KnownZero, KnownOne;
7909     APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
7910     DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne);
7911     // Capture demanded bits information that would be otherwise lost.
7912     if (KnownZero == 0xfffffffe)
7913       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
7914                         DAG.getValueType(MVT::i1));
7915     else if (KnownZero == 0xffffff00)
7916       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
7917                         DAG.getValueType(MVT::i8));
7918     else if (KnownZero == 0xffff0000)
7919       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
7920                         DAG.getValueType(MVT::i16));
7921   }
7922 
7923   return Res;
7924 }
7925 
7926 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
7927                                              DAGCombinerInfo &DCI) const {
7928   switch (N->getOpcode()) {
7929   default: break;
7930   case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
7931   case ISD::SUB:        return PerformSUBCombine(N, DCI);
7932   case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
7933   case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
7934   case ISD::AND:        return PerformANDCombine(N, DCI);
7935   case ARMISD::BFI:     return PerformBFICombine(N, DCI);
7936   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
7937   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
7938   case ISD::STORE:      return PerformSTORECombine(N, DCI);
7939   case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI);
7940   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
7941   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
7942   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
7943   case ISD::FP_TO_SINT:
7944   case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget);
7945   case ISD::FDIV:       return PerformVDIVCombine(N, DCI, Subtarget);
7946   case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
7947   case ISD::SHL:
7948   case ISD::SRA:
7949   case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
7950   case ISD::SIGN_EXTEND:
7951   case ISD::ZERO_EXTEND:
7952   case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
7953   case ISD::SELECT_CC:  return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
7954   case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
7955   case ARMISD::VLD2DUP:
7956   case ARMISD::VLD3DUP:
7957   case ARMISD::VLD4DUP:
7958     return CombineBaseUpdate(N, DCI);
7959   case ISD::INTRINSIC_VOID:
7960   case ISD::INTRINSIC_W_CHAIN:
7961     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
7962     case Intrinsic::arm_neon_vld1:
7963     case Intrinsic::arm_neon_vld2:
7964     case Intrinsic::arm_neon_vld3:
7965     case Intrinsic::arm_neon_vld4:
7966     case Intrinsic::arm_neon_vld2lane:
7967     case Intrinsic::arm_neon_vld3lane:
7968     case Intrinsic::arm_neon_vld4lane:
7969     case Intrinsic::arm_neon_vst1:
7970     case Intrinsic::arm_neon_vst2:
7971     case Intrinsic::arm_neon_vst3:
7972     case Intrinsic::arm_neon_vst4:
7973     case Intrinsic::arm_neon_vst2lane:
7974     case Intrinsic::arm_neon_vst3lane:
7975     case Intrinsic::arm_neon_vst4lane:
7976       return CombineBaseUpdate(N, DCI);
7977     default: break;
7978     }
7979     break;
7980   }
7981   return SDValue();
7982 }
7983 
7984 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
7985                                                           EVT VT) const {
7986   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
7987 }
7988 
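/// allowsUnalignedMemoryAccesses - Return true if unaligned loads/stores of
/// VT are fast here: only i8/i16/i32, and only when the subtarget permits
/// unaligned memory accesses.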
7989 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
7990   if (!Subtarget->allowsUnalignedMem())
7991     return false;
7992 
7993   switch (VT.getSimpleVT().SimpleTy) {
7994   default:
7995     return false;
7996   case MVT::i8:
7997   case MVT::i16:
7998   case MVT::i32:
7999     return true;
8000   // FIXME: VLD1 etc with standard alignment is legal.
8001   }
8002 }
8003 
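// isLegalT1AddressImmediate - Thumb1 offsets are an unsigned 5-bit field
// scaled by the access size, so e.g. an i32 access accepts 0..124 in
// multiples of 4 (illustrative summary of the checks below).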
8004 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
8005   if (V < 0)
8006     return false;
8007 
8008   unsigned Scale = 1;
8009   switch (VT.getSimpleVT().SimpleTy) {
8010   default: return false;
8011   case MVT::i1:
8012   case MVT::i8:
8013     // Scale == 1;
8014     break;
8015   case MVT::i16:
8016     // Scale == 2;
8017     Scale = 2;
8018     break;
8019   case MVT::i32:
8020     // Scale == 4;
8021     Scale = 4;
8022     break;
8023   }
8024 
8025   if ((V & (Scale - 1)) != 0)
8026     return false;
8027   V /= Scale;
8028   return V == (V & ((1LL << 5) - 1));
8029 }
8030 
8031 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
8032                                       const ARMSubtarget *Subtarget) {
8033   bool isNeg = false;
8034   if (V < 0) {
8035     isNeg = true;
8036     V = - V;
8037   }
8038 
8039   switch (VT.getSimpleVT().SimpleTy) {
8040   default: return false;
8041   case MVT::i1:
8042   case MVT::i8:
8043   case MVT::i16:
8044   case MVT::i32:
8045     // + imm12 or - imm8
8046     if (isNeg)
8047       return V == (V & ((1LL << 8) - 1));
8048     return V == (V & ((1LL << 12) - 1));
8049   case MVT::f32:
8050   case MVT::f64:
8051     // Same as ARM mode. FIXME: NEON?
8052     if (!Subtarget->hasVFP2())
8053       return false;
8054     if ((V & 3) != 0)
8055       return false;
8056     V >>= 2;
8057     return V == (V & ((1LL << 8) - 1));
8058   }
8059 }
8060 
8061 /// isLegalAddressImmediate - Return true if the integer value can be used
8062 /// as the offset of the target addressing mode for load / store of the
8063 /// given type.
8064 static bool isLegalAddressImmediate(int64_t V, EVT VT,
8065                                     const ARMSubtarget *Subtarget) {
8066   if (V == 0)
8067     return true;
8068 
8069   if (!VT.isSimple())
8070     return false;
8071 
8072   if (Subtarget->isThumb1Only())
8073     return isLegalT1AddressImmediate(V, VT);
8074   else if (Subtarget->isThumb2())
8075     return isLegalT2AddressImmediate(V, VT, Subtarget);
8076 
8077   // ARM mode.
8078   if (V < 0)
8079     V = - V;
8080   switch (VT.getSimpleVT().SimpleTy) {
8081   default: return false;
8082   case MVT::i1:
8083   case MVT::i8:
8084   case MVT::i32:
8085     // +- imm12
8086     return V == (V & ((1LL << 12) - 1));
8087   case MVT::i16:
8088     // +- imm8
8089     return V == (V & ((1LL << 8) - 1));
8090   case MVT::f32:
8091   case MVT::f64:
8092     if (!Subtarget->hasVFP2()) // FIXME: NEON?
8093       return false;
8094     if ((V & 3) != 0)
8095       return false;
8096     V >>= 2;
8097     return V == (V & ((1LL << 8) - 1));
8098   }
8099 }
8100 
8101 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
8102                                                       EVT VT) const {
8103   int Scale = AM.Scale;
8104   if (Scale < 0)
8105     return false;
8106 
8107   switch (VT.getSimpleVT().SimpleTy) {
8108   default: return false;
8109   case MVT::i1:
8110   case MVT::i8:
8111   case MVT::i16:
8112   case MVT::i32:
8113     if (Scale == 1)
8114       return true;
8115     // r + r << imm
8116     Scale = Scale & ~1;
8117     return Scale == 2 || Scale == 4 || Scale == 8;
8118   case MVT::i64:
8119     // r + r
8120     if (((unsigned)AM.HasBaseReg + Scale) <= 2)
8121       return true;
8122     return false;
8123   case MVT::isVoid:
8124     // Note, we allow "void" uses (basically, uses that aren't loads or
8125     // stores), because arm allows folding a scale into many arithmetic
8126     // operations.  This should be made more precise and revisited later.
8127 
8128     // Allow r << imm, but the imm has to be a multiple of two.
8129     if (Scale & 1) return false;
8130     return isPowerOf2_32(Scale);
8131   }
8132 }
8133 
8134 /// isLegalAddressingMode - Return true if the addressing mode represented
8135 /// by AM is legal for this target, for a load/store of the specified type.
8136 bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
8137                                               Type *Ty) const {
8138   EVT VT = getValueType(Ty, true);
8139   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
8140     return false;
8141 
8142   // Can never fold addr of global into load/store.
8143   if (AM.BaseGV)
8144     return false;
8145 
8146   switch (AM.Scale) {
8147   case 0:  // no scale reg, must be "r+i" or "r", or "i".
8148     break;
8149   case 1:
8150     if (Subtarget->isThumb1Only())
8151       return false;
8152     // FALL THROUGH.
8153   default:
8154     // ARM doesn't support any R+R*scale+imm addr modes.
8155     if (AM.BaseOffs)
8156       return false;
8157 
8158     if (!VT.isSimple())
8159       return false;
8160 
8161     if (Subtarget->isThumb2())
8162       return isLegalT2ScaledAddressingMode(AM, VT);
8163 
8164     int Scale = AM.Scale;
8165     switch (VT.getSimpleVT().SimpleTy) {
8166     default: return false;
8167     case MVT::i1:
8168     case MVT::i8:
8169     case MVT::i32:
8170       if (Scale < 0) Scale = -Scale;
8171       if (Scale == 1)
8172         return true;
8173       // r + r << imm
8174       return isPowerOf2_32(Scale & ~1);
8175     case MVT::i16:
8176     case MVT::i64:
8177       // r + r
8178       if (((unsigned)AM.HasBaseReg + Scale) <= 2)
8179         return true;
8180       return false;
8181 
8182     case MVT::isVoid:
8183       // Note, we allow "void" uses (basically, uses that aren't loads or
8184       // stores), because arm allows folding a scale into many arithmetic
8185       // operations.  This should be made more precise and revisited later.
8186 
8187       // Allow r << imm, but the imm has to be a multiple of two.
8188       if (Scale & 1) return false;
8189       return isPowerOf2_32(Scale);
8190     }
8191     break;
8192   }
8193   return true;
8194 }
8195 
8196 /// isLegalICmpImmediate - Return true if the specified immediate is legal
8197 /// icmp immediate, that is the target has icmp instructions which can compare
8198 /// a register against the immediate without having to materialize the
8199 /// immediate into a register.
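/// For example (illustrative), Thumb1 cmp accepts only 0..255, while ARM
/// mode accepts any rotated 8-bit value such as 0x00ff0000.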
8200 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
8201   if (!Subtarget->isThumb())
8202     return ARM_AM::getSOImmVal(Imm) != -1;
8203   if (Subtarget->isThumb2())
8204     return ARM_AM::getT2SOImmVal(Imm) != -1;
8205   return Imm >= 0 && Imm <= 255;
8206 }
8207 
8208 /// isLegalAddImmediate - Return true if the specified immediate is legal
8209 /// add immediate, that is the target has add instructions which can add
8210 /// a register with the immediate without having to materialize the
8211 /// immediate into a register.
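/// For example (illustrative), 0x00ff0000 is a legal add immediate while
/// 0x101 is not, since the latter cannot be encoded as a rotated 8-bit value.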
8212 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
8213   return ARM_AM::getSOImmVal(Imm) != -1;
8214 }
8215 
8216 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
8217                                       bool isSEXTLoad, SDValue &Base,
8218                                       SDValue &Offset, bool &isInc,
8219                                       SelectionDAG &DAG) {
8220   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
8221     return false;
8222 
8223   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
8224     // AddressingMode 3
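    // (AddrMode3 serves halfword and sign-extending byte accesses: base
    // register plus a register or an 8-bit immediate offset.)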
8225     Base = Ptr->getOperand(0);
8226     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
8227       int RHSC = (int)RHS->getZExtValue();
8228       if (RHSC < 0 && RHSC > -256) {
8229         assert(Ptr->getOpcode() == ISD::ADD);
8230         isInc = false;
8231         Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
8232         return true;
8233       }
8234     }
8235     isInc = (Ptr->getOpcode() == ISD::ADD);
8236     Offset = Ptr->getOperand(1);
8237     return true;
8238   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
8239     // AddressingMode 2
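    // (AddrMode2 serves word and unsigned byte accesses: base register plus
    // a register, a shifted register, or a 12-bit immediate offset.)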
8240     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
8241       int RHSC = (int)RHS->getZExtValue();
8242       if (RHSC < 0 && RHSC > -0x1000) {
8243         assert(Ptr->getOpcode() == ISD::ADD);
8244         isInc = false;
8245         Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
8246         Base = Ptr->getOperand(0);
8247         return true;
8248       }
8249     }
8250 
8251     if (Ptr->getOpcode() == ISD::ADD) {
8252       isInc = true;
8253       ARM_AM::ShiftOpc ShOpcVal =
8254         ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
8255       if (ShOpcVal != ARM_AM::no_shift) {
8256         Base = Ptr->getOperand(1);
8257         Offset = Ptr->getOperand(0);
8258       } else {
8259         Base = Ptr->getOperand(0);
8260         Offset = Ptr->getOperand(1);
8261       }
8262       return true;
8263     }
8264 
8265     isInc = (Ptr->getOpcode() == ISD::ADD);
8266     Base = Ptr->getOperand(0);
8267     Offset = Ptr->getOperand(1);
8268     return true;
8269   }
8270 
8271   // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
8272   return false;
8273 }
8274 
8275 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
8276                                      bool isSEXTLoad, SDValue &Base,
8277                                      SDValue &Offset, bool &isInc,
8278                                      SelectionDAG &DAG) {
8279   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
8280     return false;
8281 
8282   Base = Ptr->getOperand(0);
8283   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
8284     int RHSC = (int)RHS->getZExtValue();
8285     if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
8286       assert(Ptr->getOpcode() == ISD::ADD);
8287       isInc = false;
8288       Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
8289       return true;
8290     } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
8291       isInc = Ptr->getOpcode() == ISD::ADD;
8292       Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
8293       return true;
8294     }
8295   }
8296 
8297   return false;
8298 }
8299 
8300 /// getPreIndexedAddressParts - returns true by value, base pointer and
8301 /// offset pointer and addressing mode by reference if the node's address
8302 /// can be legally represented as pre-indexed load / store address.
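/// For example (illustrative), a load from (add r1, #4) can become the
/// pre-indexed "ldr r0, [r1, #4]!", which also updates r1.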
8303 bool
8304 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
8305                                              SDValue &Offset,
8306                                              ISD::MemIndexedMode &AM,
8307                                              SelectionDAG &DAG) const {
8308   if (Subtarget->isThumb1Only())
8309     return false;
8310 
8311   EVT VT;
8312   SDValue Ptr;
8313   bool isSEXTLoad = false;
8314   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8315     Ptr = LD->getBasePtr();
8316     VT  = LD->getMemoryVT();
8317     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
8318   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
8319     Ptr = ST->getBasePtr();
8320     VT  = ST->getMemoryVT();
8321   } else
8322     return false;
8323 
8324   bool isInc;
8325   bool isLegal = false;
8326   if (Subtarget->isThumb2())
8327     isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
8328                                        Offset, isInc, DAG);
8329   else
8330     isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
8331                                         Offset, isInc, DAG);
8332   if (!isLegal)
8333     return false;
8334 
8335   AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
8336   return true;
8337 }
8338 
8339 /// getPostIndexedAddressParts - returns true by value, base pointer and
8340 /// offset pointer and addressing mode by reference if this node can be
8341 /// combined with a load / store to form a post-indexed load / store.
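/// For example (illustrative), a load from r1 followed by (add r1, #4) can
/// become the post-indexed "ldr r0, [r1], #4".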
8342 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
8343                                                    SDValue &Base,
8344                                                    SDValue &Offset,
8345                                                    ISD::MemIndexedMode &AM,
8346                                                    SelectionDAG &DAG) const {
8347   if (Subtarget->isThumb1Only())
8348     return false;
8349 
8350   EVT VT;
8351   SDValue Ptr;
8352   bool isSEXTLoad = false;
8353   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8354     VT  = LD->getMemoryVT();
8355     Ptr = LD->getBasePtr();
8356     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
8357   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
8358     VT  = ST->getMemoryVT();
8359     Ptr = ST->getBasePtr();
8360   } else
8361     return false;
8362 
8363   bool isInc;
8364   bool isLegal = false;
8365   if (Subtarget->isThumb2())
8366     isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
8367                                        isInc, DAG);
8368   else
8369     isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
8370                                         isInc, DAG);
8371   if (!isLegal)
8372     return false;
8373 
8374   if (Ptr != Base) {
8375     // Swap base ptr and offset to catch more post-index load / store when
8376     // it's legal. In Thumb2 mode, offset must be an immediate.
8377     if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
8378         !Subtarget->isThumb2())
8379       std::swap(Base, Offset);
8380 
8381     // Post-indexed load / store update the base pointer.
8382     if (Ptr != Base)
8383       return false;
8384   }
8385 
8386   AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
8387   return true;
8388 }
8389 
8390 void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
8391                                                        const APInt &Mask,
8392                                                        APInt &KnownZero,
8393                                                        APInt &KnownOne,
8394                                                        const SelectionDAG &DAG,
8395                                                        unsigned Depth) const {
8396   KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
8397   switch (Op.getOpcode()) {
8398   default: break;
8399   case ARMISD::CMOV: {
8400     // Bits are known zero/one if known on the LHS and RHS.
8401     DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
8402     if (KnownZero == 0 && KnownOne == 0) return;
8403 
8404     APInt KnownZeroRHS, KnownOneRHS;
8405     DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
8406                           KnownZeroRHS, KnownOneRHS, Depth+1);
8407     KnownZero &= KnownZeroRHS;
8408     KnownOne  &= KnownOneRHS;
8409     return;
8410   }
8411   }
8412 }
8413 
8414 //===----------------------------------------------------------------------===//
8415 //                           ARM Inline Assembly Support
8416 //===----------------------------------------------------------------------===//
8417 
8418 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
8419   // Looking for "rev" which is V6+.
8420   if (!Subtarget->hasV6Ops())
8421     return false;
8422 
8423   InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
8424   std::string AsmStr = IA->getAsmString();
8425   SmallVector<StringRef, 4> AsmPieces;
8426   SplitString(AsmStr, AsmPieces, ";\n");
8427 
8428   switch (AsmPieces.size()) {
8429   default: return false;
8430   case 1:
8431     AsmStr = AsmPieces[0];
8432     AsmPieces.clear();
8433     SplitString(AsmStr, AsmPieces, " \t,");
8434 
8435     // rev $0, $1
8436     if (AsmPieces.size() == 3 &&
8437         AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
8438         IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
8439       IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
8440       if (Ty && Ty->getBitWidth() == 32)
8441         return IntrinsicLowering::LowerToByteSwap(CI);
8442     }
8443     break;
8444   }
8445 
8446   return false;
8447 }
8448 
8449 /// getConstraintType - Given a constraint letter, return the type of
8450 /// constraint it is for this target.
8451 ARMTargetLowering::ConstraintType
8452 ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
8453   if (Constraint.size() == 1) {
8454     switch (Constraint[0]) {
8455     default:  break;
8456     case 'l': return C_RegisterClass;
8457     case 'w': return C_RegisterClass;
8458     case 'h': return C_RegisterClass;
8459     case 'x': return C_RegisterClass;
8460     case 't': return C_RegisterClass;
8461     case 'j': return C_Other; // Constant for movw.
8462       // An address with a single base register. Due to the way we
8463       // currently handle addresses it is the same as an 'r' memory constraint.
8464     case 'Q': return C_Memory;
8465     }
8466   } else if (Constraint.size() == 2) {
8467     switch (Constraint[0]) {
8468     default: break;
8469     // All 'U+' constraints are addresses.
8470     case 'U': return C_Memory;
8471     }
8472   }
8473   return TargetLowering::getConstraintType(Constraint);
8474 }
8475 
8476 /// Examine constraint type and operand type and determine a weight value.
8477 /// This object must already have been set up with the operand type
8478 /// and the current alternative constraint selected.
8479 TargetLowering::ConstraintWeight
8480 ARMTargetLowering::getSingleConstraintMatchWeight(
8481     AsmOperandInfo &info, const char *constraint) const {
8482   ConstraintWeight weight = CW_Invalid;
8483   Value *CallOperandVal = info.CallOperandVal;
8484   // If we don't have a value, we can't do a match,
8485   // but allow it at the lowest weight.
8486   if (CallOperandVal == NULL)
8487     return CW_Default;
8488   Type *type = CallOperandVal->getType();
8489   // Look at the constraint type.
8490   switch (*constraint) {
8491   default:
8492     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
8493     break;
8494   case 'l':
8495     if (type->isIntegerTy()) {
8496       if (Subtarget->isThumb())
8497         weight = CW_SpecificReg;
8498       else
8499         weight = CW_Register;
8500     }
8501     break;
8502   case 'w':
8503     if (type->isFloatingPointTy())
8504       weight = CW_Register;
8505     break;
8506   }
8507   return weight;
8508 }
8509 
8510 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
8511 RCPair
8512 ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
8513                                                 EVT VT) const {
8514   if (Constraint.size() == 1) {
8515     // GCC ARM Constraint Letters
8516     switch (Constraint[0]) {
8517     case 'l': // Low regs or general regs.
8518       if (Subtarget->isThumb())
8519         return RCPair(0U, ARM::tGPRRegisterClass);
8520       else
8521         return RCPair(0U, ARM::GPRRegisterClass);
8522     case 'h': // High regs or no regs.
8523       if (Subtarget->isThumb())
8524         return RCPair(0U, ARM::hGPRRegisterClass);
8525       break;
8526     case 'r':
8527       return RCPair(0U, ARM::GPRRegisterClass);
8528     case 'w':
8529       if (VT == MVT::f32)
8530         return RCPair(0U, ARM::SPRRegisterClass);
8531       if (VT.getSizeInBits() == 64)
8532         return RCPair(0U, ARM::DPRRegisterClass);
8533       if (VT.getSizeInBits() == 128)
8534         return RCPair(0U, ARM::QPRRegisterClass);
8535       break;
8536     case 'x':
8537       if (VT == MVT::f32)
8538         return RCPair(0U, ARM::SPR_8RegisterClass);
8539       if (VT.getSizeInBits() == 64)
8540         return RCPair(0U, ARM::DPR_8RegisterClass);
8541       if (VT.getSizeInBits() == 128)
8542         return RCPair(0U, ARM::QPR_8RegisterClass);
8543       break;
8544     case 't':
8545       if (VT == MVT::f32)
8546         return RCPair(0U, ARM::SPRRegisterClass);
8547       break;
8548     }
8549   }
8550   if (StringRef("{cc}").equals_lower(Constraint))
8551     return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);
8552 
8553   return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
8554 }
8555 
8556 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
8557 /// vector.  If it is invalid, don't add anything to Ops.
8558 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
8559                                                      std::string &Constraint,
8560                                                      std::vector<SDValue>&Ops,
8561                                                      SelectionDAG &DAG) const {
8562   SDValue Result(0, 0);
8563 
8564   // Currently only support length 1 constraints.
8565   if (Constraint.length() != 1) return;
8566 
8567   char ConstraintLetter = Constraint[0];
8568   switch (ConstraintLetter) {
8569   default: break;
8570   case 'j':
8571   case 'I': case 'J': case 'K': case 'L':
8572   case 'M': case 'N': case 'O':
8573     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
8574     if (!C)
8575       return;
8576 
8577     int64_t CVal64 = C->getSExtValue();
8578     int CVal = (int) CVal64;
8579     // None of these constraints allow values larger than 32 bits.  Check
8580     // that the value fits in an int.
8581     if (CVal != CVal64)
8582       return;
8583 
8584     switch (ConstraintLetter) {
8585       case 'j':
8586         // Constant suitable for movw, must be between 0 and
8587         // 65535.
8588         if (Subtarget->hasV6T2Ops())
8589           if (CVal >= 0 && CVal <= 65535)
8590             break;
8591         return;
8592       case 'I':
8593         if (Subtarget->isThumb1Only()) {
8594           // This must be a constant between 0 and 255, for ADD
8595           // immediates.
8596           if (CVal >= 0 && CVal <= 255)
8597             break;
8598         } else if (Subtarget->isThumb2()) {
8599           // A constant that can be used as an immediate value in a
8600           // data-processing instruction.
8601           if (ARM_AM::getT2SOImmVal(CVal) != -1)
8602             break;
8603         } else {
8604           // A constant that can be used as an immediate value in a
8605           // data-processing instruction.
8606           if (ARM_AM::getSOImmVal(CVal) != -1)
8607             break;
8608         }
8609         return;
8610 
8611       case 'J':
8612         if (Subtarget->isThumb()) {  // FIXME thumb2
8613           // This must be a constant between -255 and -1, for negated ADD
8614           // immediates. This can be used in GCC with an "n" modifier that
8615           // prints the negated value, for use with SUB instructions. It is
8616           // not useful otherwise but is implemented for compatibility.
8617           if (CVal >= -255 && CVal <= -1)
8618             break;
8619         } else {
8620           // This must be a constant between -4095 and 4095. It is not clear
8621           // what this constraint is intended for. Implemented for
8622           // compatibility with GCC.
8623           if (CVal >= -4095 && CVal <= 4095)
8624             break;
8625         }
8626         return;
8627 
8628       case 'K':
8629         if (Subtarget->isThumb1Only()) {
8630           // A 32-bit value where only one byte has a nonzero value. Exclude
8631           // zero to match GCC. This constraint is used by GCC internally for
8632           // constants that can be loaded with a move/shift combination.
8633           // It is not useful otherwise but is implemented for compatibility.
8634           if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
8635             break;
8636         } else if (Subtarget->isThumb2()) {
8637           // A constant whose bitwise inverse can be used as an immediate
8638           // value in a data-processing instruction. This can be used in GCC
8639           // with a "B" modifier that prints the inverted value, for use with
8640           // BIC and MVN instructions. It is not useful otherwise but is
8641           // implemented for compatibility.
8642           if (ARM_AM::getT2SOImmVal(~CVal) != -1)
8643             break;
8644         } else {
8645           // A constant whose bitwise inverse can be used as an immediate
8646           // value in a data-processing instruction. This can be used in GCC
8647           // with a "B" modifier that prints the inverted value, for use with
8648           // BIC and MVN instructions. It is not useful otherwise but is
8649           // implemented for compatibility.
8650           if (ARM_AM::getSOImmVal(~CVal) != -1)
8651             break;
8652         }
8653         return;
8654 
8655       case 'L':
8656         if (Subtarget->isThumb1Only()) {
8657           // This must be a constant between -7 and 7,
8658           // for 3-operand ADD/SUB immediate instructions.
8659           if (CVal >= -7 && CVal <= 7)
8660             break;
8661         } else if (Subtarget->isThumb2()) {
8662           // A constant whose negation can be used as an immediate value in a
8663           // data-processing instruction. This can be used in GCC with an "n"
8664           // modifier that prints the negated value, for use with SUB
8665           // instructions. It is not useful otherwise but is implemented for
8666           // compatibility.
8667           if (ARM_AM::getT2SOImmVal(-CVal) != -1)
8668             break;
8669         } else {
8670           // A constant whose negation can be used as an immediate value in a
8671           // data-processing instruction. This can be used in GCC with an "n"
8672           // modifier that prints the negated value, for use with SUB
8673           // instructions. It is not useful otherwise but is implemented for
8674           // compatibility.
8675           if (ARM_AM::getSOImmVal(-CVal) != -1)
8676             break;
8677         }
8678         return;
8679 
8680       case 'M':
8681         if (Subtarget->isThumb()) { // FIXME thumb2
8682           // This must be a multiple of 4 between 0 and 1020, for
8683           // ADD sp + immediate.
8684           if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
8685             break;
8686         } else {
8687           // A power of two or a constant between 0 and 32.  This is used in
8688           // GCC for the shift amount on shifted register operands, but it is
8689           // useful in general for any shift amounts.
8690           if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
8691             break;
8692         }
8693         return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
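
// Usage sketch (editor's illustration, not part of the original source):
// the constraint letters handled above come from GCC-style inline assembly,
// for example:
//
//   int shift_by_three(int x) {
//     int r;
//     // 'N' requires a constant shift amount in [0, 31] in Thumb mode.
//     asm("lsl %0, %1, %2" : "=r"(r) : "r"(x), "N"(3));
//     return r;
//   }
//
// Constants that satisfy their constraint are wrapped in a target constant
// by the switch above; anything else falls back to the generic
// TargetLowering::LowerAsmOperandForConstraint implementation.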

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;
  // There can be 1s on either or both "outsides"; all the "inside"
  // bits must be 0s.
  unsigned int lsb = 0, msb = 31;
  while (v & (1u << msb)) --msb;
  while (v & (1u << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1u << i))
      return false;
  }
  return true;
}
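
// Worked examples (editor's illustration, not from the original source):
//   isBitFieldInvertedMask(0xF000000F) -> true  (bits 4..27 are all clear)
//   isBitFieldInvertedMask(0xF0F0F0F0) -> false (the clear bits are not one
//                                                contiguous run)
// Masks of this shape are the ones BFC/BFI can operate on directly.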

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM_AM::getFP64Imm(Imm) != -1;
  return false;
}
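
// Note (editor's illustration, not from the original source): VFP3's VMOV
// immediate form encodes +/- m * 2^e with a 4-bit mantissa and a 3-bit
// exponent, so constants such as 1.0, 0.5, and 31.0 are legal here, while a
// value like 0.1 is not representable in that form and is instead loaded
// from the constant pool.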

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
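    // Example (editor's illustration, not from the original source): a vld3
    // of three <8 x i8> vectors returns a 24-byte aggregate, so NumElts = 3
    // and memVT = v3i64, covering the full 24 bytes read.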
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
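    // Example (editor's illustration, not from the original source): a vst2
    // of two <4 x i16> vectors contributes 8 + 8 bytes, so NumElts = 2 and
    // memVT = v2i64; the trailing alignment argument is not a vector and
    // terminates the loop above.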
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_strexd: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = 8;
    Info.vol = true;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_ldrexd: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.vol = true;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
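  // Note (editor's addition): ldrexd/strexd form the exclusive 64-bit
  // load/store pair used to implement doubleword atomic operations; both are
  // marked volatile above so that memory optimizations will not reorder or
  // remove them.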
  default:
    break;
  }

  return false;
}