1 //===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the ARM-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // ARMGenFastISel.inc, which is #included here.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMCallingConv.h"
19 #include "ARMTargetMachine.h"
20 #include "ARMSubtarget.h"
21 #include "ARMConstantPoolValue.h"
22 #include "MCTargetDesc/ARMAddressingModes.h"
23 #include "llvm/CallingConv.h"
24 #include "llvm/DerivedTypes.h"
25 #include "llvm/GlobalVariable.h"
26 #include "llvm/Instructions.h"
27 #include "llvm/IntrinsicInst.h"
28 #include "llvm/Module.h"
29 #include "llvm/Operator.h"
30 #include "llvm/CodeGen/Analysis.h"
31 #include "llvm/CodeGen/FastISel.h"
32 #include "llvm/CodeGen/FunctionLoweringInfo.h"
33 #include "llvm/CodeGen/MachineInstrBuilder.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineConstantPool.h"
36 #include "llvm/CodeGen/MachineFrameInfo.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/Support/CallSite.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/ErrorHandling.h"
42 #include "llvm/Support/GetElementPtrTypeIterator.h"
43 #include "llvm/Target/TargetData.h"
44 #include "llvm/Target/TargetInstrInfo.h"
45 #include "llvm/Target/TargetLowering.h"
46 #include "llvm/Target/TargetMachine.h"
47 #include "llvm/Target/TargetOptions.h"
48 using namespace llvm;
49
50 extern cl::opt<bool> EnableARMLongCalls;
51
52 namespace {
53
54 // All possible address modes, plus some.
55 typedef struct Address {
56 enum {
57 RegBase,
58 FrameIndexBase
59 } BaseType;
60
61 union {
62 unsigned Reg;
63 int FI;
64 } Base;
65
66 int Offset;
67
68 // Innocuous defaults for our address.
69 Address()
70 : BaseType(RegBase), Offset(0) {
71 Base.Reg = 0;
72 }
73 } Address;
74
75 class ARMFastISel : public FastISel {
76
77 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
78 /// make the right decision when generating code for different targets.
79 const ARMSubtarget *Subtarget;
80 const TargetMachine &TM;
81 const TargetInstrInfo &TII;
82 const TargetLowering &TLI;
83 ARMFunctionInfo *AFI;
84
85 // Convenience variables to avoid some queries.
86 bool isThumb2;
87 LLVMContext *Context;
88
89 public:
90 explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
91 const TargetLibraryInfo *libInfo)
92 : FastISel(funcInfo, libInfo),
93 TM(funcInfo.MF->getTarget()),
94 TII(*TM.getInstrInfo()),
95 TLI(*TM.getTargetLowering()) {
96 Subtarget = &TM.getSubtarget<ARMSubtarget>();
97 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
98 isThumb2 = AFI->isThumbFunction();
99 Context = &funcInfo.Fn->getContext();
100 }
101
102 // Code from FastISel.cpp.
103 private:
104 unsigned FastEmitInst_(unsigned MachineInstOpcode,
105 const TargetRegisterClass *RC);
106 unsigned FastEmitInst_r(unsigned MachineInstOpcode,
107 const TargetRegisterClass *RC,
108 unsigned Op0, bool Op0IsKill);
109 unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
110 const TargetRegisterClass *RC,
111 unsigned Op0, bool Op0IsKill,
112 unsigned Op1, bool Op1IsKill);
113 unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
114 const TargetRegisterClass *RC,
115 unsigned Op0, bool Op0IsKill,
116 unsigned Op1, bool Op1IsKill,
117 unsigned Op2, bool Op2IsKill);
118 unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
119 const TargetRegisterClass *RC,
120 unsigned Op0, bool Op0IsKill,
121 uint64_t Imm);
122 unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
123 const TargetRegisterClass *RC,
124 unsigned Op0, bool Op0IsKill,
125 const ConstantFP *FPImm);
126 unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
127 const TargetRegisterClass *RC,
128 unsigned Op0, bool Op0IsKill,
129 unsigned Op1, bool Op1IsKill,
130 uint64_t Imm);
131 unsigned FastEmitInst_i(unsigned MachineInstOpcode,
132 const TargetRegisterClass *RC,
133 uint64_t Imm);
134 unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
135 const TargetRegisterClass *RC,
136 uint64_t Imm1, uint64_t Imm2);
137
138 unsigned FastEmitInst_extractsubreg(MVT RetVT,
139 unsigned Op0, bool Op0IsKill,
140 uint32_t Idx);
141
142 // Backend specific FastISel code.
143 private:
144 virtual bool TargetSelectInstruction(const Instruction *I);
145 virtual unsigned TargetMaterializeConstant(const Constant *C);
146 virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
147 virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
148 const LoadInst *LI);
149 private:
150 #include "ARMGenFastISel.inc"
151
152 // Instruction selection routines.
153 private:
154 bool SelectLoad(const Instruction *I);
155 bool SelectStore(const Instruction *I);
156 bool SelectBranch(const Instruction *I);
157 bool SelectIndirectBr(const Instruction *I);
158 bool SelectCmp(const Instruction *I);
159 bool SelectFPExt(const Instruction *I);
160 bool SelectFPTrunc(const Instruction *I);
161 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
162 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
163 bool SelectIToFP(const Instruction *I, bool isSigned);
164 bool SelectFPToI(const Instruction *I, bool isSigned);
165 bool SelectDiv(const Instruction *I, bool isSigned);
166 bool SelectRem(const Instruction *I, bool isSigned);
167 bool SelectCall(const Instruction *I, const char *IntrMemName);
168 bool SelectIntrinsicCall(const IntrinsicInst &I);
169 bool SelectSelect(const Instruction *I);
170 bool SelectRet(const Instruction *I);
171 bool SelectTrunc(const Instruction *I);
172 bool SelectIntExt(const Instruction *I);
173 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
174
175 // Utility routines.
176 private:
177 bool isTypeLegal(Type *Ty, MVT &VT);
178 bool isLoadTypeLegal(Type *Ty, MVT &VT);
179 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
180 bool isZExt);
181 bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
182 unsigned Alignment = 0, bool isZExt = true,
183 bool allocReg = true);
184 bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
185 unsigned Alignment = 0);
186 bool ARMComputeAddress(const Value *Obj, Address &Addr);
187 void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
188 bool ARMIsMemCpySmall(uint64_t Len);
189 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
190 unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
191 unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
192 unsigned ARMMaterializeInt(const Constant *C, EVT VT);
193 unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
194 unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
195 unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
196 unsigned ARMSelectCallOp(bool UseReg);
197
198 // Call handling routines.
199 private:
200 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
201 bool Return,
202 bool isVarArg);
203 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
204 SmallVectorImpl<unsigned> &ArgRegs,
205 SmallVectorImpl<MVT> &ArgVTs,
206 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
207 SmallVectorImpl<unsigned> &RegArgs,
208 CallingConv::ID CC,
209 unsigned &NumBytes,
210 bool isVarArg);
211 unsigned getLibcallReg(const Twine &Name);
212 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
213 const Instruction *I, CallingConv::ID CC,
214 unsigned &NumBytes, bool isVarArg);
215 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
216
217 // OptionalDef handling routines.
218 private:
219 bool isARMNEONPred(const MachineInstr *MI);
220 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
221 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
222 void AddLoadStoreOperands(EVT VT, Address &Addr,
223 const MachineInstrBuilder &MIB,
224 unsigned Flags, bool useAM3);
225 };
226
227 } // end anonymous namespace
228
229 #include "ARMGenCallingConv.inc"
230
231 // DefinesOptionalPredicate - This is different from DefinesPredicate in that
232 // we don't care about implicit defs here, just places we'll need to add a
233 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
234 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
235 if (!MI->hasOptionalDef())
236 return false;
237
238 // Look to see if our OptionalDef is defining CPSR or CCR.
239 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
240 const MachineOperand &MO = MI->getOperand(i);
241 if (!MO.isReg() || !MO.isDef()) continue;
242 if (MO.getReg() == ARM::CPSR)
243 *CPSR = true;
244 }
245 return true;
246 }
247
248 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
249 const MCInstrDesc &MCID = MI->getDesc();
250
251 // Thumb2 functions and non-NEON instructions are handled via isPredicable.
252 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
253 AFI->isThumb2Function())
254 return false;
255
256 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
257 if (MCID.OpInfo[i].isPredicate())
258 return true;
259
260 return false;
261 }
262
263 // If the machine instruction is predicable, go ahead and add the predicate
264 // operands; if it needs default CC operands, add those.
265 // TODO: If we want to support thumb1 then we'll need to deal with optional
266 // CPSR defs that need to be added before the remaining operands. See s_cc_out
267 // for descriptions why.
268 const MachineInstrBuilder &
269 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
270 MachineInstr *MI = &*MIB;
271
272 // Do we use a predicate? or...
273 // Are we NEON in ARM mode and have a predicate operand? If so, I know
274 // we're not predicable but add it anyway.
275 if (TII.isPredicable(MI) || isARMNEONPred(MI))
276 AddDefaultPred(MIB);
277
278 // Do we optionally set a predicate? Preds is size > 0 iff the predicate
279 // defines CPSR. All other OptionalDefines in ARM are the CCR register.
280 bool CPSR = false;
281 if (DefinesOptionalPredicate(MI, &CPSR)) {
282 if (CPSR)
283 AddDefaultT1CC(MIB);
284 else
285 AddDefaultCC(MIB);
286 }
287 return MIB;
288 }
289
290 unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
291 const TargetRegisterClass* RC) {
292 unsigned ResultReg = createResultReg(RC);
293 const MCInstrDesc &II = TII.get(MachineInstOpcode);
294
295 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
296 return ResultReg;
297 }
298
299 unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
300 const TargetRegisterClass *RC,
301 unsigned Op0, bool Op0IsKill) {
302 unsigned ResultReg = createResultReg(RC);
303 const MCInstrDesc &II = TII.get(MachineInstOpcode);
304
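// Multiplying the boolean kill flag by RegState::Kill yields either
// RegState::Kill or 0, so the source operand is marked as killed only when
// the caller requested it. The same idiom is used by the emitters below.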
305 if (II.getNumDefs() >= 1) {
306 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
307 .addReg(Op0, Op0IsKill * RegState::Kill));
308 } else {
309 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
310 .addReg(Op0, Op0IsKill * RegState::Kill));
311 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
312 TII.get(TargetOpcode::COPY), ResultReg)
313 .addReg(II.ImplicitDefs[0]));
314 }
315 return ResultReg;
316 }
317
318 unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
319 const TargetRegisterClass *RC,
320 unsigned Op0, bool Op0IsKill,
321 unsigned Op1, bool Op1IsKill) {
322 unsigned ResultReg = createResultReg(RC);
323 const MCInstrDesc &II = TII.get(MachineInstOpcode);
324
325 if (II.getNumDefs() >= 1) {
326 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
327 .addReg(Op0, Op0IsKill * RegState::Kill)
328 .addReg(Op1, Op1IsKill * RegState::Kill));
329 } else {
330 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
331 .addReg(Op0, Op0IsKill * RegState::Kill)
332 .addReg(Op1, Op1IsKill * RegState::Kill));
333 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
334 TII.get(TargetOpcode::COPY), ResultReg)
335 .addReg(II.ImplicitDefs[0]));
336 }
337 return ResultReg;
338 }
339
340 unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
341 const TargetRegisterClass *RC,
342 unsigned Op0, bool Op0IsKill,
343 unsigned Op1, bool Op1IsKill,
344 unsigned Op2, bool Op2IsKill) {
345 unsigned ResultReg = createResultReg(RC);
346 const MCInstrDesc &II = TII.get(MachineInstOpcode);
347
348 if (II.getNumDefs() >= 1) {
349 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
350 .addReg(Op0, Op0IsKill * RegState::Kill)
351 .addReg(Op1, Op1IsKill * RegState::Kill)
352 .addReg(Op2, Op2IsKill * RegState::Kill));
353 } else {
354 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
355 .addReg(Op0, Op0IsKill * RegState::Kill)
356 .addReg(Op1, Op1IsKill * RegState::Kill)
357 .addReg(Op2, Op2IsKill * RegState::Kill));
358 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
359 TII.get(TargetOpcode::COPY), ResultReg)
360 .addReg(II.ImplicitDefs[0]));
361 }
362 return ResultReg;
363 }
364
365 unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
366 const TargetRegisterClass *RC,
367 unsigned Op0, bool Op0IsKill,
368 uint64_t Imm) {
369 unsigned ResultReg = createResultReg(RC);
370 const MCInstrDesc &II = TII.get(MachineInstOpcode);
371
372 if (II.getNumDefs() >= 1) {
373 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
374 .addReg(Op0, Op0IsKill * RegState::Kill)
375 .addImm(Imm));
376 } else {
377 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
378 .addReg(Op0, Op0IsKill * RegState::Kill)
379 .addImm(Imm));
380 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
381 TII.get(TargetOpcode::COPY), ResultReg)
382 .addReg(II.ImplicitDefs[0]));
383 }
384 return ResultReg;
385 }
386
387 unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
388 const TargetRegisterClass *RC,
389 unsigned Op0, bool Op0IsKill,
390 const ConstantFP *FPImm) {
391 unsigned ResultReg = createResultReg(RC);
392 const MCInstrDesc &II = TII.get(MachineInstOpcode);
393
394 if (II.getNumDefs() >= 1) {
395 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
396 .addReg(Op0, Op0IsKill * RegState::Kill)
397 .addFPImm(FPImm));
398 } else {
399 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
400 .addReg(Op0, Op0IsKill * RegState::Kill)
401 .addFPImm(FPImm));
402 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
403 TII.get(TargetOpcode::COPY), ResultReg)
404 .addReg(II.ImplicitDefs[0]));
405 }
406 return ResultReg;
407 }
408
409 unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
410 const TargetRegisterClass *RC,
411 unsigned Op0, bool Op0IsKill,
412 unsigned Op1, bool Op1IsKill,
413 uint64_t Imm) {
414 unsigned ResultReg = createResultReg(RC);
415 const MCInstrDesc &II = TII.get(MachineInstOpcode);
416
417 if (II.getNumDefs() >= 1) {
418 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
419 .addReg(Op0, Op0IsKill * RegState::Kill)
420 .addReg(Op1, Op1IsKill * RegState::Kill)
421 .addImm(Imm));
422 } else {
423 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
424 .addReg(Op0, Op0IsKill * RegState::Kill)
425 .addReg(Op1, Op1IsKill * RegState::Kill)
426 .addImm(Imm));
427 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
428 TII.get(TargetOpcode::COPY), ResultReg)
429 .addReg(II.ImplicitDefs[0]));
430 }
431 return ResultReg;
432 }
433
434 unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
435 const TargetRegisterClass *RC,
436 uint64_t Imm) {
437 unsigned ResultReg = createResultReg(RC);
438 const MCInstrDesc &II = TII.get(MachineInstOpcode);
439
440 if (II.getNumDefs() >= 1) {
441 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
442 .addImm(Imm));
443 } else {
444 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
445 .addImm(Imm));
446 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
447 TII.get(TargetOpcode::COPY), ResultReg)
448 .addReg(II.ImplicitDefs[0]));
449 }
450 return ResultReg;
451 }
452
453 unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
454 const TargetRegisterClass *RC,
455 uint64_t Imm1, uint64_t Imm2) {
456 unsigned ResultReg = createResultReg(RC);
457 const MCInstrDesc &II = TII.get(MachineInstOpcode);
458
459 if (II.getNumDefs() >= 1) {
460 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
461 .addImm(Imm1).addImm(Imm2));
462 } else {
463 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
464 .addImm(Imm1).addImm(Imm2));
465 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
466 TII.get(TargetOpcode::COPY),
467 ResultReg)
468 .addReg(II.ImplicitDefs[0]));
469 }
470 return ResultReg;
471 }
472
473 unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
474 unsigned Op0, bool Op0IsKill,
475 uint32_t Idx) {
476 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
477 assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
478 "Cannot yet extract from physregs");
479
480 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
481 DL, TII.get(TargetOpcode::COPY), ResultReg)
482 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
483 return ResultReg;
484 }
485
486 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
487 // checks from the various callers.
488 unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
489 if (VT == MVT::f64) return 0;
490
491 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
492 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
493 TII.get(ARM::VMOVSR), MoveReg)
494 .addReg(SrcReg));
495 return MoveReg;
496 }
497
498 unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
499 if (VT == MVT::i64) return 0;
500
501 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
502 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
503 TII.get(ARM::VMOVRS), MoveReg)
504 .addReg(SrcReg));
505 return MoveReg;
506 }
507
508 // For double width floating point we need to materialize two constants
509 // (the high and the low) into integer registers then use a move to get
510 // the combined constant into an FP reg.
511 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
512 const APFloat Val = CFP->getValueAPF();
513 bool is64bit = VT == MVT::f64;
514
515 // This checks to see if we can use VFP3 instructions to materialize
516 // a constant, otherwise we have to go through the constant pool.
517 if (TLI.isFPImmLegal(Val, VT)) {
518 int Imm;
519 unsigned Opc;
520 if (is64bit) {
521 Imm = ARM_AM::getFP64Imm(Val);
522 Opc = ARM::FCONSTD;
523 } else {
524 Imm = ARM_AM::getFP32Imm(Val);
525 Opc = ARM::FCONSTS;
526 }
527 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
528 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
529 DestReg)
530 .addImm(Imm));
531 return DestReg;
532 }
533
534 // Require VFP2 for loading fp constants.
535 if (!Subtarget->hasVFP2()) return false;
536
537 // MachineConstantPool wants an explicit alignment.
538 unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
539 if (Align == 0) {
540 // TODO: Figure out if this is correct.
541 Align = TD.getTypeAllocSize(CFP->getType());
542 }
543 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
544 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
545 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
546
547 // The extra reg is for addrmode5.
548 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
549 DestReg)
550 .addConstantPoolIndex(Idx)
551 .addReg(0));
552 return DestReg;
553 }
554
555 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
556
557 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
558 return false;
559
560 // If we can do this in a single instruction without a constant pool entry
561 // do so now.
562 const ConstantInt *CI = cast<ConstantInt>(C);
563 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
564 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
565 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
566 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
567 TII.get(Opc), ImmReg)
568 .addImm(CI->getZExtValue()));
569 return ImmReg;
570 }
571
572 // Use MVN to emit negative constants.
573 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
574 unsigned Imm = (unsigned)~(CI->getSExtValue());
575 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
576 (ARM_AM::getSOImmVal(Imm) != -1);
577 if (UseImm) {
578 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
579 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
580 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
581 TII.get(Opc), ImmReg)
582 .addImm(Imm));
583 return ImmReg;
584 }
585 }
586
587 // Load from constant pool. For now 32-bit only.
588 if (VT != MVT::i32)
589 return false;
590
591 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
592
593 // MachineConstantPool wants an explicit alignment.
594 unsigned Align = TD.getPrefTypeAlignment(C->getType());
595 if (Align == 0) {
596 // TODO: Figure out if this is correct.
597 Align = TD.getTypeAllocSize(C->getType());
598 }
599 unsigned Idx = MCP.getConstantPoolIndex(C, Align);
600
601 if (isThumb2)
602 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
603 TII.get(ARM::t2LDRpci), DestReg)
604 .addConstantPoolIndex(Idx));
605 else
606 // The extra immediate is for addrmode2.
607 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
608 TII.get(ARM::LDRcp), DestReg)
609 .addConstantPoolIndex(Idx)
610 .addImm(0));
611
612 return DestReg;
613 }
614
615 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
616 // For now 32-bit only.
617 if (VT != MVT::i32) return 0;
618
619 Reloc::Model RelocM = TM.getRelocationModel();
620 bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
621 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
622
623 // Use movw+movt when possible, it avoids constant pool entries.
624 // Darwin targets don't support movt with Reloc::Static, see
625 // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
626 // static movt relocations.
627 if (Subtarget->useMovt() &&
628 Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
629 unsigned Opc;
630 switch (RelocM) {
631 case Reloc::PIC_:
632 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
633 break;
634 case Reloc::DynamicNoPIC:
635 Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
636 break;
637 default:
638 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
639 break;
640 }
641 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
642 DestReg).addGlobalAddress(GV));
643 } else {
644 // MachineConstantPool wants an explicit alignment.
645 unsigned Align = TD.getPrefTypeAlignment(GV->getType());
646 if (Align == 0) {
647 // TODO: Figure out if this is correct.
648 Align = TD.getTypeAllocSize(GV->getType());
649 }
650
651 // Grab index.
652 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
653 (Subtarget->isThumb() ? 4 : 8);
654 unsigned Id = AFI->createPICLabelUId();
655 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
656 ARMCP::CPValue,
657 PCAdj);
658 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
659
660 // Load value.
661 MachineInstrBuilder MIB;
662 if (isThumb2) {
663 unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
664 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
665 .addConstantPoolIndex(Idx);
666 if (RelocM == Reloc::PIC_)
667 MIB.addImm(Id);
668 AddOptionalDefs(MIB);
669 } else {
670 // The extra immediate is for addrmode2.
671 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
672 DestReg)
673 .addConstantPoolIndex(Idx)
674 .addImm(0);
675 AddOptionalDefs(MIB);
676
677 if (RelocM == Reloc::PIC_) {
678 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
679 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
680
681 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
682 DL, TII.get(Opc), NewDestReg)
683 .addReg(DestReg)
684 .addImm(Id);
685 AddOptionalDefs(MIB);
686 return NewDestReg;
687 }
688 }
689 }
690
691 if (IsIndirect) {
692 MachineInstrBuilder MIB;
693 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
694 if (isThumb2)
695 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
696 TII.get(ARM::t2LDRi12), NewDestReg)
697 .addReg(DestReg)
698 .addImm(0);
699 else
700 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
701 NewDestReg)
702 .addReg(DestReg)
703 .addImm(0);
704 DestReg = NewDestReg;
705 AddOptionalDefs(MIB);
706 }
707
708 return DestReg;
709 }
710
711 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
712 EVT VT = TLI.getValueType(C->getType(), true);
713
714 // Only handle simple types.
715 if (!VT.isSimple()) return 0;
716
717 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
718 return ARMMaterializeFP(CFP, VT);
719 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
720 return ARMMaterializeGV(GV, VT);
721 else if (isa<ConstantInt>(C))
722 return ARMMaterializeInt(C, VT);
723
724 return 0;
725 }
726
727 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
728
729 unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
730 // Don't handle dynamic allocas.
731 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
732
733 MVT VT;
734 if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
735
736 DenseMap<const AllocaInst*, int>::iterator SI =
737 FuncInfo.StaticAllocaMap.find(AI);
738
739 // This will get lowered later into the correct offsets and registers
740 // via rewriteXFrameIndex.
741 if (SI != FuncInfo.StaticAllocaMap.end()) {
742 const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
743 unsigned ResultReg = createResultReg(RC);
744 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
745 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
746 TII.get(Opc), ResultReg)
747 .addFrameIndex(SI->second)
748 .addImm(0));
749 return ResultReg;
750 }
751
752 return 0;
753 }
754
755 bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
756 EVT evt = TLI.getValueType(Ty, true);
757
758 // Only handle simple types.
759 if (evt == MVT::Other || !evt.isSimple()) return false;
760 VT = evt.getSimpleVT();
761
762 // Handle all legal types, i.e. a register that will directly hold this
763 // value.
764 return TLI.isTypeLegal(VT);
765 }
766
767 bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
768 if (isTypeLegal(Ty, VT)) return true;
769
770 // If this is a type that can be sign or zero-extended to a basic operation
771 // go ahead and accept it now.
772 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
773 return true;
774
775 return false;
776 }
777
778 // Computes the address to get to an object.
779 bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
780 // Some boilerplate from the X86 FastISel.
781 const User *U = NULL;
782 unsigned Opcode = Instruction::UserOp1;
783 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
784 // Don't walk into other basic blocks unless the object is an alloca from
785 // another block, otherwise it may not have a virtual register assigned.
786 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
787 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
788 Opcode = I->getOpcode();
789 U = I;
790 }
791 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
792 Opcode = C->getOpcode();
793 U = C;
794 }
795
796 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
797 if (Ty->getAddressSpace() > 255)
798 // Fast instruction selection doesn't support the special
799 // address spaces.
800 return false;
801
802 switch (Opcode) {
803 default:
804 break;
805 case Instruction::BitCast: {
806 // Look through bitcasts.
807 return ARMComputeAddress(U->getOperand(0), Addr);
808 }
809 case Instruction::IntToPtr: {
810 // Look past no-op inttoptrs.
811 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
812 return ARMComputeAddress(U->getOperand(0), Addr);
813 break;
814 }
815 case Instruction::PtrToInt: {
816 // Look past no-op ptrtoints.
817 if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
818 return ARMComputeAddress(U->getOperand(0), Addr);
819 break;
820 }
821 case Instruction::GetElementPtr: {
822 Address SavedAddr = Addr;
823 int TmpOffset = Addr.Offset;
824
825 // Iterate through the GEP folding the constants into offsets where
826 // we can.
827 gep_type_iterator GTI = gep_type_begin(U);
828 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
829 i != e; ++i, ++GTI) {
830 const Value *Op = *i;
831 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
832 const StructLayout *SL = TD.getStructLayout(STy);
833 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
834 TmpOffset += SL->getElementOffset(Idx);
835 } else {
836 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
837 for (;;) {
838 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
839 // Constant-offset addressing.
840 TmpOffset += CI->getSExtValue() * S;
841 break;
842 }
843 if (isa<AddOperator>(Op) &&
844 (!isa<Instruction>(Op) ||
845 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
846 == FuncInfo.MBB) &&
847 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
848 // An add (in the same block) with a constant operand. Fold the
849 // constant.
850 ConstantInt *CI =
851 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
852 TmpOffset += CI->getSExtValue() * S;
853 // Iterate on the other operand.
854 Op = cast<AddOperator>(Op)->getOperand(0);
855 continue;
856 }
857 // Unsupported
858 goto unsupported_gep;
859 }
860 }
861 }
862
863 // Try to grab the base operand now.
864 Addr.Offset = TmpOffset;
865 if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
866
867 // We failed, restore everything and try the other options.
868 Addr = SavedAddr;
869
870 unsupported_gep:
871 break;
872 }
873 case Instruction::Alloca: {
874 const AllocaInst *AI = cast<AllocaInst>(Obj);
875 DenseMap<const AllocaInst*, int>::iterator SI =
876 FuncInfo.StaticAllocaMap.find(AI);
877 if (SI != FuncInfo.StaticAllocaMap.end()) {
878 Addr.BaseType = Address::FrameIndexBase;
879 Addr.Base.FI = SI->second;
880 return true;
881 }
882 break;
883 }
884 }
885
886 // Try to get this in a register if nothing else has worked.
887 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
888 return Addr.Base.Reg != 0;
889 }
890
891 void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
892
893 assert(VT.isSimple() && "Non-simple types are invalid here!");
894
895 bool needsLowering = false;
896 switch (VT.getSimpleVT().SimpleTy) {
897 default: llvm_unreachable("Unhandled load/store type!");
898 case MVT::i1:
899 case MVT::i8:
900 case MVT::i16:
901 case MVT::i32:
902 if (!useAM3) {
903 // Integer loads/stores handle 12-bit offsets.
904 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
905 // Handle negative offsets.
906 if (needsLowering && isThumb2)
907 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
908 Addr.Offset > -256);
909 } else {
910 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
911 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
912 }
913 break;
914 case MVT::f32:
915 case MVT::f64:
916 // Floating point operands handle 8-bit offsets.
917 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
918 break;
919 }
920
921 // If this is a stack pointer and the offset needs to be simplified then
922 // put the alloca address into a register, set the base type back to
923 // register and continue. This should almost never happen.
924 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
925 const TargetRegisterClass *RC = isThumb2 ?
926 (const TargetRegisterClass*)&ARM::tGPRRegClass :
927 (const TargetRegisterClass*)&ARM::GPRRegClass;
928 unsigned ResultReg = createResultReg(RC);
929 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
930 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
931 TII.get(Opc), ResultReg)
932 .addFrameIndex(Addr.Base.FI)
933 .addImm(0));
934 Addr.Base.Reg = ResultReg;
935 Addr.BaseType = Address::RegBase;
936 }
937
938 // Since the offset is too large for the load/store instruction
939 // get the reg+offset into a register.
940 if (needsLowering) {
941 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
942 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
943 Addr.Offset = 0;
944 }
945 }
946
947 void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
948 const MachineInstrBuilder &MIB,
949 unsigned Flags, bool useAM3) {
950 // addrmode5 expects the offset to be divided by 4; the selection DAG does the
951 // same division and multiplies it back later. Do this here as well.
952 if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
953 VT.getSimpleVT().SimpleTy == MVT::f64)
954 Addr.Offset /= 4;
955
956 // Frame base works a bit differently. Handle it separately.
957 if (Addr.BaseType == Address::FrameIndexBase) {
958 int FI = Addr.Base.FI;
959 int Offset = Addr.Offset;
960 MachineMemOperand *MMO =
961 FuncInfo.MF->getMachineMemOperand(
962 MachinePointerInfo::getFixedStack(FI, Offset),
963 Flags,
964 MFI.getObjectSize(FI),
965 MFI.getObjectAlignment(FI));
966 // Now add the rest of the operands.
967 MIB.addFrameIndex(FI);
968
969 // ARM halfword load/stores and signed byte loads need an additional
970 // operand.
971 if (useAM3) {
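// The addrmode3 form used here has no offset register (hence the addReg(0));
// the immediate packs the offset magnitude into its low 8 bits, with bit 8
// (0x100) marking a negative (subtract) offset.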
972 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
973 MIB.addReg(0);
974 MIB.addImm(Imm);
975 } else {
976 MIB.addImm(Addr.Offset);
977 }
978 MIB.addMemOperand(MMO);
979 } else {
980 // Now add the rest of the operands.
981 MIB.addReg(Addr.Base.Reg);
982
983 // ARM halfword load/stores and signed byte loads need an additional
984 // operand.
985 if (useAM3) {
986 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
987 MIB.addReg(0);
988 MIB.addImm(Imm);
989 } else {
990 MIB.addImm(Addr.Offset);
991 }
992 }
993 AddOptionalDefs(MIB);
994 }
995
996 bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
997 unsigned Alignment, bool isZExt, bool allocReg) {
998 assert(VT.isSimple() && "Non-simple types are invalid here!");
999 unsigned Opc;
1000 bool useAM3 = false;
1001 bool needVMOV = false;
1002 const TargetRegisterClass *RC;
1003 switch (VT.getSimpleVT().SimpleTy) {
1004 // This is mostly going to be Neon/vector support.
1005 default: return false;
1006 case MVT::i1:
1007 case MVT::i8:
1008 if (isThumb2) {
1009 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1010 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
1011 else
1012 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
1013 } else {
1014 if (isZExt) {
1015 Opc = ARM::LDRBi12;
1016 } else {
1017 Opc = ARM::LDRSB;
1018 useAM3 = true;
1019 }
1020 }
1021 RC = &ARM::GPRRegClass;
1022 break;
1023 case MVT::i16:
1024 if (isThumb2) {
1025 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1026 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
1027 else
1028 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
1029 } else {
1030 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
1031 useAM3 = true;
1032 }
1033 RC = &ARM::GPRRegClass;
1034 break;
1035 case MVT::i32:
1036 if (isThumb2) {
1037 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1038 Opc = ARM::t2LDRi8;
1039 else
1040 Opc = ARM::t2LDRi12;
1041 } else {
1042 Opc = ARM::LDRi12;
1043 }
1044 RC = &ARM::GPRRegClass;
1045 break;
1046 case MVT::f32:
1047 if (!Subtarget->hasVFP2()) return false;
1048 // Unaligned loads need special handling. Floats require word-alignment.
1049 if (Alignment && Alignment < 4) {
1050 needVMOV = true;
1051 VT = MVT::i32;
1052 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
1053 RC = &ARM::GPRRegClass;
1054 } else {
1055 Opc = ARM::VLDRS;
1056 RC = TLI.getRegClassFor(VT);
1057 }
1058 break;
1059 case MVT::f64:
1060 if (!Subtarget->hasVFP2()) return false;
1061 // FIXME: Unaligned loads need special handling. Doublewords require
1062 // word-alignment.
1063 if (Alignment && Alignment < 4)
1064 return false;
1065
1066 Opc = ARM::VLDRD;
1067 RC = TLI.getRegClassFor(VT);
1068 break;
1069 }
1070 // Simplify this down to something we can handle.
1071 ARMSimplifyAddress(Addr, VT, useAM3);
1072
1073 // Create the base instruction, then add the operands.
1074 if (allocReg)
1075 ResultReg = createResultReg(RC);
1076 assert (ResultReg > 255 && "Expected an allocated virtual register.");
1077 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1078 TII.get(Opc), ResultReg);
1079 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
1080
1081 // If we had an unaligned load of a float we've converted it to a regular
1082 // load. Now we must move from the GPR to the FP register.
1083 if (needVMOV) {
1084 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1085 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1086 TII.get(ARM::VMOVSR), MoveReg)
1087 .addReg(ResultReg));
1088 ResultReg = MoveReg;
1089 }
1090 return true;
1091 }
1092
1093 bool ARMFastISel::SelectLoad(const Instruction *I) {
1094 // Atomic loads need special handling.
1095 if (cast<LoadInst>(I)->isAtomic())
1096 return false;
1097
1098 // Verify we have a legal type before going any further.
1099 MVT VT;
1100 if (!isLoadTypeLegal(I->getType(), VT))
1101 return false;
1102
1103 // See if we can handle this address.
1104 Address Addr;
1105 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
1106
1107 unsigned ResultReg;
1108 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
1109 return false;
1110 UpdateValueMap(I, ResultReg);
1111 return true;
1112 }
1113
1114 bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
1115 unsigned Alignment) {
1116 unsigned StrOpc;
1117 bool useAM3 = false;
1118 switch (VT.getSimpleVT().SimpleTy) {
1119 // This is mostly going to be Neon/vector support.
1120 default: return false;
1121 case MVT::i1: {
1122 unsigned Res = createResultReg(isThumb2 ?
1123 (const TargetRegisterClass*)&ARM::tGPRRegClass :
1124 (const TargetRegisterClass*)&ARM::GPRRegClass);
1125 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1126 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1127 TII.get(Opc), Res)
1128 .addReg(SrcReg).addImm(1));
1129 SrcReg = Res;
1130 } // Fallthrough here.
1131 case MVT::i8:
1132 if (isThumb2) {
1133 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1134 StrOpc = ARM::t2STRBi8;
1135 else
1136 StrOpc = ARM::t2STRBi12;
1137 } else {
1138 StrOpc = ARM::STRBi12;
1139 }
1140 break;
1141 case MVT::i16:
1142 if (isThumb2) {
1143 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1144 StrOpc = ARM::t2STRHi8;
1145 else
1146 StrOpc = ARM::t2STRHi12;
1147 } else {
1148 StrOpc = ARM::STRH;
1149 useAM3 = true;
1150 }
1151 break;
1152 case MVT::i32:
1153 if (isThumb2) {
1154 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1155 StrOpc = ARM::t2STRi8;
1156 else
1157 StrOpc = ARM::t2STRi12;
1158 } else {
1159 StrOpc = ARM::STRi12;
1160 }
1161 break;
1162 case MVT::f32:
1163 if (!Subtarget->hasVFP2()) return false;
1164 // Unaligned stores need special handling. Floats require word-alignment.
1165 if (Alignment && Alignment < 4) {
1166 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1167 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1168 TII.get(ARM::VMOVRS), MoveReg)
1169 .addReg(SrcReg));
1170 SrcReg = MoveReg;
1171 VT = MVT::i32;
1172 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1173 } else {
1174 StrOpc = ARM::VSTRS;
1175 }
1176 break;
1177 case MVT::f64:
1178 if (!Subtarget->hasVFP2()) return false;
1179 // FIXME: Unaligned stores need special handling. Doublewords require
1180 // word-alignment.
1181 if (Alignment && Alignment < 4)
1182 return false;
1183
1184 StrOpc = ARM::VSTRD;
1185 break;
1186 }
1187 // Simplify this down to something we can handle.
1188 ARMSimplifyAddress(Addr, VT, useAM3);
1189
1190 // Create the base instruction, then add the operands.
1191 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1192 TII.get(StrOpc))
1193 .addReg(SrcReg);
1194 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
1195 return true;
1196 }
1197
1198 bool ARMFastISel::SelectStore(const Instruction *I) {
1199 Value *Op0 = I->getOperand(0);
1200 unsigned SrcReg = 0;
1201
1202 // Atomic stores need special handling.
1203 if (cast<StoreInst>(I)->isAtomic())
1204 return false;
1205
1206 // Verify we have a legal type before going any further.
1207 MVT VT;
1208 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
1209 return false;
1210
1211 // Get the value to be stored into a register.
1212 SrcReg = getRegForValue(Op0);
1213 if (SrcReg == 0) return false;
1214
1215 // See if we can handle this address.
1216 Address Addr;
1217 if (!ARMComputeAddress(I->getOperand(1), Addr))
1218 return false;
1219
1220 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
1221 return false;
1222 return true;
1223 }
1224
1225 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
1226 switch (Pred) {
1227 // Needs two compares...
1228 case CmpInst::FCMP_ONE:
1229 case CmpInst::FCMP_UEQ:
1230 default:
1231 // AL is our "false" for now. The other two need more compares.
1232 return ARMCC::AL;
1233 case CmpInst::ICMP_EQ:
1234 case CmpInst::FCMP_OEQ:
1235 return ARMCC::EQ;
1236 case CmpInst::ICMP_SGT:
1237 case CmpInst::FCMP_OGT:
1238 return ARMCC::GT;
1239 case CmpInst::ICMP_SGE:
1240 case CmpInst::FCMP_OGE:
1241 return ARMCC::GE;
1242 case CmpInst::ICMP_UGT:
1243 case CmpInst::FCMP_UGT:
1244 return ARMCC::HI;
1245 case CmpInst::FCMP_OLT:
1246 return ARMCC::MI;
1247 case CmpInst::ICMP_ULE:
1248 case CmpInst::FCMP_OLE:
1249 return ARMCC::LS;
1250 case CmpInst::FCMP_ORD:
1251 return ARMCC::VC;
1252 case CmpInst::FCMP_UNO:
1253 return ARMCC::VS;
1254 case CmpInst::FCMP_UGE:
1255 return ARMCC::PL;
1256 case CmpInst::ICMP_SLT:
1257 case CmpInst::FCMP_ULT:
1258 return ARMCC::LT;
1259 case CmpInst::ICMP_SLE:
1260 case CmpInst::FCMP_ULE:
1261 return ARMCC::LE;
1262 case CmpInst::FCMP_UNE:
1263 case CmpInst::ICMP_NE:
1264 return ARMCC::NE;
1265 case CmpInst::ICMP_UGE:
1266 return ARMCC::HS;
1267 case CmpInst::ICMP_ULT:
1268 return ARMCC::LO;
1269 }
1270 }
1271
1272 bool ARMFastISel::SelectBranch(const Instruction *I) {
1273 const BranchInst *BI = cast<BranchInst>(I);
1274 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1275 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1276
1277 // Simple branch support.
1278
1279 // If we can, avoid recomputing the compare - redoing it could lead to wonky
1280 // behavior.
1281 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1282 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
1283
1284 // Get the compare predicate.
1285 // Try to take advantage of fallthrough opportunities.
1286 CmpInst::Predicate Predicate = CI->getPredicate();
1287 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1288 std::swap(TBB, FBB);
1289 Predicate = CmpInst::getInversePredicate(Predicate);
1290 }
1291
1292 ARMCC::CondCodes ARMPred = getComparePred(Predicate);
1293
1294 // We may not handle every CC for now.
1295 if (ARMPred == ARMCC::AL) return false;
1296
1297 // Emit the compare.
1298 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1299 return false;
1300
1301 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1302 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1303 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1304 FastEmitBranch(FBB, DL);
1305 FuncInfo.MBB->addSuccessor(TBB);
1306 return true;
1307 }
1308 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1309 MVT SourceVT;
1310 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1311 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1312 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1313 unsigned OpReg = getRegForValue(TI->getOperand(0));
1314 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1315 TII.get(TstOpc))
1316 .addReg(OpReg).addImm(1));
1317
1318 unsigned CCMode = ARMCC::NE;
1319 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1320 std::swap(TBB, FBB);
1321 CCMode = ARMCC::EQ;
1322 }
1323
1324 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1325 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1326 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1327
1328 FastEmitBranch(FBB, DL);
1329 FuncInfo.MBB->addSuccessor(TBB);
1330 return true;
1331 }
1332 } else if (const ConstantInt *CI =
1333 dyn_cast<ConstantInt>(BI->getCondition())) {
1334 uint64_t Imm = CI->getZExtValue();
1335 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1336 FastEmitBranch(Target, DL);
1337 return true;
1338 }
1339
1340 unsigned CmpReg = getRegForValue(BI->getCondition());
1341 if (CmpReg == 0) return false;
1342
1343 // We've been divorced from our compare! Our block was split, and
1344 // now our compare lives in a predecessor block. We mustn't
1345 // re-compare here, as the children of the compare aren't guaranteed
1346 // live across the block boundary (we *could* check for this).
1347 // Regardless, the compare has been done in the predecessor block,
1348 // and it left a value for us in a virtual register. Ergo, we test
1349 // the one-bit value left in the virtual register.
1350 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1351 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
1352 .addReg(CmpReg).addImm(1));
1353
1354 unsigned CCMode = ARMCC::NE;
1355 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1356 std::swap(TBB, FBB);
1357 CCMode = ARMCC::EQ;
1358 }
1359
1360 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1361 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1362 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1363 FastEmitBranch(FBB, DL);
1364 FuncInfo.MBB->addSuccessor(TBB);
1365 return true;
1366 }
1367
1368 bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
1369 unsigned AddrReg = getRegForValue(I->getOperand(0));
1370 if (AddrReg == 0) return false;
1371
1372 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1373 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
1374 .addReg(AddrReg));
1375 return true;
1376 }
1377
1378 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1379 bool isZExt) {
1380 Type *Ty = Src1Value->getType();
1381 EVT SrcVT = TLI.getValueType(Ty, true);
1382 if (!SrcVT.isSimple()) return false;
1383
1384 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
1385 if (isFloat && !Subtarget->hasVFP2())
1386 return false;
1387
1388 // Check to see if the 2nd operand is a constant that we can encode directly
1389 // in the compare.
1390 int Imm = 0;
1391 bool UseImm = false;
1392 bool isNegativeImm = false;
1393 // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1394 // Thus, Src1Value may be a ConstantInt, but we're missing it.
1395 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1396 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1397 SrcVT == MVT::i1) {
1398 const APInt &CIVal = ConstInt->getValue();
1399 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
1400 // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
1401 // than a cmn, because there is no way to represent 2147483648 as a
1402 // signed 32-bit int.
1403 if (Imm < 0 && Imm != (int)0x80000000) {
1404 isNegativeImm = true;
1405 Imm = -Imm;
1406 }
1407 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1408 (ARM_AM::getSOImmVal(Imm) != -1);
1409 }
1410 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1411 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1412 if (ConstFP->isZero() && !ConstFP->isNegative())
1413 UseImm = true;
1414 }
1415
1416 unsigned CmpOpc;
1417 bool isICmp = true;
1418 bool needsExt = false;
1419 switch (SrcVT.getSimpleVT().SimpleTy) {
1420 default: return false;
1421 // TODO: Verify compares.
1422 case MVT::f32:
1423 isICmp = false;
1424 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
1425 break;
1426 case MVT::f64:
1427 isICmp = false;
1428 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
1429 break;
1430 case MVT::i1:
1431 case MVT::i8:
1432 case MVT::i16:
1433 needsExt = true;
1434 // Intentional fall-through.
1435 case MVT::i32:
1436 if (isThumb2) {
1437 if (!UseImm)
1438 CmpOpc = ARM::t2CMPrr;
1439 else
1440 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1441 } else {
1442 if (!UseImm)
1443 CmpOpc = ARM::CMPrr;
1444 else
1445 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1446 }
1447 break;
1448 }
1449
1450 unsigned SrcReg1 = getRegForValue(Src1Value);
1451 if (SrcReg1 == 0) return false;
1452
1453 unsigned SrcReg2 = 0;
1454 if (!UseImm) {
1455 SrcReg2 = getRegForValue(Src2Value);
1456 if (SrcReg2 == 0) return false;
1457 }
1458
1459 // We have i1, i8, or i16; we need to either zero extend or sign extend.
1460 if (needsExt) {
1461 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1462 if (SrcReg1 == 0) return false;
1463 if (!UseImm) {
1464 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1465 if (SrcReg2 == 0) return false;
1466 }
1467 }
1468
1469 if (!UseImm) {
1470 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1471 TII.get(CmpOpc))
1472 .addReg(SrcReg1).addReg(SrcReg2));
1473 } else {
1474 MachineInstrBuilder MIB;
1475 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1476 .addReg(SrcReg1);
1477
1478 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1479 if (isICmp)
1480 MIB.addImm(Imm);
1481 AddOptionalDefs(MIB);
1482 }
1483
1484 // For floating point we need to move the result to a comparison register
1485 // that we can then use for branches.
1486 if (Ty->isFloatTy() || Ty->isDoubleTy())
1487 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1488 TII.get(ARM::FMSTAT)));
1489 return true;
1490 }
1491
1492 bool ARMFastISel::SelectCmp(const Instruction *I) {
1493 const CmpInst *CI = cast<CmpInst>(I);
1494
1495 // Get the compare predicate.
1496 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
1497
1498 // We may not handle every CC for now.
1499 if (ARMPred == ARMCC::AL) return false;
1500
1501 // Emit the compare.
1502 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1503 return false;
1504
1505 // Now set a register based on the comparison. Explicitly set the predicates
1506 // here.
1507 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1508 const TargetRegisterClass *RC = isThumb2 ?
1509 (const TargetRegisterClass*)&ARM::rGPRRegClass :
1510 (const TargetRegisterClass*)&ARM::GPRRegClass;
1511 unsigned DestReg = createResultReg(RC);
1512 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
1513 unsigned ZeroReg = TargetMaterializeConstant(Zero);
1514 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
1515 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
1516 .addReg(ZeroReg).addImm(1)
1517 .addImm(ARMPred).addReg(ARM::CPSR);
1518
1519 UpdateValueMap(I, DestReg);
1520 return true;
1521 }
1522
1523 bool ARMFastISel::SelectFPExt(const Instruction *I) {
1524 // Make sure we have VFP and that we're extending float to double.
1525 if (!Subtarget->hasVFP2()) return false;
1526
1527 Value *V = I->getOperand(0);
1528 if (!I->getType()->isDoubleTy() ||
1529 !V->getType()->isFloatTy()) return false;
1530
1531 unsigned Op = getRegForValue(V);
1532 if (Op == 0) return false;
1533
1534 unsigned Result = createResultReg(&ARM::DPRRegClass);
1535 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1536 TII.get(ARM::VCVTDS), Result)
1537 .addReg(Op));
1538 UpdateValueMap(I, Result);
1539 return true;
1540 }
1541
1542 bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
1543 // Make sure we have VFP and that we're truncating double to float.
1544 if (!Subtarget->hasVFP2()) return false;
1545
1546 Value *V = I->getOperand(0);
1547 if (!(I->getType()->isFloatTy() &&
1548 V->getType()->isDoubleTy())) return false;
1549
1550 unsigned Op = getRegForValue(V);
1551 if (Op == 0) return false;
1552
1553 unsigned Result = createResultReg(&ARM::SPRRegClass);
1554 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1555 TII.get(ARM::VCVTSD), Result)
1556 .addReg(Op));
1557 UpdateValueMap(I, Result);
1558 return true;
1559 }
1560
1561 bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
1562 // Make sure we have VFP.
1563 if (!Subtarget->hasVFP2()) return false;
1564
1565 MVT DstVT;
1566 Type *Ty = I->getType();
1567 if (!isTypeLegal(Ty, DstVT))
1568 return false;
1569
1570 Value *Src = I->getOperand(0);
1571 EVT SrcVT = TLI.getValueType(Src->getType(), true);
1572 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1573 return false;
1574
1575 unsigned SrcReg = getRegForValue(Src);
1576 if (SrcReg == 0) return false;
1577
1578 // Handle sign-extension.
1579 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1580 EVT DestVT = MVT::i32;
1581 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
1582 /*isZExt*/!isSigned);
1583 if (SrcReg == 0) return false;
1584 }
1585
1586 // The conversion routine works on fp-reg to fp-reg and the operand above
1587 // was an integer, move it to the fp registers if possible.
1588 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1589 if (FP == 0) return false;
1590
1591 unsigned Opc;
1592 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1593 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1594 else return false;
1595
1596 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1597 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1598 ResultReg)
1599 .addReg(FP));
1600 UpdateValueMap(I, ResultReg);
1601 return true;
1602 }
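// Illustrative example (a sketch; assumes VFP2, placeholder registers):
//   %f = sitofp i16 %x to float
// is lowered above as roughly
//   sxth r0, r0            @ ARMEmitIntExt promotes the source to i32 first
//   vmov s0, r0            @ ARMMoveToFPReg
//   vcvt.f32.s32 s0, s0    @ VSITOS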
1603
1604 bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
1605 // Make sure we have VFP.
1606 if (!Subtarget->hasVFP2()) return false;
1607
1608 MVT DstVT;
1609 Type *RetTy = I->getType();
1610 if (!isTypeLegal(RetTy, DstVT))
1611 return false;
1612
1613 unsigned Op = getRegForValue(I->getOperand(0));
1614 if (Op == 0) return false;
1615
1616 unsigned Opc;
1617 Type *OpTy = I->getOperand(0)->getType();
1618 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1619 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1620 else return false;
1621
1622 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
1623 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1624 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1625 ResultReg)
1626 .addReg(Op));
1627
1628 // This result needs to be in an integer register, but the conversion only
1629 // takes place in fp-regs.
1630 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1631 if (IntReg == 0) return false;
1632
1633 UpdateValueMap(I, IntReg);
1634 return true;
1635 }
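// Illustrative example (a sketch; placeholder registers):
//   %i = fptoui float %f to i32
// becomes roughly
//   vcvt.u32.f32 s0, s0    @ VTOUIZS, result still lives in an S register
//   vmov r0, s0            @ ARMMoveToIntReg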
1636
1637 bool ARMFastISel::SelectSelect(const Instruction *I) {
1638 MVT VT;
1639 if (!isTypeLegal(I->getType(), VT))
1640 return false;
1641
1642 // Things need to be register sized for register moves.
1643 if (VT != MVT::i32) return false;
1644 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
1645
1646 unsigned CondReg = getRegForValue(I->getOperand(0));
1647 if (CondReg == 0) return false;
1648 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1649 if (Op1Reg == 0) return false;
1650
1651 // Check to see if we can use an immediate in the conditional move.
1652 int Imm = 0;
1653 bool UseImm = false;
1654 bool isNegativeImm = false;
1655 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1656 assert (VT == MVT::i32 && "Expecting an i32.");
1657 Imm = (int)ConstInt->getValue().getZExtValue();
1658 if (Imm < 0) {
1659 isNegativeImm = true;
1660 Imm = ~Imm;
1661 }
1662 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1663 (ARM_AM::getSOImmVal(Imm) != -1);
1664 }
1665
1666 unsigned Op2Reg = 0;
1667 if (!UseImm) {
1668 Op2Reg = getRegForValue(I->getOperand(2));
1669 if (Op2Reg == 0) return false;
1670 }
1671
1672 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
1673 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1674 .addReg(CondReg).addImm(0));
1675
1676 unsigned MovCCOpc;
1677 if (!UseImm) {
1678 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1679 } else {
1680 if (!isNegativeImm) {
1681 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1682 } else {
1683 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1684 }
1685 }
1686 unsigned ResultReg = createResultReg(RC);
1687 if (!UseImm)
1688 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1689 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
1690 else
1691 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1692 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
1693 UpdateValueMap(I, ResultReg);
1694 return true;
1695 }
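// Illustrative example (a sketch; placeholder registers): for
//   %r = select i1 %c, i32 %x, i32 7
// the routine above emits roughly
//   cmp   rC, #0
//   mov   rR, rX
//   moveq rR, #7           @ immediate form, 7 encodes as a modified immediate
// and falls back to a register MOVCC (or MVNCC for negative constants) when the
// constant cannot be encoded.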
1696
1697 bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
1698 MVT VT;
1699 Type *Ty = I->getType();
1700 if (!isTypeLegal(Ty, VT))
1701 return false;
1702
1703 // If we have hardware integer divide support, this should have been selected
1704 // automatically. In case of a real miss, go ahead and return false and it
1705 // will be picked up later.
1706 if (Subtarget->hasDivide()) return false;
1707
1708 // Otherwise emit a libcall.
1709 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1710 if (VT == MVT::i8)
1711 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1712 else if (VT == MVT::i16)
1713 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1714 else if (VT == MVT::i32)
1715 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1716 else if (VT == MVT::i64)
1717 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1718 else if (VT == MVT::i128)
1719 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1720 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1721
1722 return ARMEmitLibcall(I, LC);
1723 }
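// Illustrative example (a sketch): on a subtarget without hardware divide,
//   %q = sdiv i32 %a, %b
// is forwarded by the routine above to ARMEmitLibcall with RTLIB::SDIV_I32,
// which typically resolves to a call such as __aeabi_idiv (AEABI targets) or
// __divsi3.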
1724
1725 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1726 MVT VT;
1727 Type *Ty = I->getType();
1728 if (!isTypeLegal(Ty, VT))
1729 return false;
1730
1731 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1732 if (VT == MVT::i8)
1733 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1734 else if (VT == MVT::i16)
1735 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1736 else if (VT == MVT::i32)
1737 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1738 else if (VT == MVT::i64)
1739 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1740 else if (VT == MVT::i128)
1741 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1742 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1743
1744 return ARMEmitLibcall(I, LC);
1745 }
1746
1747 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1748 EVT DestVT = TLI.getValueType(I->getType(), true);
1749
1750 // We can get here in the case when we have a binary operation on a non-legal
1751 // type and the target independent selector doesn't know how to handle it.
1752 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1753 return false;
1754
1755 unsigned Opc;
1756 switch (ISDOpcode) {
1757 default: return false;
1758 case ISD::ADD:
1759 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1760 break;
1761 case ISD::OR:
1762 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1763 break;
1764 case ISD::SUB:
1765 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1766 break;
1767 }
1768
1769 unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1770 if (SrcReg1 == 0) return false;
1771
1772 // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1773 // in the instruction, rather than materializing the value in a register.
1774 unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1775 if (SrcReg2 == 0) return false;
1776
1777 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1778 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1779 TII.get(Opc), ResultReg)
1780 .addReg(SrcReg1).addReg(SrcReg2));
1781 UpdateValueMap(I, ResultReg);
1782 return true;
1783 }
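// Illustrative example (a sketch; placeholder registers): a binary operation on
// a non-legal type such as
//   %s = add i8 %a, %b
// is emitted above as a full 32-bit add, leaving the upper bits undefined:
//   add r0, r1, r2         @ ADDrr / t2ADDrr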
1784
1785 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1786 EVT VT = TLI.getValueType(I->getType(), true);
1787
1788 // We can get here in the case when we want to use NEON for our fp
1789 // operations, but can't figure out how to. Just use the vfp instructions
1790 // if we have them.
1791 // FIXME: It'd be nice to use NEON instructions.
1792 Type *Ty = I->getType();
1793 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
1794 if (isFloat && !Subtarget->hasVFP2())
1795 return false;
1796
1797 unsigned Opc;
1798 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1799 switch (ISDOpcode) {
1800 default: return false;
1801 case ISD::FADD:
1802 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1803 break;
1804 case ISD::FSUB:
1805 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1806 break;
1807 case ISD::FMUL:
1808 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1809 break;
1810 }
1811 unsigned Op1 = getRegForValue(I->getOperand(0));
1812 if (Op1 == 0) return false;
1813
1814 unsigned Op2 = getRegForValue(I->getOperand(1));
1815 if (Op2 == 0) return false;
1816
1817 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
1818 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1819 TII.get(Opc), ResultReg)
1820 .addReg(Op1).addReg(Op2));
1821 UpdateValueMap(I, ResultReg);
1822 return true;
1823 }
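// Illustrative example (a sketch; placeholder registers): with VFP2 available,
//   %p = fmul double %x, %y
// selects above to
//   vmul.f64 d0, d1, d2    @ VMULD (VMULS for the float case)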
1824
1825 // Call Handling Code
1826
1827 // This is largely taken directly from CCAssignFnForNode
1828 // TODO: We may not support all of this.
1829 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1830 bool Return,
1831 bool isVarArg) {
1832 switch (CC) {
1833 default:
1834 llvm_unreachable("Unsupported calling convention");
1835 case CallingConv::Fast:
1836 if (Subtarget->hasVFP2() && !isVarArg) {
1837 if (!Subtarget->isAAPCS_ABI())
1838 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1839 // For AAPCS ABI targets, just use the VFP variant of the calling convention.
1840 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1841 }
1842 // Fallthrough
1843 case CallingConv::C:
1844 // Use target triple & subtarget features to do actual dispatch.
1845 if (Subtarget->isAAPCS_ABI()) {
1846 if (Subtarget->hasVFP2() &&
1847 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
1848 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1849 else
1850 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1851 } else
1852 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1853 case CallingConv::ARM_AAPCS_VFP:
1854 if (!isVarArg)
1855 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1856 // Fall through to the soft-float variant; variadic functions don't
1857 // use the hard floating point ABI.
1858 case CallingConv::ARM_AAPCS:
1859 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1860 case CallingConv::ARM_APCS:
1861 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1862 case CallingConv::GHC:
1863 if (Return)
1864 llvm_unreachable("Can't return in GHC call convention");
1865 else
1866 return CC_ARM_APCS_GHC;
1867 }
1868 }
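// Illustrative example (a sketch): a CallingConv::C call on an AAPCS subtarget
// with VFP2 and a hard-float ABI resolves above to CC_ARM_AAPCS_VFP, while the
// same call on an APCS target falls back to CC_ARM_APCS.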
1869
1870 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1871 SmallVectorImpl<unsigned> &ArgRegs,
1872 SmallVectorImpl<MVT> &ArgVTs,
1873 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1874 SmallVectorImpl<unsigned> &RegArgs,
1875 CallingConv::ID CC,
1876 unsigned &NumBytes,
1877 bool isVarArg) {
1878 SmallVector<CCValAssign, 16> ArgLocs;
1879 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
1880 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1881 CCAssignFnForCall(CC, false, isVarArg));
1882
1883 // Check that we can handle all of the arguments. If we can't, then bail out
1884 // now before we add code to the MBB.
1885 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1886 CCValAssign &VA = ArgLocs[i];
1887 MVT ArgVT = ArgVTs[VA.getValNo()];
1888
1889 // We don't handle NEON/vector parameters yet.
1890 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1891 return false;
1892
1893 // Check that the argument's assigned location is one we can handle.
1894 if (VA.isRegLoc() && !VA.needsCustom()) {
1895 continue;
1896 } else if (VA.needsCustom()) {
1897 // TODO: We need custom lowering for vector (v2f64) args.
1898 if (VA.getLocVT() != MVT::f64 ||
1899 // TODO: Only handle register args for now.
1900 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1901 return false;
1902 } else {
1903 switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
1904 default:
1905 return false;
1906 case MVT::i1:
1907 case MVT::i8:
1908 case MVT::i16:
1909 case MVT::i32:
1910 break;
1911 case MVT::f32:
1912 if (!Subtarget->hasVFP2())
1913 return false;
1914 break;
1915 case MVT::f64:
1916 if (!Subtarget->hasVFP2())
1917 return false;
1918 break;
1919 }
1920 }
1921 }
1922
1923 // At this point we know we can handle the call's arguments in fast isel.
1924
1925 // Get a count of how many bytes are to be pushed on the stack.
1926 NumBytes = CCInfo.getNextStackOffset();
1927
1928 // Issue CALLSEQ_START
1929 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1930 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1931 TII.get(AdjStackDown))
1932 .addImm(NumBytes));
1933
1934 // Process the args.
1935 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1936 CCValAssign &VA = ArgLocs[i];
1937 unsigned Arg = ArgRegs[VA.getValNo()];
1938 MVT ArgVT = ArgVTs[VA.getValNo()];
1939
1940 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1941 "We don't handle NEON/vector parameters yet.");
1942
1943 // Handle arg promotion, etc.
1944 switch (VA.getLocInfo()) {
1945 case CCValAssign::Full: break;
1946 case CCValAssign::SExt: {
1947 MVT DestVT = VA.getLocVT();
1948 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1949 assert (Arg != 0 && "Failed to emit a sext");
1950 ArgVT = DestVT;
1951 break;
1952 }
1953 case CCValAssign::AExt:
1954 // Intentional fall-through. Handle AExt and ZExt.
1955 case CCValAssign::ZExt: {
1956 MVT DestVT = VA.getLocVT();
1957 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
1958 assert (Arg != 0 && "Failed to emit a zext");
1959 ArgVT = DestVT;
1960 break;
1961 }
1962 case CCValAssign::BCvt: {
1963 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
1964 /*TODO: Kill=*/false);
1965 assert(BC != 0 && "Failed to emit a bitcast!");
1966 Arg = BC;
1967 ArgVT = VA.getLocVT();
1968 break;
1969 }
1970 default: llvm_unreachable("Unknown arg promotion!");
1971 }
1972
1973 // Now copy/store arg to correct locations.
1974 if (VA.isRegLoc() && !VA.needsCustom()) {
1975 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1976 VA.getLocReg())
1977 .addReg(Arg);
1978 RegArgs.push_back(VA.getLocReg());
1979 } else if (VA.needsCustom()) {
1980 // TODO: We need custom lowering for vector (v2f64) args.
1981 assert(VA.getLocVT() == MVT::f64 &&
1982 "Custom lowering for v2f64 args not available");
1983
1984 CCValAssign &NextVA = ArgLocs[++i];
1985
1986 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
1987 "We only handle register args!");
1988
1989 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1990 TII.get(ARM::VMOVRRD), VA.getLocReg())
1991 .addReg(NextVA.getLocReg(), RegState::Define)
1992 .addReg(Arg));
1993 RegArgs.push_back(VA.getLocReg());
1994 RegArgs.push_back(NextVA.getLocReg());
1995 } else {
1996 assert(VA.isMemLoc());
1997 // Need to store on the stack.
1998 Address Addr;
1999 Addr.BaseType = Address::RegBase;
2000 Addr.Base.Reg = ARM::SP;
2001 Addr.Offset = VA.getLocMemOffset();
2002
2003 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2004 assert(EmitRet && "Could not emit a store for argument!");
2005 }
2006 }
2007
2008 return true;
2009 }
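// Illustrative example (a sketch; placeholder registers and callee): for a call
// such as
//   call void @foo(i8 zeroext %a, double %d)
// under a soft-float convention, the loop above emits roughly
//   uxtb r4, r4            @ promote the i8 argument to its i32 location type
//   mov  r0, r4            @ COPY into the assigned GPR
//   vmov r2, r3, d8        @ VMOVRRD splits the f64 across two GPRs
// and any stack-assigned argument is stored at [sp, #LocMemOffset].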
2010
2011 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
2012 const Instruction *I, CallingConv::ID CC,
2013 unsigned &NumBytes, bool isVarArg) {
2014 // Issue CALLSEQ_END
2015 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2016 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2017 TII.get(AdjStackUp))
2018 .addImm(NumBytes).addImm(0));
2019
2020 // Now the return value.
2021 if (RetVT != MVT::isVoid) {
2022 SmallVector<CCValAssign, 16> RVLocs;
2023 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
2024 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2025
2026 // Copy all of the result registers out of their specified physreg.
2027 if (RVLocs.size() == 2 && RetVT == MVT::f64) {
2028 // For this move we copy into two registers and then move into the
2029 // double fp reg we want.
2030 EVT DestVT = RVLocs[0].getValVT();
2031 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
2032 unsigned ResultReg = createResultReg(DstRC);
2033 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2034 TII.get(ARM::VMOVDRR), ResultReg)
2035 .addReg(RVLocs[0].getLocReg())
2036 .addReg(RVLocs[1].getLocReg()));
2037
2038 UsedRegs.push_back(RVLocs[0].getLocReg());
2039 UsedRegs.push_back(RVLocs[1].getLocReg());
2040
2041 // Finally update the result.
2042 UpdateValueMap(I, ResultReg);
2043 } else {
2044 assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
2045 EVT CopyVT = RVLocs[0].getValVT();
2046
2047 // Special handling for extended integers.
2048 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2049 CopyVT = MVT::i32;
2050
2051 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
2052
2053 unsigned ResultReg = createResultReg(DstRC);
2054 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2055 ResultReg).addReg(RVLocs[0].getLocReg());
2056 UsedRegs.push_back(RVLocs[0].getLocReg());
2057
2058 // Finally update the result.
2059 UpdateValueMap(I, ResultReg);
2060 }
2061 }
2062
2063 return true;
2064 }
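// Illustrative example (a sketch; placeholder registers): for a call returning
// double under a soft-float convention, the two halves come back in a GPR pair
// and are reassembled above with
//   vmov d0, r0, r1        @ VMOVDRR
// while integer returns narrower than i32 are copied out of their location
// register as a full i32.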
2065
2066 bool ARMFastISel::SelectRet(const Instruction *I) {
2067 const ReturnInst *Ret = cast<ReturnInst>(I);
2068 const Function &F = *I->getParent()->getParent();
2069
2070 if (!FuncInfo.CanLowerReturn)
2071 return false;
2072
2073 CallingConv::ID CC = F.getCallingConv();
2074 if (Ret->getNumOperands() > 0) {
2075 SmallVector<ISD::OutputArg, 4> Outs;
2076 GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
2077 Outs, TLI);
2078
2079 // Analyze operands of the call, assigning locations to each operand.
2080 SmallVector<CCValAssign, 16> ValLocs;
2081 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext());
2082 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
2083 F.isVarArg()));
2084
2085 const Value *RV = Ret->getOperand(0);
2086 unsigned Reg = getRegForValue(RV);
2087 if (Reg == 0)
2088 return false;
2089
2090 // Only handle a single return value for now.
2091 if (ValLocs.size() != 1)
2092 return false;
2093
2094 CCValAssign &VA = ValLocs[0];
2095
2096 // Don't bother handling odd stuff for now.
2097 if (VA.getLocInfo() != CCValAssign::Full)
2098 return false;
2099 // Only handle register returns for now.
2100 if (!VA.isRegLoc())
2101 return false;
2102
2103 unsigned SrcReg = Reg + VA.getValNo();
2104 EVT RVVT = TLI.getValueType(RV->getType());
2105 EVT DestVT = VA.getValVT();
2106 // Special handling for extended integers.
2107 if (RVVT != DestVT) {
2108 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2109 return false;
2110
2111 assert(DestVT == MVT::i32 && "ARM should always ext to i32");
2112
2113 // Perform extension if flagged as either zext or sext. Otherwise, do
2114 // nothing.
2115 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2116 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2117 if (SrcReg == 0) return false;
2118 }
2119 }
2120
2121 // Make the copy.
2122 unsigned DstReg = VA.getLocReg();
2123 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2124 // Avoid a cross-class copy. This is very unlikely.
2125 if (!SrcRC->contains(DstReg))
2126 return false;
2127 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2128 DstReg).addReg(SrcReg);
2129
2130 // Mark the register as live out of the function.
2131 MRI.addLiveOut(VA.getLocReg());
2132 }
2133
2134 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
2135 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2136 TII.get(RetOpc)));
2137 return true;
2138 }
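// Illustrative example (a sketch; hypothetical function @f): for
//   define zeroext i8 @f(...) { ...; ret i8 %v }
// the routine above zero-extends %v to i32, copies it into the return register
// assigned by the calling convention (normally r0), and emits BX_RET
// (tBX_RET when compiling Thumb2).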
2139
2140 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
2141 if (UseReg)
2142 return isThumb2 ? ARM::tBLXr : ARM::BLX;
2143 else
2144 return isThumb2 ? ARM::tBL : ARM::BL;
2145 }
2146
2147 unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
2148 GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
2149 GlobalValue::ExternalLinkage, 0, Name);
2150 return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
2151 }
2152
2153 // A quick function that will emit a call to the named libcall, using the
2154 // operands of the Instruction I as the passed arguments. We can assume that we
2155 // can emit a call for any libcall we can produce. This is an abridged version
2156 // of the full call infrastructure since we won't need to worry about things
2157 // like computed function pointers or strange arguments at call sites.
2158 // TODO: Try to unify this and the normal call bits for ARM, then try to unify
2159 // with X86.
2160 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
2161 CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
2162
2163 // Handle *simple* calls for now.
2164 Type *RetTy = I->getType();
2165 MVT RetVT;
2166 if (RetTy->isVoidTy())
2167 RetVT = MVT::isVoid;
2168 else if (!isTypeLegal(RetTy, RetVT))
2169 return false;
2170
2171 // Can't handle non-double multi-reg retvals.
2172 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2173 SmallVector<CCValAssign, 16> RVLocs;
2174 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
2175 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
2176 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2177 return false;
2178 }
2179
2180 // Set up the argument vectors.
2181 SmallVector<Value*, 8> Args;
2182 SmallVector<unsigned, 8> ArgRegs;
2183 SmallVector<MVT, 8> ArgVTs;
2184 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2185 Args.reserve(I->getNumOperands());
2186 ArgRegs.reserve(I->getNumOperands());
2187 ArgVTs.reserve(I->getNumOperands());
2188 ArgFlags.reserve(I->getNumOperands());
2189 for (unsigned i = 0; i < I->getNumOperands(); ++i) {
2190 Value *Op = I->getOperand(i);
2191 unsigned Arg = getRegForValue(Op);
2192 if (Arg == 0) return false;
2193
2194 Type *ArgTy = Op->getType();
2195 MVT ArgVT;
2196 if (!isTypeLegal(ArgTy, ArgVT)) return false;
2197
2198 ISD::ArgFlagsTy Flags;
2199 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2200 Flags.setOrigAlign(OriginalAlignment);
2201
2202 Args.push_back(Op);
2203 ArgRegs.push_back(Arg);
2204 ArgVTs.push_back(ArgVT);
2205 ArgFlags.push_back(Flags);
2206 }
2207
2208 // Handle the arguments now that we've gotten them.
2209 SmallVector<unsigned, 4> RegArgs;
2210 unsigned NumBytes;
2211 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2212 RegArgs, CC, NumBytes, false))
2213 return false;
2214
2215 unsigned CalleeReg = 0;
2216 if (EnableARMLongCalls) {
2217 CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2218 if (CalleeReg == 0) return false;
2219 }
2220
2221 // Issue the call.
2222 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
2223 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2224 DL, TII.get(CallOpc));
2225 // BL / BLX don't take a predicate, but tBL / tBLX do.
2226 if (isThumb2)
2227 AddDefaultPred(MIB);
2228 if (EnableARMLongCalls)
2229 MIB.addReg(CalleeReg);
2230 else
2231 MIB.addExternalSymbol(TLI.getLibcallName(Call));
2232
2233 // Add implicit physical register uses to the call.
2234 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
2235 MIB.addReg(RegArgs[i], RegState::Implicit);
2236
2237 // Add a register mask with the call-preserved registers.
2238 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2239 MIB.addRegMask(TRI.getCallPreservedMask(CC));
2240
2241 // Finish off the call including any return values.
2242 SmallVector<unsigned, 4> UsedRegs;
2243 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
2244
2245 // Set all unused physreg defs as dead.
2246 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2247
2248 return true;
2249 }
2250
2251 bool ARMFastISel::SelectCall(const Instruction *I,
2252 const char *IntrMemName = 0) {
2253 const CallInst *CI = cast<CallInst>(I);
2254 const Value *Callee = CI->getCalledValue();
2255
2256 // Can't handle inline asm.
2257 if (isa<InlineAsm>(Callee)) return false;
2258
2259 // Check the calling convention.
2260 ImmutableCallSite CS(CI);
2261 CallingConv::ID CC = CS.getCallingConv();
2262
2263 // TODO: Avoid some calling conventions?
2264
2265 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
2266 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
2267 bool isVarArg = FTy->isVarArg();
2268
2269 // Handle *simple* calls for now.
2270 Type *RetTy = I->getType();
2271 MVT RetVT;
2272 if (RetTy->isVoidTy())
2273 RetVT = MVT::isVoid;
2274 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2275 RetVT != MVT::i8 && RetVT != MVT::i1)
2276 return false;
2277
2278 // Can't handle non-double multi-reg retvals.
2279 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2280 RetVT != MVT::i16 && RetVT != MVT::i32) {
2281 SmallVector<CCValAssign, 16> RVLocs;
2282 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
2283 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2284 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2285 return false;
2286 }
2287
2288 // Set up the argument vectors.
2289 SmallVector<Value*, 8> Args;
2290 SmallVector<unsigned, 8> ArgRegs;
2291 SmallVector<MVT, 8> ArgVTs;
2292 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2293 unsigned arg_size = CS.arg_size();
2294 Args.reserve(arg_size);
2295 ArgRegs.reserve(arg_size);
2296 ArgVTs.reserve(arg_size);
2297 ArgFlags.reserve(arg_size);
2298 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
2299 i != e; ++i) {
2300 // If we're lowering a memory intrinsic instead of a regular call, skip the
2301 // last two arguments, which shouldn't be passed to the underlying function.
2302 if (IntrMemName && e-i <= 2)
2303 break;
2304
2305 ISD::ArgFlagsTy Flags;
2306 unsigned AttrInd = i - CS.arg_begin() + 1;
2307 if (CS.paramHasAttr(AttrInd, Attribute::SExt))
2308 Flags.setSExt();
2309 if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
2310 Flags.setZExt();
2311
2312 // FIXME: Only handle *easy* calls for now.
2313 if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
2314 CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
2315 CS.paramHasAttr(AttrInd, Attribute::Nest) ||
2316 CS.paramHasAttr(AttrInd, Attribute::ByVal))
2317 return false;
2318
2319 Type *ArgTy = (*i)->getType();
2320 MVT ArgVT;
2321 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2322 ArgVT != MVT::i1)
2323 return false;
2324
2325 unsigned Arg = getRegForValue(*i);
2326 if (Arg == 0)
2327 return false;
2328
2329 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2330 Flags.setOrigAlign(OriginalAlignment);
2331
2332 Args.push_back(*i);
2333 ArgRegs.push_back(Arg);
2334 ArgVTs.push_back(ArgVT);
2335 ArgFlags.push_back(Flags);
2336 }
2337
2338 // Handle the arguments now that we've gotten them.
2339 SmallVector<unsigned, 4> RegArgs;
2340 unsigned NumBytes;
2341 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2342 RegArgs, CC, NumBytes, isVarArg))
2343 return false;
2344
2345 bool UseReg = false;
2346 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2347 if (!GV || EnableARMLongCalls) UseReg = true;
2348
2349 unsigned CalleeReg = 0;
2350 if (UseReg) {
2351 if (IntrMemName)
2352 CalleeReg = getLibcallReg(IntrMemName);
2353 else
2354 CalleeReg = getRegForValue(Callee);
2355
2356 if (CalleeReg == 0) return false;
2357 }
2358
2359 // Issue the call.
2360 unsigned CallOpc = ARMSelectCallOp(UseReg);
2361 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2362 DL, TII.get(CallOpc));
2363
2364 // ARM calls don't take a predicate, but tBL / tBLX do.
2365 if (isThumb2)
2366 AddDefaultPred(MIB);
2367 if (UseReg)
2368 MIB.addReg(CalleeReg);
2369 else if (!IntrMemName)
2370 MIB.addGlobalAddress(GV, 0, 0);
2371 else
2372 MIB.addExternalSymbol(IntrMemName, 0);
2373
2374 // Add implicit physical register uses to the call.
2375 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
2376 MIB.addReg(RegArgs[i], RegState::Implicit);
2377
2378 // Add a register mask with the call-preserved registers.
2379 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2380 MIB.addRegMask(TRI.getCallPreservedMask(CC));
2381
2382 // Finish off the call including any return values.
2383 SmallVector<unsigned, 4> UsedRegs;
2384 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2385 return false;
2386
2387 // Set all unused physreg defs as dead.
2388 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2389
2390 return true;
2391 }
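// Illustrative example (a sketch): a direct call to a known GlobalValue is
// emitted above as BL/tBL with a global-address operand, while indirect calls
// (and all calls when long calls are enabled) first materialize the callee into
// a register and use BLX/tBLXr instead.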
2392
2393 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2394 return Len <= 16;
2395 }
2396
2397 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
2398 uint64_t Len) {
2399 // Make sure we don't bloat code by inlining very large memcpy's.
2400 if (!ARMIsMemCpySmall(Len))
2401 return false;
2402
2403 // We don't care about alignment here since we just emit integer accesses.
2404 while (Len) {
2405 MVT VT;
2406 if (Len >= 4)
2407 VT = MVT::i32;
2408 else if (Len >= 2)
2409 VT = MVT::i16;
2410 else {
2411 assert(Len == 1);
2412 VT = MVT::i8;
2413 }
2414
2415 bool RV;
2416 unsigned ResultReg;
2417 RV = ARMEmitLoad(VT, ResultReg, Src);
2418 assert (RV == true && "Should be able to handle this load.");
2419 RV = ARMEmitStore(VT, ResultReg, Dest);
2420 assert (RV == true && "Should be able to handle this store.");
2421 (void)RV;
2422
2423 unsigned Size = VT.getSizeInBits()/8;
2424 Len -= Size;
2425 Dest.Offset += Size;
2426 Src.Offset += Size;
2427 }
2428
2429 return true;
2430 }
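// Illustrative example (a sketch): an inline copy of Len == 7 bytes is split by
// the loop above into an i32, an i16, and an i8 load/store pair
// (ldr/str, ldrh/strh, ldrb/strb), advancing both address offsets after each
// piece.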
2431
2432 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2433 // FIXME: Handle more intrinsics.
2434 switch (I.getIntrinsicID()) {
2435 default: return false;
2436 case Intrinsic::frameaddress: {
2437 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
2438 MFI->setFrameAddressIsTaken(true);
2439
2440 unsigned LdrOpc;
2441 const TargetRegisterClass *RC;
2442 if (isThumb2) {
2443 LdrOpc = ARM::t2LDRi12;
2444 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
2445 } else {
2446 LdrOpc = ARM::LDRi12;
2447 RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
2448 }
2449
2450 const ARMBaseRegisterInfo *RegInfo =
2451 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
2452 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
2453 unsigned SrcReg = FramePtr;
2454
2455 // Recursively load frame address
2456 // ldr r0 [fp]
2457 // ldr r0 [r0]
2458 // ldr r0 [r0]
2459 // ...
2460 unsigned DestReg;
2461 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
2462 while (Depth--) {
2463 DestReg = createResultReg(RC);
2464 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2465 TII.get(LdrOpc), DestReg)
2466 .addReg(SrcReg).addImm(0));
2467 SrcReg = DestReg;
2468 }
2469 UpdateValueMap(&I, SrcReg);
2470 return true;
2471 }
2472 case Intrinsic::memcpy:
2473 case Intrinsic::memmove: {
2474 const MemTransferInst &MTI = cast<MemTransferInst>(I);
2475 // Don't handle volatile.
2476 if (MTI.isVolatile())
2477 return false;
2478
2479 // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
2480 // we would emit dead code because we don't currently handle memmoves.
2481 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2482 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
2483 // Small memcpy's are common enough that we want to do them without a call
2484 // if possible.
2485 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
2486 if (ARMIsMemCpySmall(Len)) {
2487 Address Dest, Src;
2488 if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2489 !ARMComputeAddress(MTI.getRawSource(), Src))
2490 return false;
2491 if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
2492 return true;
2493 }
2494 }
2495
2496 if (!MTI.getLength()->getType()->isIntegerTy(32))
2497 return false;
2498
2499 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2500 return false;
2501
2502 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2503 return SelectCall(&I, IntrMemName);
2504 }
2505 case Intrinsic::memset: {
2506 const MemSetInst &MSI = cast<MemSetInst>(I);
2507 // Don't handle volatile.
2508 if (MSI.isVolatile())
2509 return false;
2510
2511 if (!MSI.getLength()->getType()->isIntegerTy(32))
2512 return false;
2513
2514 if (MSI.getDestAddressSpace() > 255)
2515 return false;
2516
2517 return SelectCall(&I, "memset");
2518 }
2519 case Intrinsic::trap: {
2520 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
2521 return true;
2522 }
2523 }
2524 }
2525
2526 bool ARMFastISel::SelectTrunc(const Instruction *I) {
2527 // The high bits for a type smaller than the register size are assumed to be
2528 // undefined.
2529 Value *Op = I->getOperand(0);
2530
2531 EVT SrcVT, DestVT;
2532 SrcVT = TLI.getValueType(Op->getType(), true);
2533 DestVT = TLI.getValueType(I->getType(), true);
2534
2535 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2536 return false;
2537 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2538 return false;
2539
2540 unsigned SrcReg = getRegForValue(Op);
2541 if (!SrcReg) return false;
2542
2543 // Because the high bits are undefined, a truncate doesn't generate
2544 // any code.
2545 UpdateValueMap(I, SrcReg);
2546 return true;
2547 }
2548
2549 unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
2550 bool isZExt) {
2551 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2552 return 0;
2553
2554 unsigned Opc;
2555 bool isBoolZext = false;
2556 if (!SrcVT.isSimple()) return 0;
2557 switch (SrcVT.getSimpleVT().SimpleTy) {
2558 default: return 0;
2559 case MVT::i16:
2560 if (!Subtarget->hasV6Ops()) return 0;
2561 if (isZExt)
2562 Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
2563 else
2564 Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
2565 break;
2566 case MVT::i8:
2567 if (!Subtarget->hasV6Ops()) return 0;
2568 if (isZExt)
2569 Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
2570 else
2571 Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
2572 break;
2573 case MVT::i1:
2574 if (isZExt) {
2575 Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
2576 isBoolZext = true;
2577 break;
2578 }
2579 return 0;
2580 }
2581
2582 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
2583 MachineInstrBuilder MIB;
2584 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
2585 .addReg(SrcReg);
2586 if (isBoolZext)
2587 MIB.addImm(1);
2588 else
2589 MIB.addImm(0);
2590 AddOptionalDefs(MIB);
2591 return ResultReg;
2592 }
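// Illustrative example (a sketch; placeholder registers): extending with the
// helper above yields roughly
//   and  r0, r0, #1        @ zext i1 -> i32 (ANDri with immediate 1)
//   sxtb r1, r1            @ sext i8 -> i32 (SXTB, requires ARMv6+)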
2593
2594 bool ARMFastISel::SelectIntExt(const Instruction *I) {
2595 // On ARM, in general, integer casts don't involve legal types; this code
2596 // handles promotable integers.
2597 Type *DestTy = I->getType();
2598 Value *Src = I->getOperand(0);
2599 Type *SrcTy = Src->getType();
2600
2601 EVT SrcVT, DestVT;
2602 SrcVT = TLI.getValueType(SrcTy, true);
2603 DestVT = TLI.getValueType(DestTy, true);
2604
2605 bool isZExt = isa<ZExtInst>(I);
2606 unsigned SrcReg = getRegForValue(Src);
2607 if (!SrcReg) return false;
2608
2609 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2610 if (ResultReg == 0) return false;
2611 UpdateValueMap(I, ResultReg);
2612 return true;
2613 }
2614
2615 bool ARMFastISel::SelectShift(const Instruction *I,
2616 ARM_AM::ShiftOpc ShiftTy) {
2617 // Thumb2 mode is handled by the target-independent selector
2618 // or by SelectionDAG ISel.
2619 if (isThumb2)
2620 return false;
2621
2622 // Only handle i32 now.
2623 EVT DestVT = TLI.getValueType(I->getType(), true);
2624 if (DestVT != MVT::i32)
2625 return false;
2626
2627 unsigned Opc = ARM::MOVsr;
2628 unsigned ShiftImm;
2629 Value *Src2Value = I->getOperand(1);
2630 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2631 ShiftImm = CI->getZExtValue();
2632
2633 // Fall back to selection DAG isel if the shift amount
2634 // is zero or greater than the width of the value type.
2635 if (ShiftImm == 0 || ShiftImm >= 32)
2636 return false;
2637
2638 Opc = ARM::MOVsi;
2639 }
2640
2641 Value *Src1Value = I->getOperand(0);
2642 unsigned Reg1 = getRegForValue(Src1Value);
2643 if (Reg1 == 0) return false;
2644
2645 unsigned Reg2 = 0;
2646 if (Opc == ARM::MOVsr) {
2647 Reg2 = getRegForValue(Src2Value);
2648 if (Reg2 == 0) return false;
2649 }
2650
2651 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
2652 if (ResultReg == 0) return false;
2653
2654 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2655 TII.get(Opc), ResultReg)
2656 .addReg(Reg1);
2657
2658 if (Opc == ARM::MOVsi)
2659 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2660 else if (Opc == ARM::MOVsr) {
2661 MIB.addReg(Reg2);
2662 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2663 }
2664
2665 AddOptionalDefs(MIB);
2666 UpdateValueMap(I, ResultReg);
2667 return true;
2668 }
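// Illustrative example (a sketch; placeholder registers, ARM mode only):
//   %r = shl i32 %x, 3     ->   mov r0, r1, lsl #3     @ MOVsi
//   %r = lshr i32 %x, %n   ->   mov r0, r1, lsr r2     @ MOVsr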
2669
2670 // TODO: SoftFP support.
2671 bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
2672
2673 switch (I->getOpcode()) {
2674 case Instruction::Load:
2675 return SelectLoad(I);
2676 case Instruction::Store:
2677 return SelectStore(I);
2678 case Instruction::Br:
2679 return SelectBranch(I);
2680 case Instruction::IndirectBr:
2681 return SelectIndirectBr(I);
2682 case Instruction::ICmp:
2683 case Instruction::FCmp:
2684 return SelectCmp(I);
2685 case Instruction::FPExt:
2686 return SelectFPExt(I);
2687 case Instruction::FPTrunc:
2688 return SelectFPTrunc(I);
2689 case Instruction::SIToFP:
2690 return SelectIToFP(I, /*isSigned*/ true);
2691 case Instruction::UIToFP:
2692 return SelectIToFP(I, /*isSigned*/ false);
2693 case Instruction::FPToSI:
2694 return SelectFPToI(I, /*isSigned*/ true);
2695 case Instruction::FPToUI:
2696 return SelectFPToI(I, /*isSigned*/ false);
2697 case Instruction::Add:
2698 return SelectBinaryIntOp(I, ISD::ADD);
2699 case Instruction::Or:
2700 return SelectBinaryIntOp(I, ISD::OR);
2701 case Instruction::Sub:
2702 return SelectBinaryIntOp(I, ISD::SUB);
2703 case Instruction::FAdd:
2704 return SelectBinaryFPOp(I, ISD::FADD);
2705 case Instruction::FSub:
2706 return SelectBinaryFPOp(I, ISD::FSUB);
2707 case Instruction::FMul:
2708 return SelectBinaryFPOp(I, ISD::FMUL);
2709 case Instruction::SDiv:
2710 return SelectDiv(I, /*isSigned*/ true);
2711 case Instruction::UDiv:
2712 return SelectDiv(I, /*isSigned*/ false);
2713 case Instruction::SRem:
2714 return SelectRem(I, /*isSigned*/ true);
2715 case Instruction::URem:
2716 return SelectRem(I, /*isSigned*/ false);
2717 case Instruction::Call:
2718 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2719 return SelectIntrinsicCall(*II);
2720 return SelectCall(I);
2721 case Instruction::Select:
2722 return SelectSelect(I);
2723 case Instruction::Ret:
2724 return SelectRet(I);
2725 case Instruction::Trunc:
2726 return SelectTrunc(I);
2727 case Instruction::ZExt:
2728 case Instruction::SExt:
2729 return SelectIntExt(I);
2730 case Instruction::Shl:
2731 return SelectShift(I, ARM_AM::lsl);
2732 case Instruction::LShr:
2733 return SelectShift(I, ARM_AM::lsr);
2734 case Instruction::AShr:
2735 return SelectShift(I, ARM_AM::asr);
2736 default: break;
2737 }
2738 return false;
2739 }
2740
2741 /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
2742 /// vreg is being provided by the specified load instruction. If possible,
2743 /// try to fold the load as an operand to the instruction, returning true if
2744 /// successful.
2745 bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
2746 const LoadInst *LI) {
2747 // Verify we have a legal type before going any further.
2748 MVT VT;
2749 if (!isLoadTypeLegal(LI->getType(), VT))
2750 return false;
2751
2752 // Combine load followed by zero- or sign-extend.
2753 //   ldrb r1, [r0]       ldrb r1, [r0]
2754 //   uxtb r2, r1     =>
2755 //   mov  r3, r2         mov  r3, r1
2756 bool isZExt = true;
2757 switch(MI->getOpcode()) {
2758 default: return false;
2759 case ARM::SXTH:
2760 case ARM::t2SXTH:
2761 isZExt = false;
2762 case ARM::UXTH:
2763 case ARM::t2UXTH:
2764 if (VT != MVT::i16)
2765 return false;
2766 break;
2767 case ARM::SXTB:
2768 case ARM::t2SXTB:
2769 isZExt = false;
2770 case ARM::UXTB:
2771 case ARM::t2UXTB:
2772 if (VT != MVT::i8)
2773 return false;
2774 break;
2775 }
2776 // See if we can handle this address.
2777 Address Addr;
2778 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
2779
2780 unsigned ResultReg = MI->getOperand(0).getReg();
2781 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
2782 return false;
2783 MI->eraseFromParent();
2784 return true;
2785 }
2786
2787 namespace llvm {
2788 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
2789 const TargetLibraryInfo *libInfo) {
2790 // Completely untested on non-iOS.
2791 const TargetMachine &TM = funcInfo.MF->getTarget();
2792
2793 // iOS only, and not Thumb1, for now.
2794 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
2795 if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
2796 return new ARMFastISel(funcInfo, libInfo);
2797 return 0;
2798 }
2799 }
2800