1 //===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the ARM-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // ARMGenFastISel.inc, which is #included here.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMBaseRegisterInfo.h"
19 #include "ARMCallingConv.h"
20 #include "ARMConstantPoolValue.h"
21 #include "ARMISelLowering.h"
22 #include "ARMMachineFunctionInfo.h"
23 #include "ARMSubtarget.h"
24 #include "MCTargetDesc/ARMAddressingModes.h"
25 #include "MCTargetDesc/ARMBaseInfo.h"
26 #include "Utils/ARMBaseInfo.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/CodeGen/CallingConvLower.h"
32 #include "llvm/CodeGen/FastISel.h"
33 #include "llvm/CodeGen/FunctionLoweringInfo.h"
34 #include "llvm/CodeGen/ISDOpcodes.h"
35 #include "llvm/CodeGen/MachineBasicBlock.h"
36 #include "llvm/CodeGen/MachineConstantPool.h"
37 #include "llvm/CodeGen/MachineFrameInfo.h"
38 #include "llvm/CodeGen/MachineFunction.h"
39 #include "llvm/CodeGen/MachineInstr.h"
40 #include "llvm/CodeGen/MachineInstrBuilder.h"
41 #include "llvm/CodeGen/MachineMemOperand.h"
42 #include "llvm/CodeGen/MachineOperand.h"
43 #include "llvm/CodeGen/MachineRegisterInfo.h"
44 #include "llvm/CodeGen/RuntimeLibcalls.h"
45 #include "llvm/CodeGen/TargetInstrInfo.h"
46 #include "llvm/CodeGen/TargetLowering.h"
47 #include "llvm/CodeGen/TargetOpcodes.h"
48 #include "llvm/CodeGen/TargetRegisterInfo.h"
49 #include "llvm/CodeGen/ValueTypes.h"
50 #include "llvm/IR/Argument.h"
51 #include "llvm/IR/Attributes.h"
52 #include "llvm/IR/CallSite.h"
53 #include "llvm/IR/CallingConv.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DerivedTypes.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GetElementPtrTypeIterator.h"
60 #include "llvm/IR/GlobalValue.h"
61 #include "llvm/IR/GlobalVariable.h"
62 #include "llvm/IR/InstrTypes.h"
63 #include "llvm/IR/Instruction.h"
64 #include "llvm/IR/Instructions.h"
65 #include "llvm/IR/IntrinsicInst.h"
66 #include "llvm/IR/Intrinsics.h"
67 #include "llvm/IR/Module.h"
68 #include "llvm/IR/Operator.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/MC/MCInstrDesc.h"
73 #include "llvm/MC/MCRegisterInfo.h"
74 #include "llvm/Support/Casting.h"
75 #include "llvm/Support/Compiler.h"
76 #include "llvm/Support/ErrorHandling.h"
77 #include "llvm/Support/MachineValueType.h"
78 #include "llvm/Support/MathExtras.h"
79 #include "llvm/Target/TargetMachine.h"
80 #include "llvm/Target/TargetOptions.h"
81 #include <cassert>
82 #include <cstdint>
83 #include <utility>
84
85 using namespace llvm;
86
87 namespace {
88
89 // All possible address modes, plus some.
90 struct Address {
91 enum {
92 RegBase,
93 FrameIndexBase
94 } BaseType = RegBase;
95
96 union {
97 unsigned Reg;
98 int FI;
99 } Base;
100
101 int Offset = 0;
102
103 // Innocuous defaults for our address.
104     Address() {
105 Base.Reg = 0;
106 }
107 };
108
109 class ARMFastISel final : public FastISel {
110 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
111 /// make the right decision when generating code for different targets.
112 const ARMSubtarget *Subtarget;
113 Module &M;
114 const TargetMachine &TM;
115 const TargetInstrInfo &TII;
116 const TargetLowering &TLI;
117 ARMFunctionInfo *AFI;
118
119 // Convenience variables to avoid some queries.
120 bool isThumb2;
121 LLVMContext *Context;
122
123 public:
124   explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
125 const TargetLibraryInfo *libInfo)
126 : FastISel(funcInfo, libInfo),
127 Subtarget(
128 &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
129 M(const_cast<Module &>(*funcInfo.Fn->getParent())),
130 TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
131 TLI(*Subtarget->getTargetLowering()) {
132 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
133 isThumb2 = AFI->isThumbFunction();
134 Context = &funcInfo.Fn->getContext();
135 }
136
137 private:
138 // Code from FastISel.cpp.
139
140 unsigned fastEmitInst_r(unsigned MachineInstOpcode,
141 const TargetRegisterClass *RC,
142 unsigned Op0, bool Op0IsKill);
143 unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
144 const TargetRegisterClass *RC,
145 unsigned Op0, bool Op0IsKill,
146 unsigned Op1, bool Op1IsKill);
147 unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
148 const TargetRegisterClass *RC,
149 unsigned Op0, bool Op0IsKill,
150 uint64_t Imm);
151 unsigned fastEmitInst_i(unsigned MachineInstOpcode,
152 const TargetRegisterClass *RC,
153 uint64_t Imm);
154
155 // Backend specific FastISel code.
156
157 bool fastSelectInstruction(const Instruction *I) override;
158 unsigned fastMaterializeConstant(const Constant *C) override;
159 unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
160 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
161 const LoadInst *LI) override;
162 bool fastLowerArguments() override;
163
164 #include "ARMGenFastISel.inc"
165
166 // Instruction selection routines.
167
168 bool SelectLoad(const Instruction *I);
169 bool SelectStore(const Instruction *I);
170 bool SelectBranch(const Instruction *I);
171 bool SelectIndirectBr(const Instruction *I);
172 bool SelectCmp(const Instruction *I);
173 bool SelectFPExt(const Instruction *I);
174 bool SelectFPTrunc(const Instruction *I);
175 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
176 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
177 bool SelectIToFP(const Instruction *I, bool isSigned);
178 bool SelectFPToI(const Instruction *I, bool isSigned);
179 bool SelectDiv(const Instruction *I, bool isSigned);
180 bool SelectRem(const Instruction *I, bool isSigned);
181 bool SelectCall(const Instruction *I, const char *IntrMemName);
182 bool SelectIntrinsicCall(const IntrinsicInst &I);
183 bool SelectSelect(const Instruction *I);
184 bool SelectRet(const Instruction *I);
185 bool SelectTrunc(const Instruction *I);
186 bool SelectIntExt(const Instruction *I);
187 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
188
189 // Utility routines.
190
191 bool isPositionIndependent() const;
192 bool isTypeLegal(Type *Ty, MVT &VT);
193 bool isLoadTypeLegal(Type *Ty, MVT &VT);
194 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
195 bool isZExt, bool isEquality);
196 bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
197 unsigned Alignment = 0, bool isZExt = true,
198 bool allocReg = true);
199 bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
200 unsigned Alignment = 0);
201 bool ARMComputeAddress(const Value *Obj, Address &Addr);
202 void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
203 bool ARMIsMemCpySmall(uint64_t Len);
204 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
205 unsigned Alignment);
206 unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
207 unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
208 unsigned ARMMaterializeInt(const Constant *C, MVT VT);
209 unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
210 unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
211 unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
212 unsigned ARMSelectCallOp(bool UseReg);
213 unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);
214
215   const TargetLowering *getTargetLowering() { return &TLI; }
216
217 // Call handling routines.
218
219 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
220 bool Return,
221 bool isVarArg);
222 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
223 SmallVectorImpl<unsigned> &ArgRegs,
224 SmallVectorImpl<MVT> &ArgVTs,
225 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
226 SmallVectorImpl<unsigned> &RegArgs,
227 CallingConv::ID CC,
228 unsigned &NumBytes,
229 bool isVarArg);
230 unsigned getLibcallReg(const Twine &Name);
231 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
232 const Instruction *I, CallingConv::ID CC,
233 unsigned &NumBytes, bool isVarArg);
234 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
235
236 // OptionalDef handling routines.
237
238 bool isARMNEONPred(const MachineInstr *MI);
239 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
240 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
241 void AddLoadStoreOperands(MVT VT, Address &Addr,
242 const MachineInstrBuilder &MIB,
243 MachineMemOperand::Flags Flags, bool useAM3);
244 };
245
246 } // end anonymous namespace
247
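// Pull in the tablegen-generated calling-convention assignment functions used by CCAssignFnForCall.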
248 #include "ARMGenCallingConv.inc"
249
250 // DefinesOptionalPredicate - This is different from DefinesPredicate in that
251 // we don't care about implicit defs here, just places we'll need to add a
252 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
253 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
254 if (!MI->hasOptionalDef())
255 return false;
256
257 // Look to see if our OptionalDef is defining CPSR or CCR.
258 for (const MachineOperand &MO : MI->operands()) {
259 if (!MO.isReg() || !MO.isDef()) continue;
260 if (MO.getReg() == ARM::CPSR)
261 *CPSR = true;
262 }
263 return true;
264 }
265
266 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
267 const MCInstrDesc &MCID = MI->getDesc();
268
269   // For Thumb2 functions, or for instructions outside the NEON domain, defer to isPredicable.
270 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
271 AFI->isThumb2Function())
272 return MI->isPredicable();
273
274 for (const MCOperandInfo &opInfo : MCID.operands())
275 if (opInfo.isPredicate())
276 return true;
277
278 return false;
279 }
280
281 // If the machine instruction is predicable, go ahead and add the predicate operands;
282 // if it needs default CC operands, add those.
283 // TODO: If we want to support thumb1 then we'll need to deal with optional
284 // CPSR defs that need to be added before the remaining operands. See s_cc_out
285 // for descriptions why.
286 const MachineInstrBuilder &
287 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
288 MachineInstr *MI = &*MIB;
289
290   // Do we use a predicate? Or...
291   // are we a NEON instruction in ARM mode with a predicate operand? If so, we
292   // know we're not predicable, but add the predicate anyway.
293 if (isARMNEONPred(MI))
294 MIB.add(predOps(ARMCC::AL));
295
296   // Do we optionally set a predicate? CPSR is set to true iff the optional
297   // def is CPSR; all other optional defs in ARM are the CCR register.
298 bool CPSR = false;
299 if (DefinesOptionalPredicate(MI, &CPSR))
300 MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
301 return MIB;
302 }
303
304 unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
305 const TargetRegisterClass *RC,
306 unsigned Op0, bool Op0IsKill) {
307 unsigned ResultReg = createResultReg(RC);
308 const MCInstrDesc &II = TII.get(MachineInstOpcode);
309
310 // Make sure the input operand is sufficiently constrained to be legal
311 // for this instruction.
312 Op0 = constrainOperandRegClass(II, Op0, 1);
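  // If the instruction has an explicit def, write the result directly into ResultReg;
  // otherwise the result lands in an implicit def and must be copied out.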
313 if (II.getNumDefs() >= 1) {
314 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
315 ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
316 } else {
317 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
318 .addReg(Op0, Op0IsKill * RegState::Kill));
319 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
320 TII.get(TargetOpcode::COPY), ResultReg)
321 .addReg(II.ImplicitDefs[0]));
322 }
323 return ResultReg;
324 }
325
326 unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
327 const TargetRegisterClass *RC,
328 unsigned Op0, bool Op0IsKill,
329 unsigned Op1, bool Op1IsKill) {
330 unsigned ResultReg = createResultReg(RC);
331 const MCInstrDesc &II = TII.get(MachineInstOpcode);
332
333 // Make sure the input operands are sufficiently constrained to be legal
334 // for this instruction.
335 Op0 = constrainOperandRegClass(II, Op0, 1);
336 Op1 = constrainOperandRegClass(II, Op1, 2);
337
338 if (II.getNumDefs() >= 1) {
339 AddOptionalDefs(
340 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
341 .addReg(Op0, Op0IsKill * RegState::Kill)
342 .addReg(Op1, Op1IsKill * RegState::Kill));
343 } else {
344 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
345 .addReg(Op0, Op0IsKill * RegState::Kill)
346 .addReg(Op1, Op1IsKill * RegState::Kill));
347 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
348 TII.get(TargetOpcode::COPY), ResultReg)
349 .addReg(II.ImplicitDefs[0]));
350 }
351 return ResultReg;
352 }
353
354 unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
355 const TargetRegisterClass *RC,
356 unsigned Op0, bool Op0IsKill,
357 uint64_t Imm) {
358 unsigned ResultReg = createResultReg(RC);
359 const MCInstrDesc &II = TII.get(MachineInstOpcode);
360
361 // Make sure the input operand is sufficiently constrained to be legal
362 // for this instruction.
363 Op0 = constrainOperandRegClass(II, Op0, 1);
364 if (II.getNumDefs() >= 1) {
365 AddOptionalDefs(
366 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
367 .addReg(Op0, Op0IsKill * RegState::Kill)
368 .addImm(Imm));
369 } else {
370 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
371 .addReg(Op0, Op0IsKill * RegState::Kill)
372 .addImm(Imm));
373 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
374 TII.get(TargetOpcode::COPY), ResultReg)
375 .addReg(II.ImplicitDefs[0]));
376 }
377 return ResultReg;
378 }
379
380 unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
381 const TargetRegisterClass *RC,
382 uint64_t Imm) {
383 unsigned ResultReg = createResultReg(RC);
384 const MCInstrDesc &II = TII.get(MachineInstOpcode);
385
386 if (II.getNumDefs() >= 1) {
387 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
388 ResultReg).addImm(Imm));
389 } else {
390 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
391 .addImm(Imm));
392 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
393 TII.get(TargetOpcode::COPY), ResultReg)
394 .addReg(II.ImplicitDefs[0]));
395 }
396 return ResultReg;
397 }
398
399 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
400 // checks from the various callers.
401 unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
402 if (VT == MVT::f64) return 0;
403
404 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
405 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
406 TII.get(ARM::VMOVSR), MoveReg)
407 .addReg(SrcReg));
408 return MoveReg;
409 }
410
411 unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
412 if (VT == MVT::i64) return 0;
413
414 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
415 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
416 TII.get(ARM::VMOVRS), MoveReg)
417 .addReg(SrcReg));
418 return MoveReg;
419 }
420
421 // For double width floating point we need to materialize two constants
422 // (the high and the low) into integer registers then use a move to get
423 // the combined constant into an FP reg.
424 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
425 const APFloat Val = CFP->getValueAPF();
426 bool is64bit = VT == MVT::f64;
427
428 // This checks to see if we can use VFP3 instructions to materialize
429 // a constant, otherwise we have to go through the constant pool.
430 if (TLI.isFPImmLegal(Val, VT)) {
431 int Imm;
432 unsigned Opc;
433 if (is64bit) {
434 Imm = ARM_AM::getFP64Imm(Val);
435 Opc = ARM::FCONSTD;
436 } else {
437 Imm = ARM_AM::getFP32Imm(Val);
438 Opc = ARM::FCONSTS;
439 }
440 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
441 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
442 TII.get(Opc), DestReg).addImm(Imm));
443 return DestReg;
444 }
445
446 // Require VFP2 for loading fp constants.
447 if (!Subtarget->hasVFP2()) return false;
448
449 // MachineConstantPool wants an explicit alignment.
450 unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
451 if (Align == 0) {
452 // TODO: Figure out if this is correct.
453 Align = DL.getTypeAllocSize(CFP->getType());
454 }
455 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
456 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
457 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
458
459 // The extra reg is for addrmode5.
460 AddOptionalDefs(
461 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
462 .addConstantPoolIndex(Idx)
463 .addReg(0));
464 return DestReg;
465 }
466
467 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
468 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
469 return 0;
470
471 // If we can do this in a single instruction without a constant pool entry
472 // do so now.
473 const ConstantInt *CI = cast<ConstantInt>(C);
474 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
475 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
476 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
477 &ARM::GPRRegClass;
478 unsigned ImmReg = createResultReg(RC);
479 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
480 TII.get(Opc), ImmReg)
481 .addImm(CI->getZExtValue()));
482 return ImmReg;
483 }
484
485 // Use MVN to emit negative constants.
486 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
487 unsigned Imm = (unsigned)~(CI->getSExtValue());
488 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
489 (ARM_AM::getSOImmVal(Imm) != -1);
490 if (UseImm) {
491 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
492 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
493 &ARM::GPRRegClass;
494 unsigned ImmReg = createResultReg(RC);
495 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
496 TII.get(Opc), ImmReg)
497 .addImm(Imm));
498 return ImmReg;
499 }
500 }
501
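  // If the target supports movw/movt, try to synthesize the constant directly before
  // falling back to a constant pool load.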
502 unsigned ResultReg = 0;
503 if (Subtarget->useMovt(*FuncInfo.MF))
504 ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
505
506 if (ResultReg)
507 return ResultReg;
508
509 // Load from constant pool. For now 32-bit only.
510 if (VT != MVT::i32)
511 return 0;
512
513 // MachineConstantPool wants an explicit alignment.
514 unsigned Align = DL.getPrefTypeAlignment(C->getType());
515 if (Align == 0) {
516 // TODO: Figure out if this is correct.
517 Align = DL.getTypeAllocSize(C->getType());
518 }
519 unsigned Idx = MCP.getConstantPoolIndex(C, Align);
520 ResultReg = createResultReg(TLI.getRegClassFor(VT));
521 if (isThumb2)
522 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
523 TII.get(ARM::t2LDRpci), ResultReg)
524 .addConstantPoolIndex(Idx));
525 else {
526 // The extra immediate is for addrmode2.
527 ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
528 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
529 TII.get(ARM::LDRcp), ResultReg)
530 .addConstantPoolIndex(Idx)
531 .addImm(0));
532 }
533 return ResultReg;
534 }
535
536 bool ARMFastISel::isPositionIndependent() const {
537 return TLI.isPositionIndependent();
538 }
539
540 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
541 // For now 32-bit only.
542 if (VT != MVT::i32 || GV->isThreadLocal()) return 0;
543
544 // ROPI/RWPI not currently supported.
545 if (Subtarget->isROPI() || Subtarget->isRWPI())
546 return 0;
547
548 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
549 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
550 : &ARM::GPRRegClass;
551 unsigned DestReg = createResultReg(RC);
552
553 // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
554 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
555 bool IsThreadLocal = GVar && GVar->isThreadLocal();
556 if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;
557
558 bool IsPositionIndependent = isPositionIndependent();
559   // Use movw+movt when possible; it avoids constant pool entries.
560   // Non-Darwin targets only support static movt relocations in FastISel.
561 if (Subtarget->useMovt(*FuncInfo.MF) &&
562 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
563 unsigned Opc;
564 unsigned char TF = 0;
565 if (Subtarget->isTargetMachO())
566 TF = ARMII::MO_NONLAZY;
567
568 if (IsPositionIndependent)
569 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
570 else
571 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
572 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
573 TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
574 } else {
575 // MachineConstantPool wants an explicit alignment.
576 unsigned Align = DL.getPrefTypeAlignment(GV->getType());
577 if (Align == 0) {
578 // TODO: Figure out if this is correct.
579 Align = DL.getTypeAllocSize(GV->getType());
580 }
581
582 if (Subtarget->isTargetELF() && IsPositionIndependent)
583 return ARMLowerPICELF(GV, Align, VT);
584
585 // Grab index.
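    // The PC adjustment accounts for the implicit PC bias when the label is resolved:
    // 8 bytes in ARM mode, 4 in Thumb mode.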
586 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
587 unsigned Id = AFI->createPICLabelUId();
588 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
589 ARMCP::CPValue,
590 PCAdj);
591 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
592
593 // Load value.
594 MachineInstrBuilder MIB;
595 if (isThumb2) {
596 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
597 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
598 DestReg).addConstantPoolIndex(Idx);
599 if (IsPositionIndependent)
600 MIB.addImm(Id);
601 AddOptionalDefs(MIB);
602 } else {
603 // The extra immediate is for addrmode2.
604 DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
605 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
606 TII.get(ARM::LDRcp), DestReg)
607 .addConstantPoolIndex(Idx)
608 .addImm(0);
609 AddOptionalDefs(MIB);
610
611 if (IsPositionIndependent) {
612 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
613 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
614
615 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
616 DbgLoc, TII.get(Opc), NewDestReg)
617 .addReg(DestReg)
618 .addImm(Id);
619 AddOptionalDefs(MIB);
620 return NewDestReg;
621 }
622 }
623 }
624
625 if (IsIndirect) {
626 MachineInstrBuilder MIB;
627 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
628 if (isThumb2)
629 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
630 TII.get(ARM::t2LDRi12), NewDestReg)
631 .addReg(DestReg)
632 .addImm(0);
633 else
634 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
635 TII.get(ARM::LDRi12), NewDestReg)
636 .addReg(DestReg)
637 .addImm(0);
638 DestReg = NewDestReg;
639 AddOptionalDefs(MIB);
640 }
641
642 return DestReg;
643 }
644
645 unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
646 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
647
648 // Only handle simple types.
649 if (!CEVT.isSimple()) return 0;
650 MVT VT = CEVT.getSimpleVT();
651
652 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
653 return ARMMaterializeFP(CFP, VT);
654 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
655 return ARMMaterializeGV(GV, VT);
656 else if (isa<ConstantInt>(C))
657 return ARMMaterializeInt(C, VT);
658
659 return 0;
660 }
661
662 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
663
664 unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
665 // Don't handle dynamic allocas.
666 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
667
668 MVT VT;
669 if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
670
671 DenseMap<const AllocaInst*, int>::iterator SI =
672 FuncInfo.StaticAllocaMap.find(AI);
673
674 // This will get lowered later into the correct offsets and registers
675 // via rewriteXFrameIndex.
676 if (SI != FuncInfo.StaticAllocaMap.end()) {
677 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
678 const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
679 unsigned ResultReg = createResultReg(RC);
680 ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
681
682 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
683 TII.get(Opc), ResultReg)
684 .addFrameIndex(SI->second)
685 .addImm(0));
686 return ResultReg;
687 }
688
689 return 0;
690 }
691
692 bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
693 EVT evt = TLI.getValueType(DL, Ty, true);
694
695 // Only handle simple types.
696 if (evt == MVT::Other || !evt.isSimple()) return false;
697 VT = evt.getSimpleVT();
698
699 // Handle all legal types, i.e. a register that will directly hold this
700 // value.
701 return TLI.isTypeLegal(VT);
702 }
703
704 bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
705 if (isTypeLegal(Ty, VT)) return true;
706
707   // If this is a type that can be sign- or zero-extended to a basic operation,
708   // go ahead and accept it now.
709 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
710 return true;
711
712 return false;
713 }
714
715 // Computes the address to get to an object.
716 bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
717 // Some boilerplate from the X86 FastISel.
718 const User *U = nullptr;
719 unsigned Opcode = Instruction::UserOp1;
720 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
721 // Don't walk into other basic blocks unless the object is an alloca from
722 // another block, otherwise it may not have a virtual register assigned.
723 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
724 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
725 Opcode = I->getOpcode();
726 U = I;
727 }
728 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
729 Opcode = C->getOpcode();
730 U = C;
731 }
732
733 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
734 if (Ty->getAddressSpace() > 255)
735 // Fast instruction selection doesn't support the special
736 // address spaces.
737 return false;
738
739 switch (Opcode) {
740 default:
741 break;
742 case Instruction::BitCast:
743 // Look through bitcasts.
744 return ARMComputeAddress(U->getOperand(0), Addr);
745 case Instruction::IntToPtr:
746 // Look past no-op inttoptrs.
747 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
748 TLI.getPointerTy(DL))
749 return ARMComputeAddress(U->getOperand(0), Addr);
750 break;
751 case Instruction::PtrToInt:
752 // Look past no-op ptrtoints.
753 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
754 return ARMComputeAddress(U->getOperand(0), Addr);
755 break;
756 case Instruction::GetElementPtr: {
757 Address SavedAddr = Addr;
758 int TmpOffset = Addr.Offset;
759
760 // Iterate through the GEP folding the constants into offsets where
761 // we can.
762 gep_type_iterator GTI = gep_type_begin(U);
763 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
764 i != e; ++i, ++GTI) {
765 const Value *Op = *i;
766 if (StructType *STy = GTI.getStructTypeOrNull()) {
767 const StructLayout *SL = DL.getStructLayout(STy);
768 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
769 TmpOffset += SL->getElementOffset(Idx);
770 } else {
771 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
772 while (true) {
773 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
774 // Constant-offset addressing.
775 TmpOffset += CI->getSExtValue() * S;
776 break;
777 }
778 if (canFoldAddIntoGEP(U, Op)) {
779 // A compatible add with a constant operand. Fold the constant.
780 ConstantInt *CI =
781 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
782 TmpOffset += CI->getSExtValue() * S;
783 // Iterate on the other operand.
784 Op = cast<AddOperator>(Op)->getOperand(0);
785 continue;
786 }
787 // Unsupported
788 goto unsupported_gep;
789 }
790 }
791 }
792
793 // Try to grab the base operand now.
794 Addr.Offset = TmpOffset;
795 if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
796
797 // We failed, restore everything and try the other options.
798 Addr = SavedAddr;
799
800 unsupported_gep:
801 break;
802 }
803 case Instruction::Alloca: {
804 const AllocaInst *AI = cast<AllocaInst>(Obj);
805 DenseMap<const AllocaInst*, int>::iterator SI =
806 FuncInfo.StaticAllocaMap.find(AI);
807 if (SI != FuncInfo.StaticAllocaMap.end()) {
808 Addr.BaseType = Address::FrameIndexBase;
809 Addr.Base.FI = SI->second;
810 return true;
811 }
812 break;
813 }
814 }
815
816 // Try to get this in a register if nothing else has worked.
817 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
818 return Addr.Base.Reg != 0;
819 }
820
821 void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
822 bool needsLowering = false;
823 switch (VT.SimpleTy) {
824 default: llvm_unreachable("Unhandled load/store type!");
825 case MVT::i1:
826 case MVT::i8:
827 case MVT::i16:
828 case MVT::i32:
829 if (!useAM3) {
830 // Integer loads/stores handle 12-bit offsets.
831 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
832 // Handle negative offsets.
833 if (needsLowering && isThumb2)
834 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
835 Addr.Offset > -256);
836 } else {
837 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
838 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
839 }
840 break;
841 case MVT::f32:
842 case MVT::f64:
843 // Floating point operands handle 8-bit offsets.
844 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
845 break;
846 }
847
848   // If this is a frame index and the offset needs to be simplified, then put the
849   // alloca address into a register, set the base type back to register, and
850   // continue. This should almost never happen.
851 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
852 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
853 : &ARM::GPRRegClass;
854 unsigned ResultReg = createResultReg(RC);
855 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
856 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
857 TII.get(Opc), ResultReg)
858 .addFrameIndex(Addr.Base.FI)
859 .addImm(0));
860 Addr.Base.Reg = ResultReg;
861 Addr.BaseType = Address::RegBase;
862 }
863
864 // Since the offset is too large for the load/store instruction
865 // get the reg+offset into a register.
866 if (needsLowering) {
867 Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
868 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
869 Addr.Offset = 0;
870 }
871 }
872
873 void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
874 const MachineInstrBuilder &MIB,
875 MachineMemOperand::Flags Flags,
876 bool useAM3) {
877   // addrmode5 expects the offset already divided by 4 (the encoding scales it
878   // back up later), matching what SelectionDAG addressing does. Do the same here.
879 if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
880 Addr.Offset /= 4;
881
882 // Frame base works a bit differently. Handle it separately.
883 if (Addr.BaseType == Address::FrameIndexBase) {
884 int FI = Addr.Base.FI;
885 int Offset = Addr.Offset;
886 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
887 MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
888 MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
889 // Now add the rest of the operands.
890 MIB.addFrameIndex(FI);
891
892 // ARM halfword load/stores and signed byte loads need an additional
893 // operand.
894 if (useAM3) {
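      // addrmode3 packs the offset magnitude into the low 8 bits and flags a negative
      // offset with bit 8.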
895 int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
896 MIB.addReg(0);
897 MIB.addImm(Imm);
898 } else {
899 MIB.addImm(Addr.Offset);
900 }
901 MIB.addMemOperand(MMO);
902 } else {
903 // Now add the rest of the operands.
904 MIB.addReg(Addr.Base.Reg);
905
906 // ARM halfword load/stores and signed byte loads need an additional
907 // operand.
908 if (useAM3) {
909 int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
910 MIB.addReg(0);
911 MIB.addImm(Imm);
912 } else {
913 MIB.addImm(Addr.Offset);
914 }
915 }
916 AddOptionalDefs(MIB);
917 }
918
919 bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
920 unsigned Alignment, bool isZExt, bool allocReg) {
921 unsigned Opc;
922 bool useAM3 = false;
923 bool needVMOV = false;
924 const TargetRegisterClass *RC;
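  // Pick the load opcode and register class based on the value type, Thumb2 vs. ARM
  // encoding, and the offset/alignment constraints.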
925 switch (VT.SimpleTy) {
926 // This is mostly going to be Neon/vector support.
927 default: return false;
928 case MVT::i1:
929 case MVT::i8:
930 if (isThumb2) {
931 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
932 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
933 else
934 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
935 } else {
936 if (isZExt) {
937 Opc = ARM::LDRBi12;
938 } else {
939 Opc = ARM::LDRSB;
940 useAM3 = true;
941 }
942 }
943 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
944 break;
945 case MVT::i16:
946 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
947 return false;
948
949 if (isThumb2) {
950 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
951 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
952 else
953 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
954 } else {
955 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
956 useAM3 = true;
957 }
958 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
959 break;
960 case MVT::i32:
961 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
962 return false;
963
964 if (isThumb2) {
965 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
966 Opc = ARM::t2LDRi8;
967 else
968 Opc = ARM::t2LDRi12;
969 } else {
970 Opc = ARM::LDRi12;
971 }
972 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
973 break;
974 case MVT::f32:
975 if (!Subtarget->hasVFP2()) return false;
976 // Unaligned loads need special handling. Floats require word-alignment.
977 if (Alignment && Alignment < 4) {
978 needVMOV = true;
979 VT = MVT::i32;
980 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
981 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
982 } else {
983 Opc = ARM::VLDRS;
984 RC = TLI.getRegClassFor(VT);
985 }
986 break;
987 case MVT::f64:
988 if (!Subtarget->hasVFP2()) return false;
989 // FIXME: Unaligned loads need special handling. Doublewords require
990 // word-alignment.
991 if (Alignment && Alignment < 4)
992 return false;
993
994 Opc = ARM::VLDRD;
995 RC = TLI.getRegClassFor(VT);
996 break;
997 }
998 // Simplify this down to something we can handle.
999 ARMSimplifyAddress(Addr, VT, useAM3);
1000
1001 // Create the base instruction, then add the operands.
1002 if (allocReg)
1003 ResultReg = createResultReg(RC);
1004 assert(ResultReg > 255 && "Expected an allocated virtual register.");
1005 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1006 TII.get(Opc), ResultReg);
1007 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
1008
1009   // If we had an unaligned load of a float we've converted it to a regular
1010   // load. Now we must move from the GPR to the FP register.
1011 if (needVMOV) {
1012 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1013 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1014 TII.get(ARM::VMOVSR), MoveReg)
1015 .addReg(ResultReg));
1016 ResultReg = MoveReg;
1017 }
1018 return true;
1019 }
1020
1021 bool ARMFastISel::SelectLoad(const Instruction *I) {
1022 // Atomic loads need special handling.
1023 if (cast<LoadInst>(I)->isAtomic())
1024 return false;
1025
1026 const Value *SV = I->getOperand(0);
1027 if (TLI.supportSwiftError()) {
1028 // Swifterror values can come from either a function parameter with
1029 // swifterror attribute or an alloca with swifterror attribute.
1030 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
1031 if (Arg->hasSwiftErrorAttr())
1032 return false;
1033 }
1034
1035 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1036 if (Alloca->isSwiftError())
1037 return false;
1038 }
1039 }
1040
1041 // Verify we have a legal type before going any further.
1042 MVT VT;
1043 if (!isLoadTypeLegal(I->getType(), VT))
1044 return false;
1045
1046 // See if we can handle this address.
1047 Address Addr;
1048 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
1049
1050 unsigned ResultReg;
1051 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
1052 return false;
1053 updateValueMap(I, ResultReg);
1054 return true;
1055 }
1056
1057 bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
1058 unsigned Alignment) {
1059 unsigned StrOpc;
1060 bool useAM3 = false;
1061 switch (VT.SimpleTy) {
1062 // This is mostly going to be Neon/vector support.
1063 default: return false;
1064 case MVT::i1: {
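      // Mask the value down to its low bit, then fall through and store it as an i8.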
1065 unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1066 : &ARM::GPRRegClass);
1067 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1068 SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
1069 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1070 TII.get(Opc), Res)
1071 .addReg(SrcReg).addImm(1));
1072 SrcReg = Res;
1073 LLVM_FALLTHROUGH;
1074 }
1075 case MVT::i8:
1076 if (isThumb2) {
1077 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1078 StrOpc = ARM::t2STRBi8;
1079 else
1080 StrOpc = ARM::t2STRBi12;
1081 } else {
1082 StrOpc = ARM::STRBi12;
1083 }
1084 break;
1085 case MVT::i16:
1086 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
1087 return false;
1088
1089 if (isThumb2) {
1090 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1091 StrOpc = ARM::t2STRHi8;
1092 else
1093 StrOpc = ARM::t2STRHi12;
1094 } else {
1095 StrOpc = ARM::STRH;
1096 useAM3 = true;
1097 }
1098 break;
1099 case MVT::i32:
1100 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
1101 return false;
1102
1103 if (isThumb2) {
1104 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1105 StrOpc = ARM::t2STRi8;
1106 else
1107 StrOpc = ARM::t2STRi12;
1108 } else {
1109 StrOpc = ARM::STRi12;
1110 }
1111 break;
1112 case MVT::f32:
1113 if (!Subtarget->hasVFP2()) return false;
1114 // Unaligned stores need special handling. Floats require word-alignment.
1115 if (Alignment && Alignment < 4) {
1116 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1117 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1118 TII.get(ARM::VMOVRS), MoveReg)
1119 .addReg(SrcReg));
1120 SrcReg = MoveReg;
1121 VT = MVT::i32;
1122 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1123 } else {
1124 StrOpc = ARM::VSTRS;
1125 }
1126 break;
1127 case MVT::f64:
1128 if (!Subtarget->hasVFP2()) return false;
1129 // FIXME: Unaligned stores need special handling. Doublewords require
1130 // word-alignment.
1131 if (Alignment && Alignment < 4)
1132 return false;
1133
1134 StrOpc = ARM::VSTRD;
1135 break;
1136 }
1137 // Simplify this down to something we can handle.
1138 ARMSimplifyAddress(Addr, VT, useAM3);
1139
1140 // Create the base instruction, then add the operands.
1141 SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
1142 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1143 TII.get(StrOpc))
1144 .addReg(SrcReg);
1145 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
1146 return true;
1147 }
1148
1149 bool ARMFastISel::SelectStore(const Instruction *I) {
1150 Value *Op0 = I->getOperand(0);
1151 unsigned SrcReg = 0;
1152
1153 // Atomic stores need special handling.
1154 if (cast<StoreInst>(I)->isAtomic())
1155 return false;
1156
1157 const Value *PtrV = I->getOperand(1);
1158 if (TLI.supportSwiftError()) {
1159 // Swifterror values can come from either a function parameter with
1160 // swifterror attribute or an alloca with swifterror attribute.
1161 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1162 if (Arg->hasSwiftErrorAttr())
1163 return false;
1164 }
1165
1166 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1167 if (Alloca->isSwiftError())
1168 return false;
1169 }
1170 }
1171
1172 // Verify we have a legal type before going any further.
1173 MVT VT;
1174 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
1175 return false;
1176
1177 // Get the value to be stored into a register.
1178 SrcReg = getRegForValue(Op0);
1179 if (SrcReg == 0) return false;
1180
1181 // See if we can handle this address.
1182 Address Addr;
1183 if (!ARMComputeAddress(I->getOperand(1), Addr))
1184 return false;
1185
1186 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
1187 return false;
1188 return true;
1189 }
1190
1191 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
1192 switch (Pred) {
1193 // Needs two compares...
1194 case CmpInst::FCMP_ONE:
1195 case CmpInst::FCMP_UEQ:
1196 default:
1197 // AL is our "false" for now. The other two need more compares.
1198 return ARMCC::AL;
1199 case CmpInst::ICMP_EQ:
1200 case CmpInst::FCMP_OEQ:
1201 return ARMCC::EQ;
1202 case CmpInst::ICMP_SGT:
1203 case CmpInst::FCMP_OGT:
1204 return ARMCC::GT;
1205 case CmpInst::ICMP_SGE:
1206 case CmpInst::FCMP_OGE:
1207 return ARMCC::GE;
1208 case CmpInst::ICMP_UGT:
1209 case CmpInst::FCMP_UGT:
1210 return ARMCC::HI;
1211 case CmpInst::FCMP_OLT:
1212 return ARMCC::MI;
1213 case CmpInst::ICMP_ULE:
1214 case CmpInst::FCMP_OLE:
1215 return ARMCC::LS;
1216 case CmpInst::FCMP_ORD:
1217 return ARMCC::VC;
1218 case CmpInst::FCMP_UNO:
1219 return ARMCC::VS;
1220 case CmpInst::FCMP_UGE:
1221 return ARMCC::PL;
1222 case CmpInst::ICMP_SLT:
1223 case CmpInst::FCMP_ULT:
1224 return ARMCC::LT;
1225 case CmpInst::ICMP_SLE:
1226 case CmpInst::FCMP_ULE:
1227 return ARMCC::LE;
1228 case CmpInst::FCMP_UNE:
1229 case CmpInst::ICMP_NE:
1230 return ARMCC::NE;
1231 case CmpInst::ICMP_UGE:
1232 return ARMCC::HS;
1233 case CmpInst::ICMP_ULT:
1234 return ARMCC::LO;
1235 }
1236 }
1237
1238 bool ARMFastISel::SelectBranch(const Instruction *I) {
1239 const BranchInst *BI = cast<BranchInst>(I);
1240 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1241 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1242
1243 // Simple branch support.
1244
1245 // If we can, avoid recomputing the compare - redoing it could lead to wonky
1246 // behavior.
1247 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1248 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
1249 // Get the compare predicate.
1250 // Try to take advantage of fallthrough opportunities.
1251 CmpInst::Predicate Predicate = CI->getPredicate();
1252 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1253 std::swap(TBB, FBB);
1254 Predicate = CmpInst::getInversePredicate(Predicate);
1255 }
1256
1257 ARMCC::CondCodes ARMPred = getComparePred(Predicate);
1258
1259 // We may not handle every CC for now.
1260 if (ARMPred == ARMCC::AL) return false;
1261
1262 // Emit the compare.
1263 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
1264 CI->isEquality()))
1265 return false;
1266
1267 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1268 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
1269 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1270 finishCondBranch(BI->getParent(), TBB, FBB);
1271 return true;
1272 }
1273 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1274 MVT SourceVT;
1275 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1276 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
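      // Test the low bit of the value feeding the trunc and branch on the result.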
1277 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1278 unsigned OpReg = getRegForValue(TI->getOperand(0));
1279 OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
1280 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1281 TII.get(TstOpc))
1282 .addReg(OpReg).addImm(1));
1283
1284 unsigned CCMode = ARMCC::NE;
1285 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1286 std::swap(TBB, FBB);
1287 CCMode = ARMCC::EQ;
1288 }
1289
1290 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1291 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
1292 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1293
1294 finishCondBranch(BI->getParent(), TBB, FBB);
1295 return true;
1296 }
1297 } else if (const ConstantInt *CI =
1298 dyn_cast<ConstantInt>(BI->getCondition())) {
1299 uint64_t Imm = CI->getZExtValue();
1300 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1301 fastEmitBranch(Target, DbgLoc);
1302 return true;
1303 }
1304
1305 unsigned CmpReg = getRegForValue(BI->getCondition());
1306 if (CmpReg == 0) return false;
1307
1308 // We've been divorced from our compare! Our block was split, and
1309   // now our compare lives in a predecessor block. We mustn't
1310 // re-compare here, as the children of the compare aren't guaranteed
1311 // live across the block boundary (we *could* check for this).
1312 // Regardless, the compare has been done in the predecessor block,
1313 // and it left a value for us in a virtual register. Ergo, we test
1314 // the one-bit value left in the virtual register.
1315 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1316 CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
1317 AddOptionalDefs(
1318 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
1319 .addReg(CmpReg)
1320 .addImm(1));
1321
1322 unsigned CCMode = ARMCC::NE;
1323 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1324 std::swap(TBB, FBB);
1325 CCMode = ARMCC::EQ;
1326 }
1327
1328 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1329 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
1330 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1331 finishCondBranch(BI->getParent(), TBB, FBB);
1332 return true;
1333 }
1334
1335 bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
1336 unsigned AddrReg = getRegForValue(I->getOperand(0));
1337 if (AddrReg == 0) return false;
1338
1339 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1340 assert(isThumb2 || Subtarget->hasV4TOps());
1341
1342 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1343 TII.get(Opc)).addReg(AddrReg));
1344
1345 const IndirectBrInst *IB = cast<IndirectBrInst>(I);
1346 for (const BasicBlock *SuccBB : IB->successors())
1347 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);
1348
1349 return true;
1350 }
1351
1352 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1353 bool isZExt, bool isEquality) {
1354 Type *Ty = Src1Value->getType();
1355 EVT SrcEVT = TLI.getValueType(DL, Ty, true);
1356 if (!SrcEVT.isSimple()) return false;
1357 MVT SrcVT = SrcEVT.getSimpleVT();
1358
1359 if (Ty->isFloatTy() && !Subtarget->hasVFP2())
1360 return false;
1361
1362 if (Ty->isDoubleTy() && (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()))
1363 return false;
1364
1365 // Check to see if the 2nd operand is a constant that we can encode directly
1366 // in the compare.
1367 int Imm = 0;
1368 bool UseImm = false;
1369 bool isNegativeImm = false;
1370 // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1371 // Thus, Src1Value may be a ConstantInt, but we're missing it.
1372 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1373 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1374 SrcVT == MVT::i1) {
1375 const APInt &CIVal = ConstInt->getValue();
1376 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
1377       // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
1378       // than a cmn, because there is no way to represent 2147483648 as a
1379       // signed 32-bit int.
1380 if (Imm < 0 && Imm != (int)0x80000000) {
1381 isNegativeImm = true;
1382 Imm = -Imm;
1383 }
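      // When the immediate has been negated, the opcode selection below switches to
      // CMN so the comparison result is unchanged.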
1384 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1385 (ARM_AM::getSOImmVal(Imm) != -1);
1386 }
1387 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1388 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1389 if (ConstFP->isZero() && !ConstFP->isNegative())
1390 UseImm = true;
1391 }
1392
1393 unsigned CmpOpc;
1394 bool isICmp = true;
1395 bool needsExt = false;
1396 switch (SrcVT.SimpleTy) {
1397 default: return false;
1398 // TODO: Verify compares.
1399 case MVT::f32:
1400 isICmp = false;
1401 // Equality comparisons shouldn't raise Invalid on uordered inputs.
1402 if (isEquality)
1403 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1404 else
1405 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
1406 break;
1407 case MVT::f64:
1408 isICmp = false;
1409 // Equality comparisons shouldn't raise Invalid on uordered inputs.
1410 if (isEquality)
1411 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
1412 else
1413 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
1414 break;
1415 case MVT::i1:
1416 case MVT::i8:
1417 case MVT::i16:
1418 needsExt = true;
1419 LLVM_FALLTHROUGH;
1420 case MVT::i32:
1421 if (isThumb2) {
1422 if (!UseImm)
1423 CmpOpc = ARM::t2CMPrr;
1424 else
1425 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1426 } else {
1427 if (!UseImm)
1428 CmpOpc = ARM::CMPrr;
1429 else
1430 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1431 }
1432 break;
1433 }
1434
1435 unsigned SrcReg1 = getRegForValue(Src1Value);
1436 if (SrcReg1 == 0) return false;
1437
1438 unsigned SrcReg2 = 0;
1439 if (!UseImm) {
1440 SrcReg2 = getRegForValue(Src2Value);
1441 if (SrcReg2 == 0) return false;
1442 }
1443
1444   // We have i1, i8, or i16; we need to either zero-extend or sign-extend.
1445 if (needsExt) {
1446 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1447 if (SrcReg1 == 0) return false;
1448 if (!UseImm) {
1449 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1450 if (SrcReg2 == 0) return false;
1451 }
1452 }
1453
1454 const MCInstrDesc &II = TII.get(CmpOpc);
1455 SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
1456 if (!UseImm) {
1457 SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
1458 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1459 .addReg(SrcReg1).addReg(SrcReg2));
1460 } else {
1461 MachineInstrBuilder MIB;
1462 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1463 .addReg(SrcReg1);
1464
1465 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1466 if (isICmp)
1467 MIB.addImm(Imm);
1468 AddOptionalDefs(MIB);
1469 }
1470
1471 // For floating point we need to move the result to a comparison register
1472 // that we can then use for branches.
1473 if (Ty->isFloatTy() || Ty->isDoubleTy())
1474 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1475 TII.get(ARM::FMSTAT)));
1476 return true;
1477 }
1478
1479 bool ARMFastISel::SelectCmp(const Instruction *I) {
1480 const CmpInst *CI = cast<CmpInst>(I);
1481
1482 // Get the compare predicate.
1483 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
1484
1485 // We may not handle every CC for now.
1486 if (ARMPred == ARMCC::AL) return false;
1487
1488 // Emit the compare.
1489 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
1490 CI->isEquality()))
1491 return false;
1492
1493 // Now set a register based on the comparison. Explicitly set the predicates
1494 // here.
1495 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1496 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
1497 : &ARM::GPRRegClass;
1498 unsigned DestReg = createResultReg(RC);
1499 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
1500 unsigned ZeroReg = fastMaterializeConstant(Zero);
1501 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
1502 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
1503 .addReg(ZeroReg).addImm(1)
1504 .addImm(ARMPred).addReg(ARM::CPSR);
1505
1506 updateValueMap(I, DestReg);
1507 return true;
1508 }
1509
1510 bool ARMFastISel::SelectFPExt(const Instruction *I) {
1511 // Make sure we have VFP and that we're extending float to double.
1512 if (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()) return false;
1513
1514 Value *V = I->getOperand(0);
1515 if (!I->getType()->isDoubleTy() ||
1516 !V->getType()->isFloatTy()) return false;
1517
1518 unsigned Op = getRegForValue(V);
1519 if (Op == 0) return false;
1520
1521 unsigned Result = createResultReg(&ARM::DPRRegClass);
1522 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1523 TII.get(ARM::VCVTDS), Result)
1524 .addReg(Op));
1525 updateValueMap(I, Result);
1526 return true;
1527 }
1528
1529 bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
1530 // Make sure we have VFP and that we're truncating double to float.
1531 if (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()) return false;
1532
1533 Value *V = I->getOperand(0);
1534 if (!(I->getType()->isFloatTy() &&
1535 V->getType()->isDoubleTy())) return false;
1536
1537 unsigned Op = getRegForValue(V);
1538 if (Op == 0) return false;
1539
1540 unsigned Result = createResultReg(&ARM::SPRRegClass);
1541 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1542 TII.get(ARM::VCVTSD), Result)
1543 .addReg(Op));
1544 updateValueMap(I, Result);
1545 return true;
1546 }
1547
1548 bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
1549 // Make sure we have VFP.
1550 if (!Subtarget->hasVFP2()) return false;
1551
1552 MVT DstVT;
1553 Type *Ty = I->getType();
1554 if (!isTypeLegal(Ty, DstVT))
1555 return false;
1556
1557 Value *Src = I->getOperand(0);
1558 EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
1559 if (!SrcEVT.isSimple())
1560 return false;
1561 MVT SrcVT = SrcEVT.getSimpleVT();
1562 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1563 return false;
1564
1565 unsigned SrcReg = getRegForValue(Src);
1566 if (SrcReg == 0) return false;
1567
1568   // Extend i8/i16 sources to i32 (zero-extend for unsigned, sign-extend for signed).
1569 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1570 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
1571 /*isZExt*/!isSigned);
1572 if (SrcReg == 0) return false;
1573 }
1574
1575 // The conversion routine works on fp-reg to fp-reg and the operand above
1576 // was an integer, move it to the fp registers if possible.
1577 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1578 if (FP == 0) return false;
1579
1580 unsigned Opc;
1581 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1582 else if (Ty->isDoubleTy() && !Subtarget->isFPOnlySP())
1583 Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1584 else return false;
1585
1586 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1587 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1588 TII.get(Opc), ResultReg).addReg(FP));
1589 updateValueMap(I, ResultReg);
1590 return true;
1591 }
1592
1593 bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
1594 // Make sure we have VFP.
1595 if (!Subtarget->hasVFP2()) return false;
1596
1597 MVT DstVT;
1598 Type *RetTy = I->getType();
1599 if (!isTypeLegal(RetTy, DstVT))
1600 return false;
1601
1602 unsigned Op = getRegForValue(I->getOperand(0));
1603 if (Op == 0) return false;
1604
1605 unsigned Opc;
1606 Type *OpTy = I->getOperand(0)->getType();
1607 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1608 else if (OpTy->isDoubleTy() && !Subtarget->isFPOnlySP())
1609 Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1610 else return false;
1611
1612 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
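// Illustrative sketch (assumed mnemonics): an fptosi from double is roughly
//   vcvt.s32.f64  sT, dS    @ VTOSIZD below; the result lands in an f32 reg
//   vmov  rD, sT            @ ARMMoveToIntReg below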
1613 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1614 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1615 TII.get(Opc), ResultReg).addReg(Op));
1616
1617 // This result needs to be in an integer register, but the conversion only
1618 // takes place in fp-regs.
1619 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1620 if (IntReg == 0) return false;
1621
1622 updateValueMap(I, IntReg);
1623 return true;
1624 }
1625
1626 bool ARMFastISel::SelectSelect(const Instruction *I) {
1627 MVT VT;
1628 if (!isTypeLegal(I->getType(), VT))
1629 return false;
1630
1631 // Things need to be register sized for register moves.
1632 if (VT != MVT::i32) return false;
1633
1634 unsigned CondReg = getRegForValue(I->getOperand(0));
1635 if (CondReg == 0) return false;
1636 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1637 if (Op1Reg == 0) return false;
1638
1639 // Check to see if we can use an immediate in the conditional move.
1640 int Imm = 0;
1641 bool UseImm = false;
1642 bool isNegativeImm = false;
1643 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1644 assert(VT == MVT::i32 && "Expecting an i32.");
1645 Imm = (int)ConstInt->getValue().getZExtValue();
1646 if (Imm < 0) {
1647 isNegativeImm = true;
1648 Imm = ~Imm;
1649 }
1650 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1651 (ARM_AM::getSOImmVal(Imm) != -1);
1652 }
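// Worked example of the bit trick above (illustrative): for a constant of
// -2, Imm becomes ~(-2) == 1, which is encodable, and the MVNCCi chosen
// below materializes ~1 == 0xFFFFFFFE == -2 under the predicate.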
1653
1654 unsigned Op2Reg = 0;
1655 if (!UseImm) {
1656 Op2Reg = getRegForValue(I->getOperand(2));
1657 if (Op2Reg == 0) return false;
1658 }
1659
1660 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1661 CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
1662 AddOptionalDefs(
1663 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
1664 .addReg(CondReg)
1665 .addImm(1));
1666
1667 unsigned MovCCOpc;
1668 const TargetRegisterClass *RC;
1669 if (!UseImm) {
1670 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1671 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1672 } else {
1673 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1674 if (!isNegativeImm)
1675 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1676 else
1677 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1678 }
1679 unsigned ResultReg = createResultReg(RC);
1680 if (!UseImm) {
1681 Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
1682 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
1683 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
1684 ResultReg)
1685 .addReg(Op2Reg)
1686 .addReg(Op1Reg)
1687 .addImm(ARMCC::NE)
1688 .addReg(ARM::CPSR);
1689 } else {
1690 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
1691 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
1692 ResultReg)
1693 .addReg(Op1Reg)
1694 .addImm(Imm)
1695 .addImm(ARMCC::EQ)
1696 .addReg(ARM::CPSR);
1697 }
1698 updateValueMap(I, ResultReg);
1699 return true;
1700 }
1701
1702 bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
1703 MVT VT;
1704 Type *Ty = I->getType();
1705 if (!isTypeLegal(Ty, VT))
1706 return false;
1707
1708 // If we have integer div support we should have selected this automagically.
1709 // In case we have a real miss, go ahead and return false; we'll pick
1710 // it up later.
1711 if (Subtarget->hasDivideInThumbMode())
1712 return false;
1713
1714 // Otherwise emit a libcall.
1715 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1716 if (VT == MVT::i8)
1717 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1718 else if (VT == MVT::i16)
1719 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1720 else if (VT == MVT::i32)
1721 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1722 else if (VT == MVT::i64)
1723 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1724 else if (VT == MVT::i128)
1725 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1726 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1727
1728 return ARMEmitLibcall(I, LC);
1729 }
1730
1731 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1732 MVT VT;
1733 Type *Ty = I->getType();
1734 if (!isTypeLegal(Ty, VT))
1735 return false;
1736
1737 // Many ABIs do not provide a libcall for standalone remainder, so we need to
1738 // use divrem (see the RTABI 4.3.1). Since FastISel can't handle non-double
1739 // multi-reg returns, we'll have to bail out.
1740 if (!TLI.hasStandaloneRem(VT)) {
1741 return false;
1742 }
1743
1744 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1745 if (VT == MVT::i8)
1746 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1747 else if (VT == MVT::i16)
1748 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1749 else if (VT == MVT::i32)
1750 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1751 else if (VT == MVT::i64)
1752 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1753 else if (VT == MVT::i128)
1754 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1755 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1756
1757 return ARMEmitLibcall(I, LC);
1758 }
1759
1760 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1761 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1762
1763 // We can get here in the case when we have a binary operation on a non-legal
1764 // type and the target independent selector doesn't know how to handle it.
1765 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1766 return false;
1767
1768 unsigned Opc;
1769 switch (ISDOpcode) {
1770 default: return false;
1771 case ISD::ADD:
1772 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1773 break;
1774 case ISD::OR:
1775 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1776 break;
1777 case ISD::SUB:
1778 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1779 break;
1780 }
1781
1782 unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1783 if (SrcReg1 == 0) return false;
1784
1785 // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1786 // in the instruction, rather than materializing the value in a register.
1787 unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1788 if (SrcReg2 == 0) return false;
1789
1790 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1791 SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
1792 SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
1793 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1794 TII.get(Opc), ResultReg)
1795 .addReg(SrcReg1).addReg(SrcReg2));
1796 updateValueMap(I, ResultReg);
1797 return true;
1798 }
1799
1800 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1801 EVT FPVT = TLI.getValueType(DL, I->getType(), true);
1802 if (!FPVT.isSimple()) return false;
1803 MVT VT = FPVT.getSimpleVT();
1804
1805 // FIXME: Support vector types where possible.
1806 if (VT.isVector())
1807 return false;
1808
1809 // We can get here in the case when we want to use NEON for our fp
1810 // operations, but can't figure out how to. Just use the vfp instructions
1811 // if we have them.
1812 // FIXME: It'd be nice to use NEON instructions.
1813 Type *Ty = I->getType();
1814 if (Ty->isFloatTy() && !Subtarget->hasVFP2())
1815 return false;
1816 if (Ty->isDoubleTy() && (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()))
1817 return false;
1818
1819 unsigned Opc;
1820 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1821 switch (ISDOpcode) {
1822 default: return false;
1823 case ISD::FADD:
1824 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1825 break;
1826 case ISD::FSUB:
1827 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1828 break;
1829 case ISD::FMUL:
1830 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1831 break;
1832 }
1833 unsigned Op1 = getRegForValue(I->getOperand(0));
1834 if (Op1 == 0) return false;
1835
1836 unsigned Op2 = getRegForValue(I->getOperand(1));
1837 if (Op2 == 0) return false;
1838
1839 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
1840 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1841 TII.get(Opc), ResultReg)
1842 .addReg(Op1).addReg(Op2));
1843 updateValueMap(I, ResultReg);
1844 return true;
1845 }
1846
1847 // Call Handling Code
1848
1849 // This is largely taken directly from CCAssignFnForNode
1850 // TODO: We may not support all of this.
1851 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1852 bool Return,
1853 bool isVarArg) {
1854 switch (CC) {
1855 default:
1856 report_fatal_error("Unsupported calling convention");
1857 case CallingConv::Fast:
1858 if (Subtarget->hasVFP2() && !isVarArg) {
1859 if (!Subtarget->isAAPCS_ABI())
1860 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1861 // For AAPCS ABI targets, just use the VFP variant of the calling convention.
1862 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1863 }
1864 LLVM_FALLTHROUGH;
1865 case CallingConv::C:
1866 case CallingConv::CXX_FAST_TLS:
1867 // Use target triple & subtarget features to do actual dispatch.
1868 if (Subtarget->isAAPCS_ABI()) {
1869 if (Subtarget->hasVFP2() &&
1870 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
1871 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1872 else
1873 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1874 } else {
1875 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1876 }
1877 case CallingConv::ARM_AAPCS_VFP:
1878 case CallingConv::Swift:
1879 if (!isVarArg)
1880 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1881 // Fall through to the soft-float variant; variadic functions don't
1882 // use the hard floating-point ABI.
1883 LLVM_FALLTHROUGH;
1884 case CallingConv::ARM_AAPCS:
1885 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1886 case CallingConv::ARM_APCS:
1887 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1888 case CallingConv::GHC:
1889 if (Return)
1890 report_fatal_error("Can't return in GHC call convention");
1891 else
1892 return CC_ARM_APCS_GHC;
1893 }
1894 }
1895
1896 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1897 SmallVectorImpl<unsigned> &ArgRegs,
1898 SmallVectorImpl<MVT> &ArgVTs,
1899 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1900 SmallVectorImpl<unsigned> &RegArgs,
1901 CallingConv::ID CC,
1902 unsigned &NumBytes,
1903 bool isVarArg) {
1904 SmallVector<CCValAssign, 16> ArgLocs;
1905 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
1906 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1907 CCAssignFnForCall(CC, false, isVarArg));
1908
1909 // Check that we can handle all of the arguments. If we can't, then bail out
1910 // now before we add code to the MBB.
1911 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1912 CCValAssign &VA = ArgLocs[i];
1913 MVT ArgVT = ArgVTs[VA.getValNo()];
1914
1915 // We don't handle NEON/vector parameters yet.
1916 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1917 return false;
1918
1919 // Now copy/store arg to correct locations.
1920 if (VA.isRegLoc() && !VA.needsCustom()) {
1921 continue;
1922 } else if (VA.needsCustom()) {
1923 // TODO: We need custom lowering for vector (v2f64) args.
1924 if (VA.getLocVT() != MVT::f64 ||
1925 // TODO: Only handle register args for now.
1926 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1927 return false;
1928 } else {
1929 switch (ArgVT.SimpleTy) {
1930 default:
1931 return false;
1932 case MVT::i1:
1933 case MVT::i8:
1934 case MVT::i16:
1935 case MVT::i32:
1936 break;
1937 case MVT::f32:
1938 if (!Subtarget->hasVFP2())
1939 return false;
1940 break;
1941 case MVT::f64:
1942 if (!Subtarget->hasVFP2())
1943 return false;
1944 break;
1945 }
1946 }
1947 }
1948
1949 // At this point, we are able to handle the call's arguments in fast isel.
1950
1951 // Get a count of how many bytes are to be pushed on the stack.
1952 NumBytes = CCInfo.getNextStackOffset();
1953
1954 // Issue CALLSEQ_START
1955 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1956 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1957 TII.get(AdjStackDown))
1958 .addImm(NumBytes).addImm(0));
1959
1960 // Process the args.
1961 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1962 CCValAssign &VA = ArgLocs[i];
1963 const Value *ArgVal = Args[VA.getValNo()];
1964 unsigned Arg = ArgRegs[VA.getValNo()];
1965 MVT ArgVT = ArgVTs[VA.getValNo()];
1966
1967 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1968 "We don't handle NEON/vector parameters yet.");
1969
1970 // Handle arg promotion, etc.
1971 switch (VA.getLocInfo()) {
1972 case CCValAssign::Full: break;
1973 case CCValAssign::SExt: {
1974 MVT DestVT = VA.getLocVT();
1975 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1976 assert(Arg != 0 && "Failed to emit a sext");
1977 ArgVT = DestVT;
1978 break;
1979 }
1980 case CCValAssign::AExt:
1981 // Intentional fall-through. Handle AExt and ZExt.
1982 case CCValAssign::ZExt: {
1983 MVT DestVT = VA.getLocVT();
1984 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
1985 assert(Arg != 0 && "Failed to emit a zext");
1986 ArgVT = DestVT;
1987 break;
1988 }
1989 case CCValAssign::BCvt: {
1990 unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
1991 /*TODO: Kill=*/false);
1992 assert(BC != 0 && "Failed to emit a bitcast!");
1993 Arg = BC;
1994 ArgVT = VA.getLocVT();
1995 break;
1996 }
1997 default: llvm_unreachable("Unknown arg promotion!");
1998 }
1999
2000 // Now copy/store arg to correct locations.
2001 if (VA.isRegLoc() && !VA.needsCustom()) {
2002 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2003 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
2004 RegArgs.push_back(VA.getLocReg());
2005 } else if (VA.needsCustom()) {
2006 // TODO: We need custom lowering for vector (v2f64) args.
2007 assert(VA.getLocVT() == MVT::f64 &&
2008 "Custom lowering for v2f64 args not available");
2009
2010 // FIXME: ArgLocs[++i] may extend beyond ArgLocs.size()
2011 CCValAssign &NextVA = ArgLocs[++i];
2012
2013 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2014 "We only handle register args!");
2015
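// Sketch of what the VMOVRRD below produces (assumed mnemonic):
//   vmov  rLo, rHi, dArg    @ split the f64 into the two GPRs that the
//                           @ soft-float calling convention expects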
2016 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2017 TII.get(ARM::VMOVRRD), VA.getLocReg())
2018 .addReg(NextVA.getLocReg(), RegState::Define)
2019 .addReg(Arg));
2020 RegArgs.push_back(VA.getLocReg());
2021 RegArgs.push_back(NextVA.getLocReg());
2022 } else {
2023 assert(VA.isMemLoc());
2024 // Need to store on the stack.
2025
2026 // Don't emit stores for undef values.
2027 if (isa<UndefValue>(ArgVal))
2028 continue;
2029
2030 Address Addr;
2031 Addr.BaseType = Address::RegBase;
2032 Addr.Base.Reg = ARM::SP;
2033 Addr.Offset = VA.getLocMemOffset();
2034
2035 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2036 assert(EmitRet && "Could not emit a store for argument!");
2037 }
2038 }
2039
2040 return true;
2041 }
2042
2043 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
2044 const Instruction *I, CallingConv::ID CC,
2045 unsigned &NumBytes, bool isVarArg) {
2046 // Issue CALLSEQ_END
2047 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2048 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2049 TII.get(AdjStackUp))
2050 .addImm(NumBytes).addImm(0));
2051
2052 // Now the return value.
2053 if (RetVT != MVT::isVoid) {
2054 SmallVector<CCValAssign, 16> RVLocs;
2055 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2056 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2057
2058 // Copy all of the result registers out of their specified physreg.
2059 if (RVLocs.size() == 2 && RetVT == MVT::f64) {
2060 // For this move we copy into two registers and then move into the
2061 // double fp reg we want.
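// Sketch (assumed mnemonic): with an integer-register return convention the
// two halves come back in e.g. r0/r1 and are recombined with
//   vmov  dD, r0, r1        @ VMOVDRR below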
2062 MVT DestVT = RVLocs[0].getValVT();
2063 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
2064 unsigned ResultReg = createResultReg(DstRC);
2065 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2066 TII.get(ARM::VMOVDRR), ResultReg)
2067 .addReg(RVLocs[0].getLocReg())
2068 .addReg(RVLocs[1].getLocReg()));
2069
2070 UsedRegs.push_back(RVLocs[0].getLocReg());
2071 UsedRegs.push_back(RVLocs[1].getLocReg());
2072
2073 // Finally update the result.
2074 updateValueMap(I, ResultReg);
2075 } else {
2076 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
2077 MVT CopyVT = RVLocs[0].getValVT();
2078
2079 // Special handling for extended integers.
2080 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2081 CopyVT = MVT::i32;
2082
2083 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
2084
2085 unsigned ResultReg = createResultReg(DstRC);
2086 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2087 TII.get(TargetOpcode::COPY),
2088 ResultReg).addReg(RVLocs[0].getLocReg());
2089 UsedRegs.push_back(RVLocs[0].getLocReg());
2090
2091 // Finally update the result.
2092 updateValueMap(I, ResultReg);
2093 }
2094 }
2095
2096 return true;
2097 }
2098
2099 bool ARMFastISel::SelectRet(const Instruction *I) {
2100 const ReturnInst *Ret = cast<ReturnInst>(I);
2101 const Function &F = *I->getParent()->getParent();
2102
2103 if (!FuncInfo.CanLowerReturn)
2104 return false;
2105
2106 if (TLI.supportSwiftError() &&
2107 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2108 return false;
2109
2110 if (TLI.supportSplitCSR(FuncInfo.MF))
2111 return false;
2112
2113 // Build a list of return value registers.
2114 SmallVector<unsigned, 4> RetRegs;
2115
2116 CallingConv::ID CC = F.getCallingConv();
2117 if (Ret->getNumOperands() > 0) {
2118 SmallVector<ISD::OutputArg, 4> Outs;
2119 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
2120
2121 // Analyze operands of the call, assigning locations to each operand.
2122 SmallVector<CCValAssign, 16> ValLocs;
2123 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
2124 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
2125 F.isVarArg()));
2126
2127 const Value *RV = Ret->getOperand(0);
2128 unsigned Reg = getRegForValue(RV);
2129 if (Reg == 0)
2130 return false;
2131
2132 // Only handle a single return value for now.
2133 if (ValLocs.size() != 1)
2134 return false;
2135
2136 CCValAssign &VA = ValLocs[0];
2137
2138 // Don't bother handling odd stuff for now.
2139 if (VA.getLocInfo() != CCValAssign::Full)
2140 return false;
2141 // Only handle register returns for now.
2142 if (!VA.isRegLoc())
2143 return false;
2144
2145 unsigned SrcReg = Reg + VA.getValNo();
2146 EVT RVEVT = TLI.getValueType(DL, RV->getType());
2147 if (!RVEVT.isSimple()) return false;
2148 MVT RVVT = RVEVT.getSimpleVT();
2149 MVT DestVT = VA.getValVT();
2150 // Special handling for extended integers.
2151 if (RVVT != DestVT) {
2152 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2153 return false;
2154
2155 assert(DestVT == MVT::i32 && "ARM should always ext to i32");
2156
2157 // Perform extension if flagged as either zext or sext. Otherwise, do
2158 // nothing.
2159 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2160 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2161 if (SrcReg == 0) return false;
2162 }
2163 }
2164
2165 // Make the copy.
2166 unsigned DstReg = VA.getLocReg();
2167 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2168 // Avoid a cross-class copy. This is very unlikely.
2169 if (!SrcRC->contains(DstReg))
2170 return false;
2171 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2172 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
2173
2174 // Add register to return instruction.
2175 RetRegs.push_back(VA.getLocReg());
2176 }
2177
2178 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2179 TII.get(Subtarget->getReturnOpcode()));
2180 AddOptionalDefs(MIB);
2181 for (unsigned R : RetRegs)
2182 MIB.addReg(R, RegState::Implicit);
2183 return true;
2184 }
2185
2186 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
2187 if (UseReg)
2188 return isThumb2 ? ARM::tBLXr : ARM::BLX;
2189 else
2190 return isThumb2 ? ARM::tBL : ARM::BL;
2191 }
2192
2193 unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
2194 // Manually compute the global's type to avoid building it when unnecessary.
2195 Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
2196 EVT LCREVT = TLI.getValueType(DL, GVTy);
2197 if (!LCREVT.isSimple()) return 0;
2198
2199 GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
2200 GlobalValue::ExternalLinkage, nullptr,
2201 Name);
2202 assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
2203 return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
2204 }
2205
2206 // A quick routine that emits a call to a named libcall, passing the
2207 // arguments of the Instruction I. We can assume that we
2208 // can emit a call for any libcall we can produce. This is an abridged version
2209 // of the full call infrastructure since we won't need to worry about things
2210 // like computed function pointers or strange arguments at call sites.
2211 // TODO: Try to unify this and the normal call bits for ARM, then try to unify
2212 // with X86.
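// Illustrative example (libcall names depend on the runtime ABI): with no
// hardware divide, "sdiv i32 %a, %b" reaches SelectDiv, which picks
// RTLIB::SDIV_I32 and lands here; the emitted call is roughly
// "bl __aeabi_idiv" (or "__divsi3"), with the operands assigned by
// CCAssignFnForCall and the quotient coming back in r0.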
2213 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
2214 CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
2215
2216 // Handle *simple* calls for now.
2217 Type *RetTy = I->getType();
2218 MVT RetVT;
2219 if (RetTy->isVoidTy())
2220 RetVT = MVT::isVoid;
2221 else if (!isTypeLegal(RetTy, RetVT))
2222 return false;
2223
2224 // Can't handle non-double multi-reg retvals.
2225 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2226 SmallVector<CCValAssign, 16> RVLocs;
2227 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
2228 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
2229 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2230 return false;
2231 }
2232
2233 // Set up the argument vectors.
2234 SmallVector<Value*, 8> Args;
2235 SmallVector<unsigned, 8> ArgRegs;
2236 SmallVector<MVT, 8> ArgVTs;
2237 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2238 Args.reserve(I->getNumOperands());
2239 ArgRegs.reserve(I->getNumOperands());
2240 ArgVTs.reserve(I->getNumOperands());
2241 ArgFlags.reserve(I->getNumOperands());
2242 for (Value *Op : I->operands()) {
2243 unsigned Arg = getRegForValue(Op);
2244 if (Arg == 0) return false;
2245
2246 Type *ArgTy = Op->getType();
2247 MVT ArgVT;
2248 if (!isTypeLegal(ArgTy, ArgVT)) return false;
2249
2250 ISD::ArgFlagsTy Flags;
2251 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
2252 Flags.setOrigAlign(OriginalAlignment);
2253
2254 Args.push_back(Op);
2255 ArgRegs.push_back(Arg);
2256 ArgVTs.push_back(ArgVT);
2257 ArgFlags.push_back(Flags);
2258 }
2259
2260 // Handle the arguments now that we've gotten them.
2261 SmallVector<unsigned, 4> RegArgs;
2262 unsigned NumBytes;
2263 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2264 RegArgs, CC, NumBytes, false))
2265 return false;
2266
2267 unsigned CalleeReg = 0;
2268 if (Subtarget->genLongCalls()) {
2269 CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2270 if (CalleeReg == 0) return false;
2271 }
2272
2273 // Issue the call.
2274 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2275 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2276 DbgLoc, TII.get(CallOpc));
2277 // BL / BLX don't take a predicate, but tBL / tBLX do.
2278 if (isThumb2)
2279 MIB.add(predOps(ARMCC::AL));
2280 if (Subtarget->genLongCalls())
2281 MIB.addReg(CalleeReg);
2282 else
2283 MIB.addExternalSymbol(TLI.getLibcallName(Call));
2284
2285 // Add implicit physical register uses to the call.
2286 for (unsigned R : RegArgs)
2287 MIB.addReg(R, RegState::Implicit);
2288
2289 // Add a register mask with the call-preserved registers.
2290 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2291 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2292
2293 // Finish off the call including any return values.
2294 SmallVector<unsigned, 4> UsedRegs;
2295 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
2296
2297 // Set all unused physreg defs as dead.
2298 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2299
2300 return true;
2301 }
2302
2303 bool ARMFastISel::SelectCall(const Instruction *I,
2304 const char *IntrMemName = nullptr) {
2305 const CallInst *CI = cast<CallInst>(I);
2306 const Value *Callee = CI->getCalledValue();
2307
2308 // Can't handle inline asm.
2309 if (isa<InlineAsm>(Callee)) return false;
2310
2311 // Allow SelectionDAG isel to handle tail calls.
2312 if (CI->isTailCall()) return false;
2313
2314 // Check the calling convention.
2315 ImmutableCallSite CS(CI);
2316 CallingConv::ID CC = CS.getCallingConv();
2317
2318 // TODO: Avoid some calling conventions?
2319
2320 FunctionType *FTy = CS.getFunctionType();
2321 bool isVarArg = FTy->isVarArg();
2322
2323 // Handle *simple* calls for now.
2324 Type *RetTy = I->getType();
2325 MVT RetVT;
2326 if (RetTy->isVoidTy())
2327 RetVT = MVT::isVoid;
2328 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2329 RetVT != MVT::i8 && RetVT != MVT::i1)
2330 return false;
2331
2332 // Can't handle non-double multi-reg retvals.
2333 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2334 RetVT != MVT::i16 && RetVT != MVT::i32) {
2335 SmallVector<CCValAssign, 16> RVLocs;
2336 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2337 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2338 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2339 return false;
2340 }
2341
2342 // Set up the argument vectors.
2343 SmallVector<Value*, 8> Args;
2344 SmallVector<unsigned, 8> ArgRegs;
2345 SmallVector<MVT, 8> ArgVTs;
2346 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2347 unsigned arg_size = CS.arg_size();
2348 Args.reserve(arg_size);
2349 ArgRegs.reserve(arg_size);
2350 ArgVTs.reserve(arg_size);
2351 ArgFlags.reserve(arg_size);
2352 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
2353 i != e; ++i) {
2354 // If we're lowering a memory intrinsic instead of a regular call, skip the
2355 // last argument, which shouldn't be passed to the underlying function.
2356 if (IntrMemName && e - i <= 1)
2357 break;
2358
2359 ISD::ArgFlagsTy Flags;
2360 unsigned ArgIdx = i - CS.arg_begin();
2361 if (CS.paramHasAttr(ArgIdx, Attribute::SExt))
2362 Flags.setSExt();
2363 if (CS.paramHasAttr(ArgIdx, Attribute::ZExt))
2364 Flags.setZExt();
2365
2366 // FIXME: Only handle *easy* calls for now.
2367 if (CS.paramHasAttr(ArgIdx, Attribute::InReg) ||
2368 CS.paramHasAttr(ArgIdx, Attribute::StructRet) ||
2369 CS.paramHasAttr(ArgIdx, Attribute::SwiftSelf) ||
2370 CS.paramHasAttr(ArgIdx, Attribute::SwiftError) ||
2371 CS.paramHasAttr(ArgIdx, Attribute::Nest) ||
2372 CS.paramHasAttr(ArgIdx, Attribute::ByVal))
2373 return false;
2374
2375 Type *ArgTy = (*i)->getType();
2376 MVT ArgVT;
2377 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2378 ArgVT != MVT::i1)
2379 return false;
2380
2381 unsigned Arg = getRegForValue(*i);
2382 if (Arg == 0)
2383 return false;
2384
2385 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
2386 Flags.setOrigAlign(OriginalAlignment);
2387
2388 Args.push_back(*i);
2389 ArgRegs.push_back(Arg);
2390 ArgVTs.push_back(ArgVT);
2391 ArgFlags.push_back(Flags);
2392 }
2393
2394 // Handle the arguments now that we've gotten them.
2395 SmallVector<unsigned, 4> RegArgs;
2396 unsigned NumBytes;
2397 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2398 RegArgs, CC, NumBytes, isVarArg))
2399 return false;
2400
2401 bool UseReg = false;
2402 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2403 if (!GV || Subtarget->genLongCalls()) UseReg = true;
2404
2405 unsigned CalleeReg = 0;
2406 if (UseReg) {
2407 if (IntrMemName)
2408 CalleeReg = getLibcallReg(IntrMemName);
2409 else
2410 CalleeReg = getRegForValue(Callee);
2411
2412 if (CalleeReg == 0) return false;
2413 }
2414
2415 // Issue the call.
2416 unsigned CallOpc = ARMSelectCallOp(UseReg);
2417 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2418 DbgLoc, TII.get(CallOpc));
2419
2420 // ARM calls don't take a predicate, but tBL / tBLX do.
2421 if (isThumb2)
2422 MIB.add(predOps(ARMCC::AL));
2423 if (UseReg)
2424 MIB.addReg(CalleeReg);
2425 else if (!IntrMemName)
2426 MIB.addGlobalAddress(GV, 0, 0);
2427 else
2428 MIB.addExternalSymbol(IntrMemName, 0);
2429
2430 // Add implicit physical register uses to the call.
2431 for (unsigned R : RegArgs)
2432 MIB.addReg(R, RegState::Implicit);
2433
2434 // Add a register mask with the call-preserved registers.
2435 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2436 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2437
2438 // Finish off the call including any return values.
2439 SmallVector<unsigned, 4> UsedRegs;
2440 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2441 return false;
2442
2443 // Set all unused physreg defs as dead.
2444 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2445
2446 return true;
2447 }
2448
2449 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2450 return Len <= 16;
2451 }
2452
2453 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
2454 uint64_t Len, unsigned Alignment) {
2455 // Make sure we don't bloat code by inlining very large memcpy's.
2456 if (!ARMIsMemCpySmall(Len))
2457 return false;
2458
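// Copy in the widest legal chunks the alignment allows. For example
// (illustrative), an 11-byte, word-aligned copy becomes four load/store
// pairs: i32 + i32 + i16 + i8.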
2459 while (Len) {
2460 MVT VT;
2461 if (!Alignment || Alignment >= 4) {
2462 if (Len >= 4)
2463 VT = MVT::i32;
2464 else if (Len >= 2)
2465 VT = MVT::i16;
2466 else {
2467 assert(Len == 1 && "Expected a length of 1!");
2468 VT = MVT::i8;
2469 }
2470 } else {
2471 // Bound based on alignment.
2472 if (Len >= 2 && Alignment == 2)
2473 VT = MVT::i16;
2474 else {
2475 VT = MVT::i8;
2476 }
2477 }
2478
2479 bool RV;
2480 unsigned ResultReg;
2481 RV = ARMEmitLoad(VT, ResultReg, Src);
2482 assert(RV && "Should be able to handle this load.");
2483 RV = ARMEmitStore(VT, ResultReg, Dest);
2484 assert(RV && "Should be able to handle this store.");
2485 (void)RV;
2486
2487 unsigned Size = VT.getSizeInBits()/8;
2488 Len -= Size;
2489 Dest.Offset += Size;
2490 Src.Offset += Size;
2491 }
2492
2493 return true;
2494 }
2495
2496 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2497 // FIXME: Handle more intrinsics.
2498 switch (I.getIntrinsicID()) {
2499 default: return false;
2500 case Intrinsic::frameaddress: {
2501 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
2502 MFI.setFrameAddressIsTaken(true);
2503
2504 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2505 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
2506 : &ARM::GPRRegClass;
2507
2508 const ARMBaseRegisterInfo *RegInfo =
2509 static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
2510 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
2511 unsigned SrcReg = FramePtr;
2512
2513 // Recursively load frame address
2514 // ldr r0 [fp]
2515 // ldr r0 [r0]
2516 // ldr r0 [r0]
2517 // ...
2518 unsigned DestReg;
2519 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
2520 while (Depth--) {
2521 DestReg = createResultReg(RC);
2522 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2523 TII.get(LdrOpc), DestReg)
2524 .addReg(SrcReg).addImm(0));
2525 SrcReg = DestReg;
2526 }
2527 updateValueMap(&I, SrcReg);
2528 return true;
2529 }
2530 case Intrinsic::memcpy:
2531 case Intrinsic::memmove: {
2532 const MemTransferInst &MTI = cast<MemTransferInst>(I);
2533 // Don't handle volatile.
2534 if (MTI.isVolatile())
2535 return false;
2536
2537 // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
2538 // we would emit dead code because we don't currently handle memmoves.
2539 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2540 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
2541 // Small memcpy's are common enough that we want to do them without a call
2542 // if possible.
2543 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
2544 if (ARMIsMemCpySmall(Len)) {
2545 Address Dest, Src;
2546 if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2547 !ARMComputeAddress(MTI.getRawSource(), Src))
2548 return false;
2549 unsigned Alignment = MinAlign(MTI.getDestAlignment(),
2550 MTI.getSourceAlignment());
2551 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2552 return true;
2553 }
2554 }
2555
2556 if (!MTI.getLength()->getType()->isIntegerTy(32))
2557 return false;
2558
2559 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2560 return false;
2561
2562 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2563 return SelectCall(&I, IntrMemName);
2564 }
2565 case Intrinsic::memset: {
2566 const MemSetInst &MSI = cast<MemSetInst>(I);
2567 // Don't handle volatile.
2568 if (MSI.isVolatile())
2569 return false;
2570
2571 if (!MSI.getLength()->getType()->isIntegerTy(32))
2572 return false;
2573
2574 if (MSI.getDestAddressSpace() > 255)
2575 return false;
2576
2577 return SelectCall(&I, "memset");
2578 }
2579 case Intrinsic::trap: {
2580 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(
2581 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
2582 return true;
2583 }
2584 }
2585 }
2586
2587 bool ARMFastISel::SelectTrunc(const Instruction *I) {
2588 // The high bits for a type smaller than the register size are assumed to be
2589 // undefined.
2590 Value *Op = I->getOperand(0);
2591
2592 EVT SrcVT, DestVT;
2593 SrcVT = TLI.getValueType(DL, Op->getType(), true);
2594 DestVT = TLI.getValueType(DL, I->getType(), true);
2595
2596 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2597 return false;
2598 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2599 return false;
2600
2601 unsigned SrcReg = getRegForValue(Op);
2602 if (!SrcReg) return false;
2603
2604 // Because the high bits are undefined, a truncate doesn't generate
2605 // any code.
2606 updateValueMap(I, SrcReg);
2607 return true;
2608 }
2609
2610 unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
2611 bool isZExt) {
2612 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2613 return 0;
2614 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
2615 return 0;
2616
2617 // Table of which combinations can be emitted as a single instruction,
2618 // and which will require two.
2619 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2620 // ARM Thumb
2621 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops
2622 // ext: s z s z s z s z
2623 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2624 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2625 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2626 };
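// Example lookup (illustrative): an 8-bit zero-extension in ARM mode is a
// single instruction whether or not we have v6 ops (row 8, the two "z"
// columns for ARM), and the single-instruction table below then selects
// ANDri with a mask of 255.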
2627
2628 // Target register constraints:
2629 // - For ARM, the target can never be PC.
2630 // - For 16-bit Thumb, it is restricted to the lower 8 registers.
2631 // - For 32-bit Thumb, it is restricted to non-SP and non-PC.
2632 static const TargetRegisterClass *RCTbl[2][2] = {
2633 // Instructions: Two Single
2634 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2635 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2636 };
2637
2638 // Table governing the instruction(s) to be emitted.
2639 static const struct InstructionTable {
2640 uint32_t Opc : 16;
2641 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0.
2642 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi.
2643 uint32_t Imm : 8; // All instructions have either a shift or a mask.
2644 } IT[2][2][3][2] = {
2645 { // Two instructions (first is left shift, second is in this table).
2646 { // ARM Opc S Shift Imm
2647 /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 },
2648 /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } },
2649 /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 },
2650 /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } },
2651 /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 },
2652 /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } }
2653 },
2654 { // Thumb Opc S Shift Imm
2655 /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 },
2656 /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } },
2657 /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 },
2658 /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } },
2659 /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 },
2660 /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } }
2661 }
2662 },
2663 { // Single instruction.
2664 { // ARM Opc S Shift Imm
2665 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 },
2666 /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } },
2667 /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 },
2668 /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } },
2669 /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 },
2670 /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } }
2671 },
2672 { // Thumb Opc S Shift Imm
2673 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 },
2674 /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } },
2675 /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 },
2676 /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
2677 /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 },
2678 /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } }
2679 }
2680 }
2681 };
2682
2683 unsigned SrcBits = SrcVT.getSizeInBits();
2684 unsigned DestBits = DestVT.getSizeInBits();
2685 (void) DestBits;
2686 assert((SrcBits < DestBits) && "can only extend to larger types");
2687 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2688 "other sizes unimplemented");
2689 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2690 "other sizes unimplemented");
2691
2692 bool hasV6Ops = Subtarget->hasV6Ops();
2693 unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2}
2694 assert((Bitness < 3) && "sanity-check table bounds");
2695
2696 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2697 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2698 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
2699 unsigned Opc = ITP->Opc;
2700 assert(ARM::KILL != Opc && "Invalid table entry");
2701 unsigned hasS = ITP->hasS;
2702 ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
2703 assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
2704 "only MOVsi has shift operand addressing mode");
2705 unsigned Imm = ITP->Imm;
2706
2707 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
2708 bool setsCPSR = &ARM::tGPRRegClass == RC;
2709 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2710 unsigned ResultReg;
2711 // MOVsi encodes shift and immediate in shift operand addressing mode.
2712 // The following condition has the same value when emitting two
2713 // instruction sequences: both are shifts.
2714 bool ImmIsSO = (Shift != ARM_AM::no_shift);
2715
2716 // Either one or two instructions are emitted.
2717 // They're always of the form:
2718 // dst = in OP imm
2719 // CPSR is set only by 16-bit Thumb instructions.
2720 // Predicate, if any, is AL.
2721 // S bit, if available, is always 0.
2722 // When two are emitted the first's result will feed as the second's input,
2723 // that value is then dead.
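// Sketch of the two-instruction path (assumed mnemonics): a 1-bit sign
// extension on pre-v6 ARM comes out as roughly
//   mov  rT, rS, lsl #31
//   mov  rD, rT, asr #31
// while the single-instruction path emits e.g. sxth/uxth/and directly.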
2724 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2725 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
2726 ResultReg = createResultReg(RC);
2727 bool isLsl = (0 == Instr) && !isSingleInstr;
2728 unsigned Opcode = isLsl ? LSLOpc : Opc;
2729 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
2730 unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
2731 bool isKill = 1 == Instr;
2732 MachineInstrBuilder MIB = BuildMI(
2733 *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
2734 if (setsCPSR)
2735 MIB.addReg(ARM::CPSR, RegState::Define);
2736 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
2737 MIB.addReg(SrcReg, isKill * RegState::Kill)
2738 .addImm(ImmEnc)
2739 .add(predOps(ARMCC::AL));
2740 if (hasS)
2741 MIB.add(condCodeOp());
2742 // Second instruction consumes the first's result.
2743 SrcReg = ResultReg;
2744 }
2745
2746 return ResultReg;
2747 }
2748
2749 bool ARMFastISel::SelectIntExt(const Instruction *I) {
2750 // On ARM, in general, integer casts don't involve legal types; this code
2751 // handles promotable integers.
2752 Type *DestTy = I->getType();
2753 Value *Src = I->getOperand(0);
2754 Type *SrcTy = Src->getType();
2755
2756 bool isZExt = isa<ZExtInst>(I);
2757 unsigned SrcReg = getRegForValue(Src);
2758 if (!SrcReg) return false;
2759
2760 EVT SrcEVT, DestEVT;
2761 SrcEVT = TLI.getValueType(DL, SrcTy, true);
2762 DestEVT = TLI.getValueType(DL, DestTy, true);
2763 if (!SrcEVT.isSimple()) return false;
2764 if (!DestEVT.isSimple()) return false;
2765
2766 MVT SrcVT = SrcEVT.getSimpleVT();
2767 MVT DestVT = DestEVT.getSimpleVT();
2768 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2769 if (ResultReg == 0) return false;
2770 updateValueMap(I, ResultReg);
2771 return true;
2772 }
2773
2774 bool ARMFastISel::SelectShift(const Instruction *I,
2775 ARM_AM::ShiftOpc ShiftTy) {
2776 // Thumb2 mode is handled by the target-independent selector
2777 // or by SelectionDAG ISel.
2778 if (isThumb2)
2779 return false;
2780
2781 // Only handle i32 now.
2782 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
2783 if (DestVT != MVT::i32)
2784 return false;
2785
2786 unsigned Opc = ARM::MOVsr;
2787 unsigned ShiftImm;
2788 Value *Src2Value = I->getOperand(1);
2789 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2790 ShiftImm = CI->getZExtValue();
2791
2792 // Fall back to selection DAG isel if the shift amount
2793 // is zero or greater than the width of the value type.
2794 if (ShiftImm == 0 || ShiftImm >= 32)
2795 return false;
2796
2797 Opc = ARM::MOVsi;
2798 }
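// Illustrative examples (assumed mnemonics): "shl i32 %a, 3" becomes MOVsi,
// roughly "mov rD, rA, lsl #3"; a variable shift amount uses MOVsr,
// roughly "mov rD, rA, lsl rB".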
2799
2800 Value *Src1Value = I->getOperand(0);
2801 unsigned Reg1 = getRegForValue(Src1Value);
2802 if (Reg1 == 0) return false;
2803
2804 unsigned Reg2 = 0;
2805 if (Opc == ARM::MOVsr) {
2806 Reg2 = getRegForValue(Src2Value);
2807 if (Reg2 == 0) return false;
2808 }
2809
2810 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2811 if (ResultReg == 0) return false;
2812
2813 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2814 TII.get(Opc), ResultReg)
2815 .addReg(Reg1);
2816
2817 if (Opc == ARM::MOVsi)
2818 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2819 else if (Opc == ARM::MOVsr) {
2820 MIB.addReg(Reg2);
2821 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2822 }
2823
2824 AddOptionalDefs(MIB);
2825 updateValueMap(I, ResultReg);
2826 return true;
2827 }
2828
2829 // TODO: SoftFP support.
2830 bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
2831 switch (I->getOpcode()) {
2832 case Instruction::Load:
2833 return SelectLoad(I);
2834 case Instruction::Store:
2835 return SelectStore(I);
2836 case Instruction::Br:
2837 return SelectBranch(I);
2838 case Instruction::IndirectBr:
2839 return SelectIndirectBr(I);
2840 case Instruction::ICmp:
2841 case Instruction::FCmp:
2842 return SelectCmp(I);
2843 case Instruction::FPExt:
2844 return SelectFPExt(I);
2845 case Instruction::FPTrunc:
2846 return SelectFPTrunc(I);
2847 case Instruction::SIToFP:
2848 return SelectIToFP(I, /*isSigned*/ true);
2849 case Instruction::UIToFP:
2850 return SelectIToFP(I, /*isSigned*/ false);
2851 case Instruction::FPToSI:
2852 return SelectFPToI(I, /*isSigned*/ true);
2853 case Instruction::FPToUI:
2854 return SelectFPToI(I, /*isSigned*/ false);
2855 case Instruction::Add:
2856 return SelectBinaryIntOp(I, ISD::ADD);
2857 case Instruction::Or:
2858 return SelectBinaryIntOp(I, ISD::OR);
2859 case Instruction::Sub:
2860 return SelectBinaryIntOp(I, ISD::SUB);
2861 case Instruction::FAdd:
2862 return SelectBinaryFPOp(I, ISD::FADD);
2863 case Instruction::FSub:
2864 return SelectBinaryFPOp(I, ISD::FSUB);
2865 case Instruction::FMul:
2866 return SelectBinaryFPOp(I, ISD::FMUL);
2867 case Instruction::SDiv:
2868 return SelectDiv(I, /*isSigned*/ true);
2869 case Instruction::UDiv:
2870 return SelectDiv(I, /*isSigned*/ false);
2871 case Instruction::SRem:
2872 return SelectRem(I, /*isSigned*/ true);
2873 case Instruction::URem:
2874 return SelectRem(I, /*isSigned*/ false);
2875 case Instruction::Call:
2876 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2877 return SelectIntrinsicCall(*II);
2878 return SelectCall(I);
2879 case Instruction::Select:
2880 return SelectSelect(I);
2881 case Instruction::Ret:
2882 return SelectRet(I);
2883 case Instruction::Trunc:
2884 return SelectTrunc(I);
2885 case Instruction::ZExt:
2886 case Instruction::SExt:
2887 return SelectIntExt(I);
2888 case Instruction::Shl:
2889 return SelectShift(I, ARM_AM::lsl);
2890 case Instruction::LShr:
2891 return SelectShift(I, ARM_AM::lsr);
2892 case Instruction::AShr:
2893 return SelectShift(I, ARM_AM::asr);
2894 default: break;
2895 }
2896 return false;
2897 }
2898
2899 // This table describes sign- and zero-extend instructions which can be
2900 // folded into a preceding load. All of these extends have an immediate
2901 // (sometimes a mask and sometimes a shift) that's applied after
2902 // extension.
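// For example (illustrative), an i16 zero-extending load that the selector
// first emitted as ldrh + uxth (t2UXTH in Thumb2) matches the second row
// below; the uxth is erased and the zero-extension is folded into the load.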
2903 static const struct FoldableLoadExtendsStruct {
2904 uint16_t Opc[2]; // ARM, Thumb.
2905 uint8_t ExpectedImm;
2906 uint8_t isZExt : 1;
2907 uint8_t ExpectedVT : 7;
2908 } FoldableLoadExtends[] = {
2909 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
2910 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
2911 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
2912 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
2913 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
2914 };
2915
2916 /// The specified machine instr operand is a vreg, and that
2917 /// vreg is being provided by the specified load instruction. If possible,
2918 /// try to fold the load as an operand to the instruction, returning true if
2919 /// successful.
2920 bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
2921 const LoadInst *LI) {
2922 // Verify we have a legal type before going any further.
2923 MVT VT;
2924 if (!isLoadTypeLegal(LI->getType(), VT))
2925 return false;
2926
2927 // Combine load followed by zero- or sign-extend.
2928 // ldrb r1, [r0] ldrb r1, [r0]
2929 // uxtb r2, r1 =>
2930 // mov r3, r2 mov r3, r1
2931 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
2932 return false;
2933 const uint64_t Imm = MI->getOperand(2).getImm();
2934
2935 bool Found = false;
2936 bool isZExt;
2937 for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
2938 if (FLE.Opc[isThumb2] == MI->getOpcode() &&
2939 (uint64_t)FLE.ExpectedImm == Imm &&
2940 MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
2941 Found = true;
2942 isZExt = FLE.isZExt;
2943 }
2944 }
2945 if (!Found) return false;
2946
2947 // See if we can handle this address.
2948 Address Addr;
2949 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
2950
2951 unsigned ResultReg = MI->getOperand(0).getReg();
2952 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
2953 return false;
2954 MI->eraseFromParent();
2955 return true;
2956 }
2957
2958 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
2959 unsigned Align, MVT VT) {
2960 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
2961
2962 LLVMContext *Context = &MF->getFunction().getContext();
2963 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2964 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2965 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
2966 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
2967 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
2968 /*AddCurrentAddress=*/UseGOT_PREL);
2969
2970 unsigned ConstAlign =
2971 MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
2972 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
2973
2974 unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
2975 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
2976 MachineInstrBuilder MIB =
2977 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)
2978 .addConstantPoolIndex(Idx);
2979 if (Opc == ARM::LDRcp)
2980 MIB.addImm(0);
2981 MIB.add(predOps(ARMCC::AL));
2982
2983 // Fix the address by adding pc.
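// Sketch of the resulting sequence in ARM mode (assumed mnemonics):
//   ldr  rT, .LCPIn          @ constant-pool load emitted above
//   add  rD, pc, rT          @ PICADD (or "ldr rD, [pc, rT]" via PICLDR
//                            @ when going through the GOT)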
2984 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
2985 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
2986 : ARM::PICADD;
2987 DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
2988 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
2989 .addReg(TempReg)
2990 .addImm(ARMPCLabelIndex);
2991 if (!Subtarget->isThumb())
2992 MIB.add(predOps(ARMCC::AL));
2993
2994 if (UseGOT_PREL && Subtarget->isThumb()) {
2995 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
2996 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2997 TII.get(ARM::t2LDRi12), NewDestReg)
2998 .addReg(DestReg)
2999 .addImm(0);
3000 DestReg = NewDestReg;
3001 AddOptionalDefs(MIB);
3002 }
3003 return DestReg;
3004 }
3005
3006 bool ARMFastISel::fastLowerArguments() {
3007 if (!FuncInfo.CanLowerReturn)
3008 return false;
3009
3010 const Function *F = FuncInfo.Fn;
3011 if (F->isVarArg())
3012 return false;
3013
3014 CallingConv::ID CC = F->getCallingConv();
3015 switch (CC) {
3016 default:
3017 return false;
3018 case CallingConv::Fast:
3019 case CallingConv::C:
3020 case CallingConv::ARM_AAPCS_VFP:
3021 case CallingConv::ARM_AAPCS:
3022 case CallingConv::ARM_APCS:
3023 case CallingConv::Swift:
3024 break;
3025 }
3026
3027 // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments,
3028 // which are passed in r0 - r3.
3029 for (const Argument &Arg : F->args()) {
3030 if (Arg.getArgNo() >= 4)
3031 return false;
3032
3033 if (Arg.hasAttribute(Attribute::InReg) ||
3034 Arg.hasAttribute(Attribute::StructRet) ||
3035 Arg.hasAttribute(Attribute::SwiftSelf) ||
3036 Arg.hasAttribute(Attribute::SwiftError) ||
3037 Arg.hasAttribute(Attribute::ByVal))
3038 return false;
3039
3040 Type *ArgTy = Arg.getType();
3041 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3042 return false;
3043
3044 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3045 if (!ArgVT.isSimple()) return false;
3046 switch (ArgVT.getSimpleVT().SimpleTy) {
3047 case MVT::i8:
3048 case MVT::i16:
3049 case MVT::i32:
3050 break;
3051 default:
3052 return false;
3053 }
3054 }
3055
3056 static const MCPhysReg GPRArgRegs[] = {
3057 ARM::R0, ARM::R1, ARM::R2, ARM::R3
3058 };
3059
3060 const TargetRegisterClass *RC = &ARM::rGPRRegClass;
3061 for (const Argument &Arg : F->args()) {
3062 unsigned ArgNo = Arg.getArgNo();
3063 unsigned SrcReg = GPRArgRegs[ArgNo];
3064 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3065 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3066 // Without this, EmitLiveInCopies may eliminate the livein if its only
3067 // use is a bitcast (which isn't turned into an instruction).
3068 unsigned ResultReg = createResultReg(RC);
3069 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3070 TII.get(TargetOpcode::COPY),
3071 ResultReg).addReg(DstReg, getKillRegState(true));
3072 updateValueMap(&Arg, ResultReg);
3073 }
3074
3075 return true;
3076 }
3077
3078 namespace llvm {
3079
3080 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
3081 const TargetLibraryInfo *libInfo) {
3082 if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
3083 return new ARMFastISel(funcInfo, libInfo);
3084
3085 return nullptr;
3086 }
3087
3088 } // end namespace llvm
3089