//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
          "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
          "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  EmitStartPt = 0;

  // Advance the emit start point past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    EmitStartPt = I;
    ++I;
  }
  LastLocalValue = EmitStartPt;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
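  //
  // Illustrative example (not from the original comments): for
  //   %t = add i32 %a, %b
  //   %u = mul i32 %t, %t
  // %t has two uses (both at the mul), so hasOneUse() fails and the kill is
  // not trivial; a single use in the same block would qualify.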
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
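      // Illustrative example (not from the original comments): 2.0 can be
      // materialized as the integer constant 2 followed by a SINT_TO_FP,
      // since the float-to-integer conversion below is exact for it.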
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
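/// (Illustrative example, not from the original comments: if block B uses %v
/// and is selected before the block defining %v, the first use creates a
/// vreg for %v; when the def is later selected into a different vreg, the
/// RegFixups entry recorded below arranges for the old vreg's uses to be
/// replaced.)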
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
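  // (Illustrative example, not from the original comments: a GEP with three
  // small constant indices accumulates their byte offsets into TotalOffs and
  // emits one ADD at the end, instead of one ADD per index.)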
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs += TD.getTypeAllocSize(Ty)*CI->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
    // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;

  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
      if (Offset)
        Reg = TRI.getFrameRegister(*FuncInfo.MF);
    }
    if (!Reg)
      Reg = lookUpRegForValue(Address);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Reg && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Reg = FuncInfo.InitializeRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    else
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
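    // The second argument selects the minimum (true) or maximum (false)
    // remaining object size. With no analysis available, answer "unknown":
    // 0 for the minimum and -1 for the maximum.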
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // Usually it does not make sense to initialize a value, make an unrelated
  // function call, and use the value afterwards, because the value tends to
  // be spilled on the stack across the call. So we move the pointer to the
  // last local value back to the beginning of the block, so that all of the
  // values that have already been materialized appear after the call. It
  // also makes sense to skip intrinsics, since they tend to be inlined.
  if (!isa<IntrinsicInst>(F))
    flushLocalValueMap();

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;

  // As a special case, don't handle calls to builtin library functions that
  // may be translated directly to target instructions.
  if (const CallInst *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;
  }

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DL = DebugLoc();
    return true;
  }
  // Remove dead code. However, ignore call instructions since we've flushed
  // the local value map and recomputed the insert point.
  if (!isa<CallInst>(I)) {
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
  }

  // Next, try calling the target to attempt to handle the instruction.
  SavedInsertPt = FuncInfo.InsertPt;
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DL = DebugLoc();
    return true;
  }
  // Check for dead code and remove as necessary.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {

  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // This is the unconditional fall-through case, which needs no
    // instructions. However, if the branch would be the only instruction in
    // the block, we emit it anyway (below) for more accurate line
    // information.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
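  // For example, negating an f64 becomes an i64 XOR with
  // 0x8000000000000000 (the IEEE sign bit), between two bitcasts.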
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
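  // (Illustrative example, not from the original comments: extracting the
  // second member of a {i32, i32} aggregate yields the base register plus
  // one, assuming each i32 member occupies a single register.)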
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo,
                   const TargetLibraryInfo *libInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()),
    LibInfo(libInfo) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
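/// (Illustrative example, not from the original comments: a MUL by 8 is
/// first rewritten below to a SHL by 3 before the "ri" form is attempted.)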
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
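    // The instruction has no explicit defs, so its result is only available
    // as an implicit physical-register def; emit the instruction, then copy
    // the first implicit def into the virtual result register. The same
    // pattern recurs in the FastEmitInst_* helpers below.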
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill,
                                     uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
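/// (Illustrative example, not from the original comments: lowering a switch
/// can expand a single LLVM BB into a tree of machine basic blocks, so PHI
/// operands are recorded in PHINodesToUpdate and patched in afterwards.)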
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}