//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

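/// Copy the per-argument IR attributes (sext/zext, byval, swifterror, etc.)
/// from the call site into this ArgListEntry so that argument lowering can
/// honor them.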
void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                           unsigned AttrIdx) {
  IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  IsSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
  IsSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
  Alignment = CS->getParamAlignment(AttrIdx);
}

/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

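/// Lower the function's formal arguments via the target's fastLowerArguments
/// hook and record the resulting registers for use in non-entry blocks.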
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fall back to the SDISel argument lowering code to deal with the sret
    // pointer parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

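/// Flush the local value map: forget all locally cached values and reset the
/// insertion point to the start of the local value area.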
void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

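/// Return true if V dies at its single use, so the corresponding register use
/// can be marked as a kill. Constants, arguments, and values that fast-isel
/// may fold or coalesce are conservatively reported as having no trivial kill.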
bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even though the value might have only one use in the LLVM IR, it is
  // possible that FastISel might fold the use into another instruction and now
  // there is more than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

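/// Look up or create the virtual register that holds V. Instructions are
/// assigned a register up front and materialized later; constants and other
/// non-instruction values are materialized immediately in the local value
/// area.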
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

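/// Target-independent materialization of a non-instruction value: integer
/// constants, static allocas, null pointers, floating-point constants,
/// constant expressions, and undef. Returns 0 if the value could not be
/// materialized.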
unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                 APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

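/// Record that instruction I produces its result in Reg (and in the
/// NumRegs - 1 registers following it). If I already had registers assigned,
/// arrange for uses of the old registers to be rewritten to the new ones.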
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;

    AssignedReg = Reg;
  }
}

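/// Compute the register (and its kill flag) holding a GEP index operand,
/// sign-extending or truncating the index to pointer width as needed.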
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

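/// Reset the current insertion point to just after the last local value
/// instruction, skipping any EH_LABELs, which must remain at the start of the
/// block.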
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(static_cast<MachineInstr *>(I) && static_cast<MachineInstr *>(E) &&
         std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

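/// Save the current insertion point and debug location and move the insert
/// point into the local value area, so that materialization code is emitted
/// there. The returned SavePoint is later passed to leaveLocalValueArea.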
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

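/// Select a binary operator, trying the register-immediate and
/// register-FP-immediate forms before falling back to register-register.
/// Also applies two cheap strength reductions: exact sdiv and urem by a
/// power of two.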
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg) {
      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

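/// Select a getelementptr by folding constant indices into a running byte
/// offset and emitting ADD/MUL only for dynamic indices. As an illustrative
/// sketch, "getelementptr i32, i32* %p, i64 %i" becomes roughly
/// N = %p + %i * 4, while a chain of constant indices collapses into a
/// single N = N + TotalOffs addition.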
bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

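/// Append the live-variable operands of a stackmap or patchpoint intrinsic to
/// Ops, encoding constants with a ConstantOp prefix, static allocas as frame
/// indices, and everything else as plain register uses.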
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target-specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.addOperand(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo()->setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
       ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, AttrI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    const char *Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImpl=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.addOperand(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
                           Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI + 1);
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

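/// Target-independent call lowering: compute the return-value and outgoing
/// argument flags required by the calling convention, then hand the prepared
/// CallLoweringInfo off to the target's fastLowerCall hook.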
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  FunctionType *FuncTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

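/// Select a call: simple side-effect-only inline asm is emitted directly,
/// intrinsics are dispatched to selectIntrinsicCall, and ordinary calls flush
/// the local value map before being lowered.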
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call, and then use the value, because the value tends to be
  // spilled on the stack. So, we move the pointer to the last local value to
  // the beginning of the block, so that all the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

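/// Select an intrinsic call: lifetime markers and donothing are dropped,
/// debug intrinsics become DBG_VALUE instructions, objectsize and expect are
/// folded, stackmap/patchpoint get dedicated lowering, and anything else is
/// passed to the target's fastLowerIntrinsicCall hook.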
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const auto *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      if (Op->isReg()) {
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
                DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .addOperand(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(0U)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getOffset(), DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);
  }

  return fastLowerIntrinsicCall(II);
}

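/// Select a cast instruction whose source and destination types are both
/// simple and legal, using the target's fastEmit_r hook.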
bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

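/// Select a bitcast as either a no-op (same type), a reg-reg copy (same
/// register class), or a BITCAST node.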
bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

// Return true if we should copy from swift error to the final vreg as
// specified by SwiftErrorWorklist.
static bool shouldCopySwiftErrorsToFinalVRegs(const TargetLowering &TLI,
                                              FunctionLoweringInfo &FuncInfo) {
  if (!TLI.supportSwiftError())
    return false;
  return FuncInfo.SwiftErrorWorklist.count(FuncInfo.MBB);
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

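/// Select instruction I, first with the target-independent selector and then
/// with the target-specific fastSelectInstruction hook. On failure, any
/// machine instructions already emitted for I are removed so that
/// SelectionDAG can take over cleanly.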
bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I)) {
    // If we need to materialize any vreg from worklist, we bail out of
    // FastISel.
    if (shouldCopySwiftErrorsToFinalVRegs(TLI, FuncInfo))
      return false;
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (ImmutableCallSite CS = ImmutableCallSite(I))
    for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
      if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I)) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // If the branch would be the only instruction in the block, emit it
    // anyway for more accurate line information; otherwise this is the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can happen
  // in degenerate IR, and MachineIR forbids having a block appear twice in the
  // successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
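  // (For example, for an f32 value this XORs the integer bits with
  // 0x80000000, flipping only the sign bit.)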
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

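  // Skip over the registers occupied by the aggregate members that precede
  // VTIndex. (For example, extracting field 1 of {i64, i32} on a typical
  // 32-bit target skips the two i32 registers holding field 0.)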
  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
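    // Pointer-to-int and int-to-pointer casts only change the value when the
    // bit widths differ: widen with a zero-extend, narrow with a truncate,
    // and otherwise reuse the source register unchanged.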
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() {}

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
                                bool /*Op0IsKill*/, unsigned /*Op1*/,
                                bool /*Op1IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
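/// For example, a caller selecting "xor i32 %x, 42" might use (an
/// illustrative sketch; the operand register and kill flag come from the
/// caller's own bookkeeping):
/// \code
///   unsigned ResultReg = fastEmit_ri_(MVT::i32, ISD::XOR, OpReg,
///                                     /*Op0IsKill=*/true, /*Imm=*/42,
///                                     /*ImmType=*/MVT::i32);
/// \endcode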
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
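  // mul x, 8 -> shl x, 3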
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed): check to make sure shift amounts are
  // in range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materialising constant expressions
    // grows down, not up, which means that any constant expressions we generate
    // later which also use 'Imm' could be after this instruction and therefore
    // after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

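  // If the instruction has an explicit def, emit directly into ResultReg;
  // otherwise the result lands in the instruction's first implicit def, so
  // emit the instruction and then copy that register into ResultReg. The
  // same pattern is repeated in the other fastEmitInst_* variants below.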
  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
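/// In practice this emits an AND with immediate 1, so e.g. a boolean held in
/// the low bit of Op0 becomes a clean zero-extended value.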
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

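/// Try to fold the single-use load \p LI into \p FoldInst, the instruction
/// that (possibly through a chain of single-use instructions in the same
/// block) consumes it. For example, on a target with memory operands such as
/// x86, a sequence like "%v = load i32, i32* %p; %s = add i32 %v, %x" may be
/// selectable as a single add-from-memory instruction; the actual folding is
/// delegated to the target hook tryToFoldLoadIntoMI at the end.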
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

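/// Return true if \p Add (e.g. "%a = add i64 %idx, 16") is an add with a
/// constant right-hand operand that is safe to fold into the address
/// computation for \p GEP: the types have the same size and, if the add is
/// an instruction, it lives in the same machine basic block.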
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

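/// Build a MachineMemOperand for the given load or store instruction,
/// deriving the pointer, size, alignment, and flags (volatile, nontemporal,
/// invariant) plus AA and range metadata from the IR. Returns null for any
/// other kind of instruction.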
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

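  // With identical operands, each predicate degenerates to a constant or a
  // NaN check. E.g. "fcmp oeq %x, %x" is true exactly when %x is not a NaN
  // (FCMP_ORD), while "icmp ult %x, %x" is always false (FCMP_FALSE).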
  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;

  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
  }

  return Predicate;
}
