//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}
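
// Illustrative usage (class name assumed): a selector that has chosen a
// concrete class for a generic vreg can constrain it in place, e.g.
//   Register Fixed = constrainRegToClass(MRI, TII, RBI, SrcReg, GPR32Class);
// If SrcReg cannot be constrained, Fixed is a fresh vreg of GPR32Class and
// the caller is responsible for connecting it to SrcReg with a COPY.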

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, const MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}
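
// Illustrative MIR (vreg names assumed): for an incompatible *use*, the COPY
// goes before InsertPt and the caller rewrites the operand to the new vreg:
//   %new:gpr = COPY %old
//   INSERT_PT ... %new ...
// For an incompatible *def*, the COPY goes right after InsertPt instead:
//   INSERT_PT %new, ...
//   %old = COPY %new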

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
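
// Illustrative call site (assumed context): a target InstructionSelector that
// has rewritten a generic opcode to a target opcode typically finishes with
//   I.setDesc(TII.get(TargetOpc));
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
// so every explicit vreg operand ends up in the class the new desc requires.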

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}
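
// Illustrative example (assumed MIR): after combines, an instruction like
//   %2:_(s32) = G_ADD %0, %1
// whose result %2 has no non-debug uses is trivially dead and may be erased;
// stores and LOCAL_ESCAPE labels never are.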

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error && TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<int64_t> llvm::getConstantVRegVal(Register VReg,
                                           const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  if (Val.getBitWidth() > 64)
    return None;

  return ValueAndVReg{Val.getSExtValue(), VReg};
}
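
// Illustrative look-through (assumed MIR):
//   %0:_(s64) = G_CONSTANT i64 42
//   %1:_(s64) = COPY %0
//   %2:_(s32) = G_TRUNC %1
// getConstantVRegValWithLookThrough(%2, MRI) walks the COPY and the G_TRUNC,
// replays the truncation on the APInt, and returns {42, %0}.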

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
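
// Example: getAPFloatFromSize(0.5, 16) builds a double APFloat and converts
// it to IEEE half with round-to-nearest-even; 0.5 is exactly representable,
// so no precision is lost.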

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (!MaybeOp1Cst)
    return None;

  LLT Ty = MRI.getType(Op1);
  APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
  APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}
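
// Illustrative fold (assumed MIR): with
//   %0:_(s32) = G_CONSTANT i32 6
//   %1:_(s32) = G_CONSTANT i32 7
// ConstantFoldBinOp(TargetOpcode::G_MUL, %0, %1, MRI) yields APInt(32, 42),
// while any division or remainder by a zero constant falls through to None.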

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}
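
// Illustrative use (register name assumed): a target's call lowering that
// needs the incoming value of a physical argument register can write
//   Register VReg = getFunctionLiveInPhysReg(MF, TII, PhysArgReg, RC,
//                                            LLT::scalar(64));
// and rely on the entry-block COPY being created (or re-created) on demand.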

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG:
      return C1.trunc(Imm).sext(C1.getBitWidth());
    }
  }
  return None;
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}
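
// Worked examples for getLCMType (types written in MIR notation):
//   getLCMType(<2 x s32>, <3 x s32>) == <6 x s32>   (2*3 elements / gcd 1)
//   getLCMType(<3 x s16>, s32)       == <6 x s16>   (lcm(48, 32) == 96 bits)
//   getLCMType(s16, s32)             == s32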

LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(GCD, OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}
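
// Worked examples for getGCDType (types written in MIR notation):
//   getGCDType(<4 x s32>, <3 x s32>) == s32         (gcd(4, 3) == 1 element)
//   getGCDType(<4 x s16>, s32)       == <2 x s16>   (gcd(64, 32) == 32 bits)
//   getGCDType(s64, s48)             == s16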

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}
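
// Illustrative masks: <1, -1, 1, 1> splats element 1 (-1 marks an undef
// lane), <-1, -1> is treated as a splat of element 0, and <0, 1> is not a
// splat, so None is returned.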

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}
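
// Illustrative splat (assumed MIR): with
//   %c:_(s32) = G_CONSTANT i32 5
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
// getBuildVectorConstantSplat(%v, MRI) yields 5; mixing in any other constant
// element yields None.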

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

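// Example: on a target with ZeroOrNegativeOneBooleanContent for vectors,
// getICmpTrueVal(TLI, /*IsVector=*/true, /*IsFP=*/false) returns -1, matching
// a vector compare that produces all-ones lanes, and isConstTrueVal accepts
// only -1 as "true".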