//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD: \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM: \
  case TargetOpcode::G_VECREDUCE_FMINIMUM: \
  case TargetOpcode::G_VECREDUCE_ADD: \
  case TargetOpcode::G_VECREDUCE_MUL: \
  case TargetOpcode::G_VECREDUCE_AND: \
  case TargetOpcode::G_VECREDUCE_OR: \
  case TargetOpcode::G_VECREDUCE_XOR: \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM: \
  case TargetOpcode::G_VECREDUCE_FMINIMUM: \
  case TargetOpcode::G_VECREDUCE_ADD: \
  case TargetOpcode::G_VECREDUCE_MUL: \
  case TargetOpcode::G_VECREDUCE_AND: \
  case TargetOpcode::G_VECREDUCE_OR: \
  case TargetOpcode::G_VECREDUCE_XOR: \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

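// For example, since these macros expand to a run of `case` labels, they can
// be used directly inside a switch over an instruction's opcode:
//
//   switch (MI.getOpcode()) {
//   GISEL_VECREDUCE_CASES_NONSEQ
//     // Common handling for every non-sequential vector reduction.
//     break;
//   default:
//     break;
//   }
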
/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the register operand \p RegMO, so that it is now constrained to
/// the TargetRegisterClass passed as an argument (\p RegClass).
/// If this fails, create a new virtual register in the correct class and
/// insert a COPY before \p InsertPt if it is a use or after if it is a
/// definition. In both cases, the function also updates the register of
/// \p RegMO. The debug location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain the register in \p RegMO so that it is usable by operand
/// \p OpIdx of the provided MCInstrDesc \p II. If this fails, create a new
/// virtual register in the correct class and insert a COPY before \p InsertPt
/// if it is a use or after if it is a definition. In both cases, the function
/// also updates the register of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);
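// A minimal usage sketch (the selector function and MyTarget::ADDrr opcode
// below are hypothetical): after an instruction selector rewrites a generic
// instruction in place, it constrains the register operands to the classes
// required by the new opcode.
//
//   bool selectFoo(MachineInstr &I, const TargetInstrInfo &TII,
//                  const TargetRegisterInfo &TRI,
//                  const RegisterBankInfo &RBI) {
//     I.setDesc(TII.get(MyTarget::ADDrr)); // Hypothetical target opcode.
//     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
//   }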

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream. Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in int64_t, returns
/// it.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT, returns its APInt value and def register.
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);
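// A brief usage sketch (illustrative): inspect the second source of a G_ADD,
// looking through copies and extensions to reach an underlying G_CONSTANT.
//
//   if (auto Cst = getIConstantVRegValWithLookThrough(
//           MI.getOperand(2).getReg(), MRI)) {
//     // Cst->Value is the APInt constant; Cst->VReg is the G_CONSTANT's def.
//     if (Cst->Value.isZero())
//       ; // e.g. replace the add with its other operand.
//   }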

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT or G_FCONSTANT, returns its value as an APInt and
/// its def register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_FCONSTANT, returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP *getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if \p Reg is defined by a single def instruction that is
/// \p Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg and the underlying source register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Templated variant of getOpcodeDef that returns a MachineInstr-derived T.
/// See if \p Reg is defined by a single def instruction of type T. Also try
/// to do trivial folding if it's a COPY with same types. Returns null
/// otherwise.
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}
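// A short usage sketch (assumes llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
// is included so that GBuildVector is available):
//
//   if (auto *BV = getOpcodeDef<GBuildVector>(SrcReg, MRI)) {
//     // BV is the G_BUILD_VECTOR that defines SrcReg, possibly reached by
//     // looking through trivial copies.
//   }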

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);
std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                        const Register Op0,
                                        const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is
/// true, this returns whether \p Val can be assumed to never be a signaling
/// NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register
/// \p PhysReg. This register is expected to have class \p RC, and optional
/// type \p RegTy. This assumes all references to the register will use the
/// same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy from \p PhysReg to the
/// returned virtual register.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is
/// that a G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be
/// constructed from \p OrigTy elements, and unmerged into \p TargetTy.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);
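// A rough, hedged illustration of the intent (not exhaustive): for vectors
// with the same element type the result covers both element counts, and for
// scalars it is the common covering width. For example, one would expect:
//
//   getLCMType(LLT::fixed_vector(3, 32), LLT::fixed_vector(2, 32));
//   // ~> <6 x s32>
//   getLCMType(LLT::scalar(32), LLT::scalar(64)); // ~> s64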

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1).
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
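// A hedged illustration of the intent, mirroring the example above: one would
// expect
//
//   getGCDType(LLT::fixed_vector(4, 32), LLT::fixed_vector(2, 32));
//   // ~> <2 x s32>
//   getGCDType(LLT::scalar(32), LLT::scalar(64)); // ~> s32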

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true, some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an
/// issue for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
///   %reg = COPY $physreg
///   %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
///   %cst = G_CONSTANT iN 4
///   %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
std::optional<APInt>
isConstantOrConstantSplatVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);
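// A short usage sketch (illustrative; assumes llvm/IR/Constants.h is available
// for ConstantInt): check that every element of Reg is a non-zero constant,
// rejecting undef elements by returning false when the Constant is null.
//
//   bool AllNonZero = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     auto *CI = dyn_cast_or_null<ConstantInt>(C);
//     return CI && !CI->isZero();
//   });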

/// Returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);

/// \returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

} // End namespace llvm.
#endif // LLVM_CODEGEN_GLOBALISEL_UTILS_H