//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/APInt.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/InstructionCost.h"
#include <functional>
#include <optional>
#include <utility>

namespace llvm {

namespace Intrinsic {
typedef unsigned ID;
}

class AllocaInst;
class AssumptionCache;
class BlockFrequencyInfo;
class DominatorTree;
class BranchInst;
class Function;
class GlobalValue;
class InstCombiner;
class OptimizationRemarkEmitter;
class InterleavedAccessInfo;
class IntrinsicInst;
class LoadInst;
class Loop;
class LoopInfo;
class LoopVectorizationLegality;
class ProfileSummaryInfo;
class RecurrenceDescriptor;
class SCEV;
class ScalarEvolution;
class SmallBitVector;
class StoreInst;
class SwitchInst;
class TargetLibraryInfo;
class Type;
class VPIntrinsic;
struct KnownBits;

/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};

/// Attributes of a target dependent hardware loop.
struct HardwareLoopInfo {
  HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L);
  Loop *L = nullptr;
  BasicBlock *ExitBlock = nullptr;
  BranchInst *ExitBranch = nullptr;
  const SCEV *ExitCount = nullptr;
  IntegerType *CountType = nullptr;
  Value *LoopDecrement = nullptr; // Decrement the loop counter by this
                                  // value in every iteration.
  bool IsNestingLegal = false;    // Can a hardware loop be a parent to
                                  // another hardware loop?
  bool CounterInReg = false;      // Should loop counter be updated in
                                  // the loop via a phi?
  bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                  // icmp ne zero on the loop counter value and
                                  // produces an i1 to guard the loop entry.
  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop = false,
                               bool ForceHardwareLoopPHI = false);
  bool canAnalyze(LoopInfo &LI);
};
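// Example of how this structure is typically used (illustrative sketch only;
// assumes a TargetTransformInfo instance TTI and the analyses SE, LI, DT, AC
// and LibInfo are available for the loop L):
//   HardwareLoopInfo HWLoopInfo(L);
//   if (HWLoopInfo.canAnalyze(LI) &&
//       TTI.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo) &&
//       HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT)) {
//     // The loop is a candidate; HWLoopInfo now describes the counter setup.
//   }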

class IntrinsicCostAttributes {
  const IntrinsicInst *II = nullptr;
  Type *RetTy = nullptr;
  Intrinsic::ID IID;
  SmallVector<Type *, 4> ParamTys;
  SmallVector<const Value *, 4> Arguments;
  FastMathFlags FMF;
  // If ScalarizationCost is UINT_MAX, the cost of scalarizing the
  // arguments and the return value will be computed based on types.
  InstructionCost ScalarizationCost = InstructionCost::getInvalid();

public:
  IntrinsicCostAttributes(
      Intrinsic::ID Id, const CallBase &CI,
      InstructionCost ScalarCost = InstructionCost::getInvalid(),
      bool TypeBasedOnly = false);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
      FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                          ArrayRef<const Value *> Args);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
      ArrayRef<Type *> Tys, FastMathFlags Flags = FastMathFlags(),
      const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  Intrinsic::ID getID() const { return IID; }
  const IntrinsicInst *getInst() const { return II; }
  Type *getReturnType() const { return RetTy; }
  FastMathFlags getFlags() const { return FMF; }
  InstructionCost getScalarizationCost() const { return ScalarizationCost; }
  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
  const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }

  bool isTypeBasedOnly() const {
    return Arguments.empty();
  }

  bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
};
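// Example (illustrative sketch; the intrinsic ID and vector type VecTy are
// placeholders): attributes for a type-based cost query on a umin intrinsic
// operating on VecTy could be assembled as
//   IntrinsicCostAttributes Attrs(Intrinsic::umin, VecTy, {VecTy, VecTy});
// and then handed to the intrinsic cost interfaces.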

enum class TailFoldingStyle {
  /// Don't use tail folding
  None,
  /// Use predicate only to mask operations on data in the loop.
  /// When the VL is not known to be a power-of-2, this method requires a
  /// runtime overflow check for the i + VL in the loop because it compares the
  /// scalar induction variable against the tripcount rounded up by VL which may
  /// overflow. When the VL is a power-of-2, both the increment and uprounded
  /// tripcount will overflow to 0, which does not require a runtime check
  /// since the loop is exited when the loop induction variable equals the
  /// uprounded trip-count, which are both 0.
  Data,
  /// Same as Data, but avoids using the get.active.lane.mask intrinsic to
  /// calculate the mask and instead implements this with a
  /// splat/stepvector/cmp.
  /// FIXME: Can this kind be removed now that SelectionDAGBuilder expands the
  /// active.lane.mask intrinsic when it is not natively supported?
  DataWithoutLaneMask,
  /// Use predicate to control both data and control flow.
  /// This method always requires a runtime overflow check for the i + VL
  /// increment inside the loop, because it uses the result directly in the
  /// active.lane.mask to calculate the mask for the next iteration. If the
  /// increment overflows, the mask is no longer correct.
  DataAndControlFlow,
  /// Use predicate to control both data and control flow, but modify
  /// the trip count so that a runtime overflow check can be avoided
  /// and such that the scalar epilogue loop can always be removed.
  DataAndControlFlowWithoutRuntimeCheck,
  /// Use predicated EVL instructions for tail-folding.
  /// Indicates that VP intrinsics should be used.
  DataWithEVL,
};

struct TailFoldingInfo {
  TargetLibraryInfo *TLI;
  LoopVectorizationLegality *LVL;
  InterleavedAccessInfo *IAI;
  TailFoldingInfo(TargetLibraryInfo *TLI, LoopVectorizationLegality *LVL,
                  InterleavedAccessInfo *IAI)
      : TLI(TLI), LVL(LVL), IAI(IAI) {}
};

class TargetTransformInfo;
typedef TargetTransformInfo TTI;

/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// The kind of cost model.
  ///
  /// There are several different cost models that can be customized by the
  /// target. The normalization of each cost model may be target specific.
  /// e.g. TCK_SizeAndLatency should be comparable to target thresholds such as
  /// those derived from MCSchedModel::LoopMicroOpBufferSize etc.
  enum TargetCostKind {
    TCK_RecipThroughput, ///< Reciprocal throughput.
    TCK_Latency,         ///< The latency of instruction.
    TCK_CodeSize,        ///< Instruction code size.
    TCK_SizeAndLatency   ///< The weighted sum of size and latency.
  };

  /// Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of code-size
  /// cost and execution cost. A free instruction is typically one that folds
  /// into another instruction. For example, reg-to-reg moves can often be
  /// skipped by renaming the registers in the CPU, but they still are encoded
  /// and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };
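  // Example (illustrative sketch; assumes a TargetTransformInfo instance TTI
  // and an instruction I are in scope): a client deciding whether an
  // instruction is cheap enough to duplicate might write
  //   InstructionCost Cost =
  //       TTI.getInstructionCost(&I, TTI::TCK_SizeAndLatency);
  //   bool IsCheap = Cost.isValid() && Cost <= TTI::TCC_Basic;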

  /// Estimate the cost of a GEP operation when lowered.
  ///
  /// \p PointeeType is the source element type of the GEP.
  /// \p Ptr is the base pointer operand.
  /// \p Operands is the list of indices following the base pointer.
  ///
  /// \p AccessType is a hint as to what type of memory might be accessed by
  /// users of the GEP. getGEPCost will use it to determine if the GEP can be
  /// folded into the addressing mode of a load/store. If AccessType is null,
  /// then the resulting target type based on PointeeType will be used as an
  /// approximation.
  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands, Type *AccessType = nullptr,
             TargetCostKind CostKind = TCK_SizeAndLatency) const;
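  // Example (illustrative sketch; assumes a GetElementPtrInst *GEP and a
  // TargetTransformInfo instance TTI are in scope):
  //   SmallVector<const Value *, 4> Indices(GEP->indices());
  //   InstructionCost Cost = TTI.getGEPCost(GEP->getSourceElementType(),
  //                                         GEP->getPointerOperand(), Indices);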

  /// Describe known properties for a set of pointers.
  struct PointersChainInfo {
    /// All the GEPs in a set have the same base address.
    unsigned IsSameBaseAddress : 1;
    /// These properties are only valid if IsSameBaseAddress is set.
    /// True if all pointers are separated by a unit stride.
    unsigned IsUnitStride : 1;
    /// True if the distance between any two neighbouring pointers is a known
    /// value.
    unsigned IsKnownStride : 1;
    unsigned Reserved : 29;

    bool isSameBase() const { return IsSameBaseAddress; }
    bool isUnitStride() const { return IsSameBaseAddress && IsUnitStride; }
    bool isKnownStride() const { return IsSameBaseAddress && IsKnownStride; }

    static PointersChainInfo getUnitStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/1,
              /*IsKnownStride=*/1, 0};
    }
    static PointersChainInfo getKnownStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
              /*IsKnownStride=*/1, 0};
    }
    static PointersChainInfo getUnknownStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
              /*IsKnownStride=*/0, 0};
    }
  };
  static_assert(sizeof(PointersChainInfo) == 4, "Was size increase justified?");

  /// Estimate the cost of a chain of pointers (typically pointer operands of a
  /// chain of loads or stores within the same block) when lowered.
  /// \p AccessTy is the type of the loads/stores that will ultimately use the
  /// \p Ptrs.
  InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const PointersChainInfo &Info, Type *AccessTy,
                       TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
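  // Example (illustrative sketch; assumes the pointer operands of a run of
  // consecutive loads have been collected into Ptrs, with base pointer Base
  // and element type EltTy):
  //   InstructionCost C = TTI.getPointersChainCost(
  //       Ptrs, Base, TTI::PointersChainInfo::getUnitStride(), EltTy);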

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const;
  unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const;

  /// \returns A value to be added to the inlining threshold.
  unsigned adjustInliningThreshold(const CallBase *CB) const;

  /// \returns The cost of having an Alloca in the caller if not inlined, to be
  /// added to the threshold.
  unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const;

  /// \returns Vector bonus in percent.
  ///
  /// Vector bonuses: We want to more aggressively inline vector-dense kernels
  /// and apply this bonus based on the percentage of vector instructions. A
  /// bonus is applied if the vector instructions exceed 50% and half that
  /// amount is applied if they exceed 10%. Note that these bonuses are somewhat
  /// arbitrary and evolved over time by accident as much as because they are
  /// principled bonuses.
  /// FIXME: It would be nice to base the bonus values on something more
  /// scientific. A target may have no bonus on vector instructions.
  int getInlinerVectorBonusPercent() const;

  /// \return the expected cost of a memcpy, which could e.g. depend on the
  /// source/destination type and alignment and the number of bytes copied.
  InstructionCost getMemcpyCost(const Instruction *I) const;

  /// Returns the maximum memset / memcpy size in bytes that still makes it
  /// profitable to inline the call.
  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;

  /// \return The estimated number of case clusters when lowering \p 'SI'.
  /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
  /// table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const;

  /// Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered.
  ///
  /// \p Operands is a list of operands which can be a result of transformations
  /// of the current operands. The number of operands on the list must be equal
  /// to the number of current operands the IR user has. Their order on the
  /// list must be the same as the order of the current operands the IR user
  /// has.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  InstructionCost getInstructionCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     TargetCostKind CostKind) const;

  /// This is a helper function which calls the three-argument
  /// getInstructionCost with \p Operands which are the current operands U has.
  InstructionCost getInstructionCost(const User *U,
                                     TargetCostKind CostKind) const {
    SmallVector<const Value *, 4> Operands(U->operand_values());
    return getInstructionCost(U, Operands, CostKind);
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  BranchProbability getPredictableBranchThreshold() const;

  /// Returns the estimated penalty of a branch misprediction in latency. This
  /// indicates how aggressively the target wants to eliminate unpredictable
  /// branches. A zero return value means extra optimization applied to them
  /// should be minimal.
  InstructionCost getBranchMispredictPenalty() const;

  /// Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to conditional
  /// branches.
  ///
  /// If \p F is passed, provides a context function. If \p F is known to only
  /// execute in a single threaded environment, the target may choose to skip
  /// uniformity analysis and assume all values are uniform.
  bool hasBranchDivergence(const Function *F = nullptr) const;

  /// Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent UniformityAnalysis.
  bool isSourceOfDivergence(const Value *V) const;

  // Returns true for the target-specific set of operations which produce a
  // uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Query the target whether the specified address space cast from FromAS to
  /// ToAS is valid.
  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  /// Return false if a \p AS0 address cannot possibly alias a \p AS1 address.
  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory with different address spaces. Access of a memory location through
  /// a pointer with this address space is expected to be legal but slower
  /// compared to the same memory location accessed through a pointer with a
  /// different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is converted
  /// to this address space, optimizations should attempt to replace the access
  /// with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// Return any intrinsic address operand indexes which may be rewritten if
  /// they use a flat address space pointer.
  ///
  /// \returns true if the intrinsic was handled.
  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  /// Return true if globals in this address space can have initializers other
  /// than `undef`.
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;

  unsigned getAssumedAddrSpace(const Value *V) const;

  bool isSingleThreaded() const;

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const;

  /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
  /// NewV, which has a different address space. This should happen for every
  /// operand index that collectFlatAddressOperands returned for the intrinsic.
  /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
  /// new value (which may be the original \p II with modified operands).
  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;
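  // Example of the expected two-step usage together with
  // collectFlatAddressOperands (illustrative sketch; assumes an
  // IntrinsicInst *II and the old/new pointer values OldV and NewV):
  //   SmallVector<int, 4> OpIndexes;
  //   if (TTI.collectFlatAddressOperands(OpIndexes, II->getIntrinsicID())) {
  //     Value *Rewritten = TTI.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
  //     // Rewritten is null if the target did not handle this intrinsic.
  //   }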

  /// Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getInstructionCost values returned by this API, and the expectation is
    /// that the unrolled loop's instructions when run through that interface
    /// should not exceed this cost. However, this is only an estimate. Also,
    /// specific loops may be unrolled even with a cost above this threshold if
    /// deemed profitable. Set this to UINT_MAX to disable the loop body cost
    /// restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (the value should be no less than 100).
    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                    MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current cost
    /// threshold and other factors.
    unsigned Count;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum upper bound of the trip count. Allowing MaxUpperBound
    /// to be overridden by a target gives more flexibility in certain cases.
    /// By default, MaxUpperBound uses UnrollMaxUpperBound, whose value is 8.
    unsigned MaxUpperBound;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to fall
    /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents the number of instructions optimized when the "back edge"
    // becomes a "fall through" in the unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when computing
    /// the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using the trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow unrolling of all the iterations of the runtime loop remainder.
    bool UnrollRemainder;
    /// Allow unroll and jam. Used to enable unroll and jam for the target.
    bool UnrollAndJam;
    /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
    /// value above is used during unroll and jam for the outer loop size.
    /// This value is used in the same manner to limit the size of the inner
    /// loop.
    unsigned UnrollAndJamInnerLoopThreshold;
    /// Don't allow loop unrolling to simulate more than this number of
    /// iterations when checking full unroll profitability.
    unsigned MaxIterationsCountToAnalyze;
    /// Don't disable runtime unroll for the loops which were vectorized.
    bool UnrollVectorizedLoop = false;
  };

  /// Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const;
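  // Example (illustrative sketch; assumes a Loop *L, a ScalarEvolution &SE and
  // an OptimizationRemarkEmitter &ORE are in scope):
  //   TTI::UnrollingPreferences UP;
  //   // ... seed UP with the caller's target-independent defaults ...
  //   TTI.getUnrollingPreferences(L, SE, UP, &ORE);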

  /// Query the target whether it would be profitable to convert the given loop
  /// into a hardware loop.
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const;

  /// Query the target whether it would be preferred to create a predicated
  /// vector loop, which can avoid the need to emit a scalar epilogue loop.
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;

  /// Query the target what the preferred style of tail folding is.
  /// \param IVUpdateMayOverflow Tells whether it is known if the IV update
  /// may (or will never) overflow for the suggested VF/UF in the given loop.
  /// Targets can use this information to select a more optimal tail folding
  /// style. The value conservatively defaults to true, such that no assumptions
  /// are made on overflow.
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const;

  // Parameters that control the loop peeling transformation.
  struct PeelingPreferences {
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, a
    /// peeling factor is selected based on profile information and other
    /// factors.
    unsigned PeelCount;
    /// Allow peeling off loop iterations.
    bool AllowPeeling;
    /// Allow peeling off loop iterations for loop nests.
    bool AllowLoopNestsPeeling;
    /// Allow peeling based on profile. Used to enable peeling off all
    /// iterations based on the provided profile.
    /// If the value is true the peeling cost model can decide to peel only
    /// some iterations and in this case it will set this to false.
    bool PeelProfiledIterations;
  };

  /// Get target-customized preferences for the generic loop peeling
  /// transformation. The caller will initialize \p PP with the current
  /// target-independent defaults with information from \p L and \p SE.
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) const;

  /// Targets can implement their own combinations for target-specific
  /// intrinsics. This function will be called from the InstCombine pass every
  /// time a target-specific intrinsic is encountered.
  ///
  /// \returns std::nullopt to not do anything target specific or a value that
  /// will be returned from the InstCombiner. It is possible to return null and
  /// stop further processing of the intrinsic by returning nullptr.
  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask,
      KnownBits &Known, bool &KnownBitsComputed) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  /// @}

  /// \name Scalar Target Information
  /// @{

  /// Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. HW
  /// support is considered "Fast" if it can outperform, or is on a par
  /// with, the SW implementation when the population is sparse; otherwise, it
  /// is considered "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// Return true if adding the specified scalable immediate is legal, that is,
  /// the target has add instructions which can add a register with the
  /// immediate (multiplied by vscale) without having to materialize the
  /// immediate into a register.
  bool isLegalAddScalableImmediate(int64_t Imm) const;

  /// Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// If target returns true in LSRWithInstrQueries(), I may be valid.
  /// \param ScalableOffset represents a quantity of bytes multiplied by vscale,
  /// an invariant value known only at runtime. Most targets should not accept
  /// a scalable offset.
  ///
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0, Instruction *I = nullptr,
                             int64_t ScalableOffset = 0) const;

  /// Return true if the LSR cost of C1 is lower than C2.
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const;

  /// Return true if LSR's major cost is the number of registers. Targets which
  /// implement their own isLSRCostLess and unset number of registers as major
  /// cost should return false, otherwise return true.
  bool isNumRegsMajorCostOfLSR() const;

  /// Return true if LSR should drop a found solution if it's calculated to be
  /// less profitable than the baseline.
  bool shouldDropLSRSolutionIfLessProfitable() const;

  /// \returns true if LSR should not optimize a chain that includes \p I.
  bool isProfitableLSRChainElement(Instruction *I) const;

  /// Return true if the target can fuse a compare and branch.
  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
  /// calculation for the instructions in a loop.
  bool canMacroFuseCmp() const;

  /// Return true if the target can save a compare for loop count, for example
  /// a hardware loop saves a compare.
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const;

  enum AddressingModeKind {
    AMK_PreIndexed,
    AMK_PostIndexed,
    AMK_None
  };

  /// Return the preferred addressing mode LSR should make efforts to generate.
  AddressingModeKind getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const;

  /// Return true if the target supports masked store.
  bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked load.
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports nontemporal store.
  bool isLegalNTStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports nontemporal load.
  bool isLegalNTLoad(Type *DataType, Align Alignment) const;

  /// \returns true if the target supports broadcasting a load to a vector of
  /// type <NumElements x ElementTy>.
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;

  /// Return true if the target supports masked scatter.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked gather.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
  /// Return true if the target forces scalarizing of llvm.masked.gather
  /// intrinsics.
  bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
  /// Return true if the target forces scalarizing of llvm.masked.scatter
  /// intrinsics.
  bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;

  /// Return true if the target supports masked compress store.
  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked expand load.
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports strided load.
  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const;

  // Return true if the target supports masked vector histograms.
  bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const;

  /// Return true if this is an alternating opcode pattern that can be lowered
  /// to a single instruction on the target. In X86 this is for the addsub
  /// instruction which corresponds to a Shuffle + FAdd + FSub pattern in IR.
  /// This function expects two opcodes: \p Opcode0 and \p Opcode1 being
  /// selected by \p OpcodeMask. The mask contains one bit per lane and is `0`
  /// when \p Opcode0 is selected and `1` when \p Opcode1 is selected.
  /// \p VecTy is the vector type of the instruction to be generated.
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;

  /// Return true if we should be enabling ordered reductions for the target.
  bool enableOrderedReductions() const;

  /// Return true if the target has a unified operation to calculate division
  /// and remainder. If so, the additional implicit multiplication and
  /// subtraction required to calculate a remainder from division are free. This
  /// can enable more aggressive transformations for division and remainder than
  /// would typically be allowed using throughput or size cost models.
  bool hasDivRemOp(Type *DataType, bool IsSigned) const;

  /// Return true if the given instruction (assumed to be a memory access
  /// instruction) has a volatile variant. If that's the case then we can avoid
  /// addrspacecast to generic AS for volatile loads/stores. Default
  /// implementation returns false, which prevents address space inference for
  /// volatile loads/stores.
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;

  /// Return true if target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace = 0) const;

  /// Return true if the loop strength reduce pass should make
  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
  /// immediate offset and no index register.
  bool LSRWithInstrQueries() const;

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
  /// by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// Return true if it is profitable to hoist instructions in the
  /// then/else blocks to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  bool useAA() const;

  /// Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// Returns the estimated number of registers required to represent \p Ty.
  unsigned getRegUsageForType(Type *Ty) const;

  /// Return true if switches should be turned into lookup tables for the
  /// target.
  bool shouldBuildLookupTables() const;

  /// Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// Return true if lookup tables should be turned into relative lookup tables.
  bool shouldBuildRelLookupTables() const;

  /// Return true if the input function, which is cold at all call sites,
  /// should use the coldcc calling convention.
  bool useColdCCForColdCall(Function &F) const;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind) const;

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) const;

  /// If target has efficient vector element load/store instructions, it can
  /// return true here so that insertion/extraction costs are not added to
  /// the scalarization cost of a load/store.
  bool supportsEfficientVectorElementLoadStore() const;

  /// If the target supports tail calls.
  bool supportsTailCalls() const;

  /// If target supports tail call on \p CB
  bool supportsTailCallFor(const CallBase *CB) const;

  /// Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// Returns options for expansion of memcmp. IsZeroCmp is
  /// true if this is the expansion of memcmp(p1, p2, s) == 0.
  struct MemCmpExpansionOptions {
    // Return true if memcmp expansion is enabled.
    operator bool() const { return MaxNumLoads > 0; }

    // Maximum number of load operations.
    unsigned MaxNumLoads = 0;

    // The list of available load sizes (in bytes), sorted in decreasing order.
    SmallVector<unsigned, 8> LoadSizes;

    // For memcmp expansion when the memcmp result is only compared equal or
    // not-equal to 0, allow up to this number of load pairs per block. As an
    // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
    //   a0 = load2bytes &a[0]
    //   b0 = load2bytes &b[0]
    //   a2 = load1byte  &a[2]
    //   b2 = load1byte  &b[2]
    //   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
    unsigned NumLoadsPerBlock = 1;

    // Set to true to allow overlapping loads. For example, 7-byte compares can
    // be done with two 4-byte compares instead of 4+2+1-byte compares. This
    // requires all loads in LoadSizes to be doable in an unaligned way.
    bool AllowOverlappingLoads = false;

    // Sometimes, the amount of data that needs to be compared is smaller than
    // the standard register size, but it cannot be loaded with just one load
    // instruction. For example, if the size of the memory comparison is 6
    // bytes, we can handle it more efficiently by loading all 6 bytes in a
    // single block and generating an 8-byte number, instead of generating two
    // separate blocks with conditional jumps for 4 and 2 byte loads. This
    // approach simplifies the process and produces the comparison result as
    // normal. This array lists the allowed sizes of memcmp tails that can be
    // merged into one block.
    SmallVector<unsigned, 4> AllowedTailExpansions;
  };
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const;
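  // Example (illustrative sketch): the returned options gate and bound an
  // expansion of a memcmp-with-zero comparison.
  //   if (auto Options = TTI.enableMemCmpExpansion(/*OptSize=*/false,
  //                                                /*IsZeroCmp=*/true)) {
  //     // Options.MaxNumLoads and Options.LoadSizes bound the expansion.
  //   }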

  /// Should the Select Optimization pass be enabled and run.
  bool enableSelectOptimize() const;

  /// Should the Select Optimization pass treat the given instruction like a
  /// select, potentially converting it to a conditional branch. This can
  /// include select-like instructions like or(zext(c), x) that can be converted
  /// to selects.
  bool shouldTreatInstructionLikeSelect(const Instruction *I) const;

  /// Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
  /// vector loads/stores.
  bool enableMaskedInterleavedAccessVectorization() const;

  /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because vector and scalar floating-point
  /// semantics may differ. For example, ARM NEON v7 SIMD math does not
  /// support IEEE-754 denormal numbers, while depending on the platform,
  /// scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      Align Alignment = Align(1),
                                      unsigned *Fast = nullptr) const;

  /// Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// Return true if the cost of the instruction is too high to speculatively
  /// execute and should be kept behind a branch.
  /// This normally just wraps around a getInstructionCost() call, but some
  /// targets might report a low TCK_SizeAndLatency value that is incompatible
  /// with the fixed TCC_Expensive value.
  /// NOTE: This assumes the instruction passes isSafeToSpeculativelyExecute().
  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const;

  /// Return true if it is faster to check if a floating-point value is NaN
  /// (or not-NaN) versus a comparison against a constant FP zero value.
  /// Targets should override this if materializing a 0.0 for comparison is
  /// generally as cheap as checking for ordered/unordered.
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;

  /// Return the expected cost of supporting the floating point operation
  /// of the specified type.
  InstructionCost getFPOpCost(Type *Ty) const;

  /// Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) const;

  /// Return the expected cost of materialization for the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const;
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) const;

  /// Return the expected cost for the given integer when optimising
  /// for size. This is different than the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as Aarch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) const;

  /// It can be advantageous to detach complex constants from their uses to make
  /// their generation cheaper. This hook allows targets to report when such
  /// transformations might negatively affect the code generation of the
  /// underlying operation. The motivating example is divides, where hoisting
  /// constants prevents the code generator from transforming them into
  /// combinations of simpler operations.
  bool preferToKeepConstantsAttached(const Instruction &Inst,
                                     const Function &Fn) const;

  /// @}

  /// \name Vector Target Information
  /// @{

  /// The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,        ///< Broadcast element 0 to all other elements.
    SK_Reverse,          ///< Reverse the order of the vector.
    SK_Select,           ///< Selects elements from the corresponding lane of
                         ///< either source operand. This is equivalent to a
                         ///< vector select with a constant condition operand.
    SK_Transpose,        ///< Transpose two vectors.
    SK_InsertSubvector,  ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector, ///< ExtractSubvector. Index indicates start offset.
    SK_PermuteTwoSrc,    ///< Merge elements from two source vectors into one
                         ///< with any shuffle mask.
    SK_PermuteSingleSrc, ///< Shuffle elements of single source vector with any
                         ///< shuffle mask.
    SK_Splice            ///< Concatenates elements from the first input vector
                         ///< with elements of the second input vector,
                         ///< returning a vector of the same type as the input
                         ///< vectors. Index indicates start offset in the
                         ///< first input vector.
  };

  /// Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is uniform constant.
    OK_NonUniformConstantValue // Operand is a non uniform constant value.
  };

  /// Additional properties of an operand's values.
  enum OperandValueProperties {
    OP_None = 0,
    OP_PowerOf2 = 1,
    OP_NegatedPowerOf2 = 2,
  };

  // Describe the values an operand can take. We're in the process
  // of migrating uses of OperandValueKind and OperandValueProperties
  // to use this class, and then will change the internal representation.
  struct OperandValueInfo {
    OperandValueKind Kind = OK_AnyValue;
    OperandValueProperties Properties = OP_None;

    bool isConstant() const {
      return Kind == OK_UniformConstantValue ||
             Kind == OK_NonUniformConstantValue;
    }
    bool isUniform() const {
      return Kind == OK_UniformConstantValue || Kind == OK_UniformValue;
    }
    bool isPowerOf2() const {
      return Properties == OP_PowerOf2;
    }
    bool isNegatedPowerOf2() const {
      return Properties == OP_NegatedPowerOf2;
    }

    OperandValueInfo getNoProps() const {
      return {Kind, OP_None};
    }
  };
1111
1112 /// \return the number of registers in the target-provided register class.
1113 unsigned getNumberOfRegisters(unsigned ClassID) const;
1114
1115 /// \return true if the target supports load/store that enables fault
1116 /// suppression of memory operands when the source condition is false.
1117 bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const;
1118
1119 /// \return the target-provided register class ID for the provided type,
1120 /// accounting for type promotion and other type-legalization techniques that
1121 /// the target might apply. However, it specifically does not account for the
1122 /// scalarization or splitting of vector types. Should a vector type require
1123 /// scalarization or splitting into multiple underlying vector registers, that
1124 /// type should be mapped to a register class containing no registers.
1125 /// Specifically, this is designed to provide a simple, high-level view of the
1126 /// register allocation later performed by the backend. These register classes
1127 /// don't necessarily map onto the register classes used by the backend.
1128 /// FIXME: It's not currently possible to determine how many registers
1129 /// are used by the provided type.
1130 unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
1131
1132 /// \return the target-provided register class name
1133 const char *getRegisterClassName(unsigned ClassID) const;
1134
1135 enum RegisterKind { RGK_Scalar, RGK_FixedWidthVector, RGK_ScalableVector };
1136
1137 /// \return The width of the largest scalar or vector register type.
1138 TypeSize getRegisterBitWidth(RegisterKind K) const;
1139
1140 /// \return The width of the smallest vector register type.
1141 unsigned getMinVectorRegisterBitWidth() const;
1142
1143 /// \return The maximum value of vscale if the target specifies an
1144 /// architectural maximum vector length, and std::nullopt otherwise.
1145 std::optional<unsigned> getMaxVScale() const;
1146
1147 /// \return the value of vscale to tune the cost model for.
1148 std::optional<unsigned> getVScaleForTuning() const;
1149
1150 /// \return true if vscale is known to be a power of 2
1151 bool isVScaleKnownToBeAPowerOfTwo() const;
1152
1153 /// \return True if the vectorization factor should be chosen to
1154 /// make the vector of the smallest element type match the size of a
1155 /// vector register. For wider element types, this could result in
1156 /// creating vectors that span multiple vector registers.
1157 /// If false, the vectorization factor will be chosen based on the
1158 /// size of the widest element type.
1159 /// \p K Register Kind for vectorization.
1160 bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;
1161
1162 /// \return The minimum vectorization factor for types of given element
1163 /// bit width, or 0 if there is no minimum VF. The returned value only
1164 /// applies when shouldMaximizeVectorBandwidth returns true.
1165 /// If IsScalable is true, the returned ElementCount must be a scalable VF.
1166 ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;
1167
1168 /// \return The maximum vectorization factor for types of given element
1169 /// bit width and opcode, or 0 if there is no maximum VF.
1170 /// Currently only used by the SLP vectorizer.
1171 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
1172
1173 /// \return The minimum vectorization factor for the store instruction. Given
1174 /// the initial estimation of the minimum vector factor and store value type,
1175 /// it tries to find possible lowest VF, which still might be profitable for
1176 /// the vectorization.
1177 /// \param VF Initial estimation of the minimum vector factor.
1178 /// \param ScalarMemTy Scalar memory type of the store operation.
1179 /// \param ScalarValTy Scalar type of the stored value.
1180 /// Currently only used by the SLP vectorizer.
1181 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
1182 Type *ScalarValTy) const;
1183
1184 /// \return True if it should be considered for address type promotion.
1185 /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
1186 /// profitable without finding other extensions fed by the same input.
1187 bool shouldConsiderAddressTypePromotion(
1188 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
1189
1190 /// \return The size of a cache line in bytes.
1191 unsigned getCacheLineSize() const;
1192
1193 /// The possible cache levels
1194 enum class CacheLevel {
1195 L1D, // The L1 data cache
1196 L2D, // The L2 data cache
1197
1198     // We currently do not model L3 caches, as their sizes differ widely between
1199     // microarchitectures. Also, we do not yet have a use for L3 cache
1200     // size modeling.
1201 };
1202
1203 /// \return The size of the cache level in bytes, if available.
1204 std::optional<unsigned> getCacheSize(CacheLevel Level) const;
1205
1206 /// \return The associativity of the cache level, if available.
1207 std::optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
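  // Illustrative sketch, not part of the interface: a client tuning for
  // locality can fall back to an assumed default when the target reports no
  // L1 size (the 32 KiB fallback is an arbitrary example value):
  //   unsigned L1Bytes =
  //       TTI.getCacheSize(TargetTransformInfo::CacheLevel::L1D)
  //           .value_or(32768);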
1208
1209 /// \return The minimum architectural page size for the target.
1210 std::optional<unsigned> getMinPageSize() const;
1211
1212 /// \return How much before a load we should place the prefetch
1213 /// instruction. This is currently measured in number of
1214 /// instructions.
1215 unsigned getPrefetchDistance() const;
1216
1217 /// Some HW prefetchers can handle accesses up to a certain constant stride.
1218 /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
1219 /// and the arguments provided are meant to serve as a basis for deciding this
1220 /// for a particular loop.
1221 ///
1222 /// \param NumMemAccesses Number of memory accesses in the loop.
1223 /// \param NumStridedMemAccesses Number of the memory accesses that
1224 /// ScalarEvolution could find a known stride
1225 /// for.
1226 /// \param NumPrefetches Number of software prefetches that will be
1227 /// emitted as determined by the addresses
1228 /// involved and the cache line size.
1229 /// \param HasCall True if the loop contains a call.
1230 ///
1231 /// \return This is the minimum stride in bytes where it makes sense to start
1232 /// adding SW prefetches. The default is 1, i.e. prefetch with any
1233 /// stride.
1234 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1235 unsigned NumStridedMemAccesses,
1236 unsigned NumPrefetches, bool HasCall) const;
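  // Illustrative sketch, not part of the interface: a software-prefetch pass
  // might combine this hook with getMaxPrefetchIterationsAhead below.
  // `StrideBytes` and `ItersAhead` are assumed to have been computed by the
  // pass; the remaining names are the documented parameters.
  //   bool WorthPrefetching =
  //       StrideBytes >= TTI.getMinPrefetchStride(NumMemAccesses,
  //                                               NumStridedMemAccesses,
  //                                               NumPrefetches, HasCall) &&
  //       ItersAhead <= TTI.getMaxPrefetchIterationsAhead();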
1237
1238 /// \return The maximum number of iterations to prefetch ahead. If
1239 /// the required number of iterations is more than this number, no
1240 /// prefetching is performed.
1241 unsigned getMaxPrefetchIterationsAhead() const;
1242
1243 /// \return True if prefetching should also be done for writes.
1244 bool enableWritePrefetching() const;
1245
1246   /// \return True if the target wants to issue a prefetch in address space \p AS.
1247 bool shouldPrefetchAddressSpace(unsigned AS) const;
1248
1249 /// \return The maximum interleave factor that any transform should try to
1250 /// perform for this target. This number depends on the level of parallelism
1251 /// and the number of execution units in the CPU.
1252 unsigned getMaxInterleaveFactor(ElementCount VF) const;
1253
1254 /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
1255 static OperandValueInfo getOperandInfo(const Value *V);
1256
1257 /// This is an approximation of reciprocal throughput of a math/logic op.
1258 /// A higher cost indicates less expected throughput.
1259 /// From Agner Fog's guides, reciprocal throughput is "the average number of
1260 /// clock cycles per instruction when the instructions are not part of a
1261 /// limiting dependency chain."
1262 /// Therefore, costs should be scaled to account for multiple execution units
1263 /// on the target that can process this type of instruction. For example, if
1264 /// there are 5 scalar integer units and 2 vector integer units that can
1265 /// calculate an 'add' in a single cycle, this model should indicate that the
1266 /// cost of the vector add instruction is 2.5 times the cost of the scalar
1267 /// add instruction.
1268 /// \p Args is an optional argument which holds the instruction operands
1269 /// values so the TTI can analyze those values searching for special
1270 /// cases or optimizations based on those values.
1271 /// \p CxtI is the optional original context instruction, if one exists, to
1272 /// provide even more information.
1273 /// \p TLibInfo is used to search for platform specific vector library
1274 /// functions for instructions that might be converted to calls (e.g. frem).
1275 InstructionCost getArithmeticInstrCost(
1276 unsigned Opcode, Type *Ty,
1277 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1278 TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
1279 TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
1280 ArrayRef<const Value *> Args = std::nullopt,
1281 const Instruction *CxtI = nullptr,
1282 const TargetLibraryInfo *TLibInfo = nullptr) const;
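  // Illustrative sketch, not part of the interface. Assuming `TTI` and an
  // LLVMContext `Ctx`, the reciprocal throughput of a <4 x i32> add whose
  // second operand is a uniform power-of-two constant could be queried as:
  //   auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  //   InstructionCost Cost = TTI.getArithmeticInstrCost(
  //       Instruction::Add, VecTy, TargetTransformInfo::TCK_RecipThroughput,
  //       {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  //       {TargetTransformInfo::OK_UniformConstantValue,
  //        TargetTransformInfo::OP_PowerOf2});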
1283
1284   /// Returns the cost estimation for an alternating opcode pattern that can be
1285   /// lowered to a single instruction on the target. In X86 this is for the
1286   /// addsub instruction which corresponds to a Shuffle + FAdd + FSub pattern in
1287   /// IR. This function expects two opcodes: \p Opcode0 and \p Opcode1 being
1288   /// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
1289   /// when \p Opcode0 is selected and `1` when \p Opcode1 is selected.
1290 /// \p VecTy is the vector type of the instruction to be generated.
1291 InstructionCost getAltInstrCost(
1292 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
1293 const SmallBitVector &OpcodeMask,
1294 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1295
1296 /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
1297 /// The exact mask may be passed as Mask, or else the array will be empty.
1298 /// The index and subtype parameters are used by the subvector insertion and
1299 /// extraction shuffle kinds to show the insert/extract point and the type of
1300 /// the subvector being inserted/extracted. The operands of the shuffle can be
1301 /// passed through \p Args, which helps improve the cost estimation in some
1302 /// cases, like in broadcast loads.
1303 /// NOTE: For subvector extractions Tp represents the source type.
1304 InstructionCost getShuffleCost(
1305 ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask = std::nullopt,
1306 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, int Index = 0,
1307 VectorType *SubTp = nullptr, ArrayRef<const Value *> Args = std::nullopt,
1308 const Instruction *CxtI = nullptr) const;
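  // Illustrative sketch, not part of the interface. Assuming `TTI`, a vector
  // type `VecTy`, and the SK_Broadcast shuffle kind, the cost of a splat of
  // lane 0 can be queried without an explicit mask:
  //   InstructionCost SplatCost =
  //       TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy);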
1309
1310 /// Represents a hint about the context in which a cast is used.
1311 ///
1312 /// For zext/sext, the context of the cast is the operand, which must be a
1313   /// load of some kind. For trunc, the context of the cast is the single
1314 /// user of the instruction, which must be a store of some kind.
1315 ///
1316 /// This enum allows the vectorizer to give getCastInstrCost an idea of the
1317 /// type of cast it's dealing with, as not every cast is equal. For instance,
1318 /// the zext of a load may be free, but the zext of an interleaving load can
1319   /// be (very) expensive!
1320 ///
1321 /// See \c getCastContextHint to compute a CastContextHint from a cast
1322 /// Instruction*. Callers can use it if they don't need to override the
1323 /// context and just want it to be calculated from the instruction.
1324 ///
1325 /// FIXME: This handles the types of load/store that the vectorizer can
1326 /// produce, which are the cases where the context instruction is most
1327 /// likely to be incorrect. There are other situations where that can happen
1328 /// too, which might be handled here but in the long run a more general
1329   /// solution of costing multiple instructions at the same time may be better.
1330 enum class CastContextHint : uint8_t {
1331 None, ///< The cast is not used with a load/store of any kind.
1332 Normal, ///< The cast is used with a normal load/store.
1333 Masked, ///< The cast is used with a masked load/store.
1334 GatherScatter, ///< The cast is used with a gather/scatter.
1335 Interleave, ///< The cast is used with an interleaved load/store.
1336 Reversed, ///< The cast is used with a reversed load/store.
1337 };
1338
1339 /// Calculates a CastContextHint from \p I.
1340 /// This should be used by callers of getCastInstrCost if they wish to
1341 /// determine the context from some instruction.
1342 /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
1343 /// or if it's another type of cast.
1344 static CastContextHint getCastContextHint(const Instruction *I);
1345
1346 /// \return The expected cost of cast instructions, such as bitcast, trunc,
1347 /// zext, etc. If there is an existing instruction that holds Opcode, it
1348 /// may be passed in the 'I' parameter.
1349 InstructionCost
1350 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1351 TTI::CastContextHint CCH,
1352 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1353 const Instruction *I = nullptr) const;
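  // Illustrative sketch, not part of the interface. Assuming `TTI` and an
  // Instruction pointer `ZExt` whose opcode is ZExt, the context can be
  // derived from the IR instead of being guessed:
  //   TargetTransformInfo::CastContextHint CCH =
  //       TargetTransformInfo::getCastContextHint(ZExt);
  //   InstructionCost Cost = TTI.getCastInstrCost(
  //       ZExt->getOpcode(), ZExt->getType(), ZExt->getOperand(0)->getType(),
  //       CCH, TargetTransformInfo::TCK_SizeAndLatency, ZExt);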
1354
1355 /// \return The expected cost of a sign- or zero-extended vector extract. Use
1356 /// Index = -1 to indicate that there is no information about the index value.
1357 InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1358 VectorType *VecTy,
1359 unsigned Index) const;
1360
1361 /// \return The expected cost of control-flow related instructions such as
1362 /// Phi, Ret, Br, Switch.
1363 InstructionCost
1364 getCFInstrCost(unsigned Opcode,
1365 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1366 const Instruction *I = nullptr) const;
1367
1368 /// \returns The expected cost of compare and select instructions. If there
1369 /// is an existing instruction that holds Opcode, it may be passed in the
1370 /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
1371 /// is using a compare with the specified predicate as condition. When vector
1372 /// types are passed, \p VecPred must be used for all lanes.
1373 InstructionCost
1374 getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1375 CmpInst::Predicate VecPred,
1376 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1377 const Instruction *I = nullptr) const;
1378
1379 /// \return The expected cost of vector Insert and Extract.
1380 /// Use -1 to indicate that there is no information on the index value.
1381 /// This is used when the instruction is not available; a typical use
1382 /// case is to provision the cost of vectorization/scalarization in
1383 /// vectorizer passes.
1384 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1385 TTI::TargetCostKind CostKind,
1386 unsigned Index = -1, Value *Op0 = nullptr,
1387 Value *Op1 = nullptr) const;
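  // Illustrative sketch, not part of the interface. Assuming `TTI` and a
  // FixedVectorType `VecTy`, the cost of fully scalarizing a vector by
  // extracting every lane can be provisioned as:
  //   InstructionCost ExtractCost = 0;
  //   for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I)
  //     ExtractCost += TTI.getVectorInstrCost(
  //         Instruction::ExtractElement, VecTy,
  //         TargetTransformInfo::TCK_RecipThroughput, I);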
1388
1389 /// \return The expected cost of vector Insert and Extract.
1390   /// This is used when the instruction is available, and the implementation
1391 /// asserts 'I' is not nullptr.
1392 ///
1393 /// A typical suitable use case is cost estimation when vector instruction
1394 /// exists (e.g., from basic blocks during transformation).
1395 InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
1396 TTI::TargetCostKind CostKind,
1397 unsigned Index = -1) const;
1398
1399 /// \return The cost of replication shuffle of \p VF elements typed \p EltTy
1400 /// \p ReplicationFactor times.
1401 ///
1402 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
1403 /// <0,0,0,1,1,1,2,2,2,3,3,3>
1404 InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
1405 int VF,
1406 const APInt &DemandedDstElts,
1407 TTI::TargetCostKind CostKind) const;
1408
1409 /// \return The cost of Load and Store instructions.
1410 InstructionCost
1411 getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1412 unsigned AddressSpace,
1413 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1414 OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
1415 const Instruction *I = nullptr) const;
1416
1417 /// \return The cost of VP Load and Store instructions.
1418 InstructionCost
1419 getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1420 unsigned AddressSpace,
1421 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1422 const Instruction *I = nullptr) const;
1423
1424 /// \return The cost of masked Load and Store instructions.
1425 InstructionCost getMaskedMemoryOpCost(
1426 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1427 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1428
1429   /// \return The cost of a Gather or Scatter operation.
1430   /// \p Opcode - the type of memory access, Load or Store
1431 /// \p DataTy - a vector type of the data to be loaded or stored
1432 /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
1433 /// \p VariableMask - true when the memory access is predicated with a mask
1434 /// that is not a compile-time constant
1435 /// \p Alignment - alignment of single element
1436 /// \p I - the optional original context instruction, if one exists, e.g. the
1437 /// load/store to transform or the call to the gather/scatter intrinsic
1438 InstructionCost getGatherScatterOpCost(
1439 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1440 Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1441 const Instruction *I = nullptr) const;
1442
1443 /// \return The cost of strided memory operations.
1444   /// \p Opcode - the type of memory access, Load or Store
1445 /// \p DataTy - a vector type of the data to be loaded or stored
1446 /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
1447 /// \p VariableMask - true when the memory access is predicated with a mask
1448 /// that is not a compile-time constant
1449 /// \p Alignment - alignment of single element
1450 /// \p I - the optional original context instruction, if one exists, e.g. the
1451 /// load/store to transform or the call to the gather/scatter intrinsic
1452 InstructionCost getStridedMemoryOpCost(
1453 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1454 Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1455 const Instruction *I = nullptr) const;
1456
1457 /// \return The cost of the interleaved memory operation.
1458 /// \p Opcode is the memory operation code
1459 /// \p VecTy is the vector type of the interleaved access.
1460 /// \p Factor is the interleave factor
1461   /// \p Indices are the indices for interleaved load members (as an
1462   /// interleaved load allows gaps)
1463 /// \p Alignment is the alignment of the memory operation
1464 /// \p AddressSpace is address space of the pointer.
1465 /// \p UseMaskForCond indicates if the memory access is predicated.
1466 /// \p UseMaskForGaps indicates if gaps should be masked.
1467 InstructionCost getInterleavedMemoryOpCost(
1468 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1469 Align Alignment, unsigned AddressSpace,
1470 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1471 bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
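  // Illustrative sketch, not part of the interface. Assuming `TTI` and an
  // LLVMContext `Ctx`, the cost of an interleaved load of two i32 members
  // out of a <8 x i32> wide vector (Factor = 2, no gaps) could be queried as:
  //   auto *WideTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 8);
  //   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
  //       Instruction::Load, WideTy, /*Factor=*/2, /*Indices=*/{0, 1},
  //       Align(4), /*AddressSpace=*/0);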
1472
1473   /// A helper function to determine whether an ordered reduction algorithm
1474   /// is required for the given set of FastMathFlags \p FMF.
1475   static bool requiresOrderedReduction(std::optional<FastMathFlags> FMF) {
1476 return FMF && !(*FMF).allowReassoc();
1477 }
1478
1479 /// Calculate the cost of vector reduction intrinsics.
1480 ///
1481 /// This is the cost of reducing the vector value of type \p Ty to a scalar
1482 /// value using the operation denoted by \p Opcode. The FastMathFlags
1483 /// parameter \p FMF indicates what type of reduction we are performing:
1484 /// 1. Tree-wise. This is the typical 'fast' reduction performed that
1485 /// involves successively splitting a vector into half and doing the
1486 /// operation on the pair of halves until you have a scalar value. For
1487 /// example:
1488 /// (v0, v1, v2, v3)
1489 /// ((v0+v2), (v1+v3), undef, undef)
1490 /// ((v0+v2+v1+v3), undef, undef, undef)
1491 /// This is the default behaviour for integer operations, whereas for
1492 /// floating point we only do this if \p FMF indicates that
1493 /// reassociation is allowed.
1494 /// 2. Ordered. For a vector with N elements this involves performing N
1495 /// operations in lane order, starting with an initial scalar value, i.e.
1496 /// result = InitVal + v0
1497 /// result = result + v1
1498 /// result = result + v2
1499 /// result = result + v3
1500 /// This is only the case for FP operations and when reassociation is not
1501 /// allowed.
1502 ///
1503 InstructionCost getArithmeticReductionCost(
1504 unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
1505 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
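  // Illustrative sketch, not part of the interface. Assuming `TTI` and a
  // VectorType `VecTy` of floats, an empty FastMathFlags forces the ordered
  // form, while allowing reassociation permits the cheaper tree-wise form:
  //   FastMathFlags FMF;  // reassoc not set -> ordered reduction required
  //   InstructionCost Ordered =
  //       TTI.getArithmeticReductionCost(Instruction::FAdd, VecTy, FMF);
  //   FMF.setAllowReassoc();  // tree-wise reduction now permitted
  //   InstructionCost TreeWise =
  //       TTI.getArithmeticReductionCost(Instruction::FAdd, VecTy, FMF);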
1506
1507 InstructionCost getMinMaxReductionCost(
1508 Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF = FastMathFlags(),
1509 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1510
1511 /// Calculate the cost of an extended reduction pattern, similar to
1512 /// getArithmeticReductionCost of an Add reduction with multiply and optional
1513   /// extensions. This is the cost of:
1514   ///   ResTy vecreduce.add(mul(A, B)).
1515   ///   ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B))).
1516 InstructionCost getMulAccReductionCost(
1517 bool IsUnsigned, Type *ResTy, VectorType *Ty,
1518 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1519
1520 /// Calculate the cost of an extended reduction pattern, similar to
1521 /// getArithmeticReductionCost of a reduction with an extension.
1522   /// This is the cost of:
1523 /// ResTy vecreduce.opcode(ext(Ty A)).
1524 InstructionCost getExtendedReductionCost(
1525 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1526 FastMathFlags FMF,
1527 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1528
1529 /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
1530 /// Three cases are handled: 1. scalar instruction 2. vector instruction
1531 /// 3. scalar instruction which is to be vectorized.
1532 InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1533 TTI::TargetCostKind CostKind) const;
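  // Illustrative sketch, not part of the interface, assuming `TTI`, an
  // LLVMContext `Ctx`, and the IntrinsicCostAttributes constructor taking an
  // intrinsic ID, return type, and argument types:
  //   Type *FloatTy = Type::getFloatTy(Ctx);
  //   IntrinsicCostAttributes Attrs(Intrinsic::sqrt, FloatTy, {FloatTy});
  //   InstructionCost Cost = TTI.getIntrinsicInstrCost(
  //       Attrs, TargetTransformInfo::TCK_RecipThroughput);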
1534
1535 /// \returns The cost of Call instructions.
1536 InstructionCost getCallInstrCost(
1537 Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1538 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
1539
1540 /// \returns The number of pieces into which the provided type must be
1541 /// split during legalization. Zero is returned when the answer is unknown.
1542 unsigned getNumberOfParts(Type *Tp) const;
1543
1544 /// \returns The cost of the address computation. For most targets this can be
1545 /// merged into the instruction indexing mode. Some targets might want to
1546 /// distinguish between address computation for memory operations on vector
1547 /// types and scalar types. Such targets should override this function.
1548   /// The 'SE' parameter holds a pointer to the scalar evolution object which
1549   /// is used to get the Ptr step value in case of constant stride.
1550 /// The 'Ptr' parameter holds SCEV of the access pointer.
1551 InstructionCost getAddressComputationCost(Type *Ty,
1552 ScalarEvolution *SE = nullptr,
1553 const SCEV *Ptr = nullptr) const;
1554
1555 /// \returns The cost, if any, of keeping values of the given types alive
1556 /// over a callsite.
1557 ///
1558 /// Some types may require the use of register classes that do not have
1559 /// any callee-saved registers, so would require a spill and fill.
1560 InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
1561
1562 /// \returns True if the intrinsic is a supported memory intrinsic. Info
1563   /// will contain additional information - whether the intrinsic may read
1564   /// from or write to memory, its volatility, and the pointer. Info is
1565   /// undefined if false is returned.
1566 bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
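  // Illustrative sketch, not part of the interface. Assuming `TTI` and an
  // IntrinsicInst pointer `II`, a redundancy-elimination pass could treat a
  // target intrinsic as a plain load only when the target vouches for it:
  //   MemIntrinsicInfo Info;
  //   if (TTI.getTgtMemIntrinsic(II, Info) && Info.ReadMem && !Info.WriteMem &&
  //       Info.isUnordered()) {
  //     // Safe to treat as an ordinary load from Info.PtrVal.
  //   }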
1567
1568 /// \returns The maximum element size, in bytes, for an element
1569 /// unordered-atomic memory intrinsic.
1570 unsigned getAtomicMemIntrinsicMaxElementSize() const;
1571
1572 /// \returns A value which is the result of the given memory intrinsic. New
1573 /// instructions may be created to extract the result from the given intrinsic
1574 /// memory operation. Returns nullptr if the target cannot create a result
1575 /// from the given intrinsic.
1576 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1577 Type *ExpectedType) const;
1578
1579 /// \returns The type to use in a loop expansion of a memcpy call.
1580 Type *getMemcpyLoopLoweringType(
1581 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
1582 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
1583 std::optional<uint32_t> AtomicElementSize = std::nullopt) const;
1584
1585 /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
1586 /// \param RemainingBytes The number of bytes to copy.
1587 ///
1588 /// Calculates the operand types to use when copying \p RemainingBytes of
1589 /// memory, where source and destination alignments are \p SrcAlign and
1590 /// \p DestAlign respectively.
1591 void getMemcpyLoopResidualLoweringType(
1592 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1593 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1594 Align SrcAlign, Align DestAlign,
1595 std::optional<uint32_t> AtomicCpySize = std::nullopt) const;
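  // Illustrative sketch, not part of the interface. Assuming `TTI` and an
  // LLVMContext `Ctx`, collecting the operand types for the 7 trailing bytes
  // of an expanded memcpy with byte-aligned operands:
  //   SmallVector<Type *, 4> Ops;
  //   TTI.getMemcpyLoopResidualLoweringType(Ops, Ctx, /*RemainingBytes=*/7,
  //                                         /*SrcAddrSpace=*/0,
  //                                         /*DestAddrSpace=*/0, Align(1),
  //                                         Align(1));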
1596
1597 /// \returns True if the two functions have compatible attributes for inlining
1598 /// purposes.
1599 bool areInlineCompatible(const Function *Caller,
1600 const Function *Callee) const;
1601
1602 /// Returns a penalty for invoking call \p Call in \p F.
1603 /// For example, if a function F calls a function G, which in turn calls
1604 /// function H, then getInlineCallPenalty(F, H()) would return the
1605 /// penalty of calling H from F, e.g. after inlining G into F.
1606 /// \p DefaultCallPenalty is passed to give a default penalty that
1607 /// the target can amend or override.
1608 unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
1609 unsigned DefaultCallPenalty) const;
1610
1611   /// \returns True if the caller and callee agree on how \p Types will be
1612   /// passed to or returned from the callee.
1614 /// \param Types List of types to check.
1615 bool areTypesABICompatible(const Function *Caller, const Function *Callee,
1616 const ArrayRef<Type *> &Types) const;
1617
1618 /// The type of load/store indexing.
1619 enum MemIndexedMode {
1620 MIM_Unindexed, ///< No indexing.
1621 MIM_PreInc, ///< Pre-incrementing.
1622 MIM_PreDec, ///< Pre-decrementing.
1623 MIM_PostInc, ///< Post-incrementing.
1624 MIM_PostDec ///< Post-decrementing.
1625 };
1626
1627 /// \returns True if the specified indexed load for the given type is legal.
1628 bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
1629
1630 /// \returns True if the specified indexed store for the given type is legal.
1631 bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
1632
1633 /// \returns The bitwidth of the largest vector type that should be used to
1634 /// load/store in the given address space.
1635 unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
1636
1637 /// \returns True if the load instruction is legal to vectorize.
1638 bool isLegalToVectorizeLoad(LoadInst *LI) const;
1639
1640 /// \returns True if the store instruction is legal to vectorize.
1641 bool isLegalToVectorizeStore(StoreInst *SI) const;
1642
1643 /// \returns True if it is legal to vectorize the given load chain.
1644 bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
1645 unsigned AddrSpace) const;
1646
1647 /// \returns True if it is legal to vectorize the given store chain.
1648 bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
1649 unsigned AddrSpace) const;
1650
1651 /// \returns True if it is legal to vectorize the given reduction kind.
1652 bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
1653 ElementCount VF) const;
1654
1655 /// \returns True if the given type is supported for scalable vectors
1656 bool isElementTypeLegalForScalableVector(Type *Ty) const;
1657
1658   /// \returns The new vector factor value if the target doesn't support loads
1659   /// of the given size or has a better vector factor.
1660 unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1661 unsigned ChainSizeInBytes,
1662 VectorType *VecTy) const;
1663
1664   /// \returns The new vector factor value if the target doesn't support stores
1665   /// of the given size or has a better vector factor.
1666 unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1667 unsigned ChainSizeInBytes,
1668 VectorType *VecTy) const;
1669
1670 /// Flags describing the kind of vector reduction.
1671 struct ReductionFlags {
1672 ReductionFlags() = default;
1673 bool IsMaxOp =
1674         false; ///< If the op is a min/max kind, true if it's a max operation.
1675 bool IsSigned = false; ///< Whether the operation is a signed int reduction.
1676 bool NoNaN =
1677 false; ///< If op is an fp min/max, whether NaNs may be present.
1678 };
1679
1680   /// \returns True if the target prefers fixed-width vectorization if the
1681 /// loop vectorizer's cost-model assigns an equal cost to the fixed and
1682 /// scalable version of the vectorized loop.
1683 bool preferFixedOverScalableIfEqualCost() const;
1684
1685   /// \returns True if the target prefers in-loop reductions.
1686 bool preferInLoopReduction(unsigned Opcode, Type *Ty,
1687 ReductionFlags Flags) const;
1688
1689   /// \returns True if the target prefers the reduction select kept in the loop
1690   /// when tail folding, i.e.
1691 /// loop:
1692 /// p = phi (0, s)
1693 /// a = add (p, x)
1694 /// s = select (mask, a, p)
1695 /// vecreduce.add(s)
1696 ///
1697 /// As opposed to the normal scheme of p = phi (0, a) which allows the select
1698 /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
1699 /// by the target, this can lead to cleaner code generation.
1700 bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
1701 ReductionFlags Flags) const;
1702
1703 /// Return true if the loop vectorizer should consider vectorizing an
1704 /// otherwise scalar epilogue loop.
1705 bool preferEpilogueVectorization() const;
1706
1707 /// \returns True if the target wants to expand the given reduction intrinsic
1708 /// into a shuffle sequence.
1709 bool shouldExpandReduction(const IntrinsicInst *II) const;
1710
1711 enum struct ReductionShuffle { SplitHalf, Pairwise };
1712
1713 /// \returns The shuffle sequence pattern used to expand the given reduction
1714 /// intrinsic.
1715 ReductionShuffle
1716 getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const;
1717
1718 /// \returns the size cost of rematerializing a GlobalValue address relative
1719 /// to a stack reload.
1720 unsigned getGISelRematGlobalCost() const;
1721
1722 /// \returns the lower bound of a trip count to decide on vectorization
1723 /// while tail-folding.
1724 unsigned getMinTripCountTailFoldingThreshold() const;
1725
1726 /// \returns True if the target supports scalable vectors.
1727 bool supportsScalableVectors() const;
1728
1729 /// \return true when scalable vectorization is preferred.
1730 bool enableScalableVectorization() const;
1731
1732 /// \name Vector Predication Information
1733 /// @{
1734   /// Whether the target supports the %evl parameter of VP intrinsics
1735   /// efficiently in hardware, for the given opcode and type/alignment (see
1736   /// LLVM Language Reference - "Vector Predication Intrinsics").
1737 /// Use of %evl is discouraged when that is not the case.
1738 bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
1739 Align Alignment) const;
1740
1741 struct VPLegalization {
1742 enum VPTransform {
1743 // keep the predicating parameter
1744 Legal = 0,
1745 // where legal, discard the predicate parameter
1746 Discard = 1,
1747 // transform into something else that is also predicating
1748 Convert = 2
1749 };
1750
1751 // How to transform the EVL parameter.
1752 // Legal: keep the EVL parameter as it is.
1753 // Discard: Ignore the EVL parameter where it is safe to do so.
1754 // Convert: Fold the EVL into the mask parameter.
1755 VPTransform EVLParamStrategy;
1756
1757 // How to transform the operator.
1758 // Legal: The target supports this operator.
1759 // Convert: Convert this to a non-VP operation.
1760 // The 'Discard' strategy is invalid.
1761 VPTransform OpStrategy;
1762
1763     bool shouldDoNothing() const {
1764 return (EVLParamStrategy == Legal) && (OpStrategy == Legal);
1765 }
1766     VPLegalization(VPTransform EVLParamStrategy, VPTransform OpStrategy)
1767 : EVLParamStrategy(EVLParamStrategy), OpStrategy(OpStrategy) {}
1768 };
1769
1770 /// \returns How the target needs this vector-predicated operation to be
1771 /// transformed.
1772 VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
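  // Illustrative sketch, not part of the interface. Assuming `TTI` and a
  // VPIntrinsic reference `VPI`, an expansion pass could skip operations the
  // target already handles natively:
  //   TargetTransformInfo::VPLegalization Strategy =
  //       TTI.getVPLegalizationStrategy(VPI);
  //   if (Strategy.shouldDoNothing())
  //     return; // The hardware supports this predicated operation directly.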
1773 /// @}
1774
1775 /// \returns Whether a 32-bit branch instruction is available in Arm or Thumb
1776 /// state.
1777 ///
1778 /// Used by the LowerTypeTests pass, which constructs an IR inline assembler
1779 /// node containing a jump table in a format suitable for the target, so it
1780 /// needs to know what format of jump table it can legally use.
1781 ///
1782 /// For non-Arm targets, this function isn't used. It defaults to returning
1783 /// false, but it shouldn't matter what it returns anyway.
1784 bool hasArmWideBranch(bool Thumb) const;
1785
1786 /// \return The maximum number of function arguments the target supports.
1787 unsigned getMaxNumArgs() const;
1788
1789 /// @}
1790
1791 private:
1792 /// The abstract base class used to type erase specific TTI
1793 /// implementations.
1794 class Concept;
1795
1796 /// The template model for the base class which wraps a concrete
1797 /// implementation in a type erased interface.
1798 template <typename T> class Model;
1799
1800 std::unique_ptr<Concept> TTIImpl;
1801 };
1802
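// Concept/Model below implement value-semantic type erasure: Concept is a
// pure-virtual mirror of the TTI hooks above, and Model<T> forwards every
// call to a concrete target implementation T. A minimal sketch of the idiom,
// using a hypothetical single-hook interface for illustration only:
//   class ConceptSketch {
//   public:
//     virtual ~ConceptSketch() = default;
//     virtual unsigned hook() const = 0;
//   };
//   template <typename T> class ModelSketch final : public ConceptSketch {
//     T Impl;
//   public:
//     ModelSketch(T Impl) : Impl(std::move(Impl)) {}
//     unsigned hook() const override { return Impl.hook(); }
//   };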
1803 class TargetTransformInfo::Concept {
1804 public:
1805 virtual ~Concept() = 0;
1806 virtual const DataLayout &getDataLayout() const = 0;
1807 virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
1808 ArrayRef<const Value *> Operands,
1809 Type *AccessType,
1810 TTI::TargetCostKind CostKind) = 0;
1811 virtual InstructionCost
1812 getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
1813 const TTI::PointersChainInfo &Info, Type *AccessTy,
1814 TTI::TargetCostKind CostKind) = 0;
1815 virtual unsigned getInliningThresholdMultiplier() const = 0;
1816 virtual unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const = 0;
1817 virtual unsigned
1818 getInliningCostBenefitAnalysisProfitableMultiplier() const = 0;
1819 virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
1820 virtual int getInlinerVectorBonusPercent() const = 0;
1821 virtual unsigned getCallerAllocaCost(const CallBase *CB,
1822 const AllocaInst *AI) const = 0;
1823 virtual InstructionCost getMemcpyCost(const Instruction *I) = 0;
1824 virtual uint64_t getMaxMemIntrinsicInlineSizeThreshold() const = 0;
1825 virtual unsigned
1826 getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
1827 ProfileSummaryInfo *PSI,
1828 BlockFrequencyInfo *BFI) = 0;
1829 virtual InstructionCost getInstructionCost(const User *U,
1830 ArrayRef<const Value *> Operands,
1831 TargetCostKind CostKind) = 0;
1832 virtual BranchProbability getPredictableBranchThreshold() = 0;
1833 virtual InstructionCost getBranchMispredictPenalty() = 0;
1834 virtual bool hasBranchDivergence(const Function *F = nullptr) = 0;
1835 virtual bool isSourceOfDivergence(const Value *V) = 0;
1836 virtual bool isAlwaysUniform(const Value *V) = 0;
1837 virtual bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
1838 virtual bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const = 0;
1839 virtual unsigned getFlatAddressSpace() = 0;
1840 virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1841 Intrinsic::ID IID) const = 0;
1842 virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
1843 virtual bool
1844 canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const = 0;
1845 virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
1846 virtual bool isSingleThreaded() const = 0;
1847 virtual std::pair<const Value *, unsigned>
1848 getPredicatedAddrSpace(const Value *V) const = 0;
1849 virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
1850 Value *OldV,
1851 Value *NewV) const = 0;
1852 virtual bool isLoweredToCall(const Function *F) = 0;
1853 virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
1854 UnrollingPreferences &UP,
1855 OptimizationRemarkEmitter *ORE) = 0;
1856 virtual void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1857 PeelingPreferences &PP) = 0;
1858 virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1859 AssumptionCache &AC,
1860 TargetLibraryInfo *LibInfo,
1861 HardwareLoopInfo &HWLoopInfo) = 0;
1862 virtual bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) = 0;
1863 virtual TailFoldingStyle
1864 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) = 0;
1865 virtual std::optional<Instruction *> instCombineIntrinsic(
1866 InstCombiner &IC, IntrinsicInst &II) = 0;
1867 virtual std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
1868 InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask,
1869 KnownBits & Known, bool &KnownBitsComputed) = 0;
1870 virtual std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
1871 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
1872 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
1873 std::function<void(Instruction *, unsigned, APInt, APInt &)>
1874 SimplifyAndSetOp) = 0;
1875 virtual bool isLegalAddImmediate(int64_t Imm) = 0;
1876 virtual bool isLegalAddScalableImmediate(int64_t Imm) = 0;
1877 virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
1878 virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
1879 int64_t BaseOffset, bool HasBaseReg,
1880 int64_t Scale, unsigned AddrSpace,
1881 Instruction *I,
1882 int64_t ScalableOffset) = 0;
1883 virtual bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
1884 const TargetTransformInfo::LSRCost &C2) = 0;
1885 virtual bool isNumRegsMajorCostOfLSR() = 0;
1886 virtual bool shouldDropLSRSolutionIfLessProfitable() const = 0;
1887 virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
1888 virtual bool canMacroFuseCmp() = 0;
1889 virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
1890 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1891 TargetLibraryInfo *LibInfo) = 0;
1892 virtual AddressingModeKind
1893 getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
1894 virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
1895 virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
1896 virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
1897 virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
1898 virtual bool isLegalBroadcastLoad(Type *ElementTy,
1899 ElementCount NumElements) const = 0;
1900 virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0;
1901 virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0;
1902 virtual bool forceScalarizeMaskedGather(VectorType *DataType,
1903 Align Alignment) = 0;
1904 virtual bool forceScalarizeMaskedScatter(VectorType *DataType,
1905 Align Alignment) = 0;
1906 virtual bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) = 0;
1907 virtual bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) = 0;
1908 virtual bool isLegalStridedLoadStore(Type *DataType, Align Alignment) = 0;
1909 virtual bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) = 0;
1910 virtual bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
1911 unsigned Opcode1,
1912 const SmallBitVector &OpcodeMask) const = 0;
1913 virtual bool enableOrderedReductions() = 0;
1914 virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
1915 virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
1916 virtual bool prefersVectorizedAddressing() = 0;
1917 virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
1918 StackOffset BaseOffset,
1919 bool HasBaseReg, int64_t Scale,
1920 unsigned AddrSpace) = 0;
1921 virtual bool LSRWithInstrQueries() = 0;
1922 virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
1923 virtual bool isProfitableToHoist(Instruction *I) = 0;
1924 virtual bool useAA() = 0;
1925 virtual bool isTypeLegal(Type *Ty) = 0;
1926 virtual unsigned getRegUsageForType(Type *Ty) = 0;
1927 virtual bool shouldBuildLookupTables() = 0;
1928 virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
1929 virtual bool shouldBuildRelLookupTables() = 0;
1930 virtual bool useColdCCForColdCall(Function &F) = 0;
1931 virtual InstructionCost getScalarizationOverhead(VectorType *Ty,
1932 const APInt &DemandedElts,
1933 bool Insert, bool Extract,
1934 TargetCostKind CostKind) = 0;
1935 virtual InstructionCost
1936 getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
1937 ArrayRef<Type *> Tys,
1938 TargetCostKind CostKind) = 0;
1939 virtual bool supportsEfficientVectorElementLoadStore() = 0;
1940 virtual bool supportsTailCalls() = 0;
1941 virtual bool supportsTailCallFor(const CallBase *CB) = 0;
1942 virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
1943 virtual MemCmpExpansionOptions
1944 enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
1945 virtual bool enableSelectOptimize() = 0;
1946 virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) = 0;
1947 virtual bool enableInterleavedAccessVectorization() = 0;
1948 virtual bool enableMaskedInterleavedAccessVectorization() = 0;
1949 virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
1950 virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
1951 unsigned BitWidth,
1952 unsigned AddressSpace,
1953 Align Alignment,
1954 unsigned *Fast) = 0;
1955 virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
1956 virtual bool haveFastSqrt(Type *Ty) = 0;
1957 virtual bool isExpensiveToSpeculativelyExecute(const Instruction *I) = 0;
1958 virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
1959 virtual InstructionCost getFPOpCost(Type *Ty) = 0;
1960 virtual InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
1961 const APInt &Imm, Type *Ty) = 0;
1962 virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
1963 TargetCostKind CostKind) = 0;
1964 virtual InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
1965 const APInt &Imm, Type *Ty,
1966 TargetCostKind CostKind,
1967 Instruction *Inst = nullptr) = 0;
1968 virtual InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
1969 const APInt &Imm, Type *Ty,
1970 TargetCostKind CostKind) = 0;
1971 virtual bool preferToKeepConstantsAttached(const Instruction &Inst,
1972 const Function &Fn) const = 0;
1973 virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0;
1974 virtual bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const = 0;
1975 virtual unsigned getRegisterClassForType(bool Vector,
1976 Type *Ty = nullptr) const = 0;
1977 virtual const char *getRegisterClassName(unsigned ClassID) const = 0;
1978 virtual TypeSize getRegisterBitWidth(RegisterKind K) const = 0;
1979 virtual unsigned getMinVectorRegisterBitWidth() const = 0;
1980 virtual std::optional<unsigned> getMaxVScale() const = 0;
1981 virtual std::optional<unsigned> getVScaleForTuning() const = 0;
1982 virtual bool isVScaleKnownToBeAPowerOfTwo() const = 0;
1983 virtual bool
1984 shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const = 0;
1985 virtual ElementCount getMinimumVF(unsigned ElemWidth,
1986 bool IsScalable) const = 0;
1987 virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
1988 virtual unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
1989 Type *ScalarValTy) const = 0;
1990 virtual bool shouldConsiderAddressTypePromotion(
1991 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
1992 virtual unsigned getCacheLineSize() const = 0;
1993 virtual std::optional<unsigned> getCacheSize(CacheLevel Level) const = 0;
1994 virtual std::optional<unsigned> getCacheAssociativity(CacheLevel Level)
1995 const = 0;
1996 virtual std::optional<unsigned> getMinPageSize() const = 0;
1997
1998 /// \return How much before a load we should place the prefetch
1999 /// instruction. This is currently measured in number of
2000 /// instructions.
2001 virtual unsigned getPrefetchDistance() const = 0;
2002
2003 /// \return Some HW prefetchers can handle accesses up to a certain
2004 /// constant stride. This is the minimum stride in bytes where it
2005 /// makes sense to start adding SW prefetches. The default is 1,
2006 /// i.e. prefetch with any stride. Sometimes prefetching is beneficial
2007 /// even below the HW prefetcher limit, and the arguments provided are
2008 /// meant to serve as a basis for deciding this for a particular loop.
2009 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
2010 unsigned NumStridedMemAccesses,
2011 unsigned NumPrefetches,
2012 bool HasCall) const = 0;
2013
2014 /// \return The maximum number of iterations to prefetch ahead. If
2015 /// the required number of iterations is more than this number, no
2016 /// prefetching is performed.
2017 virtual unsigned getMaxPrefetchIterationsAhead() const = 0;
2018
2019 /// \return True if prefetching should also be done for writes.
2020 virtual bool enableWritePrefetching() const = 0;
2021
2022   /// \return True if the target wants to issue a prefetch in address space \p AS.
2023 virtual bool shouldPrefetchAddressSpace(unsigned AS) const = 0;
2024
2025 virtual unsigned getMaxInterleaveFactor(ElementCount VF) = 0;
2026 virtual InstructionCost getArithmeticInstrCost(
2027 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
2028 OperandValueInfo Opd1Info, OperandValueInfo Opd2Info,
2029 ArrayRef<const Value *> Args, const Instruction *CxtI = nullptr) = 0;
2030 virtual InstructionCost getAltInstrCost(
2031 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
2032 const SmallBitVector &OpcodeMask,
2033 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const = 0;
2034
2035 virtual InstructionCost
2036 getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask,
2037 TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
2038 ArrayRef<const Value *> Args, const Instruction *CxtI) = 0;
2039 virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst,
2040 Type *Src, CastContextHint CCH,
2041 TTI::TargetCostKind CostKind,
2042 const Instruction *I) = 0;
2043 virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
2044 VectorType *VecTy,
2045 unsigned Index) = 0;
2046 virtual InstructionCost getCFInstrCost(unsigned Opcode,
2047 TTI::TargetCostKind CostKind,
2048 const Instruction *I = nullptr) = 0;
2049 virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2050 Type *CondTy,
2051 CmpInst::Predicate VecPred,
2052 TTI::TargetCostKind CostKind,
2053 const Instruction *I) = 0;
2054 virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
2055 TTI::TargetCostKind CostKind,
2056 unsigned Index, Value *Op0,
2057 Value *Op1) = 0;
2058 virtual InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
2059 TTI::TargetCostKind CostKind,
2060 unsigned Index) = 0;
2061
2062 virtual InstructionCost
2063 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
2064 const APInt &DemandedDstElts,
2065 TTI::TargetCostKind CostKind) = 0;
2066
2067 virtual InstructionCost
2068 getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
2069 unsigned AddressSpace, TTI::TargetCostKind CostKind,
2070 OperandValueInfo OpInfo, const Instruction *I) = 0;
2071 virtual InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src,
2072 Align Alignment,
2073 unsigned AddressSpace,
2074 TTI::TargetCostKind CostKind,
2075 const Instruction *I) = 0;
2076 virtual InstructionCost
2077 getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
2078 unsigned AddressSpace,
2079 TTI::TargetCostKind CostKind) = 0;
2080 virtual InstructionCost
2081 getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
2082 bool VariableMask, Align Alignment,
2083 TTI::TargetCostKind CostKind,
2084 const Instruction *I = nullptr) = 0;
2085 virtual InstructionCost
2086 getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
2087 bool VariableMask, Align Alignment,
2088 TTI::TargetCostKind CostKind,
2089 const Instruction *I = nullptr) = 0;
2090
2091 virtual InstructionCost getInterleavedMemoryOpCost(
2092 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
2093 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
2094 bool UseMaskForCond = false, bool UseMaskForGaps = false) = 0;
2095 virtual InstructionCost
2096 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2097 std::optional<FastMathFlags> FMF,
2098 TTI::TargetCostKind CostKind) = 0;
2099 virtual InstructionCost
2100 getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
2101 TTI::TargetCostKind CostKind) = 0;
2102 virtual InstructionCost getExtendedReductionCost(
2103 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
2104 FastMathFlags FMF,
2105 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
2106 virtual InstructionCost getMulAccReductionCost(
2107 bool IsUnsigned, Type *ResTy, VectorType *Ty,
2108 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
2109 virtual InstructionCost
2110 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2111 TTI::TargetCostKind CostKind) = 0;
2112 virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy,
2113 ArrayRef<Type *> Tys,
2114 TTI::TargetCostKind CostKind) = 0;
2115 virtual unsigned getNumberOfParts(Type *Tp) = 0;
2116 virtual InstructionCost
2117 getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) = 0;
2118 virtual InstructionCost
2119 getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
2120 virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
2121 MemIntrinsicInfo &Info) = 0;
2122 virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
2123 virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
2124 Type *ExpectedType) = 0;
2125 virtual Type *getMemcpyLoopLoweringType(
2126 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
2127 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
2128 std::optional<uint32_t> AtomicElementSize) const = 0;
2129
2130 virtual void getMemcpyLoopResidualLoweringType(
2131 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
2132 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
2133 Align SrcAlign, Align DestAlign,
2134 std::optional<uint32_t> AtomicCpySize) const = 0;
2135 virtual bool areInlineCompatible(const Function *Caller,
2136 const Function *Callee) const = 0;
2137 virtual unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
2138 unsigned DefaultCallPenalty) const = 0;
2139 virtual bool areTypesABICompatible(const Function *Caller,
2140 const Function *Callee,
2141 const ArrayRef<Type *> &Types) const = 0;
2142 virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
2143 virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
2144 virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
2145 virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
2146 virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
2147 virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
2148 Align Alignment,
2149 unsigned AddrSpace) const = 0;
2150 virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
2151 Align Alignment,
2152 unsigned AddrSpace) const = 0;
2153 virtual bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
2154 ElementCount VF) const = 0;
2155 virtual bool isElementTypeLegalForScalableVector(Type *Ty) const = 0;
2156 virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
2157 unsigned ChainSizeInBytes,
2158 VectorType *VecTy) const = 0;
2159 virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
2160 unsigned ChainSizeInBytes,
2161 VectorType *VecTy) const = 0;
2162 virtual bool preferFixedOverScalableIfEqualCost() const = 0;
2163 virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
2164 ReductionFlags) const = 0;
2165 virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
2166 ReductionFlags) const = 0;
2167 virtual bool preferEpilogueVectorization() const = 0;
2168
2169 virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
2170 virtual ReductionShuffle
2171 getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const = 0;
2172 virtual unsigned getGISelRematGlobalCost() const = 0;
2173 virtual unsigned getMinTripCountTailFoldingThreshold() const = 0;
2174 virtual bool enableScalableVectorization() const = 0;
2175 virtual bool supportsScalableVectors() const = 0;
2176 virtual bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
2177 Align Alignment) const = 0;
2178 virtual VPLegalization
2179 getVPLegalizationStrategy(const VPIntrinsic &PI) const = 0;
2180 virtual bool hasArmWideBranch(bool Thumb) const = 0;
2181 virtual unsigned getMaxNumArgs() const = 0;
2182 };
2183
2184 template <typename T>
2185 class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
2186 T Impl;
2187
2188 public:
2189   Model(T Impl) : Impl(std::move(Impl)) {}
2190 ~Model() override = default;
2191
2192   const DataLayout &getDataLayout() const override {
2193 return Impl.getDataLayout();
2194 }
2195
2196 InstructionCost
2197   getGEPCost(Type *PointeeType, const Value *Ptr,
2198 ArrayRef<const Value *> Operands, Type *AccessType,
2199 TargetTransformInfo::TargetCostKind CostKind) override {
2200 return Impl.getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
2201 }
2202   InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
2203 const Value *Base,
2204 const PointersChainInfo &Info,
2205 Type *AccessTy,
2206 TargetCostKind CostKind) override {
2207 return Impl.getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
2208 }
2209   unsigned getInliningThresholdMultiplier() const override {
2210 return Impl.getInliningThresholdMultiplier();
2211 }
2212   unsigned adjustInliningThreshold(const CallBase *CB) override {
2213 return Impl.adjustInliningThreshold(CB);
2214 }
2215   unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const override {
2216 return Impl.getInliningCostBenefitAnalysisSavingsMultiplier();
2217 }
2218   unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const override {
2219 return Impl.getInliningCostBenefitAnalysisProfitableMultiplier();
2220 }
2221   int getInlinerVectorBonusPercent() const override {
2222 return Impl.getInlinerVectorBonusPercent();
2223 }
2224   unsigned getCallerAllocaCost(const CallBase *CB,
2225 const AllocaInst *AI) const override {
2226 return Impl.getCallerAllocaCost(CB, AI);
2227 }
2228   InstructionCost getMemcpyCost(const Instruction *I) override {
2229 return Impl.getMemcpyCost(I);
2230 }
2231
2232   uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
2233 return Impl.getMaxMemIntrinsicInlineSizeThreshold();
2234 }
2235
2236   InstructionCost getInstructionCost(const User *U,
2237 ArrayRef<const Value *> Operands,
2238 TargetCostKind CostKind) override {
2239 return Impl.getInstructionCost(U, Operands, CostKind);
2240 }
2241   BranchProbability getPredictableBranchThreshold() override {
2242 return Impl.getPredictableBranchThreshold();
2243 }
2244   InstructionCost getBranchMispredictPenalty() override {
2245 return Impl.getBranchMispredictPenalty();
2246 }
2247 bool hasBranchDivergence(const Function *F = nullptr) override {
2248 return Impl.hasBranchDivergence(F);
2249 }
2250   bool isSourceOfDivergence(const Value *V) override {
2251 return Impl.isSourceOfDivergence(V);
2252 }
2253
2254   bool isAlwaysUniform(const Value *V) override {
2255 return Impl.isAlwaysUniform(V);
2256 }
2257
2258   bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
2259 return Impl.isValidAddrSpaceCast(FromAS, ToAS);
2260 }
2261
2262   bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
2263 return Impl.addrspacesMayAlias(AS0, AS1);
2264 }
2265
2266   unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); }
2267
2268   bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
2269 Intrinsic::ID IID) const override {
2270 return Impl.collectFlatAddressOperands(OpIndexes, IID);
2271 }
2272
2273   bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
2274 return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
2275 }
2276
2277 bool
2278   canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const override {
2279 return Impl.canHaveNonUndefGlobalInitializerInAddressSpace(AS);
2280 }
2281
2282   unsigned getAssumedAddrSpace(const Value *V) const override {
2283 return Impl.getAssumedAddrSpace(V);
2284 }
2285
2286   bool isSingleThreaded() const override { return Impl.isSingleThreaded(); }
2287
2288 std::pair<const Value *, unsigned>
2289   getPredicatedAddrSpace(const Value *V) const override {
2290 return Impl.getPredicatedAddrSpace(V);
2291 }
2292
2293   Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
2294 Value *NewV) const override {
2295 return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
2296 }
2297
2298   bool isLoweredToCall(const Function *F) override {
2299 return Impl.isLoweredToCall(F);
2300 }
2301   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2302 UnrollingPreferences &UP,
2303 OptimizationRemarkEmitter *ORE) override {
2304 return Impl.getUnrollingPreferences(L, SE, UP, ORE);
2305 }
2306   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2307 PeelingPreferences &PP) override {
2308 return Impl.getPeelingPreferences(L, SE, PP);
2309 }
2310   bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
2311 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
2312 HardwareLoopInfo &HWLoopInfo) override {
2313 return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
2314 }
2315   bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) override {
2316 return Impl.preferPredicateOverEpilogue(TFI);
2317 }
2318 TailFoldingStyle
2319 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) override {
2320 return Impl.getPreferredTailFoldingStyle(IVUpdateMayOverflow);
2321 }
2322 std::optional<Instruction *>
2323   instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) override {
2324 return Impl.instCombineIntrinsic(IC, II);
2325 }
2326 std::optional<Value *>
2327   simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
2328 APInt DemandedMask, KnownBits &Known,
2329 bool &KnownBitsComputed) override {
2330 return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
2331 KnownBitsComputed);
2332 }
simplifyDemandedVectorEltsIntrinsic(InstCombiner & IC,IntrinsicInst & II,APInt DemandedElts,APInt & UndefElts,APInt & UndefElts2,APInt & UndefElts3,std::function<void (Instruction *,unsigned,APInt,APInt &)> SimplifyAndSetOp)2333 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
2334 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
2335 APInt &UndefElts2, APInt &UndefElts3,
2336 std::function<void(Instruction *, unsigned, APInt, APInt &)>
2337 SimplifyAndSetOp) override {
2338 return Impl.simplifyDemandedVectorEltsIntrinsic(
2339 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
2340 SimplifyAndSetOp);
2341 }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalAddScalableImmediate(int64_t Imm) override {
    return Impl.isLegalAddScalableImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I, int64_t ScalableOffset) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                                      AddrSpace, I, ScalableOffset);
  }
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) override {
    return Impl.isLSRCostLess(C1, C2);
  }
  bool isNumRegsMajorCostOfLSR() override {
    return Impl.isNumRegsMajorCostOfLSR();
  }
  bool shouldDropLSRSolutionIfLessProfitable() const override {
    return Impl.shouldDropLSRSolutionIfLessProfitable();
  }
  bool isProfitableLSRChainElement(Instruction *I) override {
    return Impl.isProfitableLSRChainElement(I);
  }
  bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); }
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) override {
    return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
  }
  AddressingModeKind
  getPreferredAddressingMode(const Loop *L,
                             ScalarEvolution *SE) const override {
    return Impl.getPreferredAddressingMode(L, SE);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedStore(DataType, Alignment);
  }
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedLoad(DataType, Alignment);
  }
  bool isLegalNTStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalNTStore(DataType, Alignment);
  }
  bool isLegalNTLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalNTLoad(DataType, Alignment);
  }
  bool isLegalBroadcastLoad(Type *ElementTy,
                            ElementCount NumElements) const override {
    return Impl.isLegalBroadcastLoad(ElementTy, NumElements);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedScatter(DataType, Alignment);
  }
  bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedGather(DataType, Alignment);
  }
  bool forceScalarizeMaskedGather(VectorType *DataType,
                                  Align Alignment) override {
    return Impl.forceScalarizeMaskedGather(DataType, Alignment);
  }
  bool forceScalarizeMaskedScatter(VectorType *DataType,
                                   Align Alignment) override {
    return Impl.forceScalarizeMaskedScatter(DataType, Alignment);
  }
  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedCompressStore(DataType, Alignment);
  }
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedExpandLoad(DataType, Alignment);
  }
  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalStridedLoadStore(DataType, Alignment);
  }
  bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) override {
    return Impl.isLegalMaskedVectorHistogram(AddrType, DataType);
  }
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const override {
    return Impl.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
  }
  bool enableOrderedReductions() override {
    return Impl.enableOrderedReductions();
  }
  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
    return Impl.hasDivRemOp(DataType, IsSigned);
  }
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
    return Impl.hasVolatileVariant(I, AddrSpace);
  }
  bool prefersVectorizedAddressing() override {
    return Impl.prefersVectorizedAddressing();
  }
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                                     AddrSpace);
  }
  bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool useAA() override { return Impl.useAA(); }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getRegUsageForType(Type *Ty) override {
    return Impl.getRegUsageForType(Ty);
  }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool shouldBuildLookupTablesForConstant(Constant *C) override {
    return Impl.shouldBuildLookupTablesForConstant(C);
  }
  bool shouldBuildRelLookupTables() override {
    return Impl.shouldBuildRelLookupTables();
  }
  bool useColdCCForColdCall(Function &F) override {
    return Impl.useColdCCForColdCall(F);
  }

  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TargetCostKind CostKind) override {
    return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                         CostKind);
  }
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TargetCostKind CostKind) override {
    return Impl.getOperandsScalarizationOverhead(Args, Tys, CostKind);
  }

  bool supportsEfficientVectorElementLoadStore() override {
    return Impl.supportsEfficientVectorElementLoadStore();
  }

  bool supportsTailCalls() override { return Impl.supportsTailCalls(); }
  bool supportsTailCallFor(const CallBase *CB) override {
    return Impl.supportsTailCallFor(CB);
  }

  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const override {
    return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
  }
  bool enableSelectOptimize() override {
    return Impl.enableSelectOptimize();
  }
  bool shouldTreatInstructionLikeSelect(const Instruction *I) override {
    return Impl.shouldTreatInstructionLikeSelect(I);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  bool enableMaskedInterleavedAccessVectorization() override {
    return Impl.enableMaskedInterleavedAccessVectorization();
  }
  bool isFPVectorizationPotentiallyUnsafe() override {
    return Impl.isFPVectorizationPotentiallyUnsafe();
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) override {
    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  bool isExpensiveToSpeculativelyExecute(const Instruction *I) override {
    return Impl.isExpensiveToSpeculativelyExecute(I);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
    return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
  }

  InstructionCost getFPOpCost(Type *Ty) override {
    return Impl.getFPOpCost(Ty);
  }

  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
  }
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) override {
    return Impl.getIntImmCost(Imm, Ty, CostKind);
  }
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) override {
    return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
  }
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) override {
    return Impl.getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  }
  bool preferToKeepConstantsAttached(const Instruction &Inst,
                                     const Function &Fn) const override {
    return Impl.preferToKeepConstantsAttached(Inst, Fn);
  }
  unsigned getNumberOfRegisters(unsigned ClassID) const override {
    return Impl.getNumberOfRegisters(ClassID);
  }
  bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const override {
    return Impl.hasConditionalLoadStoreForType(Ty);
  }
  unsigned getRegisterClassForType(bool Vector,
                                   Type *Ty = nullptr) const override {
    return Impl.getRegisterClassForType(Vector, Ty);
  }
  const char *getRegisterClassName(unsigned ClassID) const override {
    return Impl.getRegisterClassName(ClassID);
  }
  TypeSize getRegisterBitWidth(RegisterKind K) const override {
    return Impl.getRegisterBitWidth(K);
  }
  unsigned getMinVectorRegisterBitWidth() const override {
    return Impl.getMinVectorRegisterBitWidth();
  }
  std::optional<unsigned> getMaxVScale() const override {
    return Impl.getMaxVScale();
  }
  std::optional<unsigned> getVScaleForTuning() const override {
    return Impl.getVScaleForTuning();
  }
  bool isVScaleKnownToBeAPowerOfTwo() const override {
    return Impl.isVScaleKnownToBeAPowerOfTwo();
  }
  bool shouldMaximizeVectorBandwidth(
      TargetTransformInfo::RegisterKind K) const override {
    return Impl.shouldMaximizeVectorBandwidth(K);
  }
  ElementCount getMinimumVF(unsigned ElemWidth,
                            bool IsScalable) const override {
    return Impl.getMinimumVF(ElemWidth, IsScalable);
  }
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
    return Impl.getMaximumVF(ElemWidth, Opcode);
  }
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override {
    return Impl.getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
  }
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
    return Impl.shouldConsiderAddressTypePromotion(
        I, AllowPromotionWithoutCommonHeader);
  }
  unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); }
  std::optional<unsigned> getCacheSize(CacheLevel Level) const override {
    return Impl.getCacheSize(Level);
  }
  std::optional<unsigned>
  getCacheAssociativity(CacheLevel Level) const override {
    return Impl.getCacheAssociativity(Level);
  }

  std::optional<unsigned> getMinPageSize() const override {
    return Impl.getMinPageSize();
  }

  /// Return the preferred prefetch distance in terms of instructions.
  ///
  unsigned getPrefetchDistance() const override {
    return Impl.getPrefetchDistance();
  }

  /// Return the minimum stride necessary to trigger software
  /// prefetching.
  ///
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return Impl.getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                     NumPrefetches, HasCall);
  }

  /// Return the maximum prefetch distance in terms of loop
  /// iterations.
  ///
  unsigned getMaxPrefetchIterationsAhead() const override {
    return Impl.getMaxPrefetchIterationsAhead();
  }

  /// \return True if prefetching should also be done for writes.
  bool enableWritePrefetching() const override {
    return Impl.enableWritePrefetching();
  }

  /// \return True if the target wants to issue a prefetch in address space
  /// \p AS.
  bool shouldPrefetchAddressSpace(unsigned AS) const override {
    return Impl.shouldPrefetchAddressSpace(AS);
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) override {
    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
  }
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      OperandValueInfo Opd1Info, OperandValueInfo Opd2Info,
      ArrayRef<const Value *> Args,
      const Instruction *CxtI = nullptr) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Args, CxtI);
  }
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const override {
    return Impl.getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  }

  InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args,
                                 const Instruction *CxtI) override {
    return Impl.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args,
                               CxtI);
  }
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  }
  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy,
                                           unsigned Index) override {
    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  }
  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) override {
    return Impl.getCFInstrCost(Opcode, CostKind, I);
  }
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  }
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0,
                                     Value *Op1) override {
    return Impl.getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  }
  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) override {
    return Impl.getVectorInstrCost(I, Val, CostKind, Index);
  }
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) override {
    return Impl.getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
                                          DemandedDstElts, CostKind);
  }
  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  OperandValueInfo OpInfo,
                                  const Instruction *I) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
  }
  InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) override {
    return Impl.getVPMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);
  }
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
  }
  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
  }
  InstructionCost
  getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) override {
    return Impl.getStridedMemoryOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond, bool UseMaskForGaps) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
  }
  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) override {
    return Impl.getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  }
  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  }
  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *Ty, FastMathFlags FMF,
                           TTI::TargetCostKind CostKind) override {
    return Impl.getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                         CostKind);
  }
  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
  }
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getIntrinsicInstrCost(ICA, CostKind);
  }
  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) override {
    return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                            const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
    return Impl.getAtomicMemIntrinsicMaxElementSize();
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
      std::optional<uint32_t> AtomicElementSize) const override {
    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                          DestAddrSpace, SrcAlign, DestAlign,
                                          AtomicElementSize);
  }
  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      Align SrcAlign, Align DestAlign,
      std::optional<uint32_t> AtomicCpySize) const override {
    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                           SrcAddrSpace, DestAddrSpace,
                                           SrcAlign, DestAlign, AtomicCpySize);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
                                unsigned DefaultCallPenalty) const override {
    return Impl.getInlineCallPenalty(F, Call, DefaultCallPenalty);
  }
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const override {
    return Impl.areTypesABICompatible(Caller, Callee, Types);
  }
  bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
  }
  bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const override {
    return Impl.isLegalToVectorizeReduction(RdxDesc, VF);
  }
  bool isElementTypeLegalForScalableVector(Type *Ty) const override {
    return Impl.isElementTypeLegalForScalableVector(Ty);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool preferFixedOverScalableIfEqualCost() const override {
    return Impl.preferFixedOverScalableIfEqualCost();
  }
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.preferInLoopReduction(Opcode, Ty, Flags);
  }
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const override {
    return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
  }
  bool preferEpilogueVectorization() const override {
    return Impl.preferEpilogueVectorization();
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }

  ReductionShuffle
  getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const override {
    return Impl.getPreferredExpandedReductionShuffle(II);
  }

  unsigned getGISelRematGlobalCost() const override {
    return Impl.getGISelRematGlobalCost();
  }

  unsigned getMinTripCountTailFoldingThreshold() const override {
    return Impl.getMinTripCountTailFoldingThreshold();
  }

  bool supportsScalableVectors() const override {
    return Impl.supportsScalableVectors();
  }

  bool enableScalableVectorization() const override {
    return Impl.enableScalableVectorization();
  }

  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                             Align Alignment) const override {
    return Impl.hasActiveVectorLength(Opcode, DataType, Alignment);
  }

  VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
    return Impl.getVPLegalizationStrategy(PI);
  }

  bool hasArmWideBranch(bool Thumb) const override {
    return Impl.hasArmWideBranch(Thumb);
  }

  unsigned getMaxNumArgs() const override {
    return Impl.getMaxNumArgs();
  }
};

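// Note: the out-of-line constructor below is where the type erasure happens.
// The concrete implementation passed in as Impl (for example, an object
// derived from the target's TTI implementation classes) is wrapped in a
// heap-allocated Model<T>, and every virtual call made through the Concept
// interface is forwarded to it.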
template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}

/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
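///
/// A minimal usage sketch under the new pass manager (the pass name here is
/// hypothetical; the query shown is just one of many TTI hooks):
/// \code
///   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
///     TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
///     unsigned CacheLineSize = TTI.getCacheLineSize();
///     // ... use TTI queries to cost and guide the transformation ...
///     return PreservedAnalyses::all();
///   }
/// \endcode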
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
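  ///
  /// As a sketch of the intended use (details vary by target), a target
  /// machine typically builds the analysis around a lambda that forwards to
  /// its own TTI implementation:
  /// \code
  ///   TargetIRAnalysis MyTargetMachine::getTargetIRAnalysis() {
  ///     return TargetIRAnalysis(
  ///         [this](const Function &F) { return getTargetTransformInfo(F); });
  ///   }
  /// \endcode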
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
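///
/// A legacy-pass-manager client declares a dependency on this wrapper and
/// queries it per function, roughly as in this sketch (the pass name is
/// hypothetical):
/// \code
///   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<TargetTransformInfoWrapperPass>();
///   }
///   bool MyLegacyPass::runOnFunction(Function &F) {
///     TargetTransformInfo &TTI =
///         getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
///     // ... use TTI to cost and guide the transformation ...
///     return false;
///   }
/// \endcode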
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  std::optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
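///
/// A sketch of how a driver might register it in a legacy pipeline, where
/// \c TM is the TargetMachine in use:
/// \code
///   legacy::PassManager PM;
///   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
/// \endcode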
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // namespace llvm

#endif