//===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

class TargetRegisterInfo;
class TargetSubtargetInfo;
class TargetInstrInfo;
class MachineInstr;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for
  // this processor.
  MCSchedModel SchedModel;
  InstrItineraryData InstrItins;
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;

  SmallVector<unsigned, 16> ResourceFactors;
  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
  unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.

  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;

public:
  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()),
                       STI(nullptr), TII(nullptr) {}

  /// \brief Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
  void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
            const TargetInstrInfo *tii);

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// \brief TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// \brief Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grained IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// \brief Return true if this machine model includes cycle-to-cycle
  /// itinerary data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }
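
  // A minimal usage sketch, not part of this interface: a CodeGen pass that
  // owns a TargetSchedModel typically initializes it from its subtarget and
  // then checks which level of scheduling information is available. The names
  // MF, ST, and Model below are illustrative, not defined by this header.
  //
  //   const TargetSubtargetInfo &ST = MF.getSubtarget();
  //   TargetSchedModel Model;
  //   Model.init(ST.getSchedModel(), &ST, ST.getInstrInfo());
  //   if (Model.hasInstrSchedModel()) {
  //     // Per-operand machine model data is available via resolveSchedClass.
  //   } else if (Model.hasInstrItineraries()) {
  //     // Fall back to cycle-to-cycle itinerary data.
  //   }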

  /// \brief Return true if this machine model includes an instruction-level
  /// scheduling model or cycle-to-cycle itinerary data.
  bool hasInstrSchedModelOrItineraries() const {
    return hasInstrSchedModel() || hasInstrItineraries();
  }

  /// \brief Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// \brief Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// \brief Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;

  /// \brief Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// \brief Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#ifndef NDEBUG
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  typedef const MCWriteProcResEntry *ProcResIter;

  // \brief Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }

  /// \brief Multiply the number of units consumed for a resource by this
  /// factor to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// \brief Multiply number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// \brief Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }

  /// \brief Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// \brief Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }
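
  // Illustrative sketch of how the factors above are commonly combined (an
  // assumption about client code, not a guarantee of this interface). Scaling
  // by these factors puts micro-op counts, per-resource cycles, and plain
  // cycle counts into the same normalized resource units; Model, NumMicroOps,
  // ResCycles, Cycles, and PIdx are illustrative names only.
  //
  //   unsigned UOpUnits = NumMicroOps * Model.getMicroOpFactor();
  //   unsigned ResUnits = ResCycles * Model.getResourceFactor(PIdx);
  //   unsigned CycUnits = Cycles * Model.getLatencyFactor();
  //
  // The three quantities are then directly comparable when estimating
  // resource pressure.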

  /// \brief Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data-dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;

  /// \brief Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent
  /// of a particular use. computeOperandLatency is the preferred API, but this
  /// is occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present, this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this preserves the previous behavior of the if
  /// converter after moving it to TargetSchedModel).
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  unsigned computeInstrLatency(unsigned Opcode) const;

  /// \brief Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *DepMI) const;
};

} // namespace llvm

#endif