//===- HexagonSubtarget.cpp - Hexagon Subtarget Information ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Hexagon specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <map>

using namespace llvm;

#define DEBUG_TYPE "hexagon-subtarget"

#define GET_SUBTARGETINFO_CTOR
#define GET_SUBTARGETINFO_TARGET_DESC
#include "HexagonGenSubtargetInfo.inc"

static cl::opt<bool> EnableBSBSched("enable-bsb-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(false));

static cl::opt<bool> EnableDotCurSched("enable-cur-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(true),
  cl::desc("Enable the scheduler to generate .cur"));

static cl::opt<bool> DisableHexagonMISched("disable-hexagon-misched",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Disable Hexagon MI Scheduling"));

static cl::opt<bool> EnableSubregLiveness("hexagon-subreg-liveness",
  cl::Hidden, cl::ZeroOrMore, cl::init(true),
  cl::desc("Enable subregister liveness tracking for Hexagon"));

static cl::opt<bool> OverrideLongCalls("hexagon-long-calls",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("If present, forces/disables the use of long calls"));

static cl::opt<bool> EnablePredicatedCalls("hexagon-pred-calls",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Consider calls to be predicable"));

static cl::opt<bool> SchedPredsCloser("sched-preds-closer",
  cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> SchedRetvalOptimization("sched-retval-optimization",
  cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> EnableCheckBankConflict("hexagon-check-bank-conflict",
  cl::Hidden, cl::ZeroOrMore, cl::init(true),
  cl::desc("Enable checking for cache bank conflicts"));

HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
                                   StringRef FS, const TargetMachine &TM)
    : HexagonGenSubtargetInfo(TT, CPU, FS), OptLevel(TM.getOptLevel()),
      CPUString(Hexagon_MC::selectHexagonCPU(CPU)),
      InstrInfo(initializeSubtargetDependencies(CPU, FS)),
      RegInfo(getHwMode()), TLInfo(TM, *this),
      InstrItins(getInstrItineraryForCPU(CPUString)) {
  // Beware of the default constructor of InstrItineraryData: it will
  // reset all members to 0.
  assert(InstrItins.Itineraries != nullptr && "InstrItins not initialized");
}

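// Map the CPU name to an architecture version, apply the feature string, and
// set the scheduling-related defaults before the remaining subtarget members
// are constructed.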
HexagonSubtarget &
HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
  static std::map<StringRef, Hexagon::ArchEnum> CpuTable{
      {"generic", Hexagon::ArchEnum::V60},
      {"hexagonv4", Hexagon::ArchEnum::V4},
      {"hexagonv5", Hexagon::ArchEnum::V5},
      {"hexagonv55", Hexagon::ArchEnum::V55},
      {"hexagonv60", Hexagon::ArchEnum::V60},
      {"hexagonv62", Hexagon::ArchEnum::V62},
      {"hexagonv65", Hexagon::ArchEnum::V65},
  };

  auto FoundIt = CpuTable.find(CPUString);
  if (FoundIt != CpuTable.end())
    HexagonArchVersion = FoundIt->second;
  else
    llvm_unreachable("Unrecognized Hexagon processor version");

  UseHVX128BOps = false;
  UseHVX64BOps = false;
  UseLongCalls = false;

  UseBSBScheduling = hasV60Ops() && EnableBSBSched;

  ParseSubtargetFeatures(CPUString, FS);

  if (OverrideLongCalls.getPosition())
    UseLongCalls = OverrideLongCalls;

  FeatureBitset Features = getFeatureBits();
  if (HexagonDisableDuplex)
    setFeatureBits(Features.set(Hexagon::FeatureDuplex, false));
  setFeatureBits(Hexagon_MC::completeHVXFeatures(Features));

  return *this;
}

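// Scheduling DAG mutation: remove output dependences on the USR overflow bit
// (USR_OVF), so that instructions which only set the sticky overflow flag are
// not unnecessarily serialized.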
void HexagonSubtarget::UsrOverflowMutation::apply(ScheduleDAGInstrs *DAG) {
  for (SUnit &SU : DAG->SUnits) {
    if (!SU.isInstr())
      continue;
    SmallVector<SDep, 4> Erase;
    for (auto &D : SU.Preds)
      if (D.getKind() == SDep::Output && D.getReg() == Hexagon::USR_OVF)
        Erase.push_back(D);
    for (auto &E : Erase)
      SU.removePred(E);
  }
}

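// Scheduling DAG mutation: HVX vector loads and stores that cannot share a
// packet get a chain-edge latency of 1 (see the comment in the loop below).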
void HexagonSubtarget::HVXMemLatencyMutation::apply(ScheduleDAGInstrs *DAG) {
  for (SUnit &SU : DAG->SUnits) {
    // Update the latency of chain edges between v60 vector load or store
    // instructions to be 1. These instructions cannot be scheduled in the
    // same packet.
    MachineInstr &MI1 = *SU.getInstr();
    auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
    bool IsStoreMI1 = MI1.mayStore();
    bool IsLoadMI1 = MI1.mayLoad();
    if (!QII->isHVXVec(MI1) || !(IsStoreMI1 || IsLoadMI1))
      continue;
    for (SDep &SI : SU.Succs) {
      if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
        continue;
      MachineInstr &MI2 = *SI.getSUnit()->getInstr();
      if (!QII->isHVXVec(MI2))
        continue;
      if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
        SI.setLatency(1);
        SU.setHeightDirty();
        // Change the dependence in the opposite direction too.
        for (SDep &PI : SI.getSUnit()->Preds) {
          if (PI.getSUnit() != &SU || PI.getKind() != SDep::Order)
            continue;
          PI.setLatency(1);
          SI.getSUnit()->setDepthDirty();
        }
      }
    }
  }
}

// Check if a call and subsequent A2_tfrpi instructions should maintain
// scheduling affinity. We are looking for the TFRI to be consumed in
// the next instruction. This should help reduce the instances of
// double register pairs being allocated and scheduled before a call
// when not used until after the call. This situation is exacerbated
// by the fact that we allocate the pair from the callee-saved list,
// leading to excess spills and restores.
bool HexagonSubtarget::CallMutation::shouldTFRICallBind(
    const HexagonInstrInfo &HII, const SUnit &Inst1,
    const SUnit &Inst2) const {
  if (Inst1.getInstr()->getOpcode() != Hexagon::A2_tfrpi)
    return false;

  // TypeXTYPE are 64 bit operations.
  unsigned Type = HII.getType(*Inst2.getInstr());
  return Type == HexagonII::TypeS_2op || Type == HexagonII::TypeS_3op ||
         Type == HexagonII::TypeALU64 || Type == HexagonII::TypeM;
}

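// Scheduling DAG mutation used around calls: adds barrier edges that keep
// predicate compares and qualifying A2_tfrpi immediates from drifting away
// from a preceding call, and that keep copies of physical argument/return
// registers from being reordered with later writes to those registers.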
void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  SUnit* LastSequentialCall = nullptr;
  // Map from virtual register to physical register from the copy.
  DenseMap<unsigned, unsigned> VRegHoldingReg;
  // Map from the physical register to the instruction that uses virtual
  // register. This is used to create the barrier edge.
  DenseMap<unsigned, SUnit *> LastVRegUse;
  auto &TRI = *DAG->MF.getSubtarget().getRegisterInfo();
  auto &HII = *DAG->MF.getSubtarget<HexagonSubtarget>().getInstrInfo();

  // Currently we only catch the situation when a compare gets scheduled
  // before the preceding call.
  for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {
    // Remember the call.
    if (DAG->SUnits[su].getInstr()->isCall())
      LastSequentialCall = &DAG->SUnits[su];
    // Look for a compare that defines a predicate.
    else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)
      DAG->addEdge(&DAG->SUnits[su], SDep(LastSequentialCall, SDep::Barrier));
    // Look for call and tfri* instructions.
    else if (SchedPredsCloser && LastSequentialCall && su > 1 && su < e-1 &&
             shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
      DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));
    // Prevent redundant register copies due to reads and writes of physical
    // registers. The original motivation for this was the code generated
    // between two calls, which is caused by both the return value and the
    // argument for the next call being in %r0.
    // Example:
    //   1: <call1>
    //   2: %vreg = COPY %r0
    //   3: <use of %vreg>
    //   4: %r0 = ...
    //   5: <call2>
    // The scheduler would often swap 3 and 4, so an additional register is
    // needed. This code inserts a Barrier dependence between 3 & 4 to prevent
    // this.
    // The code below checks for all the physical registers, not just R0/D0/V0.
    else if (SchedRetvalOptimization) {
      const MachineInstr *MI = DAG->SUnits[su].getInstr();
      if (MI->isCopy() &&
          TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
        // %vregX = COPY %r0
        VRegHoldingReg[MI->getOperand(0).getReg()] = MI->getOperand(1).getReg();
        LastVRegUse.erase(MI->getOperand(1).getReg());
      } else {
        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
          const MachineOperand &MO = MI->getOperand(i);
          if (!MO.isReg())
            continue;
          if (MO.isUse() && !MI->isCopy() &&
              VRegHoldingReg.count(MO.getReg())) {
            // <use of %vregX>
            LastVRegUse[VRegHoldingReg[MO.getReg()]] = &DAG->SUnits[su];
          } else if (MO.isDef() &&
                     TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
            for (MCRegAliasIterator AI(MO.getReg(), &TRI, true); AI.isValid();
                 ++AI) {
              if (LastVRegUse.count(*AI) &&
                  LastVRegUse[*AI] != &DAG->SUnits[su])
                // %r0 = ...
                DAG->addEdge(&DAG->SUnits[su], SDep(LastVRegUse[*AI], SDep::Barrier));
              LastVRegUse.erase(*AI);
            }
          }
        }
      }
    }
  }
}

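// Scheduling DAG mutation: add artificial edges (with extra latency) between
// loads whose base and offset suggest a likely cache bank conflict, so they
// are less likely to be scheduled back-to-back.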
void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
  if (!EnableCheckBankConflict)
    return;

  const auto &HII = static_cast<const HexagonInstrInfo&>(*DAG->TII);

  // Create artificial edges between loads that could likely cause a bank
  // conflict. Since such loads would normally not have any dependency
  // between them, we cannot rely on existing edges.
  for (unsigned i = 0, e = DAG->SUnits.size(); i != e; ++i) {
    SUnit &S0 = DAG->SUnits[i];
    MachineInstr &L0 = *S0.getInstr();
    if (!L0.mayLoad() || L0.mayStore() ||
        HII.getAddrMode(L0) != HexagonII::BaseImmOffset)
      continue;
    int Offset0;
    unsigned Size0;
    unsigned Base0 = HII.getBaseAndOffset(L0, Offset0, Size0);
    // If the access size is longer than the L1 cache line, skip the check.
    if (Base0 == 0 || Size0 >= 32)
      continue;
    // Scan only up to 32 instructions ahead (to avoid n^2 complexity).
    for (unsigned j = i+1, m = std::min(i+32, e); j != m; ++j) {
      SUnit &S1 = DAG->SUnits[j];
      MachineInstr &L1 = *S1.getInstr();
      if (!L1.mayLoad() || L1.mayStore() ||
          HII.getAddrMode(L1) != HexagonII::BaseImmOffset)
        continue;
      int Offset1;
      unsigned Size1;
      unsigned Base1 = HII.getBaseAndOffset(L1, Offset1, Size1);
      if (Base1 == 0 || Size1 >= 32 || Base0 != Base1)
        continue;
      // Check bits 3 and 4 of the offset: if they differ, a bank conflict
      // is unlikely.
      if (((Offset0 ^ Offset1) & 0x18) != 0)
        continue;
      // Bits 3 and 4 are the same, add an artificial edge and set extra
      // latency.
      SDep A(&S0, SDep::Artificial);
      A.setLatency(1);
      S1.addPred(A, true);
    }
  }
}

/// Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
bool HexagonSubtarget::useAA() const {
  if (OptLevel != CodeGenOpt::None)
    return true;
  return false;
}

/// Perform target specific adjustments to the latency of a schedule
/// dependency.
void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
                                             SDep &Dep) const {
  MachineInstr *SrcInst = Src->getInstr();
  MachineInstr *DstInst = Dst->getInstr();
  if (!Src->isInstr() || !Dst->isInstr())
    return;

  const HexagonInstrInfo *QII = getInstrInfo();

  // Instructions with .new operands have zero latency.
  SmallSet<SUnit *, 4> ExclSrc;
  SmallSet<SUnit *, 4> ExclDst;
  if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
      isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
    Dep.setLatency(0);
    return;
  }

  if (!hasV60Ops())
    return;

  // Set the latency for a copy to zero since we hope that it will get
  // removed.
  if (DstInst->isCopy())
    Dep.setLatency(0);

  // If it's a REG_SEQUENCE/COPY, use its destination instruction to determine
  // the correct latency.
  if ((DstInst->isRegSequence() || DstInst->isCopy()) && Dst->NumSuccs == 1) {
    unsigned DReg = DstInst->getOperand(0).getReg();
    MachineInstr *DDst = Dst->Succs[0].getSUnit()->getInstr();
    unsigned UseIdx = -1;
    for (unsigned OpNum = 0; OpNum < DDst->getNumOperands(); OpNum++) {
      const MachineOperand &MO = DDst->getOperand(OpNum);
      if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == DReg) {
        UseIdx = OpNum;
        break;
      }
    }
    int DLatency = (InstrInfo.getOperandLatency(&InstrItins, *SrcInst,
                                                0, *DDst, UseIdx));
    DLatency = std::max(DLatency, 0);
    Dep.setLatency((unsigned)DLatency);
  }

  // Try to schedule uses near definitions to generate .cur.
  ExclSrc.clear();
  ExclDst.clear();
  if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
      isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
    Dep.setLatency(0);
    return;
  }

  updateLatency(*SrcInst, *DstInst, Dep);
}

void HexagonSubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(llvm::make_unique<UsrOverflowMutation>());
  Mutations.push_back(llvm::make_unique<HVXMemLatencyMutation>());
  Mutations.push_back(llvm::make_unique<BankConflictMutation>());
}

void HexagonSubtarget::getSMSMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(llvm::make_unique<UsrOverflowMutation>());
  Mutations.push_back(llvm::make_unique<HVXMemLatencyMutation>());
}

// Pin the vtable to this file.
void HexagonSubtarget::anchor() {}

bool HexagonSubtarget::enableMachineScheduler() const {
  if (DisableHexagonMISched.getNumOccurrences())
    return !DisableHexagonMISched;
  return true;
}

bool HexagonSubtarget::usePredicatedCalls() const {
  return EnablePredicatedCalls;
}

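// Adjust the latency of a dependence edge: artificial edges get a latency of
// 1; on V60+, edges from HVX vector instructions (or any edge when BSB
// scheduling is enabled) have their latency halved, rounding up.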
void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
                                     MachineInstr &DstInst, SDep &Dep) const {
  if (Dep.isArtificial()) {
    Dep.setLatency(1);
    return;
  }

  if (!hasV60Ops())
    return;

  auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());

  // BSB scheduling.
  if (QII.isHVXVec(SrcInst) || useBSBScheduling())
    Dep.setLatency((Dep.getLatency() + 1) >> 1);
}

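// Recompute the latency of the register dependence between Src and Dst from
// the instruction itineraries (undoing a previously assigned zero latency),
// and mirror the new value on the corresponding predecessor edge of Dst.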
void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
  MachineInstr *SrcI = Src->getInstr();
  for (auto &I : Src->Succs) {
    if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
      continue;
    unsigned DepR = I.getReg();
    int DefIdx = -1;
    for (unsigned OpNum = 0; OpNum < SrcI->getNumOperands(); OpNum++) {
      const MachineOperand &MO = SrcI->getOperand(OpNum);
      if (MO.isReg() && MO.isDef() && MO.getReg() == DepR)
        DefIdx = OpNum;
    }
    assert(DefIdx >= 0 && "Def Reg not found in Src MI");
    MachineInstr *DstI = Dst->getInstr();
    SDep T = I;
    for (unsigned OpNum = 0; OpNum < DstI->getNumOperands(); OpNum++) {
      const MachineOperand &MO = DstI->getOperand(OpNum);
      if (MO.isReg() && MO.isUse() && MO.getReg() == DepR) {
        int Latency = (InstrInfo.getOperandLatency(&InstrItins, *SrcI,
                                                   DefIdx, *DstI, OpNum));

        // For some instructions (ex: COPY), we might end up with < 0 latency
        // as they don't have any Itinerary class associated with them.
        Latency = std::max(Latency, 0);

        I.setLatency(Latency);
        updateLatency(*SrcI, *DstI, I);
      }
    }

    // Update the latency of opposite edge too.
    T.setSUnit(Src);
    auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
    assert(F != Dst->Preds.end());
    F->setLatency(I.getLatency());
  }
}

/// Change the latency between the two SUnits.
void HexagonSubtarget::changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat)
      const {
  for (auto &I : Src->Succs) {
    if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
      continue;
    SDep T = I;
    I.setLatency(Lat);

    // Update the latency of opposite edge too.
    T.setSUnit(Src);
    auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
    assert(F != Dst->Preds.end());
    F->setLatency(Lat);
  }
}

/// If the SUnit has a zero latency edge, return the other SUnit.
static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
  for (auto &I : Deps)
    if (I.isAssignedRegDep() && I.getLatency() == 0 &&
        !I.getSUnit()->getInstr()->isPseudo())
      return I.getSUnit();
  return nullptr;
}

// Return true if these are the best two instructions to schedule
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
// the others, if needed.
bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
      const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
      SmallSet<SUnit*, 4> &ExclDst) const {
  MachineInstr &SrcInst = *Src->getInstr();
  MachineInstr &DstInst = *Dst->getInstr();

  // Ignore Boundary SU nodes as these have null instructions.
  if (Dst->isBoundaryNode())
    return false;

  if (SrcInst.isPHI() || DstInst.isPHI())
    return false;

  if (!TII->isToBeScheduledASAP(SrcInst, DstInst) &&
      !TII->canExecuteInBundle(SrcInst, DstInst))
    return false;

  // The architecture doesn't allow three dependent instructions in the same
  // packet. So, if the destination has a zero latency successor, then it's
  // not a candidate for a zero latency predecessor.
  if (getZeroLatency(Dst, Dst->Succs) != nullptr)
    return false;

  // Check if the Dst instruction is the best candidate first.
  SUnit *Best = nullptr;
  SUnit *DstBest = nullptr;
  SUnit *SrcBest = getZeroLatency(Dst, Dst->Preds);
  if (SrcBest == nullptr || Src->NodeNum >= SrcBest->NodeNum) {
    // Check that Src doesn't have a better candidate.
    DstBest = getZeroLatency(Src, Src->Succs);
    if (DstBest == nullptr || Dst->NodeNum <= DstBest->NodeNum)
      Best = Dst;
  }
  if (Best != Dst)
    return false;

  // The caller frequently adds the same dependence twice. If so, then
  // return true for this case too.
  if ((Src == SrcBest && Dst == DstBest) ||
      (SrcBest == nullptr && Dst == DstBest) ||
      (Src == SrcBest && Dst == nullptr))
    return true;

  // Reassign the latency for the previous bests, which requires setting
  // the dependence edge in both directions.
  if (SrcBest != nullptr) {
    if (!hasV60Ops())
      changeLatency(SrcBest, Dst, 1);
    else
      restoreLatency(SrcBest, Dst);
  }
  if (DstBest != nullptr) {
    if (!hasV60Ops())
      changeLatency(Src, DstBest, 1);
    else
      restoreLatency(Src, DstBest);
  }

  // Attempt to find another opportunity for zero latency in a different
  // dependence.
  if (SrcBest && DstBest)
    // If there is an edge from SrcBest to DstBest, then try to change that
    // to 0 now.
    changeLatency(SrcBest, DstBest, 0);
  else if (DstBest) {
    // Check if the previous best destination instruction has a new zero
    // latency dependence opportunity.
    ExclSrc.insert(Src);
    for (auto &I : DstBest->Preds)
      if (ExclSrc.count(I.getSUnit()) == 0 &&
          isBestZeroLatency(I.getSUnit(), DstBest, TII, ExclSrc, ExclDst))
        changeLatency(I.getSUnit(), DstBest, 0);
  } else if (SrcBest) {
    // Check if previous best source instruction has a new zero latency
    // dependence opportunity.
    ExclDst.insert(Dst);
    for (auto &I : SrcBest->Succs)
      if (ExclDst.count(I.getSUnit()) == 0 &&
          isBestZeroLatency(SrcBest, I.getSUnit(), TII, ExclSrc, ExclDst))
        changeLatency(SrcBest, I.getSUnit(), 0);
  }

  return true;
}

unsigned HexagonSubtarget::getL1CacheLineSize() const {
  return 32;
}

unsigned HexagonSubtarget::getL1PrefetchDistance() const {
  return 32;
}

bool HexagonSubtarget::enableSubRegLiveness() const {
  return EnableSubregLiveness;
}