//===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===//
//
//                        The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Implements the skeleton of the TargetLowering class.
///
/// Specifically, this invokes the appropriate lowering method for a given
/// instruction kind and drives global register allocation. It also implements
/// the non-deleted instruction iteration in LoweringContext.
///
//===----------------------------------------------------------------------===//

#include "IceTargetLowering.h"

#include "IceBitVector.h"
#include "IceCfg.h" // setError()
#include "IceCfgNode.h"
#include "IceGlobalContext.h"
#include "IceGlobalInits.h"
#include "IceInstVarIter.h"
#include "IceLiveness.h"
#include "IceOperand.h"
#include "IceRegAlloc.h"

#include <string>
#include <vector>

#define TARGET_LOWERING_CLASS_FOR(t) Target_##t

// We prevent target-specific implementation details from leaking outside their
// implementations by forbidding #include of target-specific header files
// anywhere outside their own files. To create target-specific objects
// (TargetLowering, TargetDataLowering, and TargetHeaderLowering) we use the
// following named constructors. For reference, each target Foo needs to
// implement the following named constructors and initializer:
//
// namespace Foo {
//   unique_ptr<Ice::TargetLowering> createTargetLowering(Ice::Cfg *);
//   unique_ptr<Ice::TargetDataLowering>
//       createTargetDataLowering(Ice::GlobalContext*);
//   unique_ptr<Ice::TargetHeaderLowering>
//       createTargetHeaderLowering(Ice::GlobalContext *);
//   void staticInit(::Ice::GlobalContext *);
// }
#define SUBZERO_TARGET(X)                                                      \
  namespace X {                                                                \
  std::unique_ptr<::Ice::TargetLowering>                                       \
  createTargetLowering(::Ice::Cfg *Func);                                      \
  std::unique_ptr<::Ice::TargetDataLowering>                                   \
  createTargetDataLowering(::Ice::GlobalContext *Ctx);                         \
  std::unique_ptr<::Ice::TargetHeaderLowering>                                 \
  createTargetHeaderLowering(::Ice::GlobalContext *Ctx);                       \
  void staticInit(::Ice::GlobalContext *Ctx);                                  \
  bool shouldBePooled(const ::Ice::Constant *C);                               \
  ::Ice::Type getPointerType();                                                \
  } // end of namespace X
#include "SZTargets.def"
#undef SUBZERO_TARGET
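
// As a rough sketch, assuming SZTargets.def lists a target named X8632, the
// macro above expands to forward declarations such as:
//
//   namespace X8632 {
//   std::unique_ptr<::Ice::TargetLowering> createTargetLowering(::Ice::Cfg *);
//   ...
//   } // end of namespace X8632
//
// which lets the switch-based factories below dispatch to ::X8632::... without
// this file ever including a target-specific header.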

namespace Ice {
void LoweringContext::init(CfgNode *N) {
  Node = N;
  End = getNode()->getInsts().end();
  rewind();
  advanceForward(Next);
}

void LoweringContext::rewind() {
  Begin = getNode()->getInsts().begin();
  Cur = Begin;
  skipDeleted(Cur);
  Next = Cur;
  availabilityReset();
}

void LoweringContext::insert(Inst *Instr) {
  getNode()->getInsts().insert(Next, Instr);
  LastInserted = Instr;
}

void LoweringContext::skipDeleted(InstList::iterator &I) const {
  while (I != End && I->isDeleted())
    ++I;
}

void LoweringContext::advanceForward(InstList::iterator &I) const {
  if (I != End) {
    ++I;
    skipDeleted(I);
  }
}

Inst *LoweringContext::getLastInserted() const {
  assert(LastInserted);
  return LastInserted;
}

void LoweringContext::availabilityReset() {
  LastDest = nullptr;
  LastSrc = nullptr;
}

void LoweringContext::availabilityUpdate() {
  availabilityReset();
  Inst *Instr = LastInserted;
  if (Instr == nullptr)
    return;
  if (!Instr->isVarAssign())
    return;
  // Since isVarAssign() is true, the source operand must be a Variable.
  LastDest = Instr->getDest();
  LastSrc = llvm::cast<Variable>(Instr->getSrc(0));
}

Variable *LoweringContext::availabilityGet(Operand *Src) const {
  assert(Src);
  if (Src == LastDest)
    return LastSrc;
  return nullptr;
}
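
// A minimal sketch of the availability peephole implemented above: after
// lowering inserts a plain variable assignment "B = A", availabilityUpdate()
// records Dest=B and Src=A, and a subsequent availabilityGet(B) returns A, so
// the next lowering step can use A directly and often leave the copy dead.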

namespace {

void printRegisterSet(Ostream &Str, const SmallBitVector &Bitset,
                      std::function<std::string(RegNumT)> getRegName,
                      const std::string &LineIndentString) {
  constexpr size_t RegistersPerLine = 16;
  size_t Count = 0;
  for (RegNumT RegNum : RegNumBVIter(Bitset)) {
    if (Count == 0) {
      Str << LineIndentString;
    } else {
      Str << ",";
    }
    if (Count > 0 && Count % RegistersPerLine == 0)
      Str << "\n" << LineIndentString;
    ++Count;
    Str << getRegName(RegNum);
  }
  if (Count)
    Str << "\n";
}

// Splits "<class>:<reg>" into "<class>" plus "<reg>".  If there is no <class>
// component, the result is "" plus "<reg>".
void splitToClassAndName(const std::string &RegName, std::string *SplitRegClass,
                         std::string *SplitRegName) {
  constexpr const char Separator[] = ":";
  constexpr size_t SeparatorWidth = llvm::array_lengthof(Separator) - 1;
  size_t Pos = RegName.find(Separator);
  if (Pos == std::string::npos) {
    *SplitRegClass = "";
    *SplitRegName = RegName;
  } else {
    *SplitRegClass = RegName.substr(0, Pos);
    *SplitRegName = RegName.substr(Pos + SeparatorWidth);
  }
}
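
// For example (register names illustrative): splitToClassAndName("i32:eax",
// &C, &N) yields C == "i32" and N == "eax", while a bare "eax" yields C == ""
// and N == "eax".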

LLVM_ATTRIBUTE_NORETURN void badTargetFatalError(TargetArch Target) {
  llvm::report_fatal_error("Unsupported target: " +
                           std::string(targetArchString(Target)));
}

} // end of anonymous namespace

void TargetLowering::filterTypeToRegisterSet(
    GlobalContext *Ctx, int32_t NumRegs, SmallBitVector TypeToRegisterSet[],
    size_t TypeToRegisterSetSize,
    std::function<std::string(RegNumT)> getRegName,
    std::function<const char *(RegClass)> getRegClassName) {
  std::vector<SmallBitVector> UseSet(TypeToRegisterSetSize,
                                     SmallBitVector(NumRegs));
  std::vector<SmallBitVector> ExcludeSet(TypeToRegisterSetSize,
                                         SmallBitVector(NumRegs));

  std::unordered_map<std::string, RegNumT> RegNameToIndex;
  for (int32_t RegIndex = 0; RegIndex < NumRegs; ++RegIndex) {
    const auto RegNum = RegNumT::fromInt(RegIndex);
    RegNameToIndex[getRegName(RegNum)] = RegNum;
  }

  std::vector<std::string> BadRegNames;

  // The processRegList function iterates across the RegNames vector.  Each
  // entry in the vector is a string of the form "<reg>" or "<class>:<reg>".
  // The register class and register number are computed, and the corresponding
  // bit is set in RegSet[][].  If "<class>:" is missing, then the bit is set
  // for all classes.
  auto processRegList = [&](const std::vector<std::string> &RegNames,
                            std::vector<SmallBitVector> &RegSet) {
    for (const std::string &RegClassAndName : RegNames) {
      std::string RClass;
      std::string RName;
      splitToClassAndName(RegClassAndName, &RClass, &RName);
      if (!RegNameToIndex.count(RName)) {
        BadRegNames.push_back(RName);
        continue;
      }
      const int32_t RegIndex = RegNameToIndex.at(RName);
      for (SizeT TypeIndex = 0; TypeIndex < TypeToRegisterSetSize;
           ++TypeIndex) {
        if (RClass.empty() ||
            RClass == getRegClassName(static_cast<RegClass>(TypeIndex))) {
          RegSet[TypeIndex][RegIndex] = TypeToRegisterSet[TypeIndex][RegIndex];
        }
      }
    }
  };

  processRegList(getFlags().getUseRestrictedRegisters(), UseSet);
  processRegList(getFlags().getExcludedRegisters(), ExcludeSet);

  if (!BadRegNames.empty()) {
    std::string Buffer;
    llvm::raw_string_ostream StrBuf(Buffer);
    StrBuf << "Unrecognized use/exclude registers:";
    for (const auto &RegName : BadRegNames)
      StrBuf << " " << RegName;
    llvm::report_fatal_error(StrBuf.str());
  }

  // Apply filters.
  for (size_t TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; ++TypeIndex) {
    SmallBitVector *TypeBitSet = &TypeToRegisterSet[TypeIndex];
    SmallBitVector *UseBitSet = &UseSet[TypeIndex];
    SmallBitVector *ExcludeBitSet = &ExcludeSet[TypeIndex];
    if (UseBitSet->any())
      *TypeBitSet = *UseBitSet;
    (*TypeBitSet).reset(*ExcludeBitSet);
  }

  // Display filtered register sets, if requested.
  if (BuildDefs::dump() && NumRegs &&
      (getFlags().getVerbose() & IceV_AvailableRegs)) {
    Ostream &Str = Ctx->getStrDump();
    const std::string Indent = "  ";
    const std::string IndentTwice = Indent + Indent;
    Str << "Registers available for register allocation:\n";
    for (size_t TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; ++TypeIndex) {
      Str << Indent << getRegClassName(static_cast<RegClass>(TypeIndex))
          << ":\n";
      printRegisterSet(Str, TypeToRegisterSet[TypeIndex], getRegName,
                       IndentTwice);
    }
    Str << "\n";
  }
}
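
// Filtering semantics, in brief: if any "use" register is named for a class,
// the allocatable set for that class is replaced by exactly the named
// registers that were already legal for it; "exclude" registers are then
// removed on top. E.g., for a hypothetical class with full set {r0,r1,r2},
// use={r0,r1} and exclude={r1} leave {r0}.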

std::unique_ptr<TargetLowering>
TargetLowering::createLowering(TargetArch Target, Cfg *Func) {
  switch (Target) {
  default:
    badTargetFatalError(Target);
#define SUBZERO_TARGET(X)                                                      \
  case TARGET_LOWERING_CLASS_FOR(X):                                           \
    return ::X::createTargetLowering(Func);
#include "SZTargets.def"
#undef SUBZERO_TARGET
  }
}

void TargetLowering::staticInit(GlobalContext *Ctx) {
  const TargetArch Target = getFlags().getTargetArch();
  // Call the specified target's static initializer.
  switch (Target) {
  default:
    badTargetFatalError(Target);
#define SUBZERO_TARGET(X)                                                      \
  case TARGET_LOWERING_CLASS_FOR(X): {                                         \
    static bool InitGuard##X = false;                                          \
    if (InitGuard##X) {                                                        \
      return;                                                                  \
    }                                                                          \
    InitGuard##X = true;                                                       \
    ::X::staticInit(Ctx);                                                      \
  } break;
#include "SZTargets.def"
#undef SUBZERO_TARGET
  }
}

bool TargetLowering::shouldBePooled(const Constant *C) {
  const TargetArch Target = getFlags().getTargetArch();
  switch (Target) {
  default:
    return false;
#define SUBZERO_TARGET(X)                                                      \
  case TARGET_LOWERING_CLASS_FOR(X):                                           \
    return ::X::shouldBePooled(C);
#include "SZTargets.def"
#undef SUBZERO_TARGET
  }
}

::Ice::Type TargetLowering::getPointerType() {
  const TargetArch Target = getFlags().getTargetArch();
  switch (Target) {
  default:
    return ::Ice::IceType_void;
#define SUBZERO_TARGET(X)                                                      \
  case TARGET_LOWERING_CLASS_FOR(X):                                           \
    return ::X::getPointerType();
#include "SZTargets.def"
#undef SUBZERO_TARGET
  }
}

TargetLowering::TargetLowering(Cfg *Func)
    : Func(Func), Ctx(Func->getContext()) {}

void TargetLowering::genTargetHelperCalls() {
  TimerMarker T(TimerStack::TT_genHelpers, Func);
  Utils::BoolFlagSaver _(GeneratingTargetHelpers, true);
  for (CfgNode *Node : Func->getNodes()) {
    Context.init(Node);
    while (!Context.atEnd()) {
      PostIncrLoweringContext _(Context);
      genTargetHelperCallFor(iteratorToInst(Context.getCur()));
    }
  }
}

void TargetLowering::doAddressOpt() {
  doAddressOptOther();
  if (llvm::isa<InstLoad>(*Context.getCur()))
    doAddressOptLoad();
  else if (llvm::isa<InstStore>(*Context.getCur()))
    doAddressOptStore();
  else if (auto *Intrinsic =
               llvm::dyn_cast<InstIntrinsic>(&*Context.getCur())) {
    if (Intrinsic->getIntrinsicID() == Intrinsics::LoadSubVector)
      doAddressOptLoadSubVector();
    else if (Intrinsic->getIntrinsicID() == Intrinsics::StoreSubVector)
      doAddressOptStoreSubVector();
  }
  Context.advanceCur();
  Context.advanceNext();
}

// Lowers a single instruction according to the information in Context, by
// checking the Context.Cur instruction kind and calling the appropriate
// lowering method. The lowering method should insert target instructions at
// the Cur.Next insertion point, and should not delete the Context.Cur
// instruction or advance Context.Cur.
//
// The lowering method may look ahead in the instruction stream as desired, and
// lower additional instructions in conjunction with the current one, for
// example fusing a compare and branch. If it does, it should advance
// Context.Cur to point to the next non-deleted instruction to process, and it
// should delete any additional instructions it consumes.
void TargetLowering::lower() {
  assert(!Context.atEnd());
  Inst *Instr = iteratorToInst(Context.getCur());
  Instr->deleteIfDead();
  if (!Instr->isDeleted() && !llvm::isa<InstFakeDef>(Instr) &&
      !llvm::isa<InstFakeUse>(Instr)) {
    // Mark the current instruction as deleted before lowering, otherwise the
    // Dest variable will likely get marked as non-SSA. See
    // Variable::setDefinition(). However, just pass-through FakeDef and
    // FakeUse instructions that might have been inserted prior to lowering.
    Instr->setDeleted();
    switch (Instr->getKind()) {
    case Inst::Alloca:
      lowerAlloca(llvm::cast<InstAlloca>(Instr));
      break;
    case Inst::Arithmetic:
      lowerArithmetic(llvm::cast<InstArithmetic>(Instr));
      break;
    case Inst::Assign:
      lowerAssign(llvm::cast<InstAssign>(Instr));
      break;
    case Inst::Br:
      lowerBr(llvm::cast<InstBr>(Instr));
      break;
    case Inst::Breakpoint:
      lowerBreakpoint(llvm::cast<InstBreakpoint>(Instr));
      break;
    case Inst::Call:
      lowerCall(llvm::cast<InstCall>(Instr));
      break;
    case Inst::Cast:
      lowerCast(llvm::cast<InstCast>(Instr));
      break;
    case Inst::ExtractElement:
      lowerExtractElement(llvm::cast<InstExtractElement>(Instr));
      break;
    case Inst::Fcmp:
      lowerFcmp(llvm::cast<InstFcmp>(Instr));
      break;
    case Inst::Icmp:
      lowerIcmp(llvm::cast<InstIcmp>(Instr));
      break;
    case Inst::InsertElement:
      lowerInsertElement(llvm::cast<InstInsertElement>(Instr));
      break;
    case Inst::Intrinsic: {
      auto *Intrinsic = llvm::cast<InstIntrinsic>(Instr);
      if (Intrinsic->getIntrinsicInfo().ReturnsTwice)
        setCallsReturnsTwice(true);
      lowerIntrinsic(Intrinsic);
      break;
    }
    case Inst::Load:
      lowerLoad(llvm::cast<InstLoad>(Instr));
      break;
    case Inst::Phi:
      lowerPhi(llvm::cast<InstPhi>(Instr));
      break;
    case Inst::Ret:
      lowerRet(llvm::cast<InstRet>(Instr));
      break;
    case Inst::Select:
      lowerSelect(llvm::cast<InstSelect>(Instr));
      break;
    case Inst::ShuffleVector:
      lowerShuffleVector(llvm::cast<InstShuffleVector>(Instr));
      break;
    case Inst::Store:
      lowerStore(llvm::cast<InstStore>(Instr));
      break;
    case Inst::Switch:
      lowerSwitch(llvm::cast<InstSwitch>(Instr));
      break;
    case Inst::Unreachable:
      lowerUnreachable(llvm::cast<InstUnreachable>(Instr));
      break;
    default:
      lowerOther(Instr);
      break;
    }

    postLower();
  }

  Context.advanceCur();
  Context.advanceNext();
}

void TargetLowering::lowerInst(CfgNode *Node, InstList::iterator Next,
                               InstHighLevel *Instr) {
  // TODO(stichnot): Consider modifying the design/implementation to avoid
  // multiple init() calls when using lowerInst() to lower several instructions
  // in the same node.
  Context.init(Node);
  Context.setNext(Next);
  Context.insert(Instr);
  --Next;
  assert(iteratorToInst(Next) == Instr);
  Context.setCur(Next);
  lower();
}

void TargetLowering::lowerOther(const Inst *Instr) {
  (void)Instr;
  Func->setError("Can't lower unsupported instruction type");
}

// Drives register allocation, allowing all physical registers (except perhaps
// for the frame pointer) to be allocated. This set of registers could
// potentially be parameterized if we want to restrict registers e.g. for
// performance testing.
void TargetLowering::regAlloc(RegAllocKind Kind) {
  TimerMarker T(TimerStack::TT_regAlloc, Func);
  LinearScan LinearScan(Func);
  RegSetMask RegInclude = RegSet_None;
  RegSetMask RegExclude = RegSet_None;
  RegInclude |= RegSet_CallerSave;
  RegInclude |= RegSet_CalleeSave;
  if (hasFramePointer())
    RegExclude |= RegSet_FramePointer;
  SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude);
  bool Repeat = (Kind == RAK_Global && getFlags().getRepeatRegAlloc());
  CfgSet<Variable *> EmptySet;
  do {
    LinearScan.init(Kind, EmptySet);
    LinearScan.scan(RegMask);
    if (!LinearScan.hasEvictions())
      Repeat = false;
    Kind = RAK_SecondChance;
  } while (Repeat);
  // TODO(stichnot): Run the register allocator one more time to do stack slot
  // coalescing.  The idea would be to initialize the Unhandled list with the
  // set of Variables that have no register and a non-empty live range, and
  // model an infinite number of registers.  Maybe use the register aliasing
  // mechanism to get better packing of narrower slots.
  if (getFlags().getSplitGlobalVars())
    postRegallocSplitting(RegMask);
}
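
// Note on the repeat loop above: when repetition is enabled for global
// allocation, the allocator is rerun in RAK_SecondChance mode until a pass
// completes with no evictions, giving evicted variables another chance at a
// register.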

namespace {
CfgVector<Inst *> getInstructionsInRange(CfgNode *Node, InstNumberT Start,
                                         InstNumberT End) {
  CfgVector<Inst *> Result;
  bool Started = false;
  auto Process = [&](InstList &Insts) {
    for (auto &Instr : Insts) {
      if (Instr.isDeleted()) {
        continue;
      }
      if (Instr.getNumber() == Start) {
        Started = true;
      }
      if (Started) {
        Result.emplace_back(&Instr);
      }
      if (Instr.getNumber() == End) {
        break;
      }
    }
  };
  Process(Node->getPhis());
  Process(Node->getInsts());
  // TODO(manasijm): Investigate why checking >= End significantly changes
  // output. Should not happen when renumbering produces monotonically
  // increasing instruction numbers and live ranges begin and end on
  // non-deleted instructions.
  return Result;
}
} // namespace
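
// getInstructionsInRange collects the non-deleted instructions numbered Start
// through End inclusive, walking the node's phi instructions first and then
// its regular instructions. E.g., Start=12 and End=16 yields the non-deleted
// instructions numbered 12 through 16.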

void TargetLowering::postRegallocSplitting(const SmallBitVector &RegMask) {
  // Splits the live ranges of global (multi-block) variables and runs the
  // register allocator to find registers for as many of the new variables as
  // possible.
  // TODO(manasijm): Merge the small live ranges back into multi-block ones
  // when the variables get the same register. This will reduce the number of
  // new instructions inserted. This might involve a full dataflow analysis.
  // Also, modify the preference mechanism in the register allocator to match.

  TimerMarker _(TimerStack::TT_splitGlobalVars, Func);
  CfgSet<Variable *> SplitCandidates;

  // Find variables that do not have registers but are allowed to. Also skip
  // variables with single-segment live ranges, as they are not split further
  // in this function.
  for (Variable *Var : Func->getVariables()) {
    if (!Var->mustNotHaveReg() && !Var->hasReg()) {
      if (Var->getLiveRange().getNumSegments() > 1)
        SplitCandidates.insert(Var);
    }
  }
  if (SplitCandidates.empty())
    return;

  CfgSet<Variable *> ExtraVars;

  struct UseInfo {
    Variable *Replacing = nullptr;
    Inst *FirstUse = nullptr;
    Inst *LastDef = nullptr;
    SizeT UseCount = 0;
  };
  CfgUnorderedMap<Variable *, UseInfo> VarInfo;
  // Split the live ranges of the viable variables by node.
  // Compute metadata (UseInfo) for each of the resulting variables.
  for (auto *Var : SplitCandidates) {
    for (auto &Segment : Var->getLiveRange().getSegments()) {
      UseInfo Info;
      Info.Replacing = Var;
      auto *Node = Var->getLiveRange().getNodeForSegment(Segment.first);

      for (auto *Instr :
           getInstructionsInRange(Node, Segment.first, Segment.second)) {
        for (SizeT i = 0; i < Instr->getSrcSize(); ++i) {
          // It's safe to iterate over the top-level src operands rather than
          // using FOREACH_VAR_IN_INST(), because any variables inside e.g.
          // mem operands should already have registers.
          if (auto *Var = llvm::dyn_cast<Variable>(Instr->getSrc(i))) {
            if (Var == Info.Replacing) {
              if (Info.FirstUse == nullptr && !llvm::isa<InstPhi>(Instr)) {
                Info.FirstUse = Instr;
              }
              Info.UseCount++;
            }
          }
        }
        if (Instr->getDest() == Info.Replacing && !llvm::isa<InstPhi>(Instr)) {
          Info.LastDef = Instr;
        }
      }

      static constexpr SizeT MinUseThreshold = 3;
      // Skip if the variable has fewer than `MinUseThreshold` uses in the
      // segment.
      if (Info.UseCount < MinUseThreshold)
        continue;

      if (!Info.FirstUse && !Info.LastDef) {
        continue;
      }

      LiveRange LR;
      LR.addSegment(Segment);
      Variable *NewVar = Func->makeVariable(Var->getType());

      NewVar->setLiveRange(LR);

      VarInfo[NewVar] = Info;

      ExtraVars.insert(NewVar);
    }
  }
  // Run the register allocator with all these new variables included.
  LinearScan RegAlloc(Func);
  RegAlloc.init(RAK_Global, SplitCandidates);
  RegAlloc.scan(RegMask);

  // Modify the Cfg to use the new variables that now have registers.
  for (auto *ExtraVar : ExtraVars) {
    if (!ExtraVar->hasReg()) {
      continue;
    }

    auto &Info = VarInfo[ExtraVar];

    assert(ExtraVar->getLiveRange().getSegments().size() == 1);
    auto Segment = ExtraVar->getLiveRange().getSegments()[0];

    auto *Node =
        Info.Replacing->getLiveRange().getNodeForSegment(Segment.first);

    auto RelevantInsts =
        getInstructionsInRange(Node, Segment.first, Segment.second);

    if (RelevantInsts.empty())
      continue;

    // Replace old variables.
    for (auto *Instr : RelevantInsts) {
      if (llvm::isa<InstPhi>(Instr))
        continue;
      // TODO(manasijm): Figure out how to safely enable replacing phi dest
      // variables. The issue is that we can not insert low level mov
      // instructions into the PhiList.
      for (SizeT i = 0; i < Instr->getSrcSize(); ++i) {
        // FOREACH_VAR_IN_INST() not needed. Same logic as above.
        if (auto *Var = llvm::dyn_cast<Variable>(Instr->getSrc(i))) {
          if (Var == Info.Replacing) {
            Instr->replaceSource(i, ExtraVar);
          }
        }
      }
      if (Instr->getDest() == Info.Replacing) {
        Instr->replaceDest(ExtraVar);
      }
    }

    assert(Info.FirstUse != Info.LastDef);
    assert(Info.FirstUse || Info.LastDef);

    // Insert spill code.
    if (Info.FirstUse != nullptr) {
      auto *NewInst =
          Func->getTarget()->createLoweredMove(ExtraVar, Info.Replacing);
      Node->getInsts().insert(instToIterator(Info.FirstUse), NewInst);
    }
    if (Info.LastDef != nullptr) {
      auto *NewInst =
          Func->getTarget()->createLoweredMove(Info.Replacing, ExtraVar);
      Node->getInsts().insertAfter(instToIterator(Info.LastDef), NewInst);
    }
  }
}
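
// A small example of the rewrite above, with illustrative names: if a
// multi-block variable %g (no register) has three or more uses within one
// live-range segment, a new single-segment variable %g2 is created for it.
// When the allocator assigns %g2 a register, uses of %g in that segment are
// rewritten to %g2, a fill "%g2 = %g" is inserted before the first use, and a
// spill "%g = %g2" is inserted after the last def.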

void TargetLowering::markRedefinitions() {
  // Find (non-SSA) instructions where the Dest variable appears in some source
  // operand, and set the IsDestRedefined flag to keep liveness analysis
  // consistent.
  for (auto Instr = Context.getCur(), E = Context.getNext(); Instr != E;
       ++Instr) {
    if (Instr->isDeleted())
      continue;
    Variable *Dest = Instr->getDest();
    if (Dest == nullptr)
      continue;
    FOREACH_VAR_IN_INST(Var, *Instr) {
      if (Var == Dest) {
        Instr->setDestRedefined();
        break;
      }
    }
  }
}
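
// For example, a lowered two-address form like "t = t + 1" both defines and
// reads t, so t gets flagged as redefined rather than being treated as a new
// SSA definition.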

void TargetLowering::addFakeDefUses(const Inst *Instr) {
  FOREACH_VAR_IN_INST(Var, *Instr) {
    if (auto *Var64 = llvm::dyn_cast<Variable64On32>(Var)) {
      Context.insert<InstFakeUse>(Var64->getLo());
      Context.insert<InstFakeUse>(Var64->getHi());
    } else if (auto *VarVec = llvm::dyn_cast<VariableVecOn32>(Var)) {
      for (Variable *Var : VarVec->getContainers()) {
        Context.insert<InstFakeUse>(Var);
      }
    } else {
      Context.insert<InstFakeUse>(Var);
    }
  }
  Variable *Dest = Instr->getDest();
  if (Dest == nullptr)
    return;
  if (auto *Var64 = llvm::dyn_cast<Variable64On32>(Dest)) {
    Context.insert<InstFakeDef>(Var64->getLo());
    Context.insert<InstFakeDef>(Var64->getHi());
  } else if (auto *VarVec = llvm::dyn_cast<VariableVecOn32>(Dest)) {
    for (Variable *Var : VarVec->getContainers()) {
      Context.insert<InstFakeDef>(Var);
    }
  } else {
    Context.insert<InstFakeDef>(Dest);
  }
}

void TargetLowering::sortVarsByAlignment(VarList &Dest,
                                         const VarList &Source) const {
  Dest = Source;
  // Instead of std::sort, we could do a bucket sort with log2(alignment) as
  // the buckets, if performance is an issue.
  std::sort(Dest.begin(), Dest.end(),
            [this](const Variable *V1, const Variable *V2) {
              const size_t WidthV1 = typeWidthInBytesOnStack(V1->getType());
              const size_t WidthV2 = typeWidthInBytesOnStack(V2->getType());
              if (WidthV1 == WidthV2)
                return V1->getIndex() < V2->getIndex();
              return WidthV1 > WidthV2;
            });
}
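
// The comparator orders variables by decreasing on-stack width, breaking ties
// by variable index so the order is deterministic. E.g., widths {4, 8, 4} for
// {v1, v2, v3} sort as v2(8), v1(4), v3(4).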

void TargetLowering::getVarStackSlotParams(
    VarList &SortedSpilledVariables, SmallBitVector &RegsUsed,
    size_t *GlobalsSize, size_t *SpillAreaSizeBytes,
    uint32_t *SpillAreaAlignmentBytes, uint32_t *LocalsSlotsAlignmentBytes,
    std::function<bool(Variable *)> TargetVarHook) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  BitVector IsVarReferenced(Func->getNumVariables());
  for (CfgNode *Node : Func->getNodes()) {
    for (Inst &Instr : Node->getInsts()) {
      if (Instr.isDeleted())
        continue;
      if (const Variable *Var = Instr.getDest())
        IsVarReferenced[Var->getIndex()] = true;
      FOREACH_VAR_IN_INST(Var, Instr) {
        IsVarReferenced[Var->getIndex()] = true;
      }
    }
  }

  // If SimpleCoalescing is false, each variable without a register gets its
  // own unique stack slot, which leads to large stack frames. If
  // SimpleCoalescing is true, then each "global" variable without a register
  // gets its own slot, but "local" variable slots are reused across basic
  // blocks. E.g., if A and B are local to block 1 and C is local to block 2,
  // then C may share a slot with A or B.
  //
  // We cannot coalesce stack slots if this function calls a "returns twice"
  // function. In that case, basic blocks may be revisited, and variables local
  // to those basic blocks are actually live until after the called function
  // returns a second time.
  const bool SimpleCoalescing = !callsReturnsTwice();

  CfgVector<size_t> LocalsSize(Func->getNumNodes());
  const VarList &Variables = Func->getVariables();
  VarList SpilledVariables;
  for (Variable *Var : Variables) {
    if (Var->hasReg()) {
      // Don't consider a rematerializable variable to be an actual register use
      // (specifically of the frame pointer).  Otherwise, the prolog may decide
      // to save the frame pointer twice - once because of the explicit need for
      // a frame pointer, and once because of an active use of a callee-save
      // register.
      if (!Var->isRematerializable())
        RegsUsed[Var->getRegNum()] = true;
      continue;
    }
    // An argument either does not need a stack slot (if passed in a register)
    // or already has one (if passed on the stack).
    if (Var->getIsArg()) {
      if (!Var->hasReg()) {
        assert(!Var->hasStackOffset());
        Var->setHasStackOffset();
      }
      continue;
    }
    // An unreferenced variable doesn't need a stack slot.
    if (!IsVarReferenced[Var->getIndex()])
      continue;
    // Check for a target-specific variable: it may end up sharing stack slots
    // and so not need accounting here.
    if (TargetVarHook(Var))
      continue;
    assert(!Var->hasStackOffset());
    Var->setHasStackOffset();
    SpilledVariables.push_back(Var);
  }

  SortedSpilledVariables.reserve(SpilledVariables.size());
  sortVarsByAlignment(SortedSpilledVariables, SpilledVariables);

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    // We have sorted by alignment, so the first variable we encounter that is
    // located in each area determines the max alignment for the area.
    if (!*SpillAreaAlignmentBytes)
      *SpillAreaAlignmentBytes = Increment;
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        *GlobalsSize += Increment;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        if (LocalsSize[NodeIndex] > *SpillAreaSizeBytes)
          *SpillAreaSizeBytes = LocalsSize[NodeIndex];
        if (!*LocalsSlotsAlignmentBytes)
          *LocalsSlotsAlignmentBytes = Increment;
      }
    } else {
      *SpillAreaSizeBytes += Increment;
    }
  }
  // For testing legalization of large stack offsets on targets with limited
  // offset bits in instruction encodings, add some padding.
  *SpillAreaSizeBytes += getFlags().getTestStackExtra();
}
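
// Sizing sketch for the coalescing case above: LocalsSize[] accumulates the
// bytes of single-block locals per node, and the locals portion of
// *SpillAreaSizeBytes is the maximum over all nodes, since local slots are
// reused across blocks. E.g., 12 bytes of locals in block 1 and 8 bytes in
// block 2 contribute max(12, 8) = 12 bytes, with multi-block variables
// accounted separately in *GlobalsSize.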

void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset,
                                          uint32_t SpillAreaAlignmentBytes,
                                          size_t GlobalsSize,
                                          uint32_t LocalsSlotsAlignmentBytes,
                                          uint32_t *SpillAreaPaddingBytes,
                                          uint32_t *LocalsSlotsPaddingBytes) {
  if (SpillAreaAlignmentBytes) {
    uint32_t PaddingStart = SpillAreaStartOffset;
    uint32_t SpillAreaStart =
        Utils::applyAlignment(PaddingStart, SpillAreaAlignmentBytes);
    *SpillAreaPaddingBytes = SpillAreaStart - PaddingStart;
  }

  // If there are separate globals and locals areas, make sure the locals area
  // is aligned by padding the end of the globals area.
  if (LocalsSlotsAlignmentBytes) {
    uint32_t GlobalsAndSubsequentPaddingSize = GlobalsSize;
    GlobalsAndSubsequentPaddingSize =
        Utils::applyAlignment(GlobalsSize, LocalsSlotsAlignmentBytes);
    *LocalsSlotsPaddingBytes = GlobalsAndSubsequentPaddingSize - GlobalsSize;
  }
}
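
// Worked example, assuming Utils::applyAlignment rounds up to the next
// multiple: SpillAreaStartOffset = 4 with 16-byte spill-area alignment gives
// *SpillAreaPaddingBytes = 16 - 4 = 12, and GlobalsSize = 10 with 8-byte
// locals alignment gives *LocalsSlotsPaddingBytes = 16 - 10 = 6.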

void TargetLowering::assignVarStackSlots(VarList &SortedSpilledVariables,
                                         size_t SpillAreaPaddingBytes,
                                         size_t SpillAreaSizeBytes,
                                         size_t GlobalsAndSubsequentPaddingSize,
                                         bool UsesFramePointer) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  // For testing legalization of large stack offsets on targets with limited
  // offset bits in instruction encodings, add some padding. This assumes that
  // SpillAreaSizeBytes has accounted for the extra test padding. When
  // UsesFramePointer is true, the offset depends on the padding, not just the
  // SpillAreaSizeBytes. On the other hand, when UsesFramePointer is false, the
  // offsets depend on the gap between SpillAreaSizeBytes and
  // SpillAreaPaddingBytes, so we don't increment that.
  size_t TestPadding = getFlags().getTestStackExtra();
  if (UsesFramePointer)
    SpillAreaPaddingBytes += TestPadding;
  size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
  size_t NextStackOffset = SpillAreaPaddingBytes;
  CfgVector<size_t> LocalsSize(Func->getNumNodes());
  const bool SimpleCoalescing = !callsReturnsTwice();

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        GlobalsSpaceUsed += Increment;
        NextStackOffset = GlobalsSpaceUsed;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        NextStackOffset = SpillAreaPaddingBytes +
                          GlobalsAndSubsequentPaddingSize +
                          LocalsSize[NodeIndex];
      }
    } else {
      NextStackOffset += Increment;
    }
    if (UsesFramePointer)
      Var->setStackOffset(-NextStackOffset);
    else
      Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset);
  }
}
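
// Offset convention in the loop above: with a frame pointer, slots receive
// negative offsets growing away from the frame pointer; without one, offsets
// are rebased against SpillAreaSizeBytes so they are positive relative to the
// stack pointer. Locals from different nodes deliberately receive overlapping
// offsets, which is safe because their live ranges are confined to their own
// blocks.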

InstCall *TargetLowering::makeHelperCall(RuntimeHelper FuncID, Variable *Dest,
                                         SizeT MaxSrcs) {
  constexpr bool HasTailCall = false;
  Constant *CallTarget = Ctx->getRuntimeHelperFunc(FuncID);
  InstCall *Call =
      InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall);
  return Call;
}

bool TargetLowering::shouldOptimizeMemIntrins() {
  return Func->getOptLevel() >= Opt_1 || getFlags().getForceMemIntrinOpt();
}

void TargetLowering::scalarizeArithmetic(InstArithmetic::OpKind Kind,
                                         Variable *Dest, Operand *Src0,
                                         Operand *Src1) {
  scalarizeInstruction(
      Dest,
      [this, Kind](Variable *Dest, Operand *Src0, Operand *Src1) {
        return Context.insert<InstArithmetic>(Kind, Dest, Src0, Src1);
      },
      Src0, Src1);
}
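
// scalarizeArithmetic expands a vector operation into one scalar
// InstArithmetic per element via the scalarizeInstruction helper (defined
// elsewhere in this class). Conceptually, a 4-element vector add becomes four
// scalar adds plus the element extract/insert glue that scalarizeInstruction
// supplies.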

void TargetLowering::emitWithoutPrefix(const ConstantRelocatable *C,
                                       const char *Suffix) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  const std::string &EmitStr = C->getEmitString();
  if (!EmitStr.empty()) {
    // C has a custom emit string, so we use it instead of the canonical
    // Name + Offset form.
    Str << EmitStr;
    return;
  }
  Str << C->getName() << Suffix;
  RelocOffsetT Offset = C->getOffset();
  if (Offset) {
    if (Offset > 0)
      Str << "+";
    Str << Offset;
  }
}
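
// For example (symbol name illustrative): a relocatable named "foo" with an
// empty Suffix and Offset 8 is emitted as "foo+8"; Offset -8 prints as
// "foo-8", since the minus sign comes from the integer itself.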

std::unique_ptr<TargetDataLowering>
TargetDataLowering::createLowering(GlobalContext *Ctx) {
  TargetArch Target = getFlags().getTargetArch();
  switch (Target) {
  default:
    badTargetFatalError(Target);
#define SUBZERO_TARGET(X)                                                      \
  case TARGET_LOWERING_CLASS_FOR(X):                                           \
    return ::X::createTargetDataLowering(Ctx);
#include "SZTargets.def"
#undef SUBZERO_TARGET
  }
}

TargetDataLowering::~TargetDataLowering() = default;

namespace {

// dataSectionSuffix decides whether to use SectionSuffix or VarName as the
// data section suffix. Essentially, when using separate data sections for
// globals, SectionSuffix is not necessary.
std::string dataSectionSuffix(const std::string &SectionSuffix,
                              const std::string &VarName,
                              const bool DataSections) {
  if (SectionSuffix.empty() && !DataSections) {
    return "";
  }

  if (DataSections) {
    // With data sections we don't need to use the SectionSuffix.
    return "." + VarName;
  }

  assert(!SectionSuffix.empty());
  return "." + SectionSuffix;
}

} // end of anonymous namespace
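
// E.g., with data sections enabled a constant global "foo" lands in its own
// section ".rodata.foo"; otherwise a non-empty SectionSuffix "mysfx"
// (illustrative) yields ".rodata.mysfx", and no suffix at all yields plain
// ".rodata".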

void TargetDataLowering::emitGlobal(const VariableDeclaration &Var,
                                    const std::string &SectionSuffix) {
  if (!BuildDefs::dump())
    return;

  // If external and not initialized, this must be a cross test. Don't generate
  // a declaration for such cases.
  const bool IsExternal = Var.isExternal() || getFlags().getDisableInternal();
  if (IsExternal && !Var.hasInitializer())
    return;

  Ostream &Str = Ctx->getStrEmit();
  const bool HasNonzeroInitializer = Var.hasNonzeroInitializer();
  const bool IsConstant = Var.getIsConstant();
  const SizeT Size = Var.getNumBytes();
  const std::string Name = Var.getName().toString();

  Str << "\t.type\t" << Name << ",%object\n";

  const bool UseDataSections = getFlags().getDataSections();
  const std::string Suffix =
      dataSectionSuffix(SectionSuffix, Name, UseDataSections);
  if (IsConstant)
    Str << "\t.section\t.rodata" << Suffix << ",\"a\",%progbits\n";
  else if (HasNonzeroInitializer)
    Str << "\t.section\t.data" << Suffix << ",\"aw\",%progbits\n";
  else
    Str << "\t.section\t.bss" << Suffix << ",\"aw\",%nobits\n";

  if (IsExternal)
    Str << "\t.globl\t" << Name << "\n";

  const uint32_t Align = Var.getAlignment();
  if (Align > 1) {
    assert(llvm::isPowerOf2_32(Align));
    // Use the .p2align directive, since the .align N directive can interpret N
    // either as a byte count or as 2^N bytes, depending on the target.
    Str << "\t.p2align\t" << llvm::Log2_32(Align) << "\n";
  }

  Str << Name << ":\n";

  if (HasNonzeroInitializer) {
    for (const auto *Init : Var.getInitializers()) {
      switch (Init->getKind()) {
      case VariableDeclaration::Initializer::DataInitializerKind: {
        const auto &Data =
            llvm::cast<VariableDeclaration::DataInitializer>(Init)
                ->getContents();
        for (SizeT i = 0; i < Init->getNumBytes(); ++i) {
          Str << "\t.byte\t" << (((unsigned)Data[i]) & 0xff) << "\n";
        }
        break;
      }
      case VariableDeclaration::Initializer::ZeroInitializerKind:
        Str << "\t.zero\t" << Init->getNumBytes() << "\n";
        break;
      case VariableDeclaration::Initializer::RelocInitializerKind: {
        const auto *Reloc =
            llvm::cast<VariableDeclaration::RelocInitializer>(Init);
        Str << "\t" << getEmit32Directive() << "\t";
        Str << Reloc->getDeclaration()->getName();
        if (Reloc->hasFixup()) {
          // TODO(jpp): this is ARM32 specific.
          Str << "(GOTOFF)";
        }
        if (RelocOffsetT Offset = Reloc->getOffset()) {
          if (Offset >= 0 || (Offset == INT32_MIN))
            Str << " + " << Offset;
          else
            Str << " - " << -Offset;
        }
        Str << "\n";
        break;
      }
      }
    }
  } else {
    // NOTE: for non-constant zero initializers, this is BSS (no bits), so an
    // ELF writer would not write to the file, and only track virtual offsets,
    // but the .s writer still needs this .zero and cannot simply use the .size
    // to advance offsets.
    Str << "\t.zero\t" << Size << "\n";
  }

  Str << "\t.size\t" << Name << ", " << Size << "\n";
}
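
// A sketch of the emitted assembly for an internal, constant 4-byte global
// named "foo" (name illustrative) with data sections enabled and initializer
// bytes {1, 0, 0, 0}:
//
//   .type foo,%object
//   .section .rodata.foo,"a",%progbits
//   foo:
//   .byte 1
//   .byte 0
//   .byte 0
//   .byte 0
//   .size foo, 4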

std::unique_ptr<TargetHeaderLowering>
TargetHeaderLowering::createLowering(GlobalContext *Ctx) {
  TargetArch Target = getFlags().getTargetArch();
  switch (Target) {
  default:
    badTargetFatalError(Target);
#define SUBZERO_TARGET(X)                                                      \
  case TARGET_LOWERING_CLASS_FOR(X):                                           \
    return ::X::createTargetHeaderLowering(Ctx);
#include "SZTargets.def"
#undef SUBZERO_TARGET
  }
}

TargetHeaderLowering::~TargetHeaderLowering() = default;

} // end of namespace Ice