1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_target.h"
25
26 #include <algorithm>
27 #include <stack>
28 #include <limits>
#if __cplusplus >= 201103L
#include <unordered_map>
#include <unordered_set>
#else
#include <tr1/unordered_map>
#include <tr1/unordered_set>
#endif
34
35 namespace nv50_ir {
36
#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
using std::unordered_set;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
using std::tr1::unordered_set;
#endif
44
45 #define MAX_REGISTER_FILE_SIZE 256
46
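// Tracks which allocation units of each register file are currently in use.
// Occupancy is one bit per unit in a BitSet; sizes are converted to units
// with the per-file granularity (unit[]), and fill[] records the highest
// unit ever assigned so the final register usage can be reported.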
47 class RegisterSet
48 {
49 public:
50 RegisterSet(const Target *);
51
52 void init(const Target *);
53 void reset(DataFile, bool resetMax = false);
54
55 void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
56 void intersect(DataFile f, const RegisterSet *);
57
58 bool assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg);
59 void release(DataFile f, int32_t reg, unsigned int size);
60 void occupy(DataFile f, int32_t reg, unsigned int size);
61 void occupy(const Value *);
62 void occupyMask(DataFile f, int32_t reg, uint8_t mask);
63 bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
64 bool testOccupy(const Value *);
65 bool testOccupy(DataFile f, int32_t reg, unsigned int size);
66
   inline int getMaxAssigned(DataFile f) const { return fill[f]; }
68
   inline unsigned int getFileSize(DataFile f) const
70 {
71 return last[f] + 1;
72 }
73
   inline unsigned int units(DataFile f, unsigned int size) const
75 {
76 return size >> unit[f];
77 }
78 // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
80 {
81 return v->reg.data.id * MIN2(v->reg.size, 4);
82 }
   inline unsigned int idToUnits(const Value *v) const
84 {
85 return units(v->reg.file, idToBytes(v));
86 }
   inline int bytesToId(Value *v, unsigned int bytes) const
88 {
89 if (v->reg.size < 4)
90 return units(v->reg.file, bytes);
91 return bytes / 4;
92 }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
94 {
95 if (u < 0)
96 return -1;
97 return (size < 4) ? u : ((u << unit[f]) / 4);
98 }
99
100 void print(DataFile f) const;
101
102 const bool restrictedGPR16Range;
103
104 private:
105 BitSet bits[LAST_REGISTER_FILE + 1];
106
107 int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity
108
109 int last[LAST_REGISTER_FILE + 1];
110 int fill[LAST_REGISTER_FILE + 1];
111 };
112
113 void
RegisterSet::reset(DataFile f, bool resetMax)
115 {
116 bits[f].fill(0);
117 if (resetMax)
118 fill[f] = -1;
119 }
120
121 void
RegisterSet::init(const Target *targ)
123 {
124 for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
125 DataFile f = static_cast<DataFile>(rf);
126 last[rf] = targ->getFileSize(f) - 1;
127 unit[rf] = targ->getFileUnit(f);
128 fill[rf] = -1;
129 assert(last[rf] < MAX_REGISTER_FILE_SIZE);
130 bits[rf].allocate(last[rf] + 1, true);
131 }
132 }
133
RegisterSet::RegisterSet(const Target *targ)
135 : restrictedGPR16Range(targ->getChipset() < 0xc0)
136 {
137 init(targ);
138 for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
139 reset(static_cast<DataFile>(i));
140 }
141
142 void
RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
144 {
145 bits[f].periodicMask32(lock, unlock);
146 }
147
148 void
RegisterSet::intersect(DataFile f, const RegisterSet *set)
150 {
151 bits[f] |= set->bits[f];
152 }
153
154 void
RegisterSet::print(DataFile f) const
156 {
157 INFO("GPR:");
158 bits[f].print();
159 INFO("\n");
160 }
161
162 bool
RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg)
164 {
165 reg = bits[f].findFreeRange(size, maxReg);
166 if (reg < 0)
167 return false;
168 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
169 return true;
170 }
171
172 bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
174 {
175 return bits[f].testRange(reg, size);
176 }
177
178 void
RegisterSet::occupy(const Value *v)
180 {
181 occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
182 }
183
184 void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
186 {
187 bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
188 }
189
190 void
RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
192 {
193 bits[f].setRange(reg, size);
194
195 INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);
196
197 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
198 }
199
200 bool
RegisterSet::testOccupy(const Value *v)
202 {
203 return testOccupy(v->reg.file,
204 idToUnits(v), v->reg.size >> unit[v->reg.file]);
205 }
206
207 bool
RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
209 {
210 if (isOccupied(f, reg, size))
211 return false;
212 occupy(f, reg, size);
213 return true;
214 }
215
216 void
RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
218 {
219 bits[f].clrRange(reg, size);
220
221 INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
222 }
223
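// Top-level driver for register allocation: runs the preparatory passes
// (constraint insertion, phi and argument moves, live interval construction)
// and then hands the ordered instruction list to the graph colouring
// allocator, retrying with spill code if colouring fails.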
224 class RegAlloc
225 {
226 public:
   RegAlloc(Program *program) : prog(program), sequence(0) { }
228
229 bool exec();
230 bool execFunc();
231
232 private:
233 class PhiMovesPass : public Pass {
234 private:
235 virtual bool visit(BasicBlock *);
236 inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
237 inline void splitEdges(BasicBlock *b);
238 };
239
240 class ArgumentMovesPass : public Pass {
241 private:
242 virtual bool visit(BasicBlock *);
243 };
244
245 class BuildIntervalsPass : public Pass {
246 private:
247 virtual bool visit(BasicBlock *);
248 void collectLiveValues(BasicBlock *);
249 void addLiveRange(Value *, const BasicBlock *, int end);
250 };
251
252 class InsertConstraintsPass : public Pass {
253 public:
254 bool exec(Function *func);
255 private:
256 virtual bool visit(BasicBlock *);
257
258 void insertConstraintMove(Instruction *, int s);
259 bool insertConstraintMoves();
260
261 void condenseDefs(Instruction *);
262 void condenseDefs(Instruction *, const int first, const int last);
263 void condenseSrcs(Instruction *, const int first, const int last);
264
265 void addHazard(Instruction *i, const ValueRef *src);
266 void textureMask(TexInstruction *);
267 void addConstraint(Instruction *, int s, int n);
268 bool detectConflict(Instruction *, int s);
269
270 // target specific functions, TODO: put in subclass or Target
271 void texConstraintNV50(TexInstruction *);
272 void texConstraintNVC0(TexInstruction *);
273 void texConstraintNVE0(TexInstruction *);
274 void texConstraintGM107(TexInstruction *);
275
276 bool isScalarTexGM107(TexInstruction *);
277 void handleScalarTexGM107(TexInstruction *);
278
279 std::list<Instruction *> constrList;
280
281 const Target *targ;
282 };
283
284 bool buildLiveSets(BasicBlock *);
285
286 private:
287 Program *prog;
288 Function *func;
289
290 // instructions in control flow / chronological order
291 ArrayList insns;
292
293 int sequence; // for manual passes through CFG
294 };
295
296 typedef std::pair<Value *, Value *> ValuePair;
297
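// Caches the def lists of values that get coalesced. entry() lazily copies a
// value's original defs, add() appends the defs of a value that was joined
// into a representative, and merge() writes the accumulated lists back once
// allocation has succeeded.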
298 class MergedDefs
299 {
300 private:
   std::list<ValueDef *>& entry(Value *val) {
302 auto it = defs.find(val);
303
304 if (it == defs.end()) {
305 std::list<ValueDef *> &res = defs[val];
306 res = val->defs;
307 return res;
308 } else {
309 return (*it).second;
310 }
311 }
312
313 std::unordered_map<Value *, std::list<ValueDef *> > defs;
314
315 public:
   std::list<ValueDef *>& operator()(Value *val) {
317 return entry(val);
318 }
319
   void add(Value *val, const std::list<ValueDef *> &vals) {
321 assert(val);
322 std::list<ValueDef *> &valdefs = entry(val);
323 valdefs.insert(valdefs.end(), vals.begin(), vals.end());
324 }
325
   void removeDefsOfInstruction(Instruction *insn) {
327 for (int d = 0; insn->defExists(d); ++d) {
328 ValueDef *def = &insn->def(d);
329 defs.erase(def->get());
330 for (auto &p : defs)
331 p.second.remove(def);
332 }
333 }
334
   void merge() {
336 for (auto &p : defs)
337 p.first->defs = p.second;
338 }
339 };
340
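// Inserts spill/unspill code for values that could not be coloured. GPR
// values get a slot in local memory (see assignSlot()); values of other
// files are spilled by moving them through a GPR instead. run() rewrites
// all defs and uses of the values it is given.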
341 class SpillCodeInserter
342 {
343 public:
   SpillCodeInserter(Function *fn, MergedDefs &mergedDefs)
      : func(fn), mergedDefs(mergedDefs), stackSize(0), stackBase(0) { }
345
346 bool run(const std::list<ValuePair>&);
347
348 Symbol *assignSlot(const Interval&, const unsigned int size);
349 Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }
351
352 private:
353 Function *func;
354 MergedDefs &mergedDefs;
355
356 struct SpillSlot
357 {
358 Interval occup;
359 std::list<Value *> residents; // needed to recalculate occup
360 Symbol *sym;
361 int32_t offset;
      inline uint8_t size() const { return sym->reg.size; }
363 };
364 std::list<SpillSlot> slots;
365 int32_t stackSize;
366 int32_t stackBase;
367
368 LValue *unspill(Instruction *usei, LValue *, Value *slot);
369 void spill(Instruction *defi, Value *slot, LValue *);
370 };
371
372 void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
376 {
377 Instruction *insn = val->getUniqueInsn();
378
379 if (!insn)
380 insn = bb->getFirst();
381
382 assert(bb->getFirst()->serial <= bb->getExit()->serial);
383 assert(bb->getExit()->serial + 1 >= end);
384
385 int begin = insn->serial;
386 if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
387 begin = bb->getEntry()->serial;
388
389 INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
390 val->id, begin, insn->serial, end);
391
392 if (begin != end) // empty ranges are only added as hazards for fixed regs
393 val->livei.extend(begin, end);
394 }
395
396 bool
RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
398 {
399 if (b->cfg.incidentCount() <= 1)
400 return false;
401
402 int n = 0;
403 for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
404 if (ei.getType() == Graph::Edge::TREE ||
405 ei.getType() == Graph::Edge::FORWARD)
406 ++n;
407 return (n == 2);
408 }
409
410 struct PhiMapHash {
   size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
412 return hash<Instruction*>()(val.first) * 31 +
413 hash<BasicBlock*>()(val.second);
414 }
415 };
416
417 typedef unordered_map<
418 std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;
419
420 // Critical edges need to be split up so that work can be inserted along
421 // specific edge transitions. Unfortunately manipulating incident edges into a
422 // BB invalidates all the PHI nodes since their sources are implicitly ordered
423 // by incident edge order.
424 //
425 // TODO: Make it so that that is not the case, and PHI nodes store pointers to
426 // the original BBs.
427 void
RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
429 {
430 BasicBlock *pb, *pn;
431 Instruction *phi;
432 Graph::EdgeIterator ei;
433 std::stack<BasicBlock *> stack;
434 int j = 0;
435
436 for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
437 pb = BasicBlock::get(ei.getNode());
438 assert(pb);
439 if (needNewElseBlock(bb, pb))
440 stack.push(pb);
441 }
442
443 // No critical edges were found, no need to perform any work.
444 if (stack.empty())
445 return;
446
447 // We're about to, potentially, reorder the inbound edges. This means that
448 // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
449 // nodes after the graph has been modified.
450 PhiMap phis;
451
452 j = 0;
453 for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
454 pb = BasicBlock::get(ei.getNode());
455 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
456 phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
457 }
458
459 while (!stack.empty()) {
460 pb = stack.top();
461 pn = new BasicBlock(func);
462 stack.pop();
463
464 pb->cfg.detach(&bb->cfg);
465 pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
466 pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);
467
468 assert(pb->getExit()->op != OP_CALL);
469 if (pb->getExit()->asFlow()->target.bb == bb)
470 pb->getExit()->asFlow()->target.bb = pn;
471
472 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
473 PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
474 assert(it != phis.end());
475 phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
476 phis.erase(it);
477 }
478 }
479
480 // Now go through and fix up all of the phi node sources.
481 j = 0;
482 for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
483 pb = BasicBlock::get(ei.getNode());
484 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
485 PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
486 assert(it != phis.end());
487
488 phi->setSrc(j, it->second);
489 }
490 }
491 }
492
493 // For each operand of each PHI in b, generate a new value by inserting a MOV
494 // at the end of the block it is coming from and replace the operand with its
495 // result. This eliminates liveness conflicts and enables us to let values be
496 // copied to the right register if such a conflict exists nonetheless.
497 //
// These MOVs are also crucial in making sure the live intervals of phi sources
// are extended until the end of the loop, since they are not included in the
// live-in sets.
501 bool
RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
503 {
504 Instruction *phi, *mov;
505
506 splitEdges(bb);
507
508 // insert MOVs (phi->src(j) should stem from j-th in-BB)
509 int j = 0;
510 for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
511 BasicBlock *pb = BasicBlock::get(ei.getNode());
512 if (!pb->isTerminated())
513 pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));
514
515 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
516 LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
517 mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
518
519 mov->setSrc(0, phi->getSrc(j));
520 mov->setDef(0, tmp);
521 phi->setSrc(j, tmp);
522
523 pb->insertBefore(pb->getExit(), mov);
524 }
525 ++j;
526 }
527
528 return true;
529 }
530
531 bool
RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
533 {
   // Bind function call inputs/outputs to the same physical register
   // the callee uses, inserting moves as appropriate in case a
   // conflict arises.
537 for (Instruction *i = bb->getEntry(); i; i = i->next) {
538 FlowInstruction *cal = i->asFlow();
539 // TODO: Handle indirect calls.
540 // Right now they should only be generated for builtins.
541 if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
542 continue;
543 RegisterSet clobberSet(prog->getTarget());
544
545 // Bind input values.
546 for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
547 const int t = cal->indirect ? (s - 1) : s;
548 LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
549 tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;
550
551 Instruction *mov =
552 new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
553 mov->setDef(0, tmp);
554 mov->setSrc(0, cal->getSrc(s));
555 cal->setSrc(s, tmp);
556
557 bb->insertBefore(cal, mov);
558 }
559
560 // Bind output values.
561 for (int d = 0; cal->defExists(d); ++d) {
562 LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
563 tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;
564
565 Instruction *mov =
566 new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
567 mov->setSrc(0, tmp);
568 mov->setDef(0, cal->getDef(d));
569 cal->setDef(d, tmp);
570
571 bb->insertAfter(cal, mov);
572 clobberSet.occupy(tmp);
573 }
574
575 // Bind clobbered values.
576 for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
577 it != cal->target.fn->clobbers.end();
578 ++it) {
579 if (clobberSet.testOccupy(*it)) {
580 Value *tmp = new_LValue(func, (*it)->asLValue());
581 tmp->reg.data.id = (*it)->reg.data.id;
582 cal->setDef(cal->defCount(), tmp);
583 }
584 }
585 }
586
587 // Update the clobber set of the function.
588 if (BasicBlock::get(func->cfgExit) == bb) {
589 func->buildDefSets();
590 for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
591 if (bb->defSet.test(i))
592 func->clobbers.push_back(func->getLValue(i));
593 }
594
595 return true;
596 }
597
598 // Build the set of live-in variables of bb.
599 bool
RegAlloc::buildLiveSets(BasicBlock *bb)
601 {
602 Function *f = bb->getFunction();
603 BasicBlock *bn;
604 Instruction *i;
605 unsigned int s, d;
606
607 INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());
608
609 bb->liveSet.allocate(func->allLValues.getSize(), false);
610
611 int n = 0;
612 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
613 bn = BasicBlock::get(ei.getNode());
614 if (bn == bb)
615 continue;
616 if (bn->cfg.visit(sequence))
617 if (!buildLiveSets(bn))
618 return false;
619 if (n++ || bb->liveSet.marker)
620 bb->liveSet |= bn->liveSet;
621 else
622 bb->liveSet = bn->liveSet;
623 }
624 if (!n && !bb->liveSet.marker)
625 bb->liveSet.fill(0);
626 bb->liveSet.marker = true;
627
628 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
629 INFO("BB:%i live set of out blocks:\n", bb->getId());
630 bb->liveSet.print();
631 }
632
633 // if (!bb->getEntry())
634 // return true;
635
636 if (bb == BasicBlock::get(f->cfgExit)) {
637 for (std::deque<ValueRef>::iterator it = f->outs.begin();
638 it != f->outs.end(); ++it) {
639 assert(it->get()->asLValue());
640 bb->liveSet.set(it->get()->id);
641 }
642 }
643
644 for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
645 for (d = 0; i->defExists(d); ++d)
646 bb->liveSet.clr(i->getDef(d)->id);
647 for (s = 0; i->srcExists(s); ++s)
648 if (i->getSrc(s)->asLValue())
649 bb->liveSet.set(i->getSrc(s)->id);
650 }
651 for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
652 bb->liveSet.clr(i->getDef(0)->id);
653
654 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
655 INFO("BB:%i live set after propagation:\n", bb->getId());
656 bb->liveSet.print();
657 }
658
659 return true;
660 }
661
662 void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
664 {
665 BasicBlock *bbA = NULL, *bbB = NULL;
666
667 if (bb->cfg.outgoingCount()) {
668 // trickery to save a loop of OR'ing liveSets
669 // aliasing works fine with BitSet::setOr
670 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
671 if (bbA) {
672 bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
673 bbA = bb;
674 } else {
675 bbA = bbB;
676 }
677 bbB = BasicBlock::get(ei.getNode());
678 }
679 bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
680 } else
681 if (bb->cfg.incidentCount()) {
682 bb->liveSet.fill(0);
683 }
684 }
685
686 bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
688 {
689 collectLiveValues(bb);
690
691 INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());
692
693 // go through out blocks and delete phi sources that do not originate from
694 // the current block from the live set
695 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
696 BasicBlock *out = BasicBlock::get(ei.getNode());
697
698 for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
699 bb->liveSet.clr(i->getDef(0)->id);
700
701 for (int s = 0; i->srcExists(s); ++s) {
702 assert(i->src(s).getInsn());
703 if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
704 bb->liveSet.set(i->getSrc(s)->id);
705 else
706 bb->liveSet.clr(i->getSrc(s)->id);
707 }
708 }
709 }
710
711 // remaining live-outs are live until end
712 if (bb->getExit()) {
713 for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
714 if (bb->liveSet.test(j))
715 addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
716 }
717
718 for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
719 for (int d = 0; i->defExists(d); ++d) {
720 bb->liveSet.clr(i->getDef(d)->id);
721 if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
722 i->getDef(d)->livei.extend(i->serial, i->serial);
723 }
724
725 for (int s = 0; i->srcExists(s); ++s) {
726 if (!i->getSrc(s)->asLValue())
727 continue;
728 if (!bb->liveSet.test(i->getSrc(s)->id)) {
729 bb->liveSet.set(i->getSrc(s)->id);
730 addLiveRange(i->getSrc(s), bb, i->serial);
731 }
732 }
733 }
734
735 if (bb == BasicBlock::get(func->cfg.getRoot())) {
736 for (std::deque<ValueDef>::iterator it = func->ins.begin();
737 it != func->ins.end(); ++it) {
738 if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
739 it->get()->livei.extend(0, 1);
740 }
741 }
742
743 return true;
744 }
745
746
747 #define JOIN_MASK_PHI (1 << 0)
748 #define JOIN_MASK_UNION (1 << 1)
749 #define JOIN_MASK_MOV (1 << 2)
750 #define JOIN_MASK_TEX (1 << 3)
751
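// Graph colouring register allocator: build an interference graph over the
// live intervals, coalesce copy-related values, simplify trivially
// colourable nodes onto a stack and then select registers; nodes that cannot
// be coloured are recorded in mustSpill and handled by SpillCodeInserter.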
752 class GCRA
753 {
754 public:
755 GCRA(Function *, SpillCodeInserter&, MergedDefs&);
756 ~GCRA();
757
758 bool allocateRegisters(ArrayList& insns);
759
760 void printNodeInfo() const;
761
762 private:
763 class RIG_Node : public Graph::Node
764 {
765 public:
766 RIG_Node();
767
768 void init(const RegisterSet&, LValue *);
769
770 void addInterference(RIG_Node *);
771 void addRegPreference(RIG_Node *);
772
      inline LValue *getValue() const
774 {
775 return reinterpret_cast<LValue *>(data);
776 }
      inline void setValue(LValue *lval) { data = lval; }
778
      inline uint8_t getCompMask() const
780 {
781 return ((1 << colors) - 1) << (reg & 7);
782 }
783
      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
785 {
786 return static_cast<RIG_Node *>(ei.getNode());
787 }
788
789 public:
790 uint32_t degree;
791 uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
792 uint16_t maxReg;
793 uint16_t colors;
794
795 DataFile f;
796 int32_t reg;
797
798 float weight;
799
800 // list pointers for simplify() phase
801 RIG_Node *next;
802 RIG_Node *prev;
803
804 // union of the live intervals of all coalesced values (we want to retain
805 // the separate intervals for testing interference of compound values)
806 Interval livei;
807
808 std::list<RIG_Node *> prefRegs;
809 };
810
811 private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }
813
814 void buildRIG(ArrayList&);
815 bool coalesce(ArrayList&);
816 bool doCoalesce(ArrayList&, unsigned int mask);
817 void calculateSpillWeights();
818 bool simplify();
819 bool selectRegisters();
820 void cleanup(const bool success);
821
822 void simplifyEdge(RIG_Node *, RIG_Node *);
823 void simplifyNode(RIG_Node *);
824
825 bool coalesceValues(Value *, Value *, bool force);
826 void resolveSplitsAndMerges();
827 void makeCompound(Instruction *, bool isSplit);
828
829 inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);
830
831 inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
832 void checkList(std::list<RIG_Node *>&);
833
834 private:
835 std::stack<uint32_t> stack;
836
837 // list headers for simplify() phase
838 RIG_Node lo[2];
839 RIG_Node hi;
840
841 Graph RIG;
842 RIG_Node *nodes;
843 unsigned int nodeCount;
844
845 Function *func;
846 Program *prog;
847
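   // relDegree[i][j] = j * ceil(i / j): the number of allocation units a
   // neighbouring node of size i can block for a node that needs j
   // contiguous, aligned units.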
848 struct RelDegree {
849 uint8_t data[17][17];
850
      RelDegree() {
852 for (int i = 1; i <= 16; ++i)
853 for (int j = 1; j <= 16; ++j)
854 data[i][j] = j * ((i + j - 1) / j);
855 }
856
      const uint8_t* operator[](std::size_t i) const {
858 return data[i];
859 }
860 };
861
862 static const RelDegree relDegree;
863
864 RegisterSet regs;
865
866 // need to fixup register id for participants of OP_MERGE/SPLIT
867 std::list<Instruction *> merges;
868 std::list<Instruction *> splits;
869
870 SpillCodeInserter& spill;
871 std::list<ValuePair> mustSpill;
872
873 MergedDefs &mergedDefs;
874 };
875
876 const GCRA::RelDegree GCRA::relDegree;
877
GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
879 {
880 colors = 0;
881 }
882
883 void
GCRA::printNodeInfo() const
885 {
886 for (unsigned int i = 0; i < nodeCount; ++i) {
887 if (!nodes[i].colors)
888 continue;
889 INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
890 i,
891 nodes[i].f,nodes[i].reg,nodes[i].colors,
892 nodes[i].weight,
893 nodes[i].degree, nodes[i].degreeLimit);
894
895 for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
896 INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
897 for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
898 INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
899 INFO("\n");
900 }
901 }
902
903 static bool
isShortRegOp(Instruction *insn)
905 {
906 // Immediates are always in src1 (except zeroes, which end up getting
907 // replaced with a zero reg). Every other situation can be resolved by
908 // using a long encoding.
909 return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE &&
910 insn->getSrc(1)->reg.data.u64;
911 }
912
913 // Check if this LValue is ever used in an instruction that can't be encoded
914 // with long registers (i.e. > r63)
915 static bool
isShortRegVal(LValue *lval)
917 {
918 if (lval->getInsn() == NULL)
919 return false;
920 for (Value::DefCIterator def = lval->defs.begin();
921 def != lval->defs.end(); ++def)
922 if (isShortRegOp((*def)->getInsn()))
923 return true;
924 for (Value::UseCIterator use = lval->uses.begin();
925 use != lval->uses.end(); ++use)
926 if (isShortRegOp((*use)->getInsn()))
927 return true;
928 return false;
929 }
930
931 void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
933 {
934 setValue(lval);
935 if (lval->reg.data.id >= 0)
936 lval->noSpill = lval->fixedReg = 1;
937
938 colors = regs.units(lval->reg.file, lval->reg.size);
939 f = lval->reg.file;
940 reg = -1;
941 if (lval->reg.data.id >= 0)
942 reg = regs.idToUnits(lval);
943
944 weight = std::numeric_limits<float>::infinity();
945 degree = 0;
946 maxReg = regs.getFileSize(f);
947 // On nv50, we lose a bit of gpr encoding when there's an embedded
948 // immediate.
949 if (regs.restrictedGPR16Range && f == FILE_GPR && (lval->reg.size == 2 || isShortRegVal(lval)))
950 maxReg /= 2;
951 degreeLimit = maxReg;
952 degreeLimit -= relDegree[1][colors] - 1;
953
954 livei.insert(lval->livei);
955 }
956
957 bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
959 {
960 LValue *rep = dst->join->asLValue();
961 LValue *val = src->join->asLValue();
962
963 if (!force && val->reg.data.id >= 0) {
964 rep = src->join->asLValue();
965 val = dst->join->asLValue();
966 }
967 RIG_Node *nRep = &nodes[rep->id];
968 RIG_Node *nVal = &nodes[val->id];
969
970 if (src->reg.file != dst->reg.file) {
971 if (!force)
972 return false;
973 WARN("forced coalescing of values in different files !\n");
974 }
975 if (!force && dst->reg.size != src->reg.size)
976 return false;
977
978 if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
979 if (force) {
980 if (val->reg.data.id >= 0)
981 WARN("forced coalescing of values in different fixed regs !\n");
982 } else {
983 if (val->reg.data.id >= 0)
984 return false;
985 // make sure that there is no overlap with the fixed register of rep
986 for (ArrayList::Iterator it = func->allLValues.iterator();
987 !it.end(); it.next()) {
988 Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
989 assert(reg);
990 if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
991 return false;
992 }
993 }
994 }
995
996 if (!force && nRep->livei.overlaps(nVal->livei))
997 return false;
998
999 INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
1000 rep->id, rep->reg.data.id, val->id);
1001
1002 // set join pointer of all values joined with val
1003 const std::list<ValueDef *> &defs = mergedDefs(val);
1004 for (ValueDef *def : defs)
1005 def->get()->join = rep;
1006 assert(rep->join == rep && val->join == rep);
1007
1008 // add val's definitions to rep and extend the live interval of its RIG node
1009 mergedDefs.add(rep, defs);
1010 nRep->livei.unify(nVal->livei);
1011 nRep->degreeLimit = MIN2(nRep->degreeLimit, nVal->degreeLimit);
1012 nRep->maxReg = MIN2(nRep->maxReg, nVal->maxReg);
1013 return true;
1014 }
1015
1016 bool
GCRA::coalesce(ArrayList& insns)
1018 {
1019 bool ret = doCoalesce(insns, JOIN_MASK_PHI);
1020 if (!ret)
1021 return false;
1022 switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
1023 case 0x50:
1024 case 0x80:
1025 case 0x90:
1026 case 0xa0:
1027 ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
1028 break;
1029 case 0xc0:
1030 case 0xd0:
1031 case 0xe0:
1032 case 0xf0:
1033 case 0x100:
1034 case 0x110:
1035 case 0x120:
1036 case 0x130:
1037 case 0x140:
1038 case 0x160:
1039 ret = doCoalesce(insns, JOIN_MASK_UNION);
1040 break;
1041 default:
1042 break;
1043 }
1044 if (!ret)
1045 return false;
1046 return doCoalesce(insns, JOIN_MASK_MOV);
1047 }
1048
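// Build the 8-bit component mask for a value that occupies <size> units at
// offset <base> within a compound of <compSize> units; the pattern is
// replicated so it covers every aligned position the compound can occupy.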
static inline uint8_t makeCompMask(int compSize, int base, int size)
1050 {
1051 uint8_t m = ((1 << size) - 1) << base;
1052
1053 switch (compSize) {
1054 case 1:
1055 return 0xff;
1056 case 2:
1057 m |= (m << 2);
1058 return (m << 4) | m;
1059 case 3:
1060 case 4:
1061 return (m << 4) | m;
1062 default:
1063 assert(compSize <= 8);
1064 return m;
1065 }
1066 }
1067
1068 // Used when coalescing moves. The non-compound value will become one, e.g.:
1069 // mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
1070 // split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
static inline void copyCompound(Value *dst, Value *src)
1072 {
1073 LValue *ldst = dst->asLValue();
1074 LValue *lsrc = src->asLValue();
1075
1076 if (ldst->compound && !lsrc->compound) {
1077 LValue *swap = lsrc;
1078 lsrc = ldst;
1079 ldst = swap;
1080 }
1081
1082 ldst->compound = lsrc->compound;
1083 ldst->compMask = lsrc->compMask;
1084 }
1085
1086 void
GCRA::makeCompound(Instruction *insn, bool split)
1088 {
1089 LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();
1090
1091 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
1092 INFO("makeCompound(split = %i): ", split);
1093 insn->print();
1094 }
1095
1096 const unsigned int size = getNode(rep)->colors;
1097 unsigned int base = 0;
1098
1099 if (!rep->compound)
1100 rep->compMask = 0xff;
1101 rep->compound = 1;
1102
1103 for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
1104 LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();
1105
1106 val->compound = 1;
1107 if (!val->compMask)
1108 val->compMask = 0xff;
1109 val->compMask &= makeCompMask(size, base, getNode(val)->colors);
1110 assert(val->compMask);
1111
1112 INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
1113 rep->id, rep->compMask, val->id, val->compMask);
1114
1115 base += getNode(val)->colors;
1116 }
1117 assert(base == size);
1118 }
1119
1120 bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
1122 {
1123 int c, n;
1124
1125 for (n = 0; n < insns.getSize(); ++n) {
1126 Instruction *i;
1127 Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));
1128
1129 switch (insn->op) {
1130 case OP_PHI:
1131 if (!(mask & JOIN_MASK_PHI))
1132 break;
1133 for (c = 0; insn->srcExists(c); ++c)
1134 if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
1135 // this is bad
1136 ERROR("failed to coalesce phi operands\n");
1137 return false;
1138 }
1139 break;
1140 case OP_UNION:
1141 case OP_MERGE:
1142 if (!(mask & JOIN_MASK_UNION))
1143 break;
1144 for (c = 0; insn->srcExists(c); ++c)
1145 coalesceValues(insn->getDef(0), insn->getSrc(c), true);
1146 if (insn->op == OP_MERGE) {
1147 merges.push_back(insn);
1148 if (insn->srcExists(1))
1149 makeCompound(insn, false);
1150 }
1151 break;
1152 case OP_SPLIT:
1153 if (!(mask & JOIN_MASK_UNION))
1154 break;
1155 splits.push_back(insn);
1156 for (c = 0; insn->defExists(c); ++c)
1157 coalesceValues(insn->getSrc(0), insn->getDef(c), true);
1158 makeCompound(insn, true);
1159 break;
1160 case OP_MOV:
1161 if (!(mask & JOIN_MASK_MOV))
1162 break;
1163 i = NULL;
1164 if (!insn->getDef(0)->uses.empty())
1165 i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
1167 if (i && i->op == OP_MERGE) // do we really still need this ?
1168 break;
1169 i = insn->getSrc(0)->getUniqueInsn();
1170 if (i && !i->constrainedDefs()) {
1171 if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
1172 copyCompound(insn->getSrc(0), insn->getDef(0));
1173 }
1174 break;
1175 case OP_TEX:
1176 case OP_TXB:
1177 case OP_TXL:
1178 case OP_TXF:
1179 case OP_TXQ:
1180 case OP_TXD:
1181 case OP_TXG:
1182 case OP_TXLQ:
1183 case OP_TEXCSAA:
1184 case OP_TEXPREP:
1185 if (!(mask & JOIN_MASK_TEX))
1186 break;
1187 for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
1188 coalesceValues(insn->getDef(c), insn->getSrc(c), true);
1189 break;
1190 default:
1191 break;
1192 }
1193 }
1194 return true;
1195 }
1196
1197 void
GCRA::RIG_Node::addInterference(RIG_Node *node)
1199 {
1200 this->degree += relDegree[node->colors][colors];
1201 node->degree += relDegree[colors][node->colors];
1202
1203 this->attach(node, Graph::Edge::CROSS);
1204 }
1205
1206 void
GCRA::RIG_Node::addRegPreference(RIG_Node *node)
1208 {
1209 prefRegs.push_back(node);
1210 }
1211
GCRA::GCRA(Function *fn, SpillCodeInserter& spill, MergedDefs& mergedDefs) :
1213 func(fn),
1214 regs(fn->getProgram()->getTarget()),
1215 spill(spill),
1216 mergedDefs(mergedDefs)
1217 {
1218 prog = func->getProgram();
1219 }
1220
GCRA::~GCRA()
1222 {
1223 if (nodes)
1224 delete[] nodes;
1225 }
1226
1227 void
GCRA::checkList(std::list<RIG_Node *>& lst)
1229 {
1230 GCRA::RIG_Node *prev = NULL;
1231
1232 for (std::list<RIG_Node *>::iterator it = lst.begin();
1233 it != lst.end();
1234 ++it) {
1235 assert((*it)->getValue()->join == (*it)->getValue());
1236 if (prev)
1237 assert(prev->livei.begin() <= (*it)->livei.begin());
1238 prev = *it;
1239 }
1240 }
1241
1242 void
GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
1244 {
1245 if (node->livei.isEmpty())
1246 return;
1247 // only the intervals of joined values don't necessarily arrive in order
1248 std::list<RIG_Node *>::iterator prev, it;
1249 for (it = list.end(); it != list.begin(); it = prev) {
1250 prev = it;
1251 --prev;
1252 if ((*prev)->livei.begin() <= node->livei.begin())
1253 break;
1254 }
1255 list.insert(it, node);
1256 }
1257
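// Build the register interference graph with a sweep over the values in
// order of live interval start: each new node interferes with every active
// node of the same register file whose interval overlaps its own.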
1258 void
GCRA::buildRIG(ArrayList& insns)
1260 {
1261 std::list<RIG_Node *> values, active;
1262
1263 for (std::deque<ValueDef>::iterator it = func->ins.begin();
1264 it != func->ins.end(); ++it)
1265 insertOrderedTail(values, getNode(it->get()->asLValue()));
1266
1267 for (int i = 0; i < insns.getSize(); ++i) {
1268 Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
1269 for (int d = 0; insn->defExists(d); ++d)
1270 if (insn->getDef(d)->rep() == insn->getDef(d))
1271 insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
1272 }
1273 checkList(values);
1274
1275 while (!values.empty()) {
1276 RIG_Node *cur = values.front();
1277
1278 for (std::list<RIG_Node *>::iterator it = active.begin();
1279 it != active.end();) {
1280 RIG_Node *node = *it;
1281
1282 if (node->livei.end() <= cur->livei.begin()) {
1283 it = active.erase(it);
1284 } else {
1285 if (node->f == cur->f && node->livei.overlaps(cur->livei))
1286 cur->addInterference(node);
1287 ++it;
1288 }
1289 }
1290 values.pop_front();
1291 active.push_back(cur);
1292 }
1293 }
1294
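// Assign each spillable node a weight of refCount^2 / extent(livei) and sort
// the nodes into the work lists: trivially colourable ones onto lo[] (small
// and large values separately), all others onto hi.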
1295 void
GCRA::calculateSpillWeights()
1297 {
1298 for (unsigned int i = 0; i < nodeCount; ++i) {
1299 RIG_Node *const n = &nodes[i];
1300 if (!nodes[i].colors || nodes[i].livei.isEmpty())
1301 continue;
1302 if (nodes[i].reg >= 0) {
1303 // update max reg
1304 regs.occupy(n->f, n->reg, n->colors);
1305 continue;
1306 }
1307 LValue *val = nodes[i].getValue();
1308
1309 if (!val->noSpill) {
1310 int rc = 0;
1311 for (ValueDef *def : mergedDefs(val))
1312 rc += def->get()->refCount();
1313
1314 nodes[i].weight =
1315 (float)rc * (float)rc / (float)nodes[i].livei.extent();
1316 }
1317
1318 if (nodes[i].degree < nodes[i].degreeLimit) {
1319 int l = 0;
1320 if (val->reg.size > 4)
1321 l = 1;
1322 DLLIST_ADDHEAD(&lo[l], &nodes[i]);
1323 } else {
1324 DLLIST_ADDHEAD(&hi, &nodes[i]);
1325 }
1326 }
1327 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1328 printNodeInfo();
1329 }
1330
1331 void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
1333 {
1334 bool move = b->degree >= b->degreeLimit;
1335
1336 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1337 "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
1338 a->getValue()->id, a->degree, a->degreeLimit,
1339 b->getValue()->id, b->degree, b->degreeLimit);
1340
1341 b->degree -= relDegree[a->colors][b->colors];
1342
1343 move = move && b->degree < b->degreeLimit;
1344 if (move && !DLLIST_EMPTY(b)) {
1345 int l = (b->getValue()->reg.size > 4) ? 1 : 0;
1346 DLLIST_DEL(b);
1347 DLLIST_ADDTAIL(&lo[l], b);
1348 }
1349 }
1350
1351 void
GCRA::simplifyNode(RIG_Node *node)
1353 {
1354 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1355 simplifyEdge(node, RIG_Node::get(ei));
1356
1357 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1358 simplifyEdge(node, RIG_Node::get(ei));
1359
1360 DLLIST_DEL(node);
1361 stack.push(node->getValue()->id);
1362
1363 INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
1364 node->getValue()->id,
1365 (node->degree < node->degreeLimit) ? "" : "(spill)");
1366 }
1367
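// Simplify phase: repeatedly remove nodes from the graph and push them onto
// the selection stack, taking trivially colourable nodes first; when only
// high-degree nodes remain, a spill candidate is pushed optimistically.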
1368 bool
GCRA::simplify()
1370 {
1371 for (;;) {
1372 if (!DLLIST_EMPTY(&lo[0])) {
1373 do {
1374 simplifyNode(lo[0].next);
1375 } while (!DLLIST_EMPTY(&lo[0]));
1376 } else
1377 if (!DLLIST_EMPTY(&lo[1])) {
1378 simplifyNode(lo[1].next);
1379 } else
1380 if (!DLLIST_EMPTY(&hi)) {
1381 RIG_Node *best = hi.next;
1382 unsigned bestMaxReg = best->maxReg;
1383 float bestScore = best->weight / (float)best->degree;
1384 // Spill candidate. First go through the ones with the highest max
1385 // register, then the ones with lower. That way the ones with the
1386 // lowest requirement will be allocated first, since it's a stack.
1387 for (RIG_Node *it = best->next; it != &hi; it = it->next) {
1388 float score = it->weight / (float)it->degree;
1389 if (score < bestScore || it->maxReg > bestMaxReg) {
1390 best = it;
1391 bestScore = score;
1392 bestMaxReg = it->maxReg;
1393 }
1394 }
1395 if (isinf(bestScore)) {
1396 ERROR("no viable spill candidates left\n");
1397 return false;
1398 }
1399 simplifyNode(best);
1400 } else {
1401 return true;
1402 }
1403 }
1404 }
1405
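// Mark the registers of an already coloured, interfering node as occupied.
// For compound values only the components whose (merged) sub-values actually
// overlap in time are blocked, using the component masks.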
1406 void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
1408 {
1409 const RIG_Node *intf = RIG_Node::get(ei);
1410
1411 if (intf->reg < 0)
1412 return;
1413 LValue *vA = node->getValue();
1414 LValue *vB = intf->getValue();
1415
1416 const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);
1417
1418 if (vA->compound | vB->compound) {
1419 // NOTE: this only works for >aligned< register tuples !
1420 for (const ValueDef *D : mergedDefs(vA)) {
1421 for (const ValueDef *d : mergedDefs(vB)) {
1422 const LValue *vD = D->get()->asLValue();
1423 const LValue *vd = d->get()->asLValue();
1424
1425 if (!vD->livei.overlaps(vd->livei)) {
1426 INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
1427 vD->id, vd->id);
1428 continue;
1429 }
1430
1431 uint8_t mask = vD->compound ? vD->compMask : ~0;
1432 if (vd->compound) {
1433 assert(vB->compound);
1434 mask &= vd->compMask & vB->compMask;
1435 } else {
1436 mask &= intfMask;
1437 }
1438
1439 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1440 "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
1441 vD->id,
1442 vD->compound ? vD->compMask : 0xff,
1443 vd->id,
1444 vd->compound ? vd->compMask : intfMask,
1445 vB->compMask, intf->reg & ~7, mask);
1446 if (mask)
1447 regs.occupyMask(node->f, intf->reg & ~7, mask);
1448 }
1449 }
1450 } else {
1451 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1452 "(%%%i) X (%%%i): $r%i + %u\n",
1453 vA->id, vB->id, intf->reg, intf->colors);
1454 regs.occupy(node->f, intf->reg, intf->colors);
1455 }
1456 }
1457
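// Select phase: pop nodes off the stack in reverse simplification order,
// mark the registers of coloured neighbours as occupied, try preferred
// registers first and otherwise take the first free range; nodes that cannot
// be assigned are queued in mustSpill.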
1458 bool
GCRA::selectRegisters()
1460 {
1461 INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");
1462
1463 while (!stack.empty()) {
1464 RIG_Node *node = &nodes[stack.top()];
1465 stack.pop();
1466
1467 regs.reset(node->f);
1468
1469 INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
1470 node->getValue()->id, node->colors);
1471
1472 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1473 checkInterference(node, ei);
1474 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1475 checkInterference(node, ei);
1476
1477 if (!node->prefRegs.empty()) {
1478 for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
1479 it != node->prefRegs.end();
1480 ++it) {
1481 if ((*it)->reg >= 0 &&
1482 regs.testOccupy(node->f, (*it)->reg, node->colors)) {
1483 node->reg = (*it)->reg;
1484 break;
1485 }
1486 }
1487 }
1488 if (node->reg >= 0)
1489 continue;
1490 LValue *lval = node->getValue();
1491 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1492 regs.print(node->f);
1493 bool ret = regs.assign(node->reg, node->f, node->colors, node->maxReg);
1494 if (ret) {
1495 INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
1496 lval->compMask = node->getCompMask();
1497 } else {
1498 INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
1499 lval->id, lval->reg.size);
1500 Symbol *slot = NULL;
1501 if (lval->reg.file == FILE_GPR)
1502 slot = spill.assignSlot(node->livei, lval->reg.size);
1503 mustSpill.push_back(ValuePair(lval, slot));
1504 }
1505 }
1506 if (!mustSpill.empty())
1507 return false;
1508 for (unsigned int i = 0; i < nodeCount; ++i) {
1509 LValue *lval = nodes[i].getValue();
1510 if (nodes[i].reg >= 0 && nodes[i].colors > 0)
1511 lval->reg.data.id =
1512 regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
1513 }
1514 return true;
1515 }
1516
1517 bool
GCRA::allocateRegisters(ArrayList& insns)
1519 {
1520 bool ret;
1521
1522 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1523 "allocateRegisters to %u instructions\n", insns.getSize());
1524
1525 nodeCount = func->allLValues.getSize();
1526 nodes = new RIG_Node[nodeCount];
1527 if (!nodes)
1528 return false;
1529 for (unsigned int i = 0; i < nodeCount; ++i) {
1530 LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
1531 if (lval) {
1532 nodes[i].init(regs, lval);
1533 RIG.insert(&nodes[i]);
1534
1535 if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL) {
1536 Instruction *insn = lval->getInsn();
1537 if (insn->op != OP_MAD && insn->op != OP_FMA && insn->op != OP_SAD)
1538 continue;
1539 // For both of the cases below, we only want to add the preference
1540 // if all arguments are in registers.
1541 if (insn->src(0).getFile() != FILE_GPR ||
1542 insn->src(1).getFile() != FILE_GPR ||
1543 insn->src(2).getFile() != FILE_GPR)
1544 continue;
1545 if (prog->getTarget()->getChipset() < 0xc0) {
1546 // Outputting a flag is not supported with short encodings nor
1547 // with immediate arguments.
1548 // See handleMADforNV50.
1549 if (insn->flagsDef >= 0)
1550 continue;
1551 } else {
1552 // We can only fold immediate arguments if dst == src2. This
1553 // only matters if one of the first two arguments is an
1554 // immediate. This form is also only supported for floats.
1555 // See handleMADforNVC0.
1556 ImmediateValue imm;
1557 if (insn->dType != TYPE_F32)
1558 continue;
1559 if (!insn->src(0).getImmediate(imm) &&
1560 !insn->src(1).getImmediate(imm))
1561 continue;
1562 }
1563
1564 nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
1565 }
1566 }
1567 }
1568
1569 // coalesce first, we use only 1 RIG node for a group of joined values
1570 ret = coalesce(insns);
1571 if (!ret)
1572 goto out;
1573
1574 if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1575 func->printLiveIntervals();
1576
1577 buildRIG(insns);
1578 calculateSpillWeights();
1579 ret = simplify();
1580 if (!ret)
1581 goto out;
1582
1583 ret = selectRegisters();
1584 if (!ret) {
1585 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1586 "selectRegisters failed, inserting spill code ...\n");
1587 regs.reset(FILE_GPR, true);
1588 spill.run(mustSpill);
1589 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1590 func->print();
1591 } else {
1592 mergedDefs.merge();
1593 prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
1594 }
1595
1596 out:
1597 cleanup(ret);
1598 return ret;
1599 }
1600
1601 void
GCRA::cleanup(const bool success)
1603 {
1604 mustSpill.clear();
1605
1606 for (ArrayList::Iterator it = func->allLValues.iterator();
1607 !it.end(); it.next()) {
1608 LValue *lval = reinterpret_cast<LValue *>(it.get());
1609
1610 lval->livei.clear();
1611
1612 lval->compound = 0;
1613 lval->compMask = 0;
1614
1615 if (lval->join == lval)
1616 continue;
1617
1618 if (success)
1619 lval->reg.data.id = lval->join->reg.data.id;
1620 else
1621 lval->join = lval;
1622 }
1623
1624 if (success)
1625 resolveSplitsAndMerges();
1626 splits.clear(); // avoid duplicate entries on next coalesce pass
1627 merges.clear();
1628
1629 delete[] nodes;
1630 nodes = NULL;
1631 hi.next = hi.prev = &hi;
1632 lo[0].next = lo[0].prev = &lo[0];
1633 lo[1].next = lo[1].prev = &lo[1];
1634 }
1635
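// Find (or create) a spill slot in local memory whose current occupants'
// live intervals do not overlap the given interval, so a slot can be shared
// by values that are never live at the same time.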
1636 Symbol *
SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
1638 {
1639 SpillSlot slot;
1640 int32_t offsetBase = stackSize;
1641 int32_t offset;
1642 std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();
1643
1644 if (offsetBase % size)
1645 offsetBase += size - (offsetBase % size);
1646
1647 slot.sym = NULL;
1648
1649 for (offset = offsetBase; offset < stackSize; offset += size) {
1650 const int32_t entryEnd = offset + size;
1651 while (it != slots.end() && it->offset < offset)
1652 ++it;
1653 if (it == slots.end()) // no slots left
1654 break;
1655 std::list<SpillSlot>::iterator bgn = it;
1656
1657 while (it != slots.end() && it->offset < entryEnd) {
1658 it->occup.print();
1659 if (it->occup.overlaps(livei))
1660 break;
1661 ++it;
1662 }
1663 if (it == slots.end() || it->offset >= entryEnd) {
1664 // fits
1665 for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
1666 bgn->occup.insert(livei);
1667 if (bgn->size() == size)
1668 slot.sym = bgn->sym;
1669 }
1670 break;
1671 }
1672 }
1673 if (!slot.sym) {
1674 stackSize = offset + size;
1675 slot.offset = offset;
1676 slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
1677 if (!func->stackPtr)
1678 offset += func->tlsBase;
1679 slot.sym->setAddress(NULL, offset);
1680 slot.sym->reg.size = size;
1681 slots.insert(pos, slot)->occup.insert(livei);
1682 }
1683 return slot.sym;
1684 }
1685
1686 Value *
SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
1688 {
1689 if (!lval->compound || (lval->compMask & 0x1))
1690 return base;
1691 Value *slot = cloneShallow(func, base);
1692
1693 slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
1694 slot->reg.size = lval->reg.size;
1695
1696 return slot;
1697 }
1698
1699 void
SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
1701 {
1702 const DataType ty = typeOfSize(lval->reg.size);
1703
1704 slot = offsetSlot(slot, lval);
1705
1706 Instruction *st;
1707 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1708 lval->noSpill = 1;
1709 if (ty != TYPE_B96) {
1710 st = new_Instruction(func, OP_STORE, ty);
1711 st->setSrc(0, slot);
1712 st->setSrc(1, lval);
1713 } else {
1714 st = new_Instruction(func, OP_SPLIT, ty);
1715 st->setSrc(0, lval);
1716 for (int d = 0; d < lval->reg.size / 4; ++d)
1717 st->setDef(d, new_LValue(func, FILE_GPR));
1718
1719 for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
1720 Value *tmp = cloneShallow(func, slot);
1721 tmp->reg.size = 4;
1722 tmp->reg.data.offset += 4 * d;
1723
1724 Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
1725 s->setSrc(0, tmp);
1726 s->setSrc(1, st->getDef(d));
1727 defi->bb->insertAfter(defi, s);
1728 }
1729 }
1730 } else {
1731 st = new_Instruction(func, OP_CVT, ty);
1732 st->setDef(0, slot);
1733 st->setSrc(0, lval);
1734 if (lval->reg.file == FILE_FLAGS)
1735 st->flagsSrc = 0;
1736 }
1737 defi->bb->insertAfter(defi, st);
1738 }
1739
1740 LValue *
SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
1742 {
1743 const DataType ty = typeOfSize(lval->reg.size);
1744
1745 slot = offsetSlot(slot, lval);
1746 lval = cloneShallow(func, lval);
1747
1748 Instruction *ld;
1749 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1750 lval->noSpill = 1;
1751 if (ty != TYPE_B96) {
1752 ld = new_Instruction(func, OP_LOAD, ty);
1753 } else {
1754 ld = new_Instruction(func, OP_MERGE, ty);
1755 for (int d = 0; d < lval->reg.size / 4; ++d) {
1756 Value *tmp = cloneShallow(func, slot);
1757 LValue *val;
1758 tmp->reg.size = 4;
1759 tmp->reg.data.offset += 4 * d;
1760
1761 Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
1762 l->setDef(0, (val = new_LValue(func, FILE_GPR)));
1763 l->setSrc(0, tmp);
1764 usei->bb->insertBefore(usei, l);
1765 ld->setSrc(d, val);
1766 val->noSpill = 1;
1767 }
1768 ld->setDef(0, lval);
1769 usei->bb->insertBefore(usei, ld);
1770 return lval;
1771 }
1772 } else {
1773 ld = new_Instruction(func, OP_CVT, ty);
1774 }
1775 ld->setDef(0, lval);
1776 ld->setSrc(0, slot);
1777 if (lval->reg.file == FILE_FLAGS)
1778 ld->flagsDef = 0;
1779
1780 usei->bb->insertBefore(usei, ld);
1781 return lval;
1782 }
1783
1784 static bool
value_cmp(ValueRef *a, ValueRef *b) {
1786 Instruction *ai = a->getInsn(), *bi = b->getInsn();
1787 if (ai->bb != bi->bb)
1788 return ai->bb->getId() < bi->bb->getId();
1789 return ai->serial < bi->serial;
1790 }
1791
1792 // For each value that is to be spilled, go through all its definitions.
1793 // A value can have multiple definitions if it has been coalesced before.
1794 // For each definition, first go through all its uses and insert an unspill
1795 // instruction before it, then replace the use with the temporary register.
1796 // Unspill can be either a load from memory or simply a move to another
1797 // register file.
// For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
// if we have spilled to a memory location, or simply replace it with the new
// register. No load or conversion instruction should be needed.
1801 bool
SpillCodeInserter::run(const std::list<ValuePair>& lst)
1803 {
1804 for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
1805 ++it) {
1806 LValue *lval = it->first->asLValue();
1807 Symbol *mem = it->second ? it->second->asSym() : NULL;
1808
1809 // Keep track of which instructions to delete later. Deleting them
1810 // inside the loop is unsafe since a single instruction may have
1811 // multiple destinations that all need to be spilled (like OP_SPLIT).
1812 unordered_set<Instruction *> to_del;
1813
1814 std::list<ValueDef *> &defs = mergedDefs(lval);
1815 for (Value::DefIterator d = defs.begin(); d != defs.end();
1816 ++d) {
1817 Value *slot = mem ?
1818 static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
1819 Value *tmp = NULL;
1820 Instruction *last = NULL;
1821
1822 LValue *dval = (*d)->get()->asLValue();
1823 Instruction *defi = (*d)->getInsn();
1824
1825 // Sort all the uses by BB/instruction so that we don't unspill
1826 // multiple times in a row, and also remove a source of
1827 // non-determinism.
1828 std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
1829 std::sort(refs.begin(), refs.end(), value_cmp);
1830
1831 // Unspill at each use *before* inserting spill instructions,
1832 // we don't want to have the spill instructions in the use list here.
1833 for (std::vector<ValueRef*>::const_iterator it = refs.begin();
1834 it != refs.end(); ++it) {
1835 ValueRef *u = *it;
1836 Instruction *usei = u->getInsn();
1837 assert(usei);
1838 if (usei->isPseudo()) {
1839 tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
1840 last = NULL;
1841 } else {
1842 if (!last || (usei != last->next && usei != last))
1843 tmp = unspill(usei, dval, slot);
1844 last = usei;
1845 }
1846 u->set(tmp);
1847 }
1848
1849 assert(defi);
1850 if (defi->isPseudo()) {
1851 d = defs.erase(d);
1852 --d;
1853 if (slot->reg.file == FILE_MEMORY_LOCAL)
1854 to_del.insert(defi);
1855 else
1856 defi->setDef(0, slot);
1857 } else {
1858 spill(defi, slot, dval);
1859 }
1860 }
1861
1862 for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
1863 it != to_del.end(); ++it) {
1864 mergedDefs.removeDefsOfInstruction(*it);
1865 delete_Instruction(func->getProgram(), *it);
1866 }
1867 }
1868
1869 // TODO: We're not trying to reuse old slots in a potential next iteration.
1870 // We have to update the slots' livei intervals to be able to do that.
1871 stackBase = stackSize;
1872 slots.clear();
1873 return true;
1874 }
1875
1876 bool
RegAlloc::exec()
1878 {
1879 for (IteratorRef it = prog->calls.iteratorDFS(false);
1880 !it->end(); it->next()) {
1881 func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));
1882
      func->tlsBase = prog->tlsSize;
      if (!execFunc())
         return false;
      prog->tlsSize += func->tlsSize;
   }
   return true;
}

bool
RegAlloc::execFunc()
{
   MergedDefs mergedDefs;
   InsertConstraintsPass insertConstr;
   PhiMovesPass insertPhiMoves;
   ArgumentMovesPass insertArgMoves;
   BuildIntervalsPass buildIntervals;
   SpillCodeInserter insertSpills(func, mergedDefs);

   GCRA gcra(func, insertSpills, mergedDefs);

   unsigned int i, retries;
   bool ret;

   if (!func->ins.empty()) {
      // Insert a nop at the entry so inputs only used by the first instruction
      // don't count as having an empty live range.
      Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
      BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
   }

   ret = insertConstr.exec(func);
   if (!ret)
      goto out;

   ret = insertPhiMoves.run(func);
   if (!ret)
      goto out;

   ret = insertArgMoves.run(func);
   if (!ret)
      goto out;

   // TODO: need to fix up spill slot usage ranges to support > 1 retry
   for (retries = 0; retries < 3; ++retries) {
      if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
         INFO("Retry: %i\n", retries);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();

      // spilling to registers may add live ranges, need to rebuild everything
      ret = true;
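      // One pass over the CFG per loop nesting level so that values live
      // across a loop's back-edge get propagated into all blocks of the loop.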
      for (sequence = func->cfg.nextSequence(), i = 0;
           ret && i <= func->loopNestingBound;
           sequence = func->cfg.nextSequence(), ++i)
         ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
      // reset marker
      for (ArrayList::Iterator bi = func->allBBlocks.iterator();
           !bi.end(); bi.next())
         BasicBlock::get(bi)->liveSet.marker = false;
      if (!ret)
         break;
      func->orderInstructions(this->insns);

      ret = buildIntervals.run(func);
      if (!ret)
         break;
      ret = gcra.allocateRegisters(insns);
      if (ret)
         break; // success
   }
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);

   func->tlsSize = insertSpills.getStackSize();
out:
   return ret;
}

// TODO: check if modifying Instruction::join here breaks anything
void
GCRA::resolveSplitsAndMerges()
{
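   // A split's defs are laid out back-to-back inside its source's register:
   // each def gets the id corresponding to the accumulated byte offset.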
   for (std::list<Instruction *>::iterator it = splits.begin();
        it != splits.end();
        ++it) {
      Instruction *split = *it;
      unsigned int reg = regs.idToBytes(split->getSrc(0));
      for (int d = 0; split->defExists(d); ++d) {
         Value *v = split->getDef(d);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         reg += v->reg.size;
      }
   }
   splits.clear();

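   // Conversely, a merge's sources are laid out back-to-back inside its def's
   // register; phi/union instructions feeding those sources are fixed up as
   // well so that everything ends up in the same physical register.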
   for (std::list<Instruction *>::iterator it = merges.begin();
        it != merges.end();
        ++it) {
      Instruction *merge = *it;
      unsigned int reg = regs.idToBytes(merge->getDef(0));
      for (int s = 0; merge->srcExists(s); ++s) {
         Value *v = merge->getSrc(s);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         // If the value is defined by a phi/union node, we also need to
         // perform the same fixup on that node's sources, since after RA
         // their registers should be identical.
         if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
            Instruction *phi = v->getInsn();
            for (int phis = 0; phi->srcExists(phis); ++phis) {
               phi->getSrc(phis)->join = v;
               phi->getSrc(phis)->reg.data.id = v->reg.data.id;
            }
         }
         reg += v->reg.size;
      }
   }
   merges.clear();
}

bool Program::registerAllocation()
{
   RegAlloc ra(this);
   return ra.exec();
}

bool
RegAlloc::InsertConstraintsPass::exec(Function *ir)
{
   constrList.clear();

   bool ret = run(ir, true, true);
   if (ret)
      ret = insertConstraintMoves();
   return ret;
}

// TODO: make part of texture insn
void
RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
{
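   // Drop components whose defs have no users from the write mask and compact
   // the remaining defs to the front of the def list.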
   Value *def[4];
   int c, k, d;
   uint8_t mask = 0;

   for (d = 0, k = 0, c = 0; c < 4; ++c) {
      if (!(tex->tex.mask & (1 << c)))
         continue;
      if (tex->getDef(k)->refCount()) {
         mask |= 1 << c;
         def[d++] = tex->getDef(k);
      }
      ++k;
   }
   tex->tex.mask = mask;

   for (c = 0; c < d; ++c)
      tex->setDef(c, def[c]);
   for (; c < 4; ++c)
      tex->setDef(c, NULL);
}

bool
RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
{
   Value *v = cst->getSrc(s);

   // current register allocation can't handle it if a value participates in
   // multiple constraints
   for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
      if (cst != (*it)->getInsn())
         return true;
   }

   // can start at s + 1 because detectConflict is called on all sources
   for (int c = s + 1; cst->srcExists(c); ++c)
      if (v == cst->getSrc(c))
         return true;

   Instruction *defi = v->getInsn();

   return (!defi || defi->constrainedDefs());
}

void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      if (!i->bb->dominatedBy(cst->bb))
         break;
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
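   // no identical constraint op that dominates i was found, emit a new one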
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}

// Add a dummy use of the pointer source of >= 8 byte loads after the load
// to prevent it from being assigned a register overlapping the load's
// destination, which would produce random corruptions.
void
RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
{
   Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   hzd->setSrc(0, src->get());
   i->bb->insertAfter(i, hzd);
}

// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   int n;
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n);
   condenseDefs(insn, 0, n - 1);
}

void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getDef(s)->reg.size;
   if (!size)
      return;

   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = a; d <= b; ++d) {
      split->setDef(d - a, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(a, lval);

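   // shift any defs that followed the condensed range down so that no gaps
   // are left in the def list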
   for (int k = a + 1, d = b + 1; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}

void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

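   // temporarily detach the extra sources (indirect address / predicate
   // operands) attached to slot 0 so the reshuffling below leaves them alone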
   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
   }
   insn->moveSources(b + 1, a - b);
   insn->setSrc(a, lval);
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}

bool
RegAlloc::InsertConstraintsPass::isScalarTexGM107(TexInstruction *tex)
{
   if (tex->tex.sIndirectSrc >= 0 ||
       tex->tex.rIndirectSrc >= 0 ||
       tex->tex.derivAll)
      return false;

   if (tex->tex.mask == 5 || tex->tex.mask == 6)
      return false;

   switch (tex->op) {
   case OP_TEX:
   case OP_TXF:
   case OP_TXG:
   case OP_TXL:
      break;
   default:
      return false;
   }

   // legal variants:
   // TEXS.1D.LZ
   // TEXS.2D
   // TEXS.2D.LZ
   // TEXS.2D.LL
   // TEXS.2D.DC
   // TEXS.2D.LL.DC
   // TEXS.2D.LZ.DC
   // TEXS.A2D
   // TEXS.A2D.LZ
   // TEXS.A2D.LZ.DC
   // TEXS.3D
   // TEXS.3D.LZ
   // TEXS.CUBE
   // TEXS.CUBE.LL

   // TLDS.1D.LZ
   // TLDS.1D.LL
   // TLDS.2D.LZ
   // TLDS.2D.LZ.AOFFI
   // TLDS.2D.LZ.MZ
   // TLDS.2D.LL
   // TLDS.2D.LL.AOFFI
   // TLDS.A2D.LZ
   // TLDS.3D.LZ

   // TLD4S: all 2D/RECT variants and only offset

   switch (tex->op) {
   case OP_TEX:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
      case TEX_TARGET_2D_ARRAY_SHADOW:
         return tex->tex.levelZero;
      case TEX_TARGET_CUBE:
         return !tex->tex.levelZero;
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_3D:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   case OP_TXL:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
      case TEX_TARGET_CUBE:
         return true;
      default:
         return false;
      }

   case OP_TXF:
      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
         return !tex->tex.useOffsets;
      case TEX_TARGET_2D:
      case TEX_TARGET_RECT:
         return true;
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_3D:
         return !tex->tex.useOffsets && tex->tex.levelZero;
      default:
         return false;
      }

   case OP_TXG:
      if (tex->tex.useOffsets > 1)
         return false;
      if (tex->tex.mask != 0x3 && tex->tex.mask != 0xf)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   default:
      return false;
   }
}

void
RegAlloc::InsertConstraintsPass::handleScalarTexGM107(TexInstruction *tex)
{
   int defCount = tex->defCount(0xff);
   int srcCount = tex->srcCount(0xff);

   tex->tex.scalar = true;

   // 1. handle defs
   if (defCount > 3)
      condenseDefs(tex, 2, 3);
   if (defCount > 1)
      condenseDefs(tex, 0, 1);

   // 2. handle srcs
   // special case for TXF.A2D
   if (tex->op == OP_TXF && tex->tex.target == TEX_TARGET_2D_ARRAY) {
      assert(srcCount >= 3);
      condenseSrcs(tex, 1, 2);
   } else {
      if (srcCount > 3)
         condenseSrcs(tex, 2, 3);
      // only if we have more than 2 sources
      if (srcCount > 2)
         condenseSrcs(tex, 0, 1);
   }

   assert(!tex->defExists(2) && !tex->srcExists(2));
}

void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

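   // the scalar TEXS/TLDS encodings are only considered on pre-GV100 chips;
   // GV100 and later always use the generic two-group layout below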
   if (targ->getChipset() < NVISA_GV100_CHIPSET) {
      if (isScalarTexGM107(tex)) {
         handleScalarTexGM107(tex);
         return;
      }

      assert(!tex->tex.scalar);
      condenseDefs(tex);
   } else {
      if (isTextureOp(tex->op)) {
         int defCount = tex->defCount(0xff);
         if (defCount > 3)
            condenseDefs(tex, 2, 3);
         if (defCount > 1)
            condenseDefs(tex, 0, 1);
      } else {
         condenseDefs(tex);
      }
   }

   if (isSurfaceOp(tex->op)) {
      int s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      int n = 0;

      switch (tex->op) {
      case OP_SUSTB:
      case OP_SUSTP:
         n = 4;
         break;
      case OP_SUREDB:
      case OP_SUREDP:
         if (tex->subOp == NV50_IR_SUBOP_ATOM_CAS)
            n = 2;
         break;
      default:
         break;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1)
         condenseSrcs(tex, 1, n); // do not condense the tex handle
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff, true) - s;
         // TODO: Is this necessary? Perhaps just has to be aligned to the
         // level that the first arg is, not necessarily to 4. This
         // requirement has not been rigorously verified, as it has been on
         // Kepler.
         if (n > 0 && n < 3) {
            if (tex->srcExists(n + s)) // move potential predicate out of the way
               tex->moveSources(n + s, 3 - n);
            while (n < 3)
               tex->setSrc(s + n++, new_LValue(func, FILE_GPR));
         }
      } else {
         s = tex->srcCount(0xff, true);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}

void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, 6);
   } else
   if (isTextureOp(tex->op)) {
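      // pack the sources into at most two groups of consecutive registers:
      // the first (up to) four sources form one group, any remaining sources
      // are padded with dummy values and form the second group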
      int n = tex->srcCount(0xff, true);
      int s = n > 4 ? 4 : n;
      if (n > 4 && n < 7) {
         if (tex->srcExists(n)) // move potential predicate out of the way
            tex->moveSources(n, 7 - n);

         while (n < 7)
            tex->setSrc(n++, new_LValue(func, FILE_GPR));
      }
      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 4)
         condenseSrcs(tex, 1, n - s);
   }
}

void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

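   // s counts the leading sources (coordinates, indirect handles, ...) that
   // must land in one group of consecutive registers, n counts the trailing
   // sources (e.g. surface store data) that form a second group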
   if (tex->op == OP_TXQ) {
      s = tex->srcCount(0xff);
      n = 0;
   } else if (isSurfaceOp(tex->op)) {
      s = tex->tex.target.getDim() + (tex->tex.target.isArray() || tex->tex.target.isCube());
      if (tex->op == OP_SUSTB || tex->op == OP_SUSTP)
         n = 4;
      else
         n = 0;
   } else {
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}

void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
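   // temporarily remove the predicate so it is not mistaken for a regular
   // source by the loop and the condenseSrcs() call below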
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      else
         insertConstraintMove(tex, c);
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}

// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
         case 0x120:
         case 0x130:
         case 0x140:
         case 0x160:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
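         // the value to be stored starts at source 1 (source 0 is the
         // address); group however many sources it spans into one register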
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      }
   }
   return true;
}

void
RegAlloc::InsertConstraintsPass::insertConstraintMove(Instruction *cst, int s)
{
   const uint8_t size = cst->src(s).getSize();

   assert(cst->getSrc(s)->defs.size() == 1); // still SSA

   Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();

   bool imm = defi->op == OP_MOV &&
      defi->src(0).getFile() == FILE_IMMEDIATE;
   bool load = defi->op == OP_LOAD &&
      defi->src(0).getFile() == FILE_MEMORY_CONST &&
      !defi->src(0).isIndirect(0);
   // catch some cases where we don't really need MOVs
   if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs()) {
      if (imm || load) {
         // Move the defi right before the cst. No point in expanding
         // the range.
         defi->bb->remove(defi);
         cst->bb->insertBefore(cst, defi);
      }
      return;
   }

   LValue *lval = new_LValue(func, cst->src(s).getFile());
   lval->reg.size = size;

   Instruction *mov = new_Instruction(func, OP_MOV, typeOfSize(size));
   mov->setDef(0, lval);
   mov->setSrc(0, cst->getSrc(s));

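   // for immediates and constant-buffer loads, rematerialize the value at the
   // new point instead of copying it from the old register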
   if (load) {
      mov->op = OP_LOAD;
      mov->setSrc(0, defi->getSrc(0));
   } else if (imm) {
      mov->setSrc(0, defi->getSrc(0));
   }

   if (defi->getPredicate())
      mov->setPredicate(defi->cc, defi->getPredicate());

   cst->setSrc(s, mov->getDef(0));
   cst->bb->insertBefore(cst, mov);

   cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help
}

// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      if (cst->op == OP_SPLIT && 0) {
         // spilling splits is annoying, just make sure they're separate
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

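            // a source without any def is an undefined value; give it a dummy
            // def so the register allocator still sees a defining instruction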
            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }

            insertConstraintMove(cst, s);
         }
      }
   }

   return true;
}

} // namespace nv50_ir