• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Valve Corporation
3  * Copyright © 2018 Google
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25 
26 #include "aco_builder.h"
27 #include "aco_ir.h"
28 #include "aco_util.h"
29 
30 #include "common/sid.h"
31 
32 #include <algorithm>
33 #include <cstring>
34 #include <map>
35 #include <set>
36 #include <stack>
37 #include <unordered_map>
38 #include <unordered_set>
39 #include <vector>
40 
41 namespace std {
42 template <> struct hash<aco::Temp> {
operator ()std::hash43    size_t operator()(aco::Temp temp) const noexcept
44    {
45       uint32_t v;
46       std::memcpy(&v, &temp, sizeof(temp));
47       return std::hash<uint32_t>{}(v);
48    }
49 };
50 } // namespace std
51 
52 /*
53  * Implements the spilling algorithm on SSA-form from
54  * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
55  * by Matthias Braun and Sebastian Hack.
56  */
57 
58 namespace aco {
59 
60 namespace {
61 
62 struct remat_info {
63    Instruction* instr;
64 };
65 
66 struct spill_ctx {
67    RegisterDemand target_pressure;
68    Program* program;
69    aco::monotonic_buffer_resource memory;
70 
71    std::vector<std::vector<RegisterDemand>> register_demand;
72    std::vector<aco::map<Temp, Temp>> renames;
73    std::vector<aco::unordered_map<Temp, uint32_t>> spills_entry;
74    std::vector<aco::unordered_map<Temp, uint32_t>> spills_exit;
75 
76    std::vector<bool> processed;
77    std::stack<Block*, std::vector<Block*>> loop_header;
78 
79    using next_use_distance_startend_type = aco::unordered_map<Temp, std::pair<uint32_t, uint32_t>>;
80    std::vector<next_use_distance_startend_type> next_use_distances_start;
81    std::vector<next_use_distance_startend_type> next_use_distances_end;
82    std::vector<std::vector<std::pair<Temp, uint32_t>>> local_next_use_distance; /* Working buffer */
83    std::vector<std::pair<RegClass, std::unordered_set<uint32_t>>> interferences;
84    std::vector<std::vector<uint32_t>> affinities;
85    std::vector<bool> is_reloaded;
86    aco::unordered_map<Temp, remat_info> remat;
87    std::set<Instruction*> unused_remats;
88    unsigned wave_size;
89 
90    unsigned sgpr_spill_slots;
91    unsigned vgpr_spill_slots;
92    Temp scratch_rsrc;
93 
spill_ctxaco::__anon371ad4ba0111::spill_ctx94    spill_ctx(const RegisterDemand target_pressure_, Program* program_,
95              std::vector<std::vector<RegisterDemand>> register_demand_)
96        : target_pressure(target_pressure_), program(program_), memory(),
97          register_demand(std::move(register_demand_)),
98          renames(program->blocks.size(), aco::map<Temp, Temp>(memory)),
99          spills_entry(program->blocks.size(), aco::unordered_map<Temp, uint32_t>(memory)),
100          spills_exit(program->blocks.size(), aco::unordered_map<Temp, uint32_t>(memory)),
101          processed(program->blocks.size(), false),
102          next_use_distances_start(program->blocks.size(), next_use_distance_startend_type(memory)),
103          next_use_distances_end(program->blocks.size(), next_use_distance_startend_type(memory)),
104          remat(memory), wave_size(program->wave_size), sgpr_spill_slots(0), vgpr_spill_slots(0)
105    {}
106 
add_affinityaco::__anon371ad4ba0111::spill_ctx107    void add_affinity(uint32_t first, uint32_t second)
108    {
109       unsigned found_first = affinities.size();
110       unsigned found_second = affinities.size();
111       for (unsigned i = 0; i < affinities.size(); i++) {
112          std::vector<uint32_t>& vec = affinities[i];
113          for (uint32_t entry : vec) {
114             if (entry == first)
115                found_first = i;
116             else if (entry == second)
117                found_second = i;
118          }
119       }
120       if (found_first == affinities.size() && found_second == affinities.size()) {
121          affinities.emplace_back(std::vector<uint32_t>({first, second}));
122       } else if (found_first < affinities.size() && found_second == affinities.size()) {
123          affinities[found_first].push_back(second);
124       } else if (found_second < affinities.size() && found_first == affinities.size()) {
125          affinities[found_second].push_back(first);
126       } else if (found_first != found_second) {
127          /* merge second into first */
128          affinities[found_first].insert(affinities[found_first].end(),
129                                         affinities[found_second].begin(),
130                                         affinities[found_second].end());
131          affinities.erase(std::next(affinities.begin(), found_second));
132       } else {
133          assert(found_first == found_second);
134       }
135    }
136 
add_interferenceaco::__anon371ad4ba0111::spill_ctx137    void add_interference(uint32_t first, uint32_t second)
138    {
139       if (interferences[first].first.type() != interferences[second].first.type())
140          return;
141 
142       bool inserted = interferences[first].second.insert(second).second;
143       if (inserted)
144          interferences[second].second.insert(first);
145    }
146 
allocate_spill_idaco::__anon371ad4ba0111::spill_ctx147    uint32_t allocate_spill_id(RegClass rc)
148    {
149       interferences.emplace_back(rc, std::unordered_set<uint32_t>());
150       is_reloaded.push_back(false);
151       return next_spill_id++;
152    }
153 
154    uint32_t next_spill_id = 0;
155 };
156 
157 int32_t
get_dominator(int idx_a,int idx_b,Program * program,bool is_linear)158 get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
159 {
160 
161    if (idx_a == -1)
162       return idx_b;
163    if (idx_b == -1)
164       return idx_a;
165    if (is_linear) {
166       while (idx_a != idx_b) {
167          if (idx_a > idx_b)
168             idx_a = program->blocks[idx_a].linear_idom;
169          else
170             idx_b = program->blocks[idx_b].linear_idom;
171       }
172    } else {
173       while (idx_a != idx_b) {
174          if (idx_a > idx_b)
175             idx_a = program->blocks[idx_a].logical_idom;
176          else
177             idx_b = program->blocks[idx_b].logical_idom;
178       }
179    }
180    assert(idx_a != -1);
181    return idx_a;
182 }
183 
/* One backward dataflow step for a single block: computes the next-use
 * distances at the start of the block from those at its end, and propagates
 * the result to the predecessors' end-of-block maps. Whenever a predecessor's
 * map changes, `worklist` is raised so that block is revisited.
 * Map values are pairs {block index of the next use, instruction distance}. */
void
next_uses_per_block(spill_ctx& ctx, unsigned block_idx, uint32_t& worklist)
{
   Block* block = &ctx.program->blocks[block_idx];
   /* start from the live-out information and walk the block backwards */
   ctx.next_use_distances_start[block_idx] = ctx.next_use_distances_end[block_idx];
   auto& next_use_distances_start = ctx.next_use_distances_start[block_idx];

   /* to compute the next use distance at the beginning of the block, we have to add the block's
    * size */
   for (std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>::iterator it =
           next_use_distances_start.begin();
        it != next_use_distances_start.end(); ++it)
      it->second.second = it->second.second + block->instructions.size();

   int idx = block->instructions.size() - 1;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      /* phis are handled separately in the loop below */
      if (instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi)
         break;

      /* a definition ends the live range: no next use above this point */
      for (const Definition& def : instr->definitions) {
         if (def.isTemp())
            next_use_distances_start.erase(def.getTemp());
      }

      for (const Operand& op : instr->operands) {
         /* omit exec mask */
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         /* walking backwards, this use is the closest one seen so far */
         if (op.isTemp())
            next_use_distances_start[op.getTemp()] = {block_idx, idx};
      }
      idx--;
   }

   assert(block_idx != 0 || next_use_distances_start.empty());
   std::unordered_set<Temp> phi_defs;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);

      std::pair<uint32_t, uint32_t> distance{block_idx, 0};

      /* if the phi result is live beyond this block, phi operands inherit
       * that distance; otherwise they get distance 0 in this block */
      auto it = instr->definitions[0].isTemp()
                   ? next_use_distances_start.find(instr->definitions[0].getTemp())
                   : next_use_distances_start.end();
      if (it != next_use_distances_start.end() &&
          phi_defs.insert(instr->definitions[0].getTemp()).second) {
         distance = it->second;
      }

      /* push the phi operands' next-use info into the matching predecessor */
      for (unsigned i = 0; i < instr->operands.size(); i++) {
         unsigned pred_idx =
            instr->opcode == aco_opcode::p_phi ? block->logical_preds[i] : block->linear_preds[i];
         if (instr->operands[i].isTemp()) {
            auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
               std::make_pair(instr->operands[i].getTemp(), distance));
            const bool inserted = insert_result.second;
            std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;
            /* re-process the predecessor if its live-out info changed */
            if (inserted || entry_distance != distance)
               worklist = std::max(worklist, pred_idx + 1);
            entry_distance = distance;
         }
      }
      idx--;
   }

   /* all remaining live vars must be live-out at the predecessors */
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances_start) {
      Temp temp = pair.first;
      if (phi_defs.count(temp)) {
         continue;
      }
      uint32_t distance = pair.second.second;
      uint32_t dom = pair.second.first;
      std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
      for (unsigned pred_idx : preds) {
         /* penalize uses inside deeper loops so loop-carried values look far away */
         if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
            distance += 0xFFFF;
         auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
            std::make_pair(temp, std::pair<uint32_t, uint32_t>{}));
         const bool inserted = insert_result.second;
         std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;

         /* merge with existing info: common dominator block, minimum distance */
         if (!inserted) {
            dom = get_dominator(dom, entry_distance.first, ctx.program, temp.is_linear());
            distance = std::min(entry_distance.second, distance);
         }
         if (entry_distance != std::pair<uint32_t, uint32_t>{dom, distance}) {
            worklist = std::max(worklist, pred_idx + 1);
            entry_distance = {dom, distance};
         }
      }
   }
}
282 
283 void
compute_global_next_uses(spill_ctx & ctx)284 compute_global_next_uses(spill_ctx& ctx)
285 {
286    uint32_t worklist = ctx.program->blocks.size();
287    while (worklist) {
288       unsigned block_idx = --worklist;
289       next_uses_per_block(ctx, block_idx, worklist);
290    }
291 }
292 
293 bool
should_rematerialize(aco_ptr<Instruction> & instr)294 should_rematerialize(aco_ptr<Instruction>& instr)
295 {
296    /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
297    if (instr->format != Format::VOP1 && instr->format != Format::SOP1 &&
298        instr->format != Format::PSEUDO && instr->format != Format::SOPK)
299       return false;
300    /* TODO: pseudo-instruction rematerialization is only supported for
301     * p_create_vector/p_parallelcopy */
302    if (instr->isPseudo() && instr->opcode != aco_opcode::p_create_vector &&
303        instr->opcode != aco_opcode::p_parallelcopy)
304       return false;
305    if (instr->isSOPK() && instr->opcode != aco_opcode::s_movk_i32)
306       return false;
307 
308    for (const Operand& op : instr->operands) {
309       /* TODO: rematerialization using temporaries isn't yet supported */
310       if (!op.isConstant())
311          return false;
312    }
313 
314    /* TODO: rematerialization with multiple definitions isn't yet supported */
315    if (instr->definitions.size() > 1)
316       return false;
317 
318    return true;
319 }
320 
321 aco_ptr<Instruction>
do_reload(spill_ctx & ctx,Temp tmp,Temp new_name,uint32_t spill_id)322 do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
323 {
324    std::unordered_map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
325    if (remat != ctx.remat.end()) {
326       Instruction* instr = remat->second.instr;
327       assert((instr->isVOP1() || instr->isSOP1() || instr->isPseudo() || instr->isSOPK()) &&
328              "unsupported");
329       assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector ||
330               instr->opcode == aco_opcode::p_parallelcopy) &&
331              "unsupported");
332       assert(instr->definitions.size() == 1 && "unsupported");
333 
334       aco_ptr<Instruction> res;
335       if (instr->isVOP1()) {
336          res.reset(create_instruction<VALU_instruction>(
337             instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
338       } else if (instr->isSOP1()) {
339          res.reset(create_instruction<SOP1_instruction>(
340             instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
341       } else if (instr->isPseudo()) {
342          res.reset(create_instruction<Pseudo_instruction>(
343             instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
344       } else if (instr->isSOPK()) {
345          res.reset(create_instruction<SOPK_instruction>(
346             instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
347          res->sopk().imm = instr->sopk().imm;
348       }
349       for (unsigned i = 0; i < instr->operands.size(); i++) {
350          res->operands[i] = instr->operands[i];
351          if (instr->operands[i].isTemp()) {
352             assert(false && "unsupported");
353             if (ctx.remat.count(instr->operands[i].getTemp()))
354                ctx.unused_remats.erase(ctx.remat[instr->operands[i].getTemp()].instr);
355          }
356       }
357       res->definitions[0] = Definition(new_name);
358       return res;
359    } else {
360       aco_ptr<Pseudo_instruction> reload{
361          create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
362       reload->operands[0] = Operand::c32(spill_id);
363       reload->definitions[0] = Definition(new_name);
364       ctx.is_reloaded[spill_id] = true;
365       return reload;
366    }
367 }
368 
369 void
get_rematerialize_info(spill_ctx & ctx)370 get_rematerialize_info(spill_ctx& ctx)
371 {
372    for (Block& block : ctx.program->blocks) {
373       bool logical = false;
374       for (aco_ptr<Instruction>& instr : block.instructions) {
375          if (instr->opcode == aco_opcode::p_logical_start)
376             logical = true;
377          else if (instr->opcode == aco_opcode::p_logical_end)
378             logical = false;
379          if (logical && should_rematerialize(instr)) {
380             for (const Definition& def : instr->definitions) {
381                if (def.isTemp()) {
382                   ctx.remat[def.getTemp()] = remat_info{instr.get()};
383                   ctx.unused_remats.insert(instr.get());
384                }
385             }
386          }
387       }
388    }
389 }
390 
391 void
update_local_next_uses(spill_ctx & ctx,Block * block,std::vector<std::vector<std::pair<Temp,uint32_t>>> & local_next_uses)392 update_local_next_uses(spill_ctx& ctx, Block* block,
393                        std::vector<std::vector<std::pair<Temp, uint32_t>>>& local_next_uses)
394 {
395    if (local_next_uses.size() < block->instructions.size()) {
396       /* Allocate more next-use-maps. Note that by never reducing the vector size, we enable
397        * future calls to this function to re-use already allocated map memory. */
398       local_next_uses.resize(block->instructions.size());
399    }
400 
401    local_next_uses[block->instructions.size() - 1].clear();
402    for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
403         ctx.next_use_distances_end[block->index]) {
404       local_next_uses[block->instructions.size() - 1].push_back(std::make_pair<Temp, uint32_t>(
405          (Temp)pair.first, pair.second.second + block->instructions.size()));
406    }
407 
408    for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
409       aco_ptr<Instruction>& instr = block->instructions[idx];
410       if (!instr)
411          break;
412       if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
413          break;
414 
415       if (idx != (int)block->instructions.size() - 1) {
416          local_next_uses[idx] = local_next_uses[idx + 1];
417       }
418 
419       for (const Operand& op : instr->operands) {
420          if (op.isFixed() && op.physReg() == exec)
421             continue;
422          if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
423             continue;
424          if (op.isTemp()) {
425             auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
426                                    [op](auto& pair) { return pair.first == op.getTemp(); });
427             if (it == local_next_uses[idx].end()) {
428                local_next_uses[idx].push_back(std::make_pair<Temp, uint32_t>(op.getTemp(), idx));
429             } else {
430                it->second = idx;
431             }
432          }
433       }
434       for (const Definition& def : instr->definitions) {
435          if (def.isTemp()) {
436             auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
437                                    [def](auto& pair) { return pair.first == def.getTemp(); });
438             if (it != local_next_uses[idx].end()) {
439                local_next_uses[idx].erase(it);
440             }
441          }
442       }
443    }
444 }
445 
446 RegisterDemand
get_demand_before(spill_ctx & ctx,unsigned block_idx,unsigned idx)447 get_demand_before(spill_ctx& ctx, unsigned block_idx, unsigned idx)
448 {
449    if (idx == 0) {
450       RegisterDemand demand = ctx.register_demand[block_idx][idx];
451       aco_ptr<Instruction>& instr = ctx.program->blocks[block_idx].instructions[idx];
452       aco_ptr<Instruction> instr_before(nullptr);
453       return get_demand_before(demand, instr, instr_before);
454    } else {
455       return ctx.register_demand[block_idx][idx - 1];
456    }
457 }
458 
459 RegisterDemand
get_live_in_demand(spill_ctx & ctx,unsigned block_idx)460 get_live_in_demand(spill_ctx& ctx, unsigned block_idx)
461 {
462    unsigned idx = 0;
463    RegisterDemand reg_pressure = RegisterDemand();
464    Block& block = ctx.program->blocks[block_idx];
465    for (aco_ptr<Instruction>& phi : block.instructions) {
466       if (!is_phi(phi))
467          break;
468       idx++;
469 
470       /* Killed phi definitions increase pressure in the predecessor but not
471        * the block they're in. Since the loops below are both to control
472        * pressure of the start of this block and the ends of it's
473        * predecessors, we need to count killed unspilled phi definitions here. */
474       if (phi->definitions[0].isTemp() && phi->definitions[0].isKill() &&
475           !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()))
476          reg_pressure += phi->definitions[0].getTemp();
477    }
478 
479    reg_pressure += get_demand_before(ctx, block_idx, idx);
480 
481    /* Consider register pressure from linear predecessors. This can affect
482     * reg_pressure if the branch instructions define sgprs. */
483    for (unsigned pred : block.linear_preds)
484       reg_pressure.sgpr =
485          std::max<int16_t>(reg_pressure.sgpr, ctx.register_demand[pred].back().sgpr);
486 
487    return reg_pressure;
488 }
489 
/* Decides which live-in variables of `block` enter it in spilled state
 * (filling ctx.spills_entry[block_idx]) and returns the register demand
 * freed by those spills. Three cases are handled: loop headers, simple
 * branch blocks (single linear predecessor), and merge blocks. */
RegisterDemand
init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   RegisterDemand spilled_registers;

   /* first block, nothing was spilled before */
   if (block->linear_preds.empty())
      return {0, 0};

   /* next use distances at the beginning of the current block */
   const auto& next_use_distances = ctx.next_use_distances_start[block_idx];

   /* loop header block */
   if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
      assert(block->linear_preds[0] == block_idx - 1);
      assert(block->logical_preds[0] == block_idx - 1);

      /* create new loop_info */
      ctx.loop_header.emplace(block);

      /* check how many live-through variables should be spilled */
      RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
      RegisterDemand loop_demand = reg_pressure;
      /* accumulate the maximum demand over all blocks of this loop nest */
      unsigned i = block_idx;
      while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
         assert(ctx.program->blocks.size() > i);
         loop_demand.update(ctx.program->blocks[i++].register_demand);
      }
      unsigned loop_end = i;

      for (auto spilled : ctx.spills_exit[block_idx - 1]) {
         auto it = next_use_distances.find(spilled.first);

         /* variable is not live at loop entry: probably a phi operand */
         if (it == next_use_distances.end())
            continue;

         /* keep constants and live-through variables spilled */
         if (it->second.first >= loop_end || ctx.remat.count(spilled.first)) {
            ctx.spills_entry[block_idx][spilled.first] = spilled.second;
            spilled_registers += spilled.first;
            loop_demand -= spilled.first;
         }
      }

      /* select live-through variables and constants */
      RegType type = RegType::vgpr;
      while (loop_demand.exceeds(ctx.target_pressure)) {
         /* if VGPR demand is low enough, select SGPRs */
         if (type == RegType::vgpr && loop_demand.vgpr <= ctx.target_pressure.vgpr)
            type = RegType::sgpr;
         /* if SGPR demand is low enough, break */
         if (type == RegType::sgpr && loop_demand.sgpr <= ctx.target_pressure.sgpr)
            break;

         /* pick the candidate whose next use is furthest away */
         unsigned distance = 0;
         Temp to_spill;
         for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
              next_use_distances) {
            if (pair.first.type() == type &&
                (pair.second.first >= loop_end ||
                 (ctx.remat.count(pair.first) && type == RegType::sgpr)) &&
                pair.second.second > distance && !ctx.spills_entry[block_idx].count(pair.first)) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }

         /* select SGPRs or break */
         if (distance == 0) {
            if (type == RegType::sgpr)
               break;
            type = RegType::sgpr;
            continue;
         }

         /* re-use the spill id from the preheader exit if there is one */
         uint32_t spill_id;
         if (!ctx.spills_exit[block_idx - 1].count(to_spill)) {
            spill_id = ctx.allocate_spill_id(to_spill.regClass());
         } else {
            spill_id = ctx.spills_exit[block_idx - 1][to_spill];
         }

         ctx.spills_entry[block_idx][to_spill] = spill_id;
         spilled_registers += to_spill;
         loop_demand -= to_spill;
      }

      /* shortcut */
      if (!loop_demand.exceeds(ctx.target_pressure))
         return spilled_registers;

      /* if reg pressure is too high at beginning of loop, add variables with furthest use */
      reg_pressure -= spilled_registers;

      while (reg_pressure.exceeds(ctx.target_pressure)) {
         unsigned distance = 0;
         Temp to_spill;
         type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;

         for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
              next_use_distances) {
            if (pair.first.type() == type && pair.second.second > distance &&
                !ctx.spills_entry[block_idx].count(pair.first)) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         assert(distance != 0);
         ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
         spilled_registers += to_spill;
         reg_pressure -= to_spill;
      }

      return spilled_registers;
   }

   /* branch block */
   if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
      /* keep variables spilled if they are alive and not used in the current block */
      unsigned pred_idx = block->linear_preds[0];
      for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
         if (pair.first.type() != RegType::sgpr) {
            continue;
         }
         auto next_use_distance_it = next_use_distances.find(pair.first);
         /* "next use not in this block" means the value can stay spilled */
         if (next_use_distance_it != next_use_distances.end() &&
             next_use_distance_it->second.first != block_idx) {
            ctx.spills_entry[block_idx].insert(pair);
            spilled_registers.sgpr += pair.first.size();
         }
      }
      /* vgprs come from the logical predecessor, if it is unique */
      if (block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() != RegType::vgpr) {
               continue;
            }
            auto next_use_distance_it = next_use_distances.find(pair.first);
            if (next_use_distance_it != next_use_distances.end() &&
                next_use_distance_it->second.first != block_idx) {
               ctx.spills_entry[block_idx].insert(pair);
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      /* if register demand is still too high, we just keep all spilled live vars
       * and process the block */
      if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
         pred_idx = block->linear_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::sgpr && next_use_distances.count(pair.first) &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.sgpr += pair.first.size();
            }
         }
      }
      if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr &&
          block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::vgpr && next_use_distances.count(pair.first) &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      return spilled_registers;
   }

   /* else: merge block */
   /* maps each candidate temp to whether it is spilled on at least one
    * incoming path (or is a phi definition) */
   std::map<Temp, bool> partial_spills;

   /* keep variables spilled on all incoming paths */
   for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances) {
      std::vector<unsigned>& preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;
      /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload
       * it. Otherwise, if any predecessor reloads it, ensure it's reloaded on all other
       * predecessors. The idea is that it's better in practice to rematerialize redundantly than to
       * create lots of phis. */
      /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db
       * doesn't seem to exercise this path much) */
      bool remat = ctx.remat.count(pair.first);
      bool spill = !remat;
      uint32_t spill_id = 0;
      for (unsigned pred_idx : preds) {
         /* variable is not even live at the predecessor: probably from a phi */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first)) {
            spill = false;
            break;
         }
         if (!ctx.spills_exit[pred_idx].count(pair.first)) {
            partial_spills.emplace(pair.first, false);
            if (!remat)
               spill = false;
         } else {
            partial_spills[pair.first] = true;
            /* it might be that on one incoming path, the variable has a different spill_id, but
             * add_couple_code() will take care of that. */
            spill_id = ctx.spills_exit[pred_idx][pair.first];
            if (remat)
               spill = true;
         }
      }
      if (spill) {
         ctx.spills_entry[block_idx][pair.first] = spill_id;
         partial_spills.erase(pair.first);
         spilled_registers += pair.first;
      }
   }

   /* same for phis */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;
      if (!phi->definitions[0].isTemp())
         continue;

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      bool is_all_spilled = true;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;
         is_all_spilled &= phi->operands[i].isTemp() &&
                           ctx.spills_exit[preds[i]].count(phi->operands[i].getTemp());
      }

      if (is_all_spilled) {
         /* The phi is spilled at all predecessors. Keep it spilled. */
         ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] =
            ctx.allocate_spill_id(phi->definitions[0].regClass());
         spilled_registers += phi->definitions[0].getTemp();
      } else {
         /* Phis might increase the register pressure. */
         partial_spills[phi->definitions[0].getTemp()] = true;
      }
   }

   /* if reg pressure at first instruction is still too high, add partially spilled variables */
   RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
   reg_pressure -= spilled_registers;

   while (reg_pressure.exceeds(ctx.target_pressure)) {
      assert(!partial_spills.empty());
      std::map<Temp, bool>::iterator it = partial_spills.begin();
      Temp to_spill = Temp();
      bool is_spilled_or_phi = false;
      unsigned distance = 0;
      RegType type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;

      /* prefer already-partially-spilled variables/phis; among equals pick
       * the one with the furthest next use */
      while (it != partial_spills.end()) {
         assert(!ctx.spills_entry[block_idx].count(it->first));

         if (it->first.type() == type && ((it->second && !is_spilled_or_phi) ||
                                          (it->second == is_spilled_or_phi &&
                                           next_use_distances.at(it->first).second > distance))) {
            distance = next_use_distances.at(it->first).second;
            to_spill = it->first;
            is_spilled_or_phi = it->second;
         }
         ++it;
      }
      assert(distance != 0);

      ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
      partial_spills.erase(to_spill);
      spilled_registers += to_spill;
      reg_pressure -= to_spill;
   }

   return spilled_registers;
}
766 
/* Inserts spill/reload "coupling" code on the incoming control-flow edges of
 * a block so that each predecessor's exit spill state (ctx.spills_exit) is
 * made consistent with the spill state chosen for this block's entry
 * (ctx.spills_entry[block_idx]):
 *  - variables spilled at a predecessor but live-in here get reloaded,
 *  - variables chosen as spilled here get spill instructions at predecessors,
 *  - spilled phis are lowered into per-predecessor spills of their operands,
 *  - when predecessors renamed a variable differently, a new phi is created
 *    to repair SSA form.
 * Must only run once every linear predecessor has been processed (asserted
 * below), except for the single-predecessor fast path.
 */
void
add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   /* no coupling code necessary */
   if (block->linear_preds.size() == 0)
      return;

   std::vector<aco_ptr<Instruction>> instructions;
   /* branch block: TODO take other branch into consideration */
   if (block->linear_preds.size() == 1 &&
       !(block->kind & (block_kind_loop_exit | block_kind_loop_header))) {
      assert(ctx.processed[block->linear_preds[0]]);
      assert(ctx.register_demand[block_idx].size() == block->instructions.size());
      std::vector<RegisterDemand> reg_demand;
      unsigned insert_idx = 0;
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, 0);

      /* Reload linear (SGPR/lane-mask) variables first: they can be reloaded
       * at the very top of the block, before p_logical_start. */
      for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
           ctx.next_use_distances_start[block_idx]) {
         const unsigned pred_idx = block->linear_preds[0];

         if (!live.first.is_linear())
            continue;
         /* still spilled */
         if (ctx.spills_entry[block_idx].count(live.first))
            continue;

         /* in register at end of predecessor */
         auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
         if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
            /* propagate the predecessor's rename into this block */
            std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
            if (it != ctx.renames[pred_idx].end())
               ctx.renames[block_idx].insert(*it);
            continue;
         }

         /* variable is spilled at predecessor and live at current block: create reload instruction */
         Temp new_name = ctx.program->allocateTmp(live.first.regClass());
         aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, spills_exit_it->second);
         instructions.emplace_back(std::move(reload));
         reg_demand.push_back(demand_before);
         ctx.renames[block_idx][live.first] = new_name;
      }

      if (block->logical_preds.size() == 1) {
         /* Move the original instructions up to and including p_logical_start
          * into the new list, so logical (VGPR) reloads land after it. */
         do {
            assert(insert_idx < block->instructions.size());
            instructions.emplace_back(std::move(block->instructions[insert_idx]));
            reg_demand.push_back(ctx.register_demand[block_idx][insert_idx]);
            insert_idx++;
         } while (instructions.back()->opcode != aco_opcode::p_logical_start);

         unsigned pred_idx = block->logical_preds[0];
         for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
              ctx.next_use_distances_start[block_idx]) {
            if (live.first.is_linear())
               continue;
            /* still spilled */
            if (ctx.spills_entry[block_idx].count(live.first))
               continue;

            /* in register at end of predecessor */
            auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
            if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
               std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
               if (it != ctx.renames[pred_idx].end())
                  ctx.renames[block_idx].insert(*it);
               continue;
            }

            /* variable is spilled at predecessor and live at current block:
             * create reload instruction */
            Temp new_name = ctx.program->allocateTmp(live.first.regClass());
            aco_ptr<Instruction> reload =
               do_reload(ctx, live.first, new_name, spills_exit_it->second);
            instructions.emplace_back(std::move(reload));
            reg_demand.emplace_back(reg_demand.back());
            ctx.renames[block_idx][live.first] = new_name;
         }
      }

      /* combine new reload instructions with original block */
      if (!instructions.empty()) {
         reg_demand.insert(reg_demand.end(),
                           std::next(ctx.register_demand[block->index].begin(), insert_idx),
                           ctx.register_demand[block->index].end());
         ctx.register_demand[block_idx] = std::move(reg_demand);
         instructions.insert(instructions.end(),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                std::next(block->instructions.begin(), insert_idx)),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                block->instructions.end()));
         block->instructions = std::move(instructions);
      }
      return;
   }

   /* loop header and merge blocks: check if all (linear) predecessors have been processed */
   for (ASSERTED unsigned pred : block->linear_preds)
      assert(ctx.processed[pred]);

   /* iterate the phi nodes for which operands to spill at the predecessor */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;

      /* if the phi is not spilled, add to instructions */
      if (!phi->definitions[0].isTemp() ||
          !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp())) {
         instructions.emplace_back(std::move(phi));
         continue;
      }

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];

      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;

         unsigned pred_idx = preds[i];
         Operand spill_op = phi->operands[i];

         if (spill_op.isTemp()) {
            assert(phi->operands[i].isKill());
            Temp var = phi->operands[i].getTemp();

            std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
            /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            if (rename_it == ctx.renames[preds[i]].end() && ctx.remat.count(var))
               ctx.unused_remats.erase(ctx.remat[var].instr);

            /* check if variable is already spilled at predecessor */
            auto spilled = ctx.spills_exit[pred_idx].find(var);
            if (spilled != ctx.spills_exit[pred_idx].end()) {
               /* affinity hint: let the operand's slot and the phi's slot coalesce */
               if (spilled->second != def_spill_id)
                  ctx.add_affinity(def_spill_id, spilled->second);
               continue;
            }

            /* rename if necessary */
            if (rename_it != ctx.renames[pred_idx].end()) {
               spill_op.setTemp(rename_it->second);
               ctx.renames[pred_idx].erase(rename_it);
            }
         }

         uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());

         /* add interferences and affinity */
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx])
            ctx.add_interference(spill_id, pair.second);
         ctx.add_affinity(def_spill_id, spill_id);

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = spill_op;
         spill->operands[1] = Operand::c32(spill_id);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* logical phis spill before p_logical_end; linear phis spill at the
          * very end of the predecessor (before the final branch) */
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));

         /* Add the original name to predecessor's spilled variables */
         if (spill_op.isTemp())
            ctx.spills_exit[pred_idx][phi->operands[i].getTemp()] = spill_id;
      }

      /* remove phi from instructions */
      phi.reset();
   }

   /* iterate all (other) spilled variables for which to spill at the predecessor */
   // TODO: would be better to have them sorted: first vgprs and first with longest distance
   for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      for (unsigned pred_idx : preds) {
         /* variable is already spilled at predecessor */
         auto spilled = ctx.spills_exit[pred_idx].find(pair.first);
         if (spilled != ctx.spills_exit[pred_idx].end()) {
            if (spilled->second != pair.second)
               ctx.add_affinity(pair.second, spilled->second);
            continue;
         }

         /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            continue;

         /* add interferences between spilled variable and predecessors exit spills */
         for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
            if (exit_spill.first == pair.first)
               continue;
            ctx.add_interference(exit_spill.second, pair.second);
         }

         /* variable is in register at predecessor and has to be spilled */
         /* rename if necessary */
         Temp var = pair.first;
         std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
         if (rename_it != ctx.renames[pred_idx].end()) {
            var = rename_it->second;
            ctx.renames[pred_idx].erase(rename_it);
         }

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = Operand(var);
         spill->operands[1] = Operand::c32(pair.second);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* VGPRs must be spilled before p_logical_end; linear (SGPR) spills
          * go at the predecessor's very end */
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         ctx.spills_exit[pred.index][pair.first] = pair.second;
      }
   }

   /* iterate phis for which operands to reload */
   for (aco_ptr<Instruction>& phi : instructions) {
      assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
      assert(!phi->definitions[0].isTemp() ||
             !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()));

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (!phi->operands[i].isTemp())
            continue;
         unsigned pred_idx = preds[i];

         /* if the operand was reloaded, rename */
         if (!ctx.spills_exit[pred_idx].count(phi->operands[i].getTemp())) {
            std::map<Temp, Temp>::iterator it =
               ctx.renames[pred_idx].find(phi->operands[i].getTemp());
            if (it != ctx.renames[pred_idx].end()) {
               phi->operands[i].setTemp(it->second);
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            } else {
               auto remat_it = ctx.remat.find(phi->operands[i].getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }

         Temp tmp = phi->operands[i].getTemp();

         /* reload phi operand at end of predecessor block */
         Temp new_name = ctx.program->allocateTmp(tmp.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         aco_ptr<Instruction> reload =
            do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);

         /* reload spilled exec mask directly to exec */
         if (!phi->definitions[0].isTemp()) {
            assert(phi->definitions[0].isFixed() && phi->definitions[0].physReg() == exec);
            reload->definitions[0] = phi->definitions[0];
            phi->operands[i] = Operand(exec, ctx.program->lane_mask);
         } else {
            ctx.spills_exit[pred_idx].erase(tmp);
            ctx.renames[pred_idx][tmp] = new_name;
            phi->operands[i].setTemp(new_name);
         }

         pred.instructions.insert(it, std::move(reload));
      }
   }

   /* iterate live variables for which to reload */
   // TODO: reload at current block if variable is spilled on all predecessors
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_start[block_idx]) {
      /* skip spilled variables */
      if (ctx.spills_entry[block_idx].count(pair.first))
         continue;
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      /* variable is dead at predecessor, it must be from a phi */
      bool is_dead = false;
      for (unsigned pred_idx : preds) {
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            is_dead = true;
      }
      if (is_dead)
         continue;
      for (unsigned pred_idx : preds) {
         /* the variable is not spilled at the predecessor */
         if (!ctx.spills_exit[pred_idx].count(pair.first))
            continue;

         /* variable is spilled at predecessor and has to be reloaded */
         Temp new_name = ctx.program->allocateTmp(pair.first.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);

         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
         pred.instructions.insert(it, std::move(reload));

         ctx.spills_exit[pred.index].erase(pair.first);
         ctx.renames[pred.index][pair.first] = new_name;
      }

      /* check if we have to create a new phi for this variable */
      Temp rename = Temp();
      bool is_same = true;
      for (unsigned pred_idx : preds) {
         if (!ctx.renames[pred_idx].count(pair.first)) {
            if (rename == Temp())
               rename = pair.first;
            else
               is_same = rename == pair.first;
         } else {
            if (rename == Temp())
               rename = ctx.renames[pred_idx][pair.first];
            else
               is_same = rename == ctx.renames[pred_idx][pair.first];
         }

         if (!is_same)
            break;
      }

      if (!is_same) {
         /* the variable was renamed differently in the predecessors: we have to create a phi */
         aco_opcode opcode = pair.first.is_linear() ? aco_opcode::p_linear_phi : aco_opcode::p_phi;
         aco_ptr<Pseudo_instruction> phi{
            create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
         rename = ctx.program->allocateTmp(pair.first.regClass());
         for (unsigned i = 0; i < phi->operands.size(); i++) {
            Temp tmp;
            if (ctx.renames[preds[i]].count(pair.first)) {
               tmp = ctx.renames[preds[i]][pair.first];
            } else if (preds[i] >= block_idx) {
               /* back edge (predecessor not yet processed): use the phi's own
                * new name; the rename is propagated around the loop later */
               tmp = rename;
            } else {
               tmp = pair.first;
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
               if (ctx.remat.count(tmp))
                  ctx.unused_remats.erase(ctx.remat[tmp].instr);
            }
            phi->operands[i] = Operand(tmp);
         }
         phi->definitions[0] = Definition(rename);
         instructions.emplace_back(std::move(phi));
      }

      /* the variable was renamed: add new name to renames */
      if (!(rename == Temp() || rename == pair.first))
         ctx.renames[block_idx][pair.first] = rename;
   }

   /* combine phis with instructions */
   /* skip over the phi slots that were reset() above */
   unsigned idx = 0;
   while (!block->instructions[idx]) {
      idx++;
   }

   if (!ctx.processed[block_idx]) {
      assert(!(block->kind & block_kind_loop_header));
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, idx);
      ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(),
                                              ctx.register_demand[block->index].begin() + idx);
      ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(),
                                               instructions.size(), demand_before);
   }

   std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
   instructions.insert(
      instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
      std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
   block->instructions = std::move(instructions);
}
1168 
/* Walks the block's instructions, rewriting them for the current spill state:
 * spilled operands are reloaded (p_reload) just before their use and renamed
 * operands are substituted. Whenever the register demand at an instruction
 * still exceeds ctx.target_pressure, additional variables are spilled
 * (p_spill) — preferring the one with the furthest next use, or a
 * rematerializable one. Updates ctx.spills_exit[block_idx] (aliased as
 * current_spills) and ctx.renames[block_idx] as it goes.
 *
 * spilled_registers is taken by value: it tracks the demand currently held in
 * spill slots and is adjusted locally as variables are reloaded/spilled.
 */
void
process_block(spill_ctx& ctx, unsigned block_idx, Block* block, RegisterDemand spilled_registers)
{
   assert(!ctx.processed[block_idx]);

   std::vector<aco_ptr<Instruction>> instructions;
   unsigned idx = 0;

   /* phis are handled separately */
   while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
      instructions.emplace_back(std::move(block->instructions[idx++]));
   }

   /* local next-use distances are only needed if we may have to spill here */
   if (block->register_demand.exceeds(ctx.target_pressure)) {
      update_local_next_uses(ctx, block, ctx.local_next_use_distance);
   } else {
      /* We won't use local_next_use_distance, so no initialization needed */
   }

   /* NOTE: reference — mutations below update ctx.spills_exit[block_idx] directly */
   auto& current_spills = ctx.spills_exit[block_idx];

   while (idx < block->instructions.size()) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      /* Spilling is handled as part of phis (they should always have the same or higher register
       * demand). If we try to spill here, we might not be able to reduce the register demand enough
       * because there is no path to spill constant/undef phi operands. */
      if (instr->opcode == aco_opcode::p_branch) {
         instructions.emplace_back(std::move(instr));
         idx++;
         continue;
      }

      /* new temp -> (original temp, spill id) for reloads needed by this instruction */
      std::map<Temp, std::pair<Temp, uint32_t>> reloads;

      /* rename and reload operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         if (!current_spills.count(op.getTemp())) {
            /* the Operand is in register: check if it was renamed */
            auto rename_it = ctx.renames[block_idx].find(op.getTemp());
            if (rename_it != ctx.renames[block_idx].end()) {
               op.setTemp(rename_it->second);
            } else {
               /* prevent its defining instruction from being DCE'd if it could be rematerialized */
               auto remat_it = ctx.remat.find(op.getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }
         /* the Operand is spilled: add it to reloads */
         Temp new_tmp = ctx.program->allocateTmp(op.regClass());
         ctx.renames[block_idx][op.getTemp()] = new_tmp;
         reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
         current_spills.erase(op.getTemp());
         op.setTemp(new_tmp);
         spilled_registers -= new_tmp;
      }

      /* check if register demand is low enough before and after the current instruction */
      if (block->register_demand.exceeds(ctx.target_pressure)) {

         RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
         new_demand.update(get_demand_before(ctx, block_idx, idx));

         assert(!ctx.local_next_use_distance.empty());

         /* if reg pressure is too high, spill variable with furthest next use */
         while ((new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
            unsigned distance = 0;
            Temp to_spill;
            bool do_rematerialize = false;
            /* spill the register type that is actually over budget; VGPRs first */
            RegType type = RegType::sgpr;
            if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr)
               type = RegType::vgpr;

            for (std::pair<Temp, uint32_t> pair : ctx.local_next_use_distance[idx]) {
               if (pair.first.type() != type)
                  continue;
               bool can_rematerialize = ctx.remat.count(pair.first);
               /* prefer rematerializable candidates; among equals, prefer the
                * furthest next use (must be used after this instruction) */
               if (((pair.second > distance && can_rematerialize == do_rematerialize) ||
                    (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                   !current_spills.count(pair.first)) {
                  to_spill = pair.first;
                  distance = pair.second;
                  do_rematerialize = can_rematerialize;
               }
            }

            assert(distance != 0 && distance > idx);
            uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());

            /* add interferences with currently spilled variables */
            for (std::pair<Temp, uint32_t> pair : current_spills)
               ctx.add_interference(spill_id, pair.second);
            for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads)
               ctx.add_interference(spill_id, pair.second.second);

            /* record under the ORIGINAL name before applying any rename */
            current_spills[to_spill] = spill_id;
            spilled_registers += to_spill;

            /* rename if necessary */
            if (ctx.renames[block_idx].count(to_spill)) {
               to_spill = ctx.renames[block_idx][to_spill];
            }

            /* add spill to new instructions */
            aco_ptr<Pseudo_instruction> spill{
               create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
            spill->operands[0] = Operand(to_spill);
            spill->operands[1] = Operand::c32(spill_id);
            instructions.emplace_back(std::move(spill));
         }
      }

      /* add reloads and instruction to new instructions */
      for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads) {
         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.second.first, pair.first, pair.second.second);
         instructions.emplace_back(std::move(reload));
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   block->instructions = std::move(instructions);
}
1300 
/* Per-block driver of the spilling pass. Determines the block's entry spill
 * set, inserts coupling code on incoming edges (deferred for loop headers),
 * rewrites the block's body if needed, and — when the next block leaves the
 * current loop — finishes the postponed loop-header coupling code and
 * propagates the resulting renames through the whole loop to repair SSA.
 */
void
spill_block(spill_ctx& ctx, unsigned block_idx)
{
   Block* block = &ctx.program->blocks[block_idx];

   /* determine set of variables which are spilled at the beginning of the block */
   RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);

   /* add interferences for spilled variables: all entry spills are
    * simultaneously live, so they pairwise interfere */
   for (auto it = ctx.spills_entry[block_idx].begin(); it != ctx.spills_entry[block_idx].end();
        ++it) {
      for (auto it2 = std::next(it); it2 != ctx.spills_entry[block_idx].end(); ++it2)
         ctx.add_interference(it->second, it2->second);
   }

   /* loop headers get their coupling code after the whole loop is processed
    * (the back-edge predecessor isn't done yet) — see the loop-exit handling below */
   bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
   if (!is_loop_header) {
      /* add spill/reload code on incoming control flow edges */
      add_coupling_code(ctx, block, block_idx);
   }

   const auto& current_spills = ctx.spills_entry[block_idx];

   /* check conditions to process this block */
   bool process = (block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
                  !ctx.renames[block_idx].empty() || ctx.unused_remats.size();

   /* also process if any entry spill has its next use within this block */
   for (auto it = current_spills.begin(); !process && it != current_spills.end(); ++it) {
      if (ctx.next_use_distances_start[block_idx].at(it->first).first == block_idx)
         process = true;
   }

   assert(ctx.spills_exit[block_idx].empty());
   ctx.spills_exit[block_idx] = current_spills;
   if (process) {
      process_block(ctx, block_idx, block, spilled_registers);
   }

   ctx.processed[block_idx] = true;

   /* check if the next block leaves the current loop */
   if (block->loop_nest_depth == 0 ||
       ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
      return;

   /* this was the loop's last block: emit the deferred header coupling code */
   Block* loop_header = ctx.loop_header.top();

   /* preserve original renames at end of loop header block */
   aco::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);

   /* add coupling code to all loop header predecessors */
   add_coupling_code(ctx, loop_header, loop_header->index);

   /* propagate new renames through loop: i.e. repair the SSA */
   renames.swap(ctx.renames[loop_header->index]);
   for (std::pair<Temp, Temp> rename : renames) {
      for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
         Block& current = ctx.program->blocks[idx];
         std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();

         /* first rename phis */
         while (instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& phi = *instr_it;
            if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
               break;
            /* no need to rename the loop header phis once again. this happened in
             * add_coupling_code() */
            if (idx == loop_header->index) {
               instr_it++;
               continue;
            }

            for (Operand& op : phi->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first)
                  op.setTemp(rename.second);
            }
            instr_it++;
         }

         /* variable is not live at beginning of this block */
         if (ctx.next_use_distances_start[idx].count(rename.first) == 0)
            continue;

         /* if the variable is live at the block's exit, add rename */
         if (ctx.next_use_distances_end[idx].count(rename.first) != 0)
            ctx.renames[idx].insert(rename);

         /* rename all uses in this block */
         bool renamed = false;
         while (!renamed && instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& instr = *instr_it;
            for (Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first) {
                  op.setTemp(rename.second);
                  /* we can stop with this block as soon as the variable is spilled */
                  if (instr->opcode == aco_opcode::p_spill)
                     renamed = true;
               }
            }
            instr_it++;
         }
      }
   }

   /* remove loop header info from stack */
   ctx.loop_header.pop();
}
1412 
Temp
load_scratch_resource(spill_ctx& ctx, Builder& bld, bool apply_scratch_offset)
{
   /* Builds the s4 buffer resource descriptor used by MUBUF scratch
    * spill/reload instructions (pre-GFX9 path).
    *
    * ctx:                  spiller context (provides program/config state)
    * bld:                  builder that receives the setup instructions
    * apply_scratch_offset: if true, fold program->scratch_offset into the
    *                       64-bit base address of the descriptor
    * returns: s4 Temp holding the completed resource descriptor
    */
   Temp private_segment_buffer = ctx.program->private_segment_buffer;
   if (!private_segment_buffer.bytes()) {
      /* No private segment buffer available: materialize the scratch base
       * address from the aco_symbol_scratch_addr_{lo,hi} symbols instead. */
      Temp addr_lo =
         bld.sop1(aco_opcode::p_load_symbol, bld.def(s1), Operand::c32(aco_symbol_scratch_addr_lo));
      Temp addr_hi =
         bld.sop1(aco_opcode::p_load_symbol, bld.def(s1), Operand::c32(aco_symbol_scratch_addr_hi));
      private_segment_buffer =
         bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), addr_lo, addr_hi);
   } else if (ctx.program->stage.hw != AC_HW_COMPUTE_SHADER) {
      /* Non-compute stages: the register holds a pointer to the buffer, so
       * load the first two dwords of the descriptor from memory. */
      private_segment_buffer =
         bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, Operand::zero());
   }

   if (apply_scratch_offset) {
      /* 64-bit add of scratch_offset to the base address:
       * split the address, add with carry, then recombine. */
      Temp addr_lo = bld.tmp(s1);
      Temp addr_hi = bld.tmp(s1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(addr_lo), Definition(addr_hi),
                 private_segment_buffer);

      Temp carry = bld.tmp(s1);
      addr_lo = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), addr_lo,
                         ctx.program->scratch_offset);
      addr_hi = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), addr_hi,
                         Operand::c32(0), bld.scc(carry));

      private_segment_buffer =
         bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), addr_lo, addr_hi);
   }

   /* Descriptor config word: per-lane addressing (ADD_TID_ENABLE) with an
    * index stride matching the wave size (64 -> 3, otherwise 2). */
   uint32_t rsrc_conf =
      S_008F0C_ADD_TID_ENABLE(1) | S_008F0C_INDEX_STRIDE(ctx.program->wave_size == 64 ? 3 : 2);

   if (ctx.program->gfx_level >= GFX10) {
      rsrc_conf |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
                   S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
                   S_008F0C_RESOURCE_LEVEL(ctx.program->gfx_level < GFX11);
   } else if (ctx.program->gfx_level <= GFX7) {
      /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
      rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
   }
   /* older generations need element size = 4 bytes. element size removed in GFX9 */
   if (ctx.program->gfx_level <= GFX8)
      rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);

   /* Assemble the final descriptor: 64-bit base address, num_records = -1
    * (unbounded), and the config word computed above. */
   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer,
                     Operand::c32(-1u), Operand::c32(rsrc_conf));
}
1464 
void
setup_vgpr_spill_reload(spill_ctx& ctx, Block& block,
                        std::vector<aco_ptr<Instruction>>& instructions, uint32_t spill_slot,
                        Temp& scratch_offset, unsigned* offset)
{
   /* Prepares the scratch access for one VGPR spill/reload:
    * ensures ctx.scratch_rsrc holds a valid resource (pre-GFX9) or SADDR
    * (GFX9+), possibly updates scratch_offset, and computes the immediate
    * byte offset to use for the memory instruction.
    *
    * spill_slot: dword slot index assigned to the spilled value
    * offset:     out-param receiving the instruction's constant offset
    */
   uint32_t scratch_size = ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

   /* Range of constant offsets the scratch instructions can encode. */
   uint32_t offset_range;
   if (ctx.program->gfx_level >= GFX9) {
      offset_range =
         ctx.program->dev.scratch_global_offset_max - ctx.program->dev.scratch_global_offset_min;
   } else {
      /* Pre-GFX9 MUBUF: 12-bit offset; the existing per-wave scratch usage
       * is folded into the offset, shrinking the usable range. */
      if (scratch_size < 4095)
         offset_range = 4095 - scratch_size;
      else
         offset_range = 0;
   }

   /* True if the highest spill slot's byte offset cannot be encoded as a
    * constant offset, forcing a dynamic soffset/SADDR per access. */
   bool overflow = (ctx.vgpr_spill_slots - 1) * 4 > offset_range;

   /* Pick where to emit the resource setup: in this block if it is a
    * top-level block, otherwise hoist it before p_logical_end of the closest
    * dominating top-level block so it reaches all uses. */
   Builder rsrc_bld(ctx.program);
   if (block.kind & block_kind_top_level) {
      rsrc_bld.reset(&instructions);
   } else if (ctx.scratch_rsrc == Temp() && (!overflow || ctx.program->gfx_level < GFX9)) {
      Block* tl_block = &block;
      while (!(tl_block->kind & block_kind_top_level))
         tl_block = &ctx.program->blocks[tl_block->linear_idom];

      /* find p_logical_end */
      std::vector<aco_ptr<Instruction>>& prev_instructions = tl_block->instructions;
      unsigned idx = prev_instructions.size() - 1;
      while (prev_instructions[idx]->opcode != aco_opcode::p_logical_end)
         idx--;
      rsrc_bld.reset(&prev_instructions, std::next(prev_instructions.begin(), idx));
   }

   /* If spilling overflows the constant offset range at any point, we need to emit the soffset
    * before every spill/reload to avoid increasing register demand.
    */
   Builder offset_bld = rsrc_bld;
   if (overflow)
      offset_bld.reset(&instructions);

   *offset = spill_slot * 4;
   if (ctx.program->gfx_level >= GFX9) {
      *offset += ctx.program->dev.scratch_global_offset_min;

      if (ctx.scratch_rsrc == Temp() || overflow) {
         /* SADDR base: skip the already-used per-wave scratch, biased so the
          * minimum encodable offset maps to the start of the spill area. */
         int32_t saddr = scratch_size - ctx.program->dev.scratch_global_offset_min;
         if ((int32_t)*offset > (int32_t)ctx.program->dev.scratch_global_offset_max) {
            /* Offset too large to encode: move it into SADDR instead. */
            saddr += (int32_t)*offset;
            *offset = 0;
         }

         /* GFX9+ uses scratch_* instructions, which don't use a resource. */
         ctx.scratch_rsrc = offset_bld.copy(offset_bld.def(s1), Operand::c32(saddr));
      }
   } else {
      if (ctx.scratch_rsrc == Temp())
         ctx.scratch_rsrc = load_scratch_resource(ctx, rsrc_bld, overflow);

      if (overflow) {
         /* Encode the whole slot offset in soffset; note the offset is
          * scaled by wave_size because MUBUF scratch is per-lane addressed. */
         uint32_t soffset =
            ctx.program->config->scratch_bytes_per_wave + *offset * ctx.program->wave_size;
         *offset = 0;

         scratch_offset = offset_bld.copy(offset_bld.def(s1), Operand::c32(soffset));
      } else {
         /* Spill area starts after the existing per-wave scratch usage. */
         *offset += scratch_size;
      }
   }
}
1537 
void
spill_vgpr(spill_ctx& ctx, Block& block, std::vector<aco_ptr<Instruction>>& instructions,
           aco_ptr<Instruction>& spill, std::vector<uint32_t>& slots)
{
   /* Lowers a p_spill of a VGPR value to actual scratch stores.
    *
    * spill: the p_spill pseudo instruction (operand 0 = value,
    *        operand 1 = constant spill id)
    * slots: spill-id -> slot mapping produced by assign_spill_slots
    */
   ctx.program->config->spilled_vgprs += spill->operands[0].size();

   uint32_t spill_id = spill->operands[1].constantValue();
   uint32_t spill_slot = slots[spill_id];

   /* Set up the scratch resource/SADDR and the constant byte offset. */
   Temp scratch_offset = ctx.program->scratch_offset;
   unsigned offset;
   setup_vgpr_spill_reload(ctx, block, instructions, spill_slot, scratch_offset, &offset);

   assert(spill->operands[0].isTemp());
   Temp temp = spill->operands[0].getTemp();
   assert(temp.type() == RegType::vgpr && !temp.is_linear());

   Builder bld(ctx.program, &instructions);
   if (temp.size() > 1) {
      /* Multi-dword value: split into single dwords and store each one
       * dword at consecutive 4-byte offsets. */
      Instruction* split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector,
                                                                Format::PSEUDO, 1, temp.size())};
      split->operands[0] = Operand(temp);
      for (unsigned i = 0; i < temp.size(); i++)
         split->definitions[i] = bld.def(v1);
      bld.insert(split);
      for (unsigned i = 0; i < temp.size(); i++, offset += 4) {
         Temp elem = split->definitions[i].getTemp();
         if (ctx.program->gfx_level >= GFX9) {
            /* GFX9+: scratch_* instruction, no buffer resource needed. */
            bld.scratch(aco_opcode::scratch_store_dword, Operand(v1), ctx.scratch_rsrc, elem,
                        offset, memory_sync_info(storage_vgpr_spill, semantic_private));
         } else {
            /* Pre-GFX9: MUBUF store through the scratch resource. */
            Instruction* instr = bld.mubuf(aco_opcode::buffer_store_dword, ctx.scratch_rsrc,
                                           Operand(v1), scratch_offset, elem, offset, false, true);
            instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
         }
      }
   } else if (ctx.program->gfx_level >= GFX9) {
      bld.scratch(aco_opcode::scratch_store_dword, Operand(v1), ctx.scratch_rsrc, temp, offset,
                  memory_sync_info(storage_vgpr_spill, semantic_private));
   } else {
      Instruction* instr = bld.mubuf(aco_opcode::buffer_store_dword, ctx.scratch_rsrc, Operand(v1),
                                     scratch_offset, temp, offset, false, true);
      instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
   }
}
1583 
void
reload_vgpr(spill_ctx& ctx, Block& block, std::vector<aco_ptr<Instruction>>& instructions,
            aco_ptr<Instruction>& reload, std::vector<uint32_t>& slots)
{
   /* Lowers a p_reload of a VGPR value to actual scratch loads — the mirror
    * of spill_vgpr.
    *
    * reload: the p_reload pseudo instruction (operand 0 = constant spill id,
    *         definition 0 = reloaded value)
    * slots:  spill-id -> slot mapping produced by assign_spill_slots
    */
   uint32_t spill_id = reload->operands[0].constantValue();
   uint32_t spill_slot = slots[spill_id];

   /* Set up the scratch resource/SADDR and the constant byte offset. */
   Temp scratch_offset = ctx.program->scratch_offset;
   unsigned offset;
   setup_vgpr_spill_reload(ctx, block, instructions, spill_slot, scratch_offset, &offset);

   Definition def = reload->definitions[0];

   Builder bld(ctx.program, &instructions);
   if (def.size() > 1) {
      /* Multi-dword value: load each dword separately and recombine with
       * p_create_vector. */
      Instruction* vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
                                                              Format::PSEUDO, def.size(), 1)};
      vec->definitions[0] = def;
      for (unsigned i = 0; i < def.size(); i++, offset += 4) {
         Temp tmp = bld.tmp(v1);
         vec->operands[i] = Operand(tmp);
         if (ctx.program->gfx_level >= GFX9) {
            /* GFX9+: scratch_* instruction, no buffer resource needed. */
            bld.scratch(aco_opcode::scratch_load_dword, Definition(tmp), Operand(v1),
                        ctx.scratch_rsrc, offset,
                        memory_sync_info(storage_vgpr_spill, semantic_private));
         } else {
            /* Pre-GFX9: MUBUF load through the scratch resource. */
            Instruction* instr =
               bld.mubuf(aco_opcode::buffer_load_dword, Definition(tmp), ctx.scratch_rsrc,
                         Operand(v1), scratch_offset, offset, false, true);
            instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
         }
      }
      bld.insert(vec);
   } else if (ctx.program->gfx_level >= GFX9) {
      bld.scratch(aco_opcode::scratch_load_dword, def, Operand(v1), ctx.scratch_rsrc, offset,
                  memory_sync_info(storage_vgpr_spill, semantic_private));
   } else {
      Instruction* instr = bld.mubuf(aco_opcode::buffer_load_dword, def, ctx.scratch_rsrc,
                                     Operand(v1), scratch_offset, offset, false, true);
      instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
   }
}
1626 
1627 void
add_interferences(spill_ctx & ctx,std::vector<bool> & is_assigned,std::vector<uint32_t> & slots,std::vector<bool> & slots_used,unsigned id)1628 add_interferences(spill_ctx& ctx, std::vector<bool>& is_assigned, std::vector<uint32_t>& slots,
1629                   std::vector<bool>& slots_used, unsigned id)
1630 {
1631    for (unsigned other : ctx.interferences[id].second) {
1632       if (!is_assigned[other])
1633          continue;
1634 
1635       RegClass other_rc = ctx.interferences[other].first;
1636       unsigned slot = slots[other];
1637       std::fill(slots_used.begin() + slot, slots_used.begin() + slot + other_rc.size(), true);
1638    }
1639 }
1640 
1641 unsigned
find_available_slot(std::vector<bool> & used,unsigned wave_size,unsigned size,bool is_sgpr)1642 find_available_slot(std::vector<bool>& used, unsigned wave_size, unsigned size, bool is_sgpr)
1643 {
1644    unsigned wave_size_minus_one = wave_size - 1;
1645    unsigned slot = 0;
1646 
1647    while (true) {
1648       bool available = true;
1649       for (unsigned i = 0; i < size; i++) {
1650          if (slot + i < used.size() && used[slot + i]) {
1651             available = false;
1652             break;
1653          }
1654       }
1655       if (!available) {
1656          slot++;
1657          continue;
1658       }
1659 
1660       if (is_sgpr && ((slot & wave_size_minus_one) > wave_size - size)) {
1661          slot = align(slot, wave_size);
1662          continue;
1663       }
1664 
1665       std::fill(used.begin(), used.end(), false);
1666 
1667       if (slot + size > used.size())
1668          used.resize(slot + size);
1669 
1670       return slot;
1671    }
1672 }
1673 
1674 void
assign_spill_slots_helper(spill_ctx & ctx,RegType type,std::vector<bool> & is_assigned,std::vector<uint32_t> & slots,unsigned * num_slots)1675 assign_spill_slots_helper(spill_ctx& ctx, RegType type, std::vector<bool>& is_assigned,
1676                           std::vector<uint32_t>& slots, unsigned* num_slots)
1677 {
1678    std::vector<bool> slots_used;
1679 
1680    /* assign slots for ids with affinities first */
1681    for (std::vector<uint32_t>& vec : ctx.affinities) {
1682       if (ctx.interferences[vec[0]].first.type() != type)
1683          continue;
1684 
1685       for (unsigned id : vec) {
1686          if (!ctx.is_reloaded[id])
1687             continue;
1688 
1689          add_interferences(ctx, is_assigned, slots, slots_used, id);
1690       }
1691 
1692       unsigned slot = find_available_slot(
1693          slots_used, ctx.wave_size, ctx.interferences[vec[0]].first.size(), type == RegType::sgpr);
1694 
1695       for (unsigned id : vec) {
1696          assert(!is_assigned[id]);
1697 
1698          if (ctx.is_reloaded[id]) {
1699             slots[id] = slot;
1700             is_assigned[id] = true;
1701          }
1702       }
1703    }
1704 
1705    /* assign slots for ids without affinities */
1706    for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1707       if (is_assigned[id] || !ctx.is_reloaded[id] || ctx.interferences[id].first.type() != type)
1708          continue;
1709 
1710       add_interferences(ctx, is_assigned, slots, slots_used, id);
1711 
1712       unsigned slot = find_available_slot(
1713          slots_used, ctx.wave_size, ctx.interferences[id].first.size(), type == RegType::sgpr);
1714 
1715       slots[id] = slot;
1716       is_assigned[id] = true;
1717    }
1718 
1719    *num_slots = slots_used.size();
1720 }
1721 
1722 void
end_unused_spill_vgprs(spill_ctx & ctx,Block & block,std::vector<Temp> & vgpr_spill_temps,const std::vector<uint32_t> & slots,const aco::unordered_map<Temp,uint32_t> & spills)1723 end_unused_spill_vgprs(spill_ctx& ctx, Block& block, std::vector<Temp>& vgpr_spill_temps,
1724                        const std::vector<uint32_t>& slots,
1725                        const aco::unordered_map<Temp, uint32_t>& spills)
1726 {
1727    std::vector<bool> is_used(vgpr_spill_temps.size());
1728    for (std::pair<Temp, uint32_t> pair : spills) {
1729       if (pair.first.type() == RegType::sgpr && ctx.is_reloaded[pair.second])
1730          is_used[slots[pair.second] / ctx.wave_size] = true;
1731    }
1732 
1733    std::vector<Temp> temps;
1734    for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1735       if (vgpr_spill_temps[i].id() && !is_used[i]) {
1736          temps.push_back(vgpr_spill_temps[i]);
1737          vgpr_spill_temps[i] = Temp();
1738       }
1739    }
1740    if (temps.empty() || block.linear_preds.empty())
1741       return;
1742 
1743    aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
1744       aco_opcode::p_end_linear_vgpr, Format::PSEUDO, temps.size(), 0)};
1745    for (unsigned i = 0; i < temps.size(); i++)
1746       destr->operands[i] = Operand(temps[i]);
1747 
1748    std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1749    while (is_phi(*it))
1750       ++it;
1751    block.instructions.insert(it, std::move(destr));
1752 }
1753 
void
assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr)
{
   /* Assigns a concrete slot to every spill id, then rewrites all p_spill /
    * p_reload pseudo instructions: SGPR spills become lane writes/reads of
    * linear VGPRs, VGPR spills become scratch memory accesses.
    *
    * spills_to_vgpr: upper bound on linear VGPRs available for SGPR spills
    *                 (checked by assertion only)
    */
   std::vector<uint32_t> slots(ctx.interferences.size());
   std::vector<bool> is_assigned(ctx.interferences.size());

   /* first, handle affinities: just merge all interferences into both spill ids */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(vec[i] != vec[j]);
            /* if any member of the group is reloaded, all are */
            bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
            ctx.is_reloaded[vec[i]] = reloaded;
            ctx.is_reloaded[vec[j]] = reloaded;
         }
      }
   }
   /* sanity check: no spill id interferes with itself */
   for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
      for (ASSERTED uint32_t id : ctx.interferences[i].second)
         assert(i != id);

   /* for each spill slot, assign as many spill ids as possible */
   assign_spill_slots_helper(ctx, RegType::sgpr, is_assigned, slots, &ctx.sgpr_spill_slots);
   assign_spill_slots_helper(ctx, RegType::vgpr, is_assigned, slots, &ctx.vgpr_spill_slots);

   /* every reloaded id must have received a slot */
   for (unsigned id = 0; id < is_assigned.size(); id++)
      assert(is_assigned[id] || !ctx.is_reloaded[id]);

   /* affinity groups must agree on assignment, reload status, type and slot */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
            if (!is_assigned[vec[i]])
               continue;
            assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
            assert(ctx.interferences[vec[i]].first.type() ==
                   ctx.interferences[vec[j]].first.type());
            assert(slots[vec[i]] == slots[vec[j]]);
         }
      }
   }

   /* hope, we didn't mess up */
   /* one linear VGPR holds wave_size SGPR spill slots (one per lane) */
   std::vector<Temp> vgpr_spill_temps((ctx.sgpr_spill_slots + ctx.wave_size - 1) / ctx.wave_size);
   assert(vgpr_spill_temps.size() <= spills_to_vgpr);

   /* replace pseudo instructions with actual hardware instructions */
   unsigned last_top_level_block_idx = 0;
   for (Block& block : ctx.program->blocks) {

      if (block.kind & block_kind_top_level) {
         last_top_level_block_idx = block.index;

         end_unused_spill_vgprs(ctx, block, vgpr_spill_temps, slots, ctx.spills_entry[block.index]);

         /* If the block has no predecessors (for example in RT resume shaders),
          * we cannot reuse the current scratch_rsrc temp because its definition is unreachable */
         if (block.linear_preds.empty())
            ctx.scratch_rsrc = Temp();
      }

      /* rebuild the block's instruction list, lowering spill pseudos */
      std::vector<aco_ptr<Instruction>>::iterator it;
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());
      Builder bld(ctx.program, &instructions);
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {

         if ((*it)->opcode == aco_opcode::p_spill) {
            uint32_t spill_id = (*it)->operands[1].constantValue();

            if (!ctx.is_reloaded[spill_id]) {
               /* never reloaded, so don't spill */
            } else if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               spill_vgpr(ctx, block, instructions, *it, slots);
            } else {
               ctx.program->config->spilled_sgprs += (*it)->operands[0].size();

               uint32_t spill_slot = slots[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

               /* spill sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* spill =
                  create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
               spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               /* operand 1 selects the lane within the linear VGPR */
               spill->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               spill->operands[2] = (*it)->operands[0];
               instructions.emplace_back(aco_ptr<Instruction>(spill));
            }

         } else if ((*it)->opcode == aco_opcode::p_reload) {
            uint32_t spill_id = (*it)->operands[0].constantValue();
            assert(ctx.is_reloaded[spill_id]);

            if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               reload_vgpr(ctx, block, instructions, *it, slots);
            } else {
               uint32_t spill_slot = slots[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

               /* reload sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(
                  aco_opcode::p_reload, Format::PSEUDO, 2, 1);
               reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               /* operand 1 selects the lane within the linear VGPR */
               reload->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               reload->definitions[0] = (*it)->definitions[0];
               instructions.emplace_back(aco_ptr<Instruction>(reload));
            }
         } else if (!ctx.unused_remats.count(it->get())) {
            /* keep the instruction unless it is a rematerialization whose
             * result is no longer needed (DCE of remat code) */
            instructions.emplace_back(std::move(*it));
         }
      }
      block.instructions = std::move(instructions);
   }

   /* update required scratch memory */
   ctx.program->config->scratch_bytes_per_wave += ctx.vgpr_spill_slots * 4 * ctx.program->wave_size;
}
1913 
1914 } /* end namespace */
1915 
void
spill(Program* program, live& live_vars)
{
   /* Entry point of the spiller: if register demand exceeds what the
    * hardware provides at the minimum wave count, insert spill/reload code
    * until the program fits, then recompute liveness. */
   program->config->spilled_vgprs = 0;
   program->config->spilled_sgprs = 0;

   program->progress = CompilationProgress::after_spilling;

   /* no spilling when register pressure is low enough */
   if (program->num_waves > 0)
      return;

   /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
   lower_to_cssa(program, live_vars);

   /* calculate target register demand */
   const RegisterDemand demand = program->max_reg_demand; /* current max */
   const uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   const uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   uint16_t extra_vgprs = 0;
   uint16_t extra_sgprs = 0;

   /* calculate extra VGPRs required for spilling SGPRs */
   if (demand.sgpr > sgpr_limit) {
      unsigned sgpr_spills = demand.sgpr - sgpr_limit;
      extra_vgprs = DIV_ROUND_UP(sgpr_spills * 2, program->wave_size) + 1;
   }
   /* add extra SGPRs required for spilling VGPRs */
   if (demand.vgpr + extra_vgprs > vgpr_limit) {
      if (program->gfx_level >= GFX9)
         extra_sgprs = 1; /* SADDR */
      else
         extra_sgprs = 5; /* scratch_resource (s4) + scratch_offset (s1) */
      if (demand.sgpr + extra_sgprs > sgpr_limit) {
         /* re-calculate in case something has changed */
         unsigned sgpr_spills = demand.sgpr + extra_sgprs - sgpr_limit;
         extra_vgprs = DIV_ROUND_UP(sgpr_spills * 2, program->wave_size) + 1;
      }
   }
   /* the spiller has to target the following register demand */
   const RegisterDemand target(vgpr_limit - extra_vgprs, sgpr_limit - extra_sgprs);

   /* initialize ctx */
   spill_ctx ctx(target, program, live_vars.register_demand);
   compute_global_next_uses(ctx);
   get_rematerialize_info(ctx);

   /* create spills and reloads */
   for (unsigned i = 0; i < program->blocks.size(); i++)
      spill_block(ctx, i);

   /* assign spill slots and DCE rematerialized code */
   assign_spill_slots(ctx, extra_vgprs);

   /* update live variable information */
   live_vars = live_var_analysis(program);

   /* after spilling, the program must fit into the register budget */
   assert(program->num_waves > 0);
}
1975 
1976 } // namespace aco
1977