• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Valve Corporation
3  * Copyright © 2018 Google
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25 
26 #include "aco_ir.h"
27 #include "aco_builder.h"
28 #include "sid.h"
29 
30 #include <map>
31 #include <set>
32 #include <stack>
33 
34 /*
35  * Implements the spilling algorithm on SSA-form from
36  * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
37  * by Matthias Braun and Sebastian Hack.
38  */
39 
40 namespace aco {
41 
42 namespace {
43 
/* Per-temporary rematerialization record: the instruction whose re-execution
 * recomputes the value, so a reload can be replaced by a cheap recompute. */
struct remat_info {
   Instruction *instr;
};
47 
/* All state shared across the spilling pass. Indices into the per-block
 * vectors are block indices; "spill id" indexes interferences/is_reloaded. */
struct spill_ctx {
   /* register demand the program must be reduced to */
   RegisterDemand target_pressure;
   Program* program;
   /* per block, per instruction: register demand at that point */
   std::vector<std::vector<RegisterDemand>> register_demand;
   /* per block: original temp -> renamed temp introduced by reloads */
   std::vector<std::map<Temp, Temp>> renames;
   /* per block: temps spilled at block entry, mapped to their spill id */
   std::vector<std::map<Temp, uint32_t>> spills_entry;
   /* per block: temps spilled at block exit, mapped to their spill id */
   std::vector<std::map<Temp, uint32_t>> spills_exit;
   /* per block: whether the block was already handled by the pass */
   std::vector<bool> processed;
   /* headers of the loops currently being processed (innermost on top) */
   std::stack<Block*> loop_header;
   /* per block: temp -> (dominator block of the next use, distance in instructions),
    * at the start resp. end of the block (see next_uses_per_block()) */
   std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
   std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
   /* per spill id: its register class and the set of spill ids it interferes with */
   std::vector<std::pair<RegClass, std::unordered_set<uint32_t>>> interferences;
   /* disjoint groups of spill ids linked via add_affinity() */
   std::vector<std::vector<uint32_t>> affinities;
   /* per spill id: whether a p_reload referencing it was emitted */
   std::vector<bool> is_reloaded;
   /* rematerializable temps -> the instruction recomputing them */
   std::map<Temp, remat_info> remat;
   /* remat instructions -> whether they were actually used for a reload */
   std::map<Instruction *, bool> remat_used;
   unsigned wave_size;

   spill_ctx(const RegisterDemand target_pressure, Program* program,
             std::vector<std::vector<RegisterDemand>> register_demand)
      : target_pressure(target_pressure), program(program),
        register_demand(std::move(register_demand)), renames(program->blocks.size()),
        spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
        processed(program->blocks.size(), false), wave_size(program->wave_size) {}

   /* Record that 'first' and 'second' belong to the same affinity group,
    * merging the two existing groups when both ids are already grouped. */
   void add_affinity(uint32_t first, uint32_t second)
   {
      /* locate the group (if any) that already contains each id;
       * affinities.size() acts as the "not found" sentinel */
      unsigned found_first = affinities.size();
      unsigned found_second = affinities.size();
      for (unsigned i = 0; i < affinities.size(); i++) {
         std::vector<uint32_t>& vec = affinities[i];
         for (uint32_t entry : vec) {
            if (entry == first)
               found_first = i;
            else if (entry == second)
               found_second = i;
         }
      }
      if (found_first == affinities.size() && found_second == affinities.size()) {
         /* neither id grouped yet: start a new group */
         affinities.emplace_back(std::vector<uint32_t>({first, second}));
      } else if (found_first < affinities.size() && found_second == affinities.size()) {
         affinities[found_first].push_back(second);
      } else if (found_second < affinities.size() && found_first == affinities.size()) {
         affinities[found_second].push_back(first);
      } else if (found_first != found_second) {
         /* merge second into first */
         affinities[found_first].insert(affinities[found_first].end(), affinities[found_second].begin(), affinities[found_second].end());
         affinities.erase(std::next(affinities.begin(), found_second));
      } else {
         /* both already in the same group: nothing to do */
         assert(found_first == found_second);
      }
   }

   /* Mark two spill ids as unable to share a slot. Ids of different register
    * types never compete for the same slots, so those pairs are skipped. */
   void add_interference(uint32_t first, uint32_t second)
   {
      if (interferences[first].first.type() != interferences[second].first.type())
         return;

      /* keep the relation symmetric; skip the second insert if already present */
      bool inserted = interferences[first].second.insert(second).second;
      if (inserted)
         interferences[second].second.insert(first);
   }

   /* Create a fresh spill id for a value of the given register class. */
   uint32_t allocate_spill_id(RegClass rc)
   {
      interferences.emplace_back(rc, std::unordered_set<uint32_t>());
      is_reloaded.push_back(false);
      return next_spill_id++;
   }

   uint32_t next_spill_id = 0;
};
120 
get_dominator(int idx_a,int idx_b,Program * program,bool is_linear)121 int32_t get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
122 {
123 
124    if (idx_a == -1)
125       return idx_b;
126    if (idx_b == -1)
127       return idx_a;
128    if (is_linear) {
129       while (idx_a != idx_b) {
130          if (idx_a > idx_b)
131             idx_a = program->blocks[idx_a].linear_idom;
132          else
133             idx_b = program->blocks[idx_b].linear_idom;
134       }
135    } else {
136       while (idx_a != idx_b) {
137          if (idx_a > idx_b)
138             idx_a = program->blocks[idx_a].logical_idom;
139          else
140             idx_b = program->blocks[idx_b].logical_idom;
141       }
142    }
143    assert(idx_a != -1);
144    return idx_a;
145 }
146 
/* One step of the backward next-use-distance dataflow: recomputes
 * next_use_distances_start[block_idx] from next_use_distances_end[block_idx]
 * by walking the block's instructions in reverse, and propagates the result
 * into the predecessors' end-distances. Predecessors whose end-distances
 * changed are added to 'worklist' so the fixed point is eventually reached. */
void next_uses_per_block(spill_ctx& ctx, unsigned block_idx, std::set<uint32_t>& worklist)
{
   Block* block = &ctx.program->blocks[block_idx];
   std::map<Temp, std::pair<uint32_t, uint32_t>> next_uses = ctx.next_use_distances_end[block_idx];

   /* to compute the next use distance at the beginning of the block, we have to add the block's size */
   for (std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = next_uses.begin(); it != next_uses.end(); ++it)
      it->second.second = it->second.second + block->instructions.size();

   /* walk the non-phi instructions backwards: a definition kills the entry,
    * an operand (re)sets the next use to this instruction */
   int idx = block->instructions.size() - 1;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      if (instr->opcode == aco_opcode::p_linear_phi ||
          instr->opcode == aco_opcode::p_phi)
         break;

      for (const Definition& def : instr->definitions) {
         if (def.isTemp())
            next_uses.erase(def.getTemp());
      }

      for (const Operand& op : instr->operands) {
         /* omit exec mask */
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp())
            next_uses[op.getTemp()] = {block_idx, idx};
      }
      idx--;
   }

   assert(block_idx != 0 || next_uses.empty());
   ctx.next_use_distances_start[block_idx] = next_uses;

   /* phis use their operands at the end of the corresponding predecessor,
    * not in this block: record a zero distance at each predecessor's exit */
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);

      for (unsigned i = 0; i < instr->operands.size(); i++) {
         unsigned pred_idx = instr->opcode == aco_opcode::p_phi ?
                             block->logical_preds[i] :
                             block->linear_preds[i];
         if (instr->operands[i].isTemp()) {
            if (instr->operands[i].getTemp() == ctx.program->blocks[pred_idx].live_out_exec)
               continue;
            /* re-queue the predecessor only if its entry actually changes */
            if (ctx.next_use_distances_end[pred_idx].find(instr->operands[i].getTemp()) == ctx.next_use_distances_end[pred_idx].end() ||
                ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] != std::pair<uint32_t, uint32_t>{block_idx, 0})
               worklist.insert(pred_idx);
            ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] = {block_idx, 0};
         }
      }
      next_uses.erase(instr->definitions[0].getTemp());
      idx--;
   }

   /* all remaining live vars must be live-out at the predecessors */
   for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : next_uses) {
      Temp temp = pair.first;
      uint32_t distance = pair.second.second;
      uint32_t dom = pair.second.first;
      std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
      for (unsigned pred_idx : preds) {
         if (temp == ctx.program->blocks[pred_idx].live_out_exec)
            continue;
         /* penalize uses reached through a loop back-edge so values live
          * across loops look "far away" to the spilling heuristic */
         if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
            distance += 0xFFFF;
         if (ctx.next_use_distances_end[pred_idx].find(temp) != ctx.next_use_distances_end[pred_idx].end()) {
            dom = get_dominator(dom, ctx.next_use_distances_end[pred_idx][temp].first, ctx.program, temp.is_linear());
            distance = std::min(ctx.next_use_distances_end[pred_idx][temp].second, distance);
         }
         /* NOTE: operator[] default-inserts {0,0} for a missing temp here;
          * the comparison then fails and the entry is overwritten below */
         if (ctx.next_use_distances_end[pred_idx][temp] != std::pair<uint32_t, uint32_t>{dom, distance})
            worklist.insert(pred_idx);
         ctx.next_use_distances_end[pred_idx][temp] = {dom, distance};
      }
   }

}
226 
compute_global_next_uses(spill_ctx & ctx)227 void compute_global_next_uses(spill_ctx& ctx)
228 {
229    ctx.next_use_distances_start.resize(ctx.program->blocks.size());
230    ctx.next_use_distances_end.resize(ctx.program->blocks.size());
231    std::set<uint32_t> worklist;
232    for (Block& block : ctx.program->blocks)
233       worklist.insert(block.index);
234 
235    while (!worklist.empty()) {
236       std::set<unsigned>::reverse_iterator b_it = worklist.rbegin();
237       unsigned block_idx = *b_it;
238       worklist.erase(block_idx);
239       next_uses_per_block(ctx, block_idx, worklist);
240    }
241 }
242 
should_rematerialize(aco_ptr<Instruction> & instr)243 bool should_rematerialize(aco_ptr<Instruction>& instr)
244 {
245    /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
246    if (instr->format != Format::VOP1 && instr->format != Format::SOP1 && instr->format != Format::PSEUDO && instr->format != Format::SOPK)
247       return false;
248    /* TODO: pseudo-instruction rematerialization is only supported for p_create_vector/p_parallelcopy */
249    if (instr->format == Format::PSEUDO && instr->opcode != aco_opcode::p_create_vector &&
250        instr->opcode != aco_opcode::p_parallelcopy)
251       return false;
252    if (instr->format == Format::SOPK && instr->opcode != aco_opcode::s_movk_i32)
253       return false;
254 
255    for (const Operand& op : instr->operands) {
256       /* TODO: rematerialization using temporaries isn't yet supported */
257       if (op.isTemp())
258          return false;
259    }
260 
261    /* TODO: rematerialization with multiple definitions isn't yet supported */
262    if (instr->definitions.size() > 1)
263       return false;
264 
265    return true;
266 }
267 
/* Builds the instruction that makes a spilled value live again as 'new_name':
 * a re-emitted copy of the defining instruction if 'tmp' is rematerializable,
 * otherwise a p_reload pseudo instruction referencing 'spill_id'. */
aco_ptr<Instruction> do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
{
   std::map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
   if (remat != ctx.remat.end()) {
      Instruction *instr = remat->second.instr;
      /* these asserts mirror the restrictions enforced by should_rematerialize() */
      assert((instr->format == Format::VOP1 || instr->format == Format::SOP1 || instr->format == Format::PSEUDO || instr->format == Format::SOPK) && "unsupported");
      assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector || instr->opcode == aco_opcode::p_parallelcopy) && "unsupported");
      assert(instr->definitions.size() == 1 && "unsupported");

      /* clone the defining instruction with the correct derived type */
      aco_ptr<Instruction> res;
      if (instr->format == Format::VOP1) {
         res.reset(create_instruction<VOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->format == Format::SOP1) {
         res.reset(create_instruction<SOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->format == Format::PSEUDO) {
         res.reset(create_instruction<Pseudo_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->format == Format::SOPK) {
         res.reset(create_instruction<SOPK_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
         /* SOPK carries an immediate outside the operand list; copy it along */
         static_cast<SOPK_instruction*>(res.get())->imm = static_cast<SOPK_instruction*>(instr)->imm;
      }
      for (unsigned i = 0; i < instr->operands.size(); i++) {
         res->operands[i] = instr->operands[i];
         if (instr->operands[i].isTemp()) {
            /* currently unreachable: should_rematerialize() rejects
             * instructions with temporary operands */
            assert(false && "unsupported");
            if (ctx.remat.count(instr->operands[i].getTemp()))
               ctx.remat_used[ctx.remat[instr->operands[i].getTemp()].instr] = true;
         }
      }
      res->definitions[0] = Definition(new_name);
      return res;
   } else {
      /* not rematerializable: reload from the spill slot and mark the id as
       * reloaded so the corresponding p_spill isn't eliminated later */
      aco_ptr<Pseudo_instruction> reload{create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
      reload->operands[0] = Operand(spill_id);
      reload->definitions[0] = Definition(new_name);
      ctx.is_reloaded[spill_id] = true;
      return reload;
   }
}
306 
get_rematerialize_info(spill_ctx & ctx)307 void get_rematerialize_info(spill_ctx& ctx)
308 {
309    for (Block& block : ctx.program->blocks) {
310       bool logical = false;
311       for (aco_ptr<Instruction>& instr : block.instructions) {
312          if (instr->opcode == aco_opcode::p_logical_start)
313             logical = true;
314          else if (instr->opcode == aco_opcode::p_logical_end)
315             logical = false;
316          if (logical && should_rematerialize(instr)) {
317             for (const Definition& def : instr->definitions) {
318                if (def.isTemp()) {
319                   ctx.remat[def.getTemp()] = (remat_info){instr.get()};
320                   ctx.remat_used[instr.get()] = false;
321                }
322             }
323          }
324       }
325    }
326 }
327 
local_next_uses(spill_ctx & ctx,Block * block)328 std::vector<std::map<Temp, uint32_t>> local_next_uses(spill_ctx& ctx, Block* block)
329 {
330    std::vector<std::map<Temp, uint32_t>> local_next_uses(block->instructions.size());
331 
332    std::map<Temp, uint32_t> next_uses;
333    for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block->index])
334       next_uses[pair.first] = pair.second.second + block->instructions.size();
335 
336    for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
337       aco_ptr<Instruction>& instr = block->instructions[idx];
338       if (!instr)
339          break;
340       if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
341          break;
342 
343       for (const Operand& op : instr->operands) {
344          if (op.isFixed() && op.physReg() == exec)
345             continue;
346          if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
347             continue;
348          if (op.isTemp())
349             next_uses[op.getTemp()] = idx;
350       }
351       for (const Definition& def : instr->definitions) {
352          if (def.isTemp())
353             next_uses.erase(def.getTemp());
354       }
355       local_next_uses[idx] = next_uses;
356    }
357    return local_next_uses;
358 }
359 
360 
/* Decides which variables are spilled at the entry of 'block' (filling
 * ctx.spills_entry[block_idx]) and returns the register demand they free up.
 * Three cases are distinguished: loop headers (prefer spilling variables that
 * live through the whole loop), single-predecessor branch blocks (keep the
 * predecessor's spills), and merge blocks (keep variables spilled on all
 * incoming paths, then spill partially-spilled ones by furthest next use). */
RegisterDemand init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   RegisterDemand spilled_registers;

   /* first block, nothing was spilled before */
   if (block_idx == 0)
      return {0, 0};

   /* loop header block */
   if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
      assert(block->linear_preds[0] == block_idx - 1);
      assert(block->logical_preds[0] == block_idx - 1);

      /* create new loop_info */
      ctx.loop_header.emplace(block);

      /* check how many live-through variables should be spilled */
      RegisterDemand new_demand;
      unsigned i = block_idx;
      /* NOTE(review): blocks[i] is read in the loop condition before the
       * in-range assert fires; this relies on a block with smaller nest depth
       * following the loop's last block — confirm this CFG invariant. */
      while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
         assert(ctx.program->blocks.size() > i);
         new_demand.update(ctx.program->blocks[i].register_demand);
         i++;
      }
      unsigned loop_end = i;

      /* keep live-through spilled: variables already spilled before the loop
       * whose next use is at or after the loop end stay in memory */
      for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
         if (pair.second.first < loop_end)
            continue;

         Temp to_spill = pair.first;
         auto it = ctx.spills_exit[block_idx - 1].find(to_spill);
         if (it == ctx.spills_exit[block_idx - 1].end())
            continue;

         ctx.spills_entry[block_idx][to_spill] = it->second;
         spilled_registers += to_spill;
      }

      /* select live-through vgpr variables: greedily spill the not-yet-spilled
       * live-through variable with the largest next-use distance */
      while (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
         unsigned distance = 0;
         Temp to_spill;
         for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
            if (pair.first.type() == RegType::vgpr &&
                pair.second.first >= loop_end &&
                pair.second.second > distance &&
                ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         /* no live-through candidate left */
         if (distance == 0)
            break;

         /* reuse the existing spill id if the variable was already spilled
          * at the predecessor's exit */
         uint32_t spill_id;
         if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
            spill_id = ctx.allocate_spill_id(to_spill.regClass());
         } else {
            spill_id = ctx.spills_exit[block_idx - 1][to_spill];
         }

         ctx.spills_entry[block_idx][to_spill] = spill_id;
         spilled_registers.vgpr += to_spill.size();
      }

      /* select live-through sgpr variables (same greedy scheme as vgprs) */
      while (new_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
         unsigned distance = 0;
         Temp to_spill;
         for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
            if (pair.first.type() == RegType::sgpr &&
                pair.second.first >= loop_end &&
                pair.second.second > distance &&
                ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         if (distance == 0)
            break;

         uint32_t spill_id;
         if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
            spill_id = ctx.allocate_spill_id(to_spill.regClass());
         } else {
            spill_id = ctx.spills_exit[block_idx - 1][to_spill];
         }

         ctx.spills_entry[block_idx][to_spill] = spill_id;
         spilled_registers.sgpr += to_spill.size();
      }



      /* shortcut: live-through spills alone bring us under the target */
      if (!RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure))
         return spilled_registers;

      /* if reg pressure is too high at beginning of loop, add variables with furthest use */
      unsigned idx = 0;
      while (block->instructions[idx]->opcode == aco_opcode::p_phi || block->instructions[idx]->opcode == aco_opcode::p_linear_phi)
         idx++;

      assert(idx != 0 && "loop without phis: TODO");
      idx--;
      RegisterDemand reg_pressure = ctx.register_demand[block_idx][idx] - spilled_registers;
      /* Consider register pressure from linear predecessors. This can affect
       * reg_pressure if the branch instructions define sgprs. */
      for (unsigned pred : block->linear_preds) {
         reg_pressure.sgpr = std::max<int16_t>(
            reg_pressure.sgpr, ctx.register_demand[pred].back().sgpr - spilled_registers.sgpr);
      }

      /* spill the sgpr with the furthest next use until under target */
      while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
         unsigned distance = 0;
         Temp to_spill;
         for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
            if (pair.first.type() == RegType::sgpr &&
                pair.second.second > distance &&
                ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         assert(distance != 0);

         ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
         spilled_registers.sgpr += to_spill.size();
         reg_pressure.sgpr -= to_spill.size();
      }
      /* same for vgprs */
      while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
         unsigned distance = 0;
         Temp to_spill;
         for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
            if (pair.first.type() == RegType::vgpr &&
                pair.second.second > distance &&
                ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         assert(distance != 0);
         ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
         spilled_registers.vgpr += to_spill.size();
         reg_pressure.vgpr -= to_spill.size();
      }

      return spilled_registers;
   }

   /* branch block */
   if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
      /* keep variables spilled if they are alive and not used in the current block */
      unsigned pred_idx = block->linear_preds[0];
      for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
         if (pair.first.type() == RegType::sgpr &&
             ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
             ctx.next_use_distances_start[block_idx][pair.first].first != block_idx) {
            ctx.spills_entry[block_idx].insert(pair);
            spilled_registers.sgpr += pair.first.size();
         }
      }
      /* vgprs follow logical control flow, so use the logical predecessor */
      if (block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::vgpr &&
                ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
                ctx.next_use_distances_start[block_idx][pair.first].first != block_idx) {
               ctx.spills_entry[block_idx].insert(pair);
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      /* if register demand is still too high, we just keep all spilled live vars and process the block */
      if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
         pred_idx = block->linear_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::sgpr &&
                ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.sgpr += pair.first.size();
            }
         }
      }
      if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr && block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::vgpr &&
                ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      return spilled_registers;
   }

   /* else: merge block */
   std::set<Temp> partial_spills;

   /* keep variables spilled on all incoming paths */
   for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
      std::vector<unsigned>& preds = pair.first.is_linear() ? block->linear_preds : block->logical_preds;
      /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload it.
       * Otherwise, if any predecessor reloads it, ensure it's reloaded on all other predecessors.
       * The idea is that it's better in practice to rematerialize redundantly than to create lots of phis. */
      /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db doesn't seem to exercise this path much) */
      bool remat = ctx.remat.count(pair.first);
      bool spill = !remat;
      uint32_t spill_id = 0;
      for (unsigned pred_idx : preds) {
         /* variable is not even live at the predecessor: probably from a phi */
         if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end()) {
            spill = false;
            break;
         }
         if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end()) {
            if (!remat)
               spill = false;
         } else {
            partial_spills.insert(pair.first);
            /* it might be that on one incoming path, the variable has a different spill_id, but add_coupling_code() will take care of that. */
            spill_id = ctx.spills_exit[pred_idx][pair.first];
            if (remat)
               spill = true;
         }
      }
      if (spill) {
         ctx.spills_entry[block_idx][pair.first] = spill_id;
         partial_spills.erase(pair.first);
         spilled_registers += pair.first;
      }
   }

   /* same for phis: spill a phi whose operands are all spilled at the
    * corresponding predecessor exits */
   unsigned idx = 0;
   while (block->instructions[idx]->opcode == aco_opcode::p_linear_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_phi) {
      aco_ptr<Instruction>& phi = block->instructions[idx];
      std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      bool spill = true;

      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;
         assert(phi->operands[i].isTemp());
         if (ctx.spills_exit[preds[i]].find(phi->operands[i].getTemp()) == ctx.spills_exit[preds[i]].end())
            spill = false;
         else
            partial_spills.insert(phi->definitions[0].getTemp());
      }
      if (spill) {
         ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] = ctx.allocate_spill_id(phi->definitions[0].regClass());
         partial_spills.erase(phi->definitions[0].getTemp());
         spilled_registers += phi->definitions[0].getTemp();
      }

      idx++;
   }

   /* if reg pressure at first instruction is still too high, add partially spilled variables */
   RegisterDemand reg_pressure;
   if (idx == 0) {
      /* no phis: derive the demand before the first instruction by undoing
       * its definitions and re-adding its first-killed operands */
      for (const Definition& def : block->instructions[idx]->definitions) {
         if (def.isTemp()) {
            reg_pressure -= def.getTemp();
         }
      }
      for (const Operand& op : block->instructions[idx]->operands) {
         if (op.isTemp() && op.isFirstKill()) {
            reg_pressure += op.getTemp();
         }
      }
   } else {
      for (unsigned i = 0; i < idx; i++) {
         aco_ptr<Instruction>& instr = block->instructions[i];
         assert(is_phi(instr));
         /* Killed phi definitions increase pressure in the predecessor but not
          * the block they're in. Since the loops below are both to control
          * pressure of the start of this block and the ends of its
          * predecessors, we need to count killed unspilled phi definitions here. */
         if (instr->definitions[0].isKill() &&
             !ctx.spills_entry[block_idx].count(instr->definitions[0].getTemp()))
            reg_pressure += instr->definitions[0].getTemp();
      }
      idx--;
   }
   reg_pressure += ctx.register_demand[block_idx][idx] - spilled_registers;

   /* Consider register pressure from linear predecessors. This can affect
    * reg_pressure if the branch instructions define sgprs. */
   for (unsigned pred : block->linear_preds) {
      reg_pressure.sgpr = std::max<int16_t>(
         reg_pressure.sgpr, ctx.register_demand[pred].back().sgpr - spilled_registers.sgpr);
   }

   /* spill the partially-spilled sgpr with the furthest next use */
   while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
      assert(!partial_spills.empty());

      std::set<Temp>::iterator it = partial_spills.begin();
      Temp to_spill = Temp();
      unsigned distance = 0;
      while (it != partial_spills.end()) {
         assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());

         if (it->type() == RegType::sgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
            distance = ctx.next_use_distances_start[block_idx][*it].second;
            to_spill = *it;
         }
         ++it;
      }
      assert(distance != 0);

      ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
      partial_spills.erase(to_spill);
      spilled_registers.sgpr += to_spill.size();
      reg_pressure.sgpr -= to_spill.size();
   }

   /* same for vgprs */
   while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
      assert(!partial_spills.empty());

      std::set<Temp>::iterator it = partial_spills.begin();
      Temp to_spill = Temp();
      unsigned distance = 0;
      while (it != partial_spills.end()) {
         assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());

         if (it->type() == RegType::vgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
            distance = ctx.next_use_distances_start[block_idx][*it].second;
            to_spill = *it;
         }
         ++it;
      }
      assert(distance != 0);

      ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
      partial_spills.erase(to_spill);
      spilled_registers.vgpr += to_spill.size();
      reg_pressure.vgpr -= to_spill.size();
   }

   return spilled_registers;
}
709 
710 
get_demand_before(spill_ctx & ctx,unsigned block_idx,unsigned idx)711 RegisterDemand get_demand_before(spill_ctx& ctx, unsigned block_idx, unsigned idx)
712 {
713    if (idx == 0) {
714       RegisterDemand demand = ctx.register_demand[block_idx][idx];
715       aco_ptr<Instruction>& instr = ctx.program->blocks[block_idx].instructions[idx];
716       aco_ptr<Instruction> instr_before(nullptr);
717       return get_demand_before(demand, instr, instr_before);
718    } else {
719       return ctx.register_demand[block_idx][idx - 1];
720    }
721 }
722 
/* Inserts the spill/reload "coupling" code on the incoming control-flow edges
 * of `block`: reloads variables that are spilled at a predecessor's exit but
 * live-in unspilled here, spills phi operands and other variables that are
 * spilled at this block's entry, records spill-id interferences/affinities,
 * and repairs SSA by inserting renames and, where predecessors disagree, new
 * phis. Single-predecessor branch blocks take a simplified fast path. */
void add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   /* no coupling code necessary */
   if (block->linear_preds.size() == 0)
      return;

   std::vector<aco_ptr<Instruction>> instructions;
   /* branch block: TODO take other branch into consideration */
   if (block->linear_preds.size() == 1 && !(block->kind & (block_kind_loop_exit | block_kind_loop_header))) {
      assert(ctx.processed[block->linear_preds[0]]);
      assert(ctx.register_demand[block_idx].size() == block->instructions.size());
      std::vector<RegisterDemand> reg_demand;
      unsigned insert_idx = 0;
      unsigned pred_idx = block->linear_preds[0];
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, 0);

      /* first handle linear (sgpr/exec-copy) variables live-in to this block */
      for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
         if (!live.first.is_linear())
            continue;
         /* still spilled */
         if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
            continue;

         /* in register at end of predecessor: just propagate any rename */
         if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
            std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
            if (it != ctx.renames[pred_idx].end())
               ctx.renames[block_idx].insert(*it);
            continue;
         }

         /* variable is spilled at predecessor and live at current block: create reload instruction */
         Temp new_name = ctx.program->allocateTmp(live.first.regClass());
         aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
         instructions.emplace_back(std::move(reload));
         reg_demand.push_back(demand_before);
         ctx.renames[block_idx][live.first] = new_name;
      }

      if (block->logical_preds.size() == 1) {
         /* move instructions up to and including p_logical_start so that
          * logical (vgpr) reloads land after it */
         do {
            assert(insert_idx < block->instructions.size());
            instructions.emplace_back(std::move(block->instructions[insert_idx]));
            reg_demand.push_back(ctx.register_demand[block_idx][insert_idx]);
            insert_idx++;
         } while (instructions.back()->opcode != aco_opcode::p_logical_start);

         unsigned pred_idx = block->logical_preds[0];
         /* same as above, but for non-linear variables along the logical edge */
         for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
            if (live.first.is_linear())
               continue;
            /* still spilled */
            if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
               continue;

            /* in register at end of predecessor */
            if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
               std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
               if (it != ctx.renames[pred_idx].end())
                  ctx.renames[block_idx].insert(*it);
               continue;
            }

            /* variable is spilled at predecessor and live at current block: create reload instruction */
            Temp new_name = ctx.program->allocateTmp(live.first.regClass());
            aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
            instructions.emplace_back(std::move(reload));
            reg_demand.emplace_back(reg_demand.back());
            ctx.renames[block_idx][live.first] = new_name;
         }
      }

      /* combine new reload instructions with original block */
      if (!instructions.empty()) {
         reg_demand.insert(reg_demand.end(), std::next(ctx.register_demand[block->index].begin(), insert_idx),
                           ctx.register_demand[block->index].end());
         ctx.register_demand[block_idx] = std::move(reg_demand);
         instructions.insert(instructions.end(),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(std::next(block->instructions.begin(), insert_idx)),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
         block->instructions = std::move(instructions);
      }
      return;
   }

   /* loop header and merge blocks: check if all (linear) predecessors have been processed */
   for (ASSERTED unsigned pred : block->linear_preds)
      assert(ctx.processed[pred]);

   /* iterate the phi nodes for which operands to spill at the predecessor */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (phi->opcode != aco_opcode::p_phi &&
          phi->opcode != aco_opcode::p_linear_phi)
         break;

      /* if the phi is not spilled, add to instructions */
      if (ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end()) {
         instructions.emplace_back(std::move(phi));
         continue;
      }

      std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];

      /* the phi definition is spilled: spill each operand at the end of its
       * predecessor instead of materializing the phi */
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;

         unsigned pred_idx = preds[i];
         assert(phi->operands[i].isTemp() && phi->operands[i].isKill());
         Temp var = phi->operands[i].getTemp();

         std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
         /* prevent the defining instruction from being DCE'd if it could be rematerialized */
         if (rename_it == ctx.renames[preds[i]].end() && ctx.remat.count(var))
            ctx.remat_used[ctx.remat[var].instr] = true;

         /* build interferences between the phi def and all spilled variables at the predecessor blocks */
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (var == pair.first)
               continue;
            ctx.add_interference(def_spill_id, pair.second);
         }

         /* check if variable is already spilled at predecessor */
         std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(var);
         if (spilled != ctx.spills_exit[pred_idx].end()) {
            if (spilled->second != def_spill_id)
               ctx.add_affinity(def_spill_id, spilled->second);
            continue;
         }

         /* rename if necessary */
         if (rename_it != ctx.renames[pred_idx].end()) {
            var = rename_it->second;
            ctx.renames[pred_idx].erase(rename_it);
         }

         /* affinity so that operand and def preferably share one spill slot */
         uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());
         ctx.add_affinity(def_spill_id, spill_id);
         aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = Operand(var);
         spill->operands[1] = Operand(spill_id);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* logical phis spill before p_logical_end; linear phis spill at the
          * very end of the predecessor (before the branch) */
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         ctx.spills_exit[pred_idx][phi->operands[i].getTemp()] = spill_id;
      }

      /* remove phi from instructions */
      phi.reset();
   }

   /* iterate all (other) spilled variables for which to spill at the predecessor */
   // TODO: would be better to have them sorted: first vgprs and first with longest distance
   for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
      std::vector<unsigned> preds = pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      for (unsigned pred_idx : preds) {
         /* variable is already spilled at predecessor */
         std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(pair.first);
         if (spilled != ctx.spills_exit[pred_idx].end()) {
            if (spilled->second != pair.second)
               ctx.add_affinity(pair.second, spilled->second);
            continue;
         }

         /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
         if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
            continue;

         /* add interferences between spilled variable and predecessors exit spills */
         for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
            if (exit_spill.first == pair.first)
               continue;
            ctx.add_interference(exit_spill.second, pair.second);
         }

         /* variable is in register at predecessor and has to be spilled */
         /* rename if necessary */
         Temp var = pair.first;
         std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
         if (rename_it != ctx.renames[pred_idx].end()) {
            var = rename_it->second;
            ctx.renames[pred_idx].erase(rename_it);
         }

         aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = Operand(var);
         spill->operands[1] = Operand(pair.second);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* vgprs spill before p_logical_end; sgprs at the end of the block */
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         ctx.spills_exit[pred.index][pair.first] = pair.second;
      }
   }

   /* iterate phis for which operands to reload */
   for (aco_ptr<Instruction>& phi : instructions) {
      assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
      assert(ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end());

      std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (!phi->operands[i].isTemp())
            continue;
         unsigned pred_idx = preds[i];

         /* rename operand */
         if (ctx.spills_exit[pred_idx].find(phi->operands[i].getTemp()) == ctx.spills_exit[pred_idx].end()) {
            std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(phi->operands[i].getTemp());
            if (it != ctx.renames[pred_idx].end())
               phi->operands[i].setTemp(it->second);
            /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            else if (ctx.remat.count(phi->operands[i].getTemp()))
               ctx.remat_used[ctx.remat[phi->operands[i].getTemp()].instr] = true;
            continue;
         }

         Temp tmp = phi->operands[i].getTemp();

         /* reload phi operand at end of predecessor block */
         Temp new_name = ctx.program->allocateTmp(tmp.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);

         aco_ptr<Instruction> reload = do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);
         pred.instructions.insert(it, std::move(reload));

         ctx.spills_exit[pred_idx].erase(tmp);
         ctx.renames[pred_idx][tmp] = new_name;
         phi->operands[i].setTemp(new_name);
      }
   }

   /* iterate live variables for which to reload */
   // TODO: reload at current block if variable is spilled on all predecessors
   for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
      /* skip spilled variables */
      if (ctx.spills_entry[block_idx].find(pair.first) != ctx.spills_entry[block_idx].end())
         continue;
      std::vector<unsigned> preds = pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      /* variable is dead at predecessor, it must be from a phi */
      bool is_dead = false;
      for (unsigned pred_idx : preds) {
         if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
            is_dead = true;
      }
      if (is_dead)
         continue;
      for (unsigned pred_idx : preds) {
         /* the variable is not spilled at the predecessor */
         if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end())
            continue;

         /* variable is spilled at predecessor and has to be reloaded */
         Temp new_name = ctx.program->allocateTmp(pair.first.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);

         aco_ptr<Instruction> reload = do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
         pred.instructions.insert(it, std::move(reload));

         ctx.spills_exit[pred.index].erase(pair.first);
         ctx.renames[pred.index][pair.first] = new_name;
      }

      /* check if we have to create a new phi for this variable */
      Temp rename = Temp();
      bool is_same = true;
      for (unsigned pred_idx : preds) {
         if (ctx.renames[pred_idx].find(pair.first) == ctx.renames[pred_idx].end()) {
            if (rename == Temp())
               rename = pair.first;
            else
               is_same = rename == pair.first;
         } else {
            if (rename == Temp())
               rename = ctx.renames[pred_idx][pair.first];
            else
               is_same = rename == ctx.renames[pred_idx][pair.first];
         }

         if (!is_same)
            break;
      }

      if (!is_same) {
         /* the variable was renamed differently in the predecessors: we have to create a phi */
         aco_opcode opcode = pair.first.is_linear() ? aco_opcode::p_linear_phi : aco_opcode::p_phi;
         aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
         rename = ctx.program->allocateTmp(pair.first.regClass());
         for (unsigned i = 0; i < phi->operands.size(); i++) {
            Temp tmp;
            if (ctx.renames[preds[i]].find(pair.first) != ctx.renames[preds[i]].end()) {
               tmp = ctx.renames[preds[i]][pair.first];
            } else if (preds[i] >= block_idx) {
               /* back-edge not yet processed: use the new phi def itself */
               tmp = rename;
            } else {
               tmp = pair.first;
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
               if (ctx.remat.count(tmp))
                  ctx.remat_used[ctx.remat[tmp].instr] = true;
            }
            phi->operands[i] = Operand(tmp);
         }
         phi->definitions[0] = Definition(rename);
         instructions.emplace_back(std::move(phi));
      }

      /* the variable was renamed: add new name to renames */
      if (!(rename == Temp() || rename == pair.first))
         ctx.renames[block_idx][pair.first] = rename;
   }

   /* combine phis with instructions: skip the phi slots that were reset()
    * above (spilled phis) */
   unsigned idx = 0;
   while (!block->instructions[idx]) {
      idx++;
   }

   if (!ctx.processed[block_idx]) {
      assert(!(block->kind & block_kind_loop_header));
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, idx);
      ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(), ctx.register_demand[block->index].begin() + idx);
      ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(), instructions.size(), demand_before);
   }

   std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
   instructions.insert(instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
               std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
   block->instructions = std::move(instructions);
}
1077 
/* Walks the instructions of one block, renaming/reloading spilled operands on
 * demand and, wherever register demand still exceeds the target pressure,
 * spilling the live variable with the furthest next use (preferring
 * rematerializable ones). `current_spills` holds the variables spilled on
 * entry and is updated in place; `spilled_registers` tracks the demand
 * currently removed by spilling. The surviving spill set is recorded as this
 * block's exit spills. */
void process_block(spill_ctx& ctx, unsigned block_idx, Block* block,
                   std::map<Temp, uint32_t> &current_spills, RegisterDemand spilled_registers)
{
   assert(!ctx.processed[block_idx]);

   std::vector<std::map<Temp, uint32_t>> local_next_use_distance;
   std::vector<aco_ptr<Instruction>> instructions;
   unsigned idx = 0;

   /* phis are handled separately (in add_coupling_code) */
   while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
      instructions.emplace_back(std::move(block->instructions[idx++]));
   }

   /* per-instruction next-use distances are only needed if we might spill */
   if (block->register_demand.exceeds(ctx.target_pressure))
      local_next_use_distance = local_next_uses(ctx, block);

   while (idx < block->instructions.size()) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      std::map<Temp, std::pair<Temp, uint32_t>> reloads;
      std::map<Temp, uint32_t> spills;
      /* rename and reload operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         if (current_spills.find(op.getTemp()) == current_spills.end()) {
            /* the Operand is in register: check if it was renamed */
            if (ctx.renames[block_idx].find(op.getTemp()) != ctx.renames[block_idx].end())
               op.setTemp(ctx.renames[block_idx][op.getTemp()]);
            /* prevent its defining instruction from being DCE'd if it could be rematerialized */
            else if (ctx.remat.count(op.getTemp()))
               ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
            continue;
         }
         /* the Operand is spilled: add it to reloads */
         Temp new_tmp = ctx.program->allocateTmp(op.regClass());
         ctx.renames[block_idx][op.getTemp()] = new_tmp;
         reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
         current_spills.erase(op.getTemp());
         op.setTemp(new_tmp);
         spilled_registers -= new_tmp;
      }

      /* check if register demand is low enough before and after the current instruction */
      if (block->register_demand.exceeds(ctx.target_pressure)) {

         RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
         new_demand.update(get_demand_before(ctx, block_idx, idx));

         assert(!local_next_use_distance.empty());

         /* if reg pressure is too high, spill variable with furthest next use */
         while (RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
            unsigned distance = 0;
            Temp to_spill;
            bool do_rematerialize = false;
            /* spill the register type that is actually over its target */
            if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
               for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
                  bool can_rematerialize = ctx.remat.count(pair.first);
                  /* prefer rematerializable candidates; otherwise the one with
                   * the furthest next use that isn't already spilled here */
                  if (pair.first.type() == RegType::vgpr &&
                      ((pair.second > distance && can_rematerialize == do_rematerialize) ||
                       (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                      current_spills.find(pair.first) == current_spills.end() &&
                      ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
                     to_spill = pair.first;
                     distance = pair.second;
                     do_rematerialize = can_rematerialize;
                  }
               }
            } else {
               for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
                  bool can_rematerialize = ctx.remat.count(pair.first);
                  if (pair.first.type() == RegType::sgpr &&
                      ((pair.second > distance && can_rematerialize == do_rematerialize) ||
                       (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                      current_spills.find(pair.first) == current_spills.end() &&
                      ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
                     to_spill = pair.first;
                     distance = pair.second;
                     do_rematerialize = can_rematerialize;
                  }
               }
            }

            assert(distance != 0 && distance > idx);
            uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());

            /* add interferences with currently spilled variables */
            for (std::pair<Temp, uint32_t> pair : current_spills)
               ctx.add_interference(spill_id, pair.second);
            for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads)
               ctx.add_interference(spill_id, pair.second.second);

            current_spills[to_spill] = spill_id;
            spilled_registers += to_spill;

            /* rename if necessary */
            if (ctx.renames[block_idx].find(to_spill) != ctx.renames[block_idx].end()) {
               to_spill = ctx.renames[block_idx][to_spill];
            }

            /* add spill to new instructions */
            aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
            spill->operands[0] = Operand(to_spill);
            spill->operands[1] = Operand(spill_id);
            instructions.emplace_back(std::move(spill));
         }
      }

      /* add reloads and instruction to new instructions */
      for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
         aco_ptr<Instruction> reload = do_reload(ctx, pair.second.first, pair.first, pair.second.second);
         instructions.emplace_back(std::move(reload));
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   block->instructions = std::move(instructions);
   /* everything still spilled at the end of the block becomes an exit spill */
   ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
}
1201 
/* Runs spilling for a single block: computes the entry spill set, inserts
 * coupling code on incoming edges (deferred for loop headers), processes the
 * block's instructions if anything could change, and — when the next block
 * leaves the current loop — retroactively adds the loop-header coupling code
 * and repairs SSA by propagating the resulting renames through the whole
 * loop body. */
void spill_block(spill_ctx& ctx, unsigned block_idx)
{
   Block* block = &ctx.program->blocks[block_idx];

   /* determine set of variables which are spilled at the beginning of the block */
   RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);

   /* add interferences for spilled variables: all entry spills are
    * simultaneously live, so they pairwise interfere */
   for (auto it = ctx.spills_entry[block_idx].begin(); it != ctx.spills_entry[block_idx].end(); ++it) {
      for (auto it2 = std::next(it); it2 != ctx.spills_entry[block_idx].end(); ++it2)
         ctx.add_interference(it->second, it2->second);
   }

   /* loop headers get their coupling code only once the whole loop has been
    * processed (see below) */
   bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
   if (!is_loop_header) {
      /* add spill/reload code on incoming control flow edges */
      add_coupling_code(ctx, block, block_idx);
   }

   std::map<Temp, uint32_t> current_spills = ctx.spills_entry[block_idx];

   /* check conditions to process this block */
   bool process = RegisterDemand(block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
                  !ctx.renames[block_idx].empty() ||
                  ctx.remat_used.size();

   /* also process if any entry spill is next used within this very block */
   std::map<Temp, uint32_t>::iterator it = current_spills.begin();
   while (!process && it != current_spills.end()) {
      if (ctx.next_use_distances_start[block_idx][it->first].first == block_idx)
         process = true;
      ++it;
   }

   if (process)
      process_block(ctx, block_idx, block, current_spills, spilled_registers);
   else
      ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());

   ctx.processed[block_idx] = true;

   /* check if the next block leaves the current loop */
   if (block->loop_nest_depth == 0 || ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
      return;

   Block* loop_header = ctx.loop_header.top();

   /* preserve original renames at end of loop header block */
   std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);

   /* add coupling code to all loop header predecessors */
   add_coupling_code(ctx, loop_header, loop_header->index);

   /* propagate new renames through loop: i.e. repair the SSA */
   renames.swap(ctx.renames[loop_header->index]);
   for (std::pair<Temp, Temp> rename : renames) {
      for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
         Block& current = ctx.program->blocks[idx];
         std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();

         /* first rename phis */
         while (instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& phi = *instr_it;
            if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
               break;
            /* no need to rename the loop header phis once again. this happened in add_coupling_code() */
            if (idx == loop_header->index) {
               instr_it++;
               continue;
            }

            for (Operand& op : phi->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first)
                  op.setTemp(rename.second);
            }
            instr_it++;
         }

         std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = ctx.next_use_distances_start[idx].find(rename.first);

         /* variable is not live at beginning of this block */
         if (it == ctx.next_use_distances_start[idx].end())
            continue;

         /* if the variable is live at the block's exit, add rename */
         if (ctx.next_use_distances_end[idx].find(rename.first) != ctx.next_use_distances_end[idx].end())
            ctx.renames[idx].insert(rename);

         /* rename all uses in this block */
         bool renamed = false;
         while (!renamed && instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& instr = *instr_it;
            for (Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first) {
                  op.setTemp(rename.second);
                  /* we can stop with this block as soon as the variable is spilled */
                  if (instr->opcode == aco_opcode::p_spill)
                    renamed = true;
               }
            }
            instr_it++;
         }
      }
   }

   /* remove loop header info from stack */
   ctx.loop_header.pop();
}
1313 
/* Emits code that materializes the scratch buffer resource descriptor (s4)
 * used by the VGPR spill/reload MUBUF instructions, and returns it.
 *
 * ctx:            spiller context (program, chip/wave configuration).
 * scratch_offset: SGPR holding the scratch byte offset; if `offset` is
 *                 non-zero it is replaced in place by `scratch_offset + offset`.
 * instructions:   instruction list to emit into. For a top-level block the
 *                 code is appended at the current end; otherwise it is
 *                 inserted right before the block's p_logical_end.
 * offset:         extra byte offset to fold into scratch_offset.
 * is_top_level:   selects the insertion strategy described above.
 */
Temp load_scratch_resource(spill_ctx& ctx, Temp& scratch_offset,
                           std::vector<aco_ptr<Instruction>>& instructions,
                           unsigned offset, bool is_top_level)
{
   Builder bld(ctx.program);
   if (is_top_level) {
      bld.reset(&instructions);
   } else {
      /* find p_logical_end: scan backwards so we insert before the branch
       * code at the end of the block */
      unsigned idx = instructions.size() - 1;
      while (instructions[idx]->opcode != aco_opcode::p_logical_end)
         idx--;
      bld.reset(&instructions, std::next(instructions.begin(), idx));
   }

   /* for non-compute stages, private_segment_buffer holds a pointer that must
    * be dereferenced with an SMEM load to get the actual buffer descriptor */
   Temp private_segment_buffer = ctx.program->private_segment_buffer;
   if (ctx.program->stage != compute_cs)
      private_segment_buffer = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, Operand(0u));

   if (offset)
      scratch_offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), scratch_offset, Operand(offset));

   /* swizzled per-lane addressing: index stride encodes the wave size
    * (3 => 64 lanes, 2 => 32 lanes) */
   uint32_t rsrc_conf = S_008F0C_ADD_TID_ENABLE(1) |
                        S_008F0C_INDEX_STRIDE(ctx.program->wave_size == 64 ? 3 : 2);

   if (ctx.program->chip_class >= GFX10) {
      rsrc_conf |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
                   S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
                   S_008F0C_RESOURCE_LEVEL(1);
   } else if (ctx.program->chip_class <= GFX7) { /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
      rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
   }
   /* older generations need element size = 4 bytes. element size removed in GFX9 */
   if (ctx.program->chip_class <= GFX8)
      rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);

   /* NUM_RECORDS = -1: effectively unbounded raw buffer */
   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
                     private_segment_buffer, Operand(-1u),
                     Operand(rsrc_conf));
}
1355 
add_interferences(spill_ctx & ctx,std::vector<bool> & is_assigned,std::vector<uint32_t> & slots,std::vector<bool> & slots_used,unsigned id)1356 void add_interferences(spill_ctx& ctx, std::vector<bool>& is_assigned,
1357                        std::vector<uint32_t>& slots, std::vector<bool>& slots_used,
1358                        unsigned id)
1359 {
1360    for (unsigned other : ctx.interferences[id].second) {
1361       if (!is_assigned[other])
1362          continue;
1363 
1364       RegClass other_rc = ctx.interferences[other].first;
1365       unsigned slot = slots[other];
1366       std::fill(slots_used.begin() + slot, slots_used.begin() + slot + other_rc.size(), true);
1367    }
1368 }
1369 
find_available_slot(std::vector<bool> & used,unsigned wave_size,unsigned size,bool is_sgpr,unsigned * num_slots)1370 unsigned find_available_slot(std::vector<bool>& used, unsigned wave_size,
1371                              unsigned size, bool is_sgpr, unsigned *num_slots)
1372 {
1373    unsigned wave_size_minus_one = wave_size - 1;
1374    unsigned slot = 0;
1375 
1376    while (true) {
1377       bool available = true;
1378       for (unsigned i = 0; i < size; i++) {
1379          if (slot + i < used.size() && used[slot + i]) {
1380             available = false;
1381             break;
1382          }
1383       }
1384       if (!available) {
1385          slot++;
1386          continue;
1387       }
1388 
1389       if (is_sgpr && ((slot & wave_size_minus_one) > wave_size - size)) {
1390          slot = align(slot, wave_size);
1391          continue;
1392       }
1393 
1394       std::fill(used.begin(), used.end(), false);
1395 
1396       if (slot + size > used.size())
1397          used.resize(slot + size);
1398 
1399       return slot;
1400    }
1401 }
1402 
assign_spill_slots_helper(spill_ctx & ctx,RegType type,std::vector<bool> & is_assigned,std::vector<uint32_t> & slots,unsigned * num_slots)1403 void assign_spill_slots_helper(spill_ctx& ctx, RegType type,
1404                                std::vector<bool>& is_assigned,
1405                                std::vector<uint32_t>& slots,
1406                                unsigned *num_slots)
1407 {
1408    std::vector<bool> slots_used(*num_slots);
1409 
1410    /* assign slots for ids with affinities first */
1411    for (std::vector<uint32_t>& vec : ctx.affinities) {
1412       if (ctx.interferences[vec[0]].first.type() != type)
1413          continue;
1414 
1415       for (unsigned id : vec) {
1416          if (!ctx.is_reloaded[id])
1417             continue;
1418 
1419          add_interferences(ctx, is_assigned, slots, slots_used, id);
1420       }
1421 
1422       unsigned slot = find_available_slot(slots_used, ctx.wave_size,
1423                                           ctx.interferences[vec[0]].first.size(),
1424                                           type == RegType::sgpr, num_slots);
1425 
1426       for (unsigned id : vec) {
1427          assert(!is_assigned[id]);
1428 
1429          if (ctx.is_reloaded[id]) {
1430             slots[id] = slot;
1431             is_assigned[id] = true;
1432          }
1433       }
1434    }
1435 
1436    /* assign slots for ids without affinities */
1437    for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1438       if (is_assigned[id] || !ctx.is_reloaded[id] || ctx.interferences[id].first.type() != type)
1439          continue;
1440 
1441       add_interferences(ctx, is_assigned, slots, slots_used, id);
1442 
1443       unsigned slot = find_available_slot(slots_used, ctx.wave_size,
1444                                           ctx.interferences[id].first.size(),
1445                                           type == RegType::sgpr, num_slots);
1446 
1447       slots[id] = slot;
1448       is_assigned[id] = true;
1449    }
1450 
1451    *num_slots = slots_used.size();
1452 }
1453 
/* Assigns a slot to every spill id and lowers the p_spill/p_reload pseudo
 * instructions to real code:
 *  - VGPR spills/reloads become swizzled MUBUF scratch stores/loads.
 *  - SGPR spills/reloads stay pseudo instructions but get a linear VGPR
 *    operand; each linear VGPR backs wave_size SGPR spill slots (one lane
 *    per slot).
 * Also drops rematerialized instructions whose results are no longer used
 * and updates scratch_bytes_per_wave. `spills_to_vgpr` is the number of
 * linear VGPRs the caller budgeted for SGPR spill storage. */
void assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr) {
   std::vector<uint32_t> slots(ctx.interferences.size());
   std::vector<bool> is_assigned(ctx.interferences.size());

   /* first, handle affinities: just merge all interferences into both spill ids */
   /* NOTE(review): only the is_reloaded flags are unified here — presumably
    * the interference sets were already merged when the affinity was
    * recorded; confirm against the affinity-creation code. */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(vec[i] != vec[j]);
            bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
            ctx.is_reloaded[vec[i]] = reloaded;
            ctx.is_reloaded[vec[j]] = reloaded;
         }
      }
   }
   /* sanity check: no spill id interferes with itself */
   for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
      for (ASSERTED uint32_t id : ctx.interferences[i].second)
         assert(i != id);

   /* for each spill slot, assign as many spill ids as possible */
   unsigned sgpr_spill_slots = 0, vgpr_spill_slots = 0;
   assign_spill_slots_helper(ctx, RegType::sgpr, is_assigned, slots, &sgpr_spill_slots);
   assign_spill_slots_helper(ctx, RegType::vgpr, is_assigned, slots, &vgpr_spill_slots);

   /* every reloaded id must have received a slot */
   for (unsigned id = 0; id < is_assigned.size(); id++)
      assert(is_assigned[id] || !ctx.is_reloaded[id]);

   /* affinity groups must have ended up fully consistent */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
            if (!is_assigned[vec[i]])
               continue;
            assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
            assert(ctx.interferences[vec[i]].first.type() == ctx.interferences[vec[j]].first.type());
            assert(slots[vec[i]] == slots[vec[j]]);
         }
      }
   }

   /* hope, we didn't mess up */
   /* one linear VGPR per wave_size sgpr spill slots */
   std::vector<Temp> vgpr_spill_temps((sgpr_spill_slots + ctx.wave_size - 1) / ctx.wave_size);
   assert(vgpr_spill_temps.size() <= spills_to_vgpr);

   /* replace pseudo instructions with actual hardware instructions */
   Temp scratch_offset = ctx.program->scratch_offset, scratch_rsrc = Temp();
   unsigned last_top_level_block_idx = 0;
   /* reload_in_loop[i]: linear vgpr i was reloaded from inside the current
    * loop nest and needs a user after the loop to keep it alive */
   std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
   for (Block& block : ctx.program->blocks) {

      /* after loops, we insert a user if there was a reload inside the loop */
      if (block.loop_nest_depth == 0) {
         int end_vgprs = 0;
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (reload_in_loop[i])
               end_vgprs++;
         }

         if (end_vgprs > 0) {
            aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
            int k = 0;
            for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
               if (reload_in_loop[i])
                  destr->operands[k++] = Operand(vgpr_spill_temps[i]);
               reload_in_loop[i] = false;
            }
            /* find insertion point: right after the block's phis */
            std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
            while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
               ++it;
            block.instructions.insert(it, std::move(destr));
         }
      }

      if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
         last_top_level_block_idx = block.index;

         /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (vgpr_spill_temps[i] == Temp())
               continue;

            /* the linear vgpr is still needed if any sgpr spill live across
             * this block boundary maps into it */
            bool can_destroy = true;
            for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[block.linear_preds[0]]) {

               if (ctx.interferences[pair.second].first.type() == RegType::sgpr &&
                   slots[pair.second] / ctx.wave_size == i) {
                  can_destroy = false;
                  break;
               }
            }
            if (can_destroy)
               vgpr_spill_temps[i] = Temp();
         }
      }

      /* rebuild the block's instruction list, lowering spill pseudos */
      std::vector<aco_ptr<Instruction>>::iterator it;
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());
      Builder bld(ctx.program, &instructions);
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {

         if ((*it)->opcode == aco_opcode::p_spill) {
            uint32_t spill_id = (*it)->operands[1].constantValue();

            if (!ctx.is_reloaded[spill_id]) {
               /* never reloaded, so don't spill */
            } else if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               /* spill vgpr */
               ctx.program->config->spilled_vgprs += (*it)->operands[0].size();
               uint32_t spill_slot = slots[spill_id];
               /* if the total offset could exceed 4096, fold the base into the
                * scratch offset SGPR instead of the instruction immediate
                * (assumes the MUBUF immediate offset limit — TODO confirm) */
               bool add_offset_to_sgpr = ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size + vgpr_spill_slots * 4 > 4096;
               unsigned base_offset = add_offset_to_sgpr ? 0 : ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

               /* check if the scratch resource descriptor already exists */
               if (scratch_rsrc == Temp()) {
                  unsigned offset = add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
                  scratch_rsrc = load_scratch_resource(ctx, scratch_offset,
                                                       last_top_level_block_idx == block.index ?
                                                       instructions : ctx.program->blocks[last_top_level_block_idx].instructions,
                                                       offset,
                                                       last_top_level_block_idx == block.index);
               }

               unsigned offset = base_offset + spill_slot * 4;
               aco_opcode opcode = aco_opcode::buffer_store_dword;
               assert((*it)->operands[0].isTemp());
               Temp temp = (*it)->operands[0].getTemp();
               assert(temp.type() == RegType::vgpr && !temp.is_linear());
               /* multi-dword temps are split and stored one dword at a time */
               if (temp.size() > 1) {
                  Instruction* split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, temp.size())};
                  split->operands[0] = Operand(temp);
                  for (unsigned i = 0; i < temp.size(); i++)
                     split->definitions[i] = bld.def(v1);
                  bld.insert(split);
                  for (unsigned i = 0; i < temp.size(); i++) {
                     Instruction *instr = bld.mubuf(opcode, scratch_rsrc, Operand(v1), scratch_offset, split->definitions[i].getTemp(), offset + i * 4, false, true);
                     static_cast<MUBUF_instruction *>(instr)->sync = memory_sync_info(storage_vgpr_spill, semantic_private);
                  }
               } else {
                  Instruction *instr = bld.mubuf(opcode, scratch_rsrc, Operand(v1), scratch_offset, temp, offset, false, true);
                  static_cast<MUBUF_instruction *>(instr)->sync = memory_sync_info(storage_vgpr_spill, semantic_private);
               }
            } else {
               ctx.program->config->spilled_sgprs += (*it)->operands[0].size();

               uint32_t spill_slot = slots[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
                     instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
                  }
               }

               /* spill sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* spill = create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
               spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               spill->operands[1] = Operand(spill_slot % ctx.wave_size);
               spill->operands[2] = (*it)->operands[0];
               instructions.emplace_back(aco_ptr<Instruction>(spill));
            }

         } else if ((*it)->opcode == aco_opcode::p_reload) {
            uint32_t spill_id = (*it)->operands[0].constantValue();
            assert(ctx.is_reloaded[spill_id]);

            if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               /* reload vgpr */
               uint32_t spill_slot = slots[spill_id];
               /* same offset-folding decision as on the spill path above */
               bool add_offset_to_sgpr = ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size + vgpr_spill_slots * 4 > 4096;
               unsigned base_offset = add_offset_to_sgpr ? 0 : ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

               /* check if the scratch resource descriptor already exists */
               if (scratch_rsrc == Temp()) {
                  unsigned offset = add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
                  scratch_rsrc = load_scratch_resource(ctx, scratch_offset,
                                                       last_top_level_block_idx == block.index ?
                                                       instructions : ctx.program->blocks[last_top_level_block_idx].instructions,
                                                       offset,
                                                       last_top_level_block_idx == block.index);
               }

               unsigned offset = base_offset + spill_slot * 4;
               aco_opcode opcode = aco_opcode::buffer_load_dword;
               Definition def = (*it)->definitions[0];
               /* multi-dword defs are loaded dword-wise and recombined */
               if (def.size() > 1) {
                  Instruction* vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, def.size(), 1)};
                  vec->definitions[0] = def;
                  for (unsigned i = 0; i < def.size(); i++) {
                     Temp tmp = bld.tmp(v1);
                     vec->operands[i] = Operand(tmp);
                     Instruction *instr = bld.mubuf(opcode, Definition(tmp), scratch_rsrc, Operand(v1), scratch_offset, offset + i * 4, false, true);
                     static_cast<MUBUF_instruction *>(instr)->sync = memory_sync_info(storage_vgpr_spill, semantic_private);
                  }
                  bld.insert(vec);
               } else {
                  Instruction *instr = bld.mubuf(opcode, def, scratch_rsrc, Operand(v1), scratch_offset, offset, false, true);
                  static_cast<MUBUF_instruction *>(instr)->sync = memory_sync_info(storage_vgpr_spill, semantic_private);
               }
            } else {
               uint32_t spill_slot = slots[spill_id];
               /* remember loop reloads so a user is added after the loop */
               reload_in_loop[spill_slot / ctx.wave_size] = block.loop_nest_depth > 0;

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
                     instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
                  }
               }

               /* reload sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 2, 1);
               reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               reload->operands[1] = Operand(spill_slot % ctx.wave_size);
               reload->definitions[0] = (*it)->definitions[0];
               instructions.emplace_back(aco_ptr<Instruction>(reload));
            }
         } else if (!ctx.remat_used.count(it->get()) || ctx.remat_used[it->get()]) {
            /* keep the instruction unless it is a rematerialization candidate
             * whose result ended up unused (DCE of remat code) */
            instructions.emplace_back(std::move(*it));
         }

      }
      block.instructions = std::move(instructions);
   }

   /* update required scratch memory */
   ctx.program->config->scratch_bytes_per_wave += align(vgpr_spill_slots * 4 * ctx.program->wave_size, 1024);

   /* SSA elimination inserts copies for logical phis right before p_logical_end
    * So if a linear vgpr is used between that p_logical_end and the branch,
    * we need to ensure logical phis don't choose a definition which aliases
    * the linear vgpr.
    * TODO: Moving the spills and reloads to before p_logical_end might produce
    *       slightly better code. */
   for (Block& block : ctx.program->blocks) {
      /* loops exits are already handled */
      if (block.logical_preds.size() <= 1)
         continue;

      bool has_logical_phis = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_phi) {
            has_logical_phis = true;
            break;
         } else if (instr->opcode != aco_opcode::p_linear_phi) {
            break;
         }
      }
      if (!has_logical_phis)
         continue;

      /* collect linear vgprs used between p_logical_end and the branch in any
       * logical predecessor */
      std::set<Temp> vgprs;
      for (unsigned pred_idx : block.logical_preds) {
         Block& pred = ctx.program->blocks[pred_idx];
         for (int i = pred.instructions.size() - 1; i >= 0; i--) {
            aco_ptr<Instruction>& pred_instr = pred.instructions[i];
            if (pred_instr->opcode == aco_opcode::p_logical_end) {
               break;
            } else if (pred_instr->opcode == aco_opcode::p_spill ||
                       pred_instr->opcode == aco_opcode::p_reload) {
               vgprs.insert(pred_instr->operands[0].getTemp());
            }
         }
      }
      if (!vgprs.size())
         continue;

      /* end the linear vgprs at this merge block, after its phis */
      aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
      int k = 0;
      for (Temp tmp : vgprs) {
         destr->operands[k++] = Operand(tmp);
      }
      /* find insertion point */
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
      while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
         ++it;
      block.instructions.insert(it, std::move(destr));
   }
}
1760 
1761 } /* end namespace */
1762 
1763 
spill(Program * program,live & live_vars)1764 void spill(Program* program, live& live_vars)
1765 {
1766    program->config->spilled_vgprs = 0;
1767    program->config->spilled_sgprs = 0;
1768 
1769    /* no spilling when register pressure is low enough */
1770    if (program->num_waves > 0)
1771       return;
1772 
1773    /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
1774    lower_to_cssa(program, live_vars);
1775 
1776    /* calculate target register demand */
1777    RegisterDemand register_target = program->max_reg_demand;
1778    if (register_target.sgpr > program->sgpr_limit)
1779       register_target.vgpr += (register_target.sgpr - program->sgpr_limit + program->wave_size - 1 + 32) / program->wave_size;
1780    register_target.sgpr = program->sgpr_limit;
1781 
1782    if (register_target.vgpr > program->vgpr_limit)
1783       register_target.sgpr = program->sgpr_limit - 5;
1784    int spills_to_vgpr = (program->max_reg_demand.sgpr - register_target.sgpr + program->wave_size - 1 + 32) / program->wave_size;
1785    register_target.vgpr = program->vgpr_limit - spills_to_vgpr;
1786 
1787    /* initialize ctx */
1788    spill_ctx ctx(register_target, program, live_vars.register_demand);
1789    compute_global_next_uses(ctx);
1790    get_rematerialize_info(ctx);
1791 
1792    /* create spills and reloads */
1793    for (unsigned i = 0; i < program->blocks.size(); i++)
1794       spill_block(ctx, i);
1795 
1796    /* assign spill slots and DCE rematerialized code */
1797    assign_spill_slots(ctx, spills_to_vgpr);
1798 
1799    /* update live variable information */
1800    live_vars = live_var_analysis(program);
1801 
1802    assert(program->num_waves > 0);
1803 }
1804 
1805 }
1806 
1807