/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"

#include "util/u_math.h"

#include <set>
#include <vector>

namespace aco {
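/* Computes the change in register demand from just before this instruction to
 * just after it: non-killed definitions increase the demand, first-killed
 * operands decrease it.
 */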
RegisterDemand
get_live_changes(aco_ptr<Instruction>& instr)
{
   RegisterDemand changes;
   for (const Definition& def : instr->definitions) {
      if (!def.isTemp() || def.isKill())
         continue;
      changes += def.getTemp();
   }

   for (const Operand& op : instr->operands) {
      if (!op.isTemp() || !op.isFirstKill())
         continue;
      changes -= op.getTemp();
   }

   return changes;
}

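/* Updates *demand with the register demand between this instruction and the
 * previous one when a definition is fixed to a non-killed operand.
 */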
void
handle_def_fixed_to_op(RegisterDemand* demand, RegisterDemand demand_before, Instruction* instr,
                       int op_idx)
{
   /* Usually the register demand before an instruction would be considered part of the previous
    * instruction, since it's not greater than the register demand for that previous instruction.
    * Except, it can be greater in the case of a definition fixed to a non-killed operand: the RA
    * needs to reserve space between the two instructions for the definition (containing a copy of
    * the operand).
    */
   demand_before += instr->definitions[0].getTemp();
   demand->update(demand_before);
}

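/* Computes the registers that are live only during this instruction itself:
 * immediately-dead (killed) definitions and late-kill operands.
 */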
RegisterDemand
get_temp_registers(aco_ptr<Instruction>& instr)
{
   RegisterDemand temp_registers;

   for (Definition def : instr->definitions) {
      if (!def.isTemp())
         continue;
      if (def.isKill())
         temp_registers += def.getTemp();
   }

   for (Operand op : instr->operands) {
      if (op.isTemp() && op.isLateKill() && op.isFirstKill())
         temp_registers += op.getTemp();
   }

   int op_idx = get_op_fixed_to_def(instr.get());
   if (op_idx != -1 && !instr->operands[op_idx].isKill()) {
      RegisterDemand before_instr;
      before_instr -= get_live_changes(instr);
      handle_def_fixed_to_op(&temp_registers, before_instr, instr.get(), op_idx);
   }

   return temp_registers;
}

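/* Computes the register demand immediately before instr, given the demand at
 * instr itself. The temporary registers of the preceding instruction are
 * still live at that point.
 */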
RegisterDemand
get_demand_before(RegisterDemand demand, aco_ptr<Instruction>& instr,
                  aco_ptr<Instruction>& instr_before)
{
   demand -= get_live_changes(instr);
   demand -= get_temp_registers(instr);
   if (instr_before)
      demand += get_temp_registers(instr_before);
   return demand;
}

namespace {
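/* Per-block phi bookkeeping used to correct register demand at block
 * boundaries: the SGPR size of logical phi operands and the sizes of linear
 * phi operands and definitions.
 */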
struct PhiInfo {
   uint16_t logical_phi_sgpr_ops = 0;
   uint16_t linear_phi_ops = 0;
   uint16_t linear_phi_defs = 0;
};

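/* Checks whether the instruction needs VCC: VOPC comparisons, or plain VOP2
 * with an SGPR carry-in operand or a second (carry-out) definition.
 */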
bool
instr_needs_vcc(Instruction* instr)
{
   if (instr->isVOPC())
      return true;
   if (instr->isVOP2() && !instr->isVOP3()) {
      if (instr->operands.size() == 3 && instr->operands[2].isTemp() &&
          instr->operands[2].regClass().type() == RegType::sgpr)
         return true;
      if (instr->definitions.size() == 2)
         return true;
   }
   return false;
}

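/* Computes the live-out set, per-instruction register demand and kill flags
 * for a single block by walking its instructions backwards, then merges the
 * resulting live-in set into the live-out sets of the block's predecessors.
 */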
void
process_live_temps_per_block(Program* program, live& lives, Block* block, unsigned& worklist,
                             std::vector<PhiInfo>& phi_info)
{
   std::vector<RegisterDemand>& register_demand = lives.register_demand[block->index];
   RegisterDemand new_demand;

   register_demand.resize(block->instructions.size());
   IDSet live = lives.live_out[block->index];

   /* initialize register demand */
   for (unsigned t : live)
      new_demand += Temp(t, program->temp_rc[t]);
   new_demand.sgpr -= phi_info[block->index].logical_phi_sgpr_ops;

   /* traverse the instructions backwards */
   int idx;
   for (idx = block->instructions.size() - 1; idx >= 0; idx--) {
      Instruction* insn = block->instructions[idx].get();
      if (is_phi(insn))
         break;

      program->needs_vcc |= instr_needs_vcc(insn);
      register_demand[idx] = RegisterDemand(new_demand.vgpr, new_demand.sgpr);

      /* KILL */
      for (Definition& definition : insn->definitions) {
         if (!definition.isTemp()) {
            continue;
         }
         if (definition.isFixed() && definition.physReg() == vcc)
            program->needs_vcc = true;

         const Temp temp = definition.getTemp();
         const size_t n = live.erase(temp.id());

         if (n) {
            new_demand -= temp;
            definition.setKill(false);
         } else {
            register_demand[idx] += temp;
            definition.setKill(true);
         }
      }

      /* GEN */
      if (insn->opcode == aco_opcode::p_logical_end) {
         new_demand.sgpr += phi_info[block->index].logical_phi_sgpr_ops;
      } else {
         /* we need to do this in a separate loop because the next one can
          * setKill() for several operands at once and we don't want to
          * overwrite that in a later iteration */
         for (Operand& op : insn->operands)
            op.setKill(false);

         for (unsigned i = 0; i < insn->operands.size(); ++i) {
            Operand& operand = insn->operands[i];
            if (!operand.isTemp())
               continue;
            if (operand.isFixed() && operand.physReg() == vcc)
               program->needs_vcc = true;
            const Temp temp = operand.getTemp();
            const bool inserted = live.insert(temp.id()).second;
            if (inserted) {
               operand.setFirstKill(true);
               for (unsigned j = i + 1; j < insn->operands.size(); ++j) {
                  if (insn->operands[j].isTemp() &&
                      insn->operands[j].tempId() == operand.tempId()) {
                     insn->operands[j].setFirstKill(false);
                     insn->operands[j].setKill(true);
                  }
               }
               if (operand.isLateKill())
                  register_demand[idx] += temp;
               new_demand += temp;
            }
         }
      }

      int op_idx = get_op_fixed_to_def(insn);
      if (op_idx != -1 && !insn->operands[op_idx].isKill()) {
         RegisterDemand before_instr = new_demand;
         handle_def_fixed_to_op(&register_demand[idx], before_instr, insn, op_idx);
      }
   }

   /* handle phi definitions */
   uint16_t linear_phi_defs = 0;
   int phi_idx = idx;
   while (phi_idx >= 0) {
      register_demand[phi_idx] = new_demand;
      Instruction* insn = block->instructions[phi_idx].get();

      assert(is_phi(insn) && insn->definitions.size() == 1);
      if (!insn->definitions[0].isTemp()) {
         assert(insn->definitions[0].isFixed() && insn->definitions[0].physReg() == exec);
         phi_idx--;
         continue;
      }
      Definition& definition = insn->definitions[0];
      if (definition.isFixed() && definition.physReg() == vcc)
         program->needs_vcc = true;
      const Temp temp = definition.getTemp();
      const size_t n = live.erase(temp.id());

      if (n)
         definition.setKill(false);
      else
         definition.setKill(true);

      if (insn->opcode == aco_opcode::p_linear_phi) {
         assert(definition.getTemp().type() == RegType::sgpr);
         linear_phi_defs += definition.size();
      }

      phi_idx--;
   }

   for (unsigned pred_idx : block->linear_preds)
      phi_info[pred_idx].linear_phi_defs = linear_phi_defs;

   /* now, we need to merge the live-ins into the live-out sets */
   bool fast_merge =
      block->logical_preds.size() == 0 || block->logical_preds == block->linear_preds;

#ifndef NDEBUG
   if ((block->linear_preds.empty() && !live.empty()) ||
       (block->logical_preds.empty() && new_demand.vgpr > 0))
      fast_merge = false; /* we might have errors */
#endif

   if (fast_merge) {
      for (unsigned pred_idx : block->linear_preds) {
         if (lives.live_out[pred_idx].insert(live))
            worklist = std::max(worklist, pred_idx + 1);
      }
   } else {
      for (unsigned t : live) {
         RegClass rc = program->temp_rc[t];
         std::vector<unsigned>& preds = rc.is_linear() ? block->linear_preds : block->logical_preds;

#ifndef NDEBUG
         if (preds.empty())
            aco_err(program, "Temporary never defined or defined after use: %%%d in BB%d", t,
                    block->index);
#endif

         for (unsigned pred_idx : preds) {
            auto it = lives.live_out[pred_idx].insert(t);
            if (it.second)
               worklist = std::max(worklist, pred_idx + 1);
         }
      }
   }

   /* handle phi operands */
   phi_idx = idx;
   while (phi_idx >= 0) {
      Instruction* insn = block->instructions[phi_idx].get();
      assert(is_phi(insn));
      /* directly insert into the predecessors' live-out sets */
      std::vector<unsigned>& preds =
         insn->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      for (unsigned i = 0; i < preds.size(); ++i) {
         Operand& operand = insn->operands[i];
         if (!operand.isTemp())
            continue;
         if (operand.isFixed() && operand.physReg() == vcc)
            program->needs_vcc = true;
         /* check if we changed an already processed block */
         const bool inserted = lives.live_out[preds[i]].insert(operand.tempId()).second;
         if (inserted) {
            worklist = std::max(worklist, preds[i] + 1);
            if (insn->opcode == aco_opcode::p_phi && operand.getTemp().type() == RegType::sgpr) {
               phi_info[preds[i]].logical_phi_sgpr_ops += operand.size();
            } else if (insn->opcode == aco_opcode::p_linear_phi) {
               assert(operand.getTemp().type() == RegType::sgpr);
               phi_info[preds[i]].linear_phi_ops += operand.size();
            }
         }

         /* set if the operand is killed by this (or another) phi instruction */
         operand.setKill(!live.count(operand.tempId()));
      }
      phi_idx--;
   }

   assert(!block->linear_preds.empty() || (new_demand == RegisterDemand() && live.empty()));
}

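/* Returns the number of waves that make up one workgroup. */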
unsigned
calc_waves_per_workgroup(Program* program)
{
   /* When workgroup size is not known, just go with wave_size */
   unsigned workgroup_size =
      program->workgroup_size == UINT_MAX ? program->wave_size : program->workgroup_size;

   return align(workgroup_size, program->wave_size) / program->wave_size;
}
} /* end namespace */

bool
uses_scratch(Program* program)
{
   /* RT uses scratch but we don't yet know how much. */
   return program->config->scratch_bytes_per_wave || program->stage == raytracing_cs;
}

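/* Returns the number of SGPRs that must be allocated on top of the
 * addressable ones for VCC, FLAT_SCRATCH and XNACK_MASK, depending on the
 * hardware generation.
 */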
uint16_t
get_extra_sgprs(Program* program)
{
   /* We don't use this register on GFX6-8 and it's removed on GFX10+. */
   bool needs_flat_scr = uses_scratch(program) && program->gfx_level == GFX9;

   if (program->gfx_level >= GFX10) {
      assert(!program->dev.xnack_enabled);
      return 0;
   } else if (program->gfx_level >= GFX8) {
      if (needs_flat_scr)
         return 6;
      else if (program->dev.xnack_enabled)
         return 4;
      else if (program->needs_vcc)
         return 2;
      else
         return 0;
   } else {
      assert(!program->dev.xnack_enabled);
      if (needs_flat_scr)
         return 4;
      else if (program->needs_vcc)
         return 2;
      else
         return 0;
   }
}

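/* Returns the SGPR allocation size for the given number of addressable SGPRs,
 * including the extra SGPRs and rounded up to the allocation granule.
 */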
uint16_t
get_sgpr_alloc(Program* program, uint16_t addressable_sgprs)
{
   uint16_t sgprs = addressable_sgprs + get_extra_sgprs(program);
   uint16_t granule = program->dev.sgpr_alloc_granule;
   return ALIGN_NPOT(std::max(sgprs, granule), granule);
}

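/* Returns the VGPR allocation size for the given number of addressable VGPRs,
 * rounded up to the allocation granule.
 */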
uint16_t
get_vgpr_alloc(Program* program, uint16_t addressable_vgprs)
{
   assert(addressable_vgprs <= program->dev.vgpr_limit);
   uint16_t granule = program->dev.vgpr_alloc_granule;
   return ALIGN_NPOT(std::max(addressable_vgprs, granule), granule);
}

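/* Rounds a down to the nearest multiple of b. */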
unsigned
round_down(unsigned a, unsigned b)
{
   return a - (a % b);
}

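/* Returns the maximum number of addressable SGPRs per wave such that the
 * given number of waves still fits on one SIMD.
 */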
uint16_t
get_addr_sgpr_from_waves(Program* program, uint16_t waves)
{
   /* it's not possible to allocate more than 128 SGPRs */
   uint16_t sgprs = std::min(program->dev.physical_sgprs / waves, 128);
   sgprs = round_down(sgprs, program->dev.sgpr_alloc_granule);
   sgprs -= get_extra_sgprs(program);
   return std::min(sgprs, program->dev.sgpr_limit);
}

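/* Returns the maximum number of addressable VGPRs per wave such that the
 * given number of waves still fits on one SIMD.
 */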
uint16_t
get_addr_vgpr_from_waves(Program* program, uint16_t waves)
{
   uint16_t vgprs = program->dev.physical_vgprs / waves;
   vgprs = vgprs / program->dev.vgpr_alloc_granule * program->dev.vgpr_alloc_granule;
   vgprs -= program->config->num_shared_vgprs / 2;
   return std::min(vgprs, program->dev.vgpr_limit);
}

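/* Computes the minimum number of waves per SIMD required to fit one
 * workgroup on a CU/WGP.
 */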
void
calc_min_waves(Program* program)
{
   unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
   unsigned simd_per_cu_wgp = program->dev.simd_per_cu * (program->wgp_mode ? 2 : 1);
   program->min_waves = DIV_ROUND_UP(waves_per_workgroup, simd_per_cu_wgp);
}

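/* Clamps the given wave count to what LDS usage and the hardware's workgroup
 * limits allow, so that only whole workgroups are launched.
 */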
uint16_t
max_suitable_waves(Program* program, uint16_t waves)
{
   unsigned num_simd = program->dev.simd_per_cu * (program->wgp_mode ? 2 : 1);
   unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
   unsigned num_workgroups = waves * num_simd / waves_per_workgroup;

   /* Adjust #workgroups for LDS */
   unsigned lds_per_workgroup = align(program->config->lds_size * program->dev.lds_encoding_granule,
                                      program->dev.lds_alloc_granule);

   if (program->stage == fragment_fs) {
      /* PS inputs are moved from PC (parameter cache) to LDS before PS waves are launched.
       * Each PS input occupies 3x vec4 of LDS space. See Figure 10.3 in GCN3 ISA manual.
       * These limit occupancy the same way as other stages' LDS usage does.
       */
      unsigned lds_bytes_per_interp = 3 * 16;
      unsigned lds_param_bytes = lds_bytes_per_interp * program->info.ps.num_interp;
      lds_per_workgroup += align(lds_param_bytes, program->dev.lds_alloc_granule);
   }
   unsigned lds_limit = program->wgp_mode ? program->dev.lds_limit * 2 : program->dev.lds_limit;
   if (lds_per_workgroup)
      num_workgroups = std::min(num_workgroups, lds_limit / lds_per_workgroup);

   /* Hardware limitation */
   if (waves_per_workgroup > 1)
      num_workgroups = std::min(num_workgroups, program->wgp_mode ? 32u : 16u);

   /* Adjust #waves for workgroup multiples:
    * In cases like waves_per_workgroup=3 or lds=65536 and
    * waves_per_workgroup=1, we want the maximum possible number of waves per
    * SIMD and not the minimum, so DIV_ROUND_UP is used.
    */
   unsigned workgroup_waves = num_workgroups * waves_per_workgroup;
   return DIV_ROUND_UP(workgroup_waves, num_simd);
}

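/* Recomputes program->num_waves and program->max_reg_demand for the given
 * register demand.
 */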
void
update_vgpr_sgpr_demand(Program* program, const RegisterDemand new_demand)
{
   assert(program->min_waves >= 1);
   uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);

   /* this won't compile: register pressure reduction is necessary */
   if (new_demand.vgpr > vgpr_limit || new_demand.sgpr > sgpr_limit) {
      program->num_waves = 0;
      program->max_reg_demand = new_demand;
   } else {
      program->num_waves = program->dev.physical_sgprs / get_sgpr_alloc(program, new_demand.sgpr);
      uint16_t vgpr_demand =
         get_vgpr_alloc(program, new_demand.vgpr) + program->config->num_shared_vgprs / 2;
      program->num_waves =
         std::min<uint16_t>(program->num_waves, program->dev.physical_vgprs / vgpr_demand);
      program->num_waves = std::min(program->num_waves, program->dev.max_waves_per_simd);

      /* Adjust for LDS and workgroup multiples and calculate max_reg_demand */
      program->num_waves = max_suitable_waves(program, program->num_waves);
      program->max_reg_demand.vgpr = get_addr_vgpr_from_waves(program, program->num_waves);
      program->max_reg_demand.sgpr = get_addr_sgpr_from_waves(program, program->num_waves);
   }
}

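/* Performs a backwards liveness analysis over all blocks until a fixed point
 * is reached, computing live-out sets and per-instruction register demand.
 */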
live
live_var_analysis(Program* program)
{
   live result;
   result.live_out.resize(program->blocks.size());
   result.register_demand.resize(program->blocks.size());
   unsigned worklist = program->blocks.size();
   std::vector<PhiInfo> phi_info(program->blocks.size());
   RegisterDemand new_demand;

   program->needs_vcc = program->gfx_level >= GFX10;

   /* this implementation assumes that the block idx corresponds to the block's position in
    * program->blocks vector */
   while (worklist) {
      unsigned block_idx = --worklist;
      process_live_temps_per_block(program, result, &program->blocks[block_idx], worklist,
                                   phi_info);
   }

   /* Handle branches: we will insert copies created for linear phis just before the branch. */
   for (Block& block : program->blocks) {
      result.register_demand[block.index].back().sgpr += phi_info[block.index].linear_phi_defs;
      result.register_demand[block.index].back().sgpr -= phi_info[block.index].linear_phi_ops;

      /* update block's register demand */
      if (program->progress < CompilationProgress::after_ra) {
         block.register_demand = RegisterDemand();
         for (RegisterDemand& demand : result.register_demand[block.index])
            block.register_demand.update(demand);
      }

      new_demand.update(block.register_demand);
   }

   /* calculate the program's register demand and number of waves */
   if (program->progress < CompilationProgress::after_ra)
      update_vgpr_sgpr_demand(program, new_demand);

   return result;
}

} // namespace aco