/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_ir.h"

namespace aco {

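/* Returns the change in register demand caused by this instruction:
 * newly written temporaries minus operands that are killed here.
 */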
RegisterDemand
get_live_changes(Instruction* instr)
{
   RegisterDemand changes;
   for (const Definition& def : instr->definitions) {
      if (!def.isTemp() || def.isKill())
         continue;
      changes += def.getTemp();
   }

   for (const Operand& op : instr->operands) {
      if (!op.isTemp() || !op.isFirstKill())
         continue;
      changes -= op.getTemp();
   }

   return changes;
}

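/* Computes the temporary register demand of this instruction relative to the
 * demand after it: operands that die here (including clobbered and copy-killed
 * ones) raise the demand before the instruction, while killed definitions and
 * late-kill operands stay live during execution. The maximum of both is returned.
 */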
RegisterDemand
get_temp_registers(Instruction* instr)
{
   RegisterDemand demand_before;
   RegisterDemand demand_after;

   for (Definition def : instr->definitions) {
      if (def.isKill())
         demand_after += def.getTemp();
      else if (def.isTemp())
         demand_before -= def.getTemp();
   }

   for (Operand op : instr->operands) {
      if (op.isFirstKill() || op.isCopyKill()) {
         demand_before += op.getTemp();
         if (op.isLateKill())
            demand_after += op.getTemp();
      } else if (op.isClobbered() && !op.isKill()) {
         demand_before += op.getTemp();
      }
   }

   demand_after.update(demand_before);
   return demand_after;
}

namespace {

struct live_ctx {
   monotonic_buffer_resource m;
   Program* program;
   int32_t worklist;
   uint32_t handled_once;
};

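/* VOPC and carry-in/carry-out VOP2 instructions implicitly read or write VCC
 * when they are not encoded as VOP3, so the program must reserve it.
 */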
bool
instr_needs_vcc(Instruction* instr)
{
   if (instr->isVOPC())
      return true;
   if (instr->isVOP2() && !instr->isVOP3()) {
      if (instr->operands.size() == 3 && instr->operands[2].isTemp() &&
          instr->operands[2].regClass().type() == RegType::sgpr)
         return true;
      if (instr->definitions.size() == 2)
         return true;
   }
   return false;
}

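/* Gathers the live-out set of a block from the live-in sets of its successors.
 * Linear and logical temporaries are taken from linear and logical successors
 * respectively, and operands of successor phis contributed by this block are
 * added as well.
 */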
IDSet
compute_live_out(live_ctx& ctx, Block* block)
{
   IDSet live(ctx.m);

   if (block->logical_succs.empty()) {
      /* Linear blocks:
       * Directly insert the successor if it is a linear block as well.
       */
      for (unsigned succ : block->linear_succs) {
         if (ctx.program->blocks[succ].logical_preds.empty()) {
            live.insert(ctx.program->live.live_in[succ]);
         } else {
            for (unsigned t : ctx.program->live.live_in[succ]) {
               if (ctx.program->temp_rc[t].is_linear())
                  live.insert(t);
            }
         }
      }
   } else {
      /* Logical blocks:
       * Linear successors are either linear blocks or logical targets.
       */
      live = IDSet(ctx.program->live.live_in[block->linear_succs[0]], ctx.m);
      if (block->linear_succs.size() == 2)
         live.insert(ctx.program->live.live_in[block->linear_succs[1]]);

      /* At most one logical target needs a separate insertion. */
      if (block->logical_succs.back() != block->linear_succs.back()) {
         for (unsigned t : ctx.program->live.live_in[block->logical_succs.back()]) {
            if (!ctx.program->temp_rc[t].is_linear())
               live.insert(t);
         }
      } else {
         assert(block->logical_succs[0] == block->linear_succs[0]);
      }
   }

   /* Handle phi operands */
   if (block->linear_succs.size() == 1 && block->linear_succs[0] >= ctx.handled_once) {
      Block& succ = ctx.program->blocks[block->linear_succs[0]];
      auto it = std::find(succ.linear_preds.begin(), succ.linear_preds.end(), block->index);
      unsigned op_idx = std::distance(succ.linear_preds.begin(), it);
      for (aco_ptr<Instruction>& phi : succ.instructions) {
         if (!is_phi(phi))
            break;
         if (phi->opcode == aco_opcode::p_phi || phi->definitions[0].isKill())
            continue;
         if (phi->operands[op_idx].isTemp())
            live.insert(phi->operands[op_idx].tempId());
      }
   }
   if (block->logical_succs.size() == 1 && block->logical_succs[0] >= ctx.handled_once) {
      Block& succ = ctx.program->blocks[block->logical_succs[0]];
      auto it = std::find(succ.logical_preds.begin(), succ.logical_preds.end(), block->index);
      unsigned op_idx = std::distance(succ.logical_preds.begin(), it);
      for (aco_ptr<Instruction>& phi : succ.instructions) {
         if (!is_phi(phi))
            break;
         if (phi->opcode == aco_opcode::p_linear_phi || phi->definitions[0].isKill())
            continue;
         if (phi->operands[op_idx].isTemp())
            live.insert(phi->operands[op_idx].tempId());
      }
   }

   return live;
}

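/* Walks the instructions of one block backwards, starting from the live-out set:
 * definitions are removed from the live set and operands are added, the kill,
 * late-kill and copy-kill flags are updated, and per-instruction as well as
 * per-block register demand is recorded. If the block's live-in set grows,
 * its predecessors are put back on the worklist.
 */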
void
process_live_temps_per_block(live_ctx& ctx, Block* block)
{
   RegisterDemand new_demand;
   block->register_demand = RegisterDemand();
   IDSet live = compute_live_out(ctx, block);

   /* initialize register demand */
   for (unsigned t : live)
      new_demand += Temp(t, ctx.program->temp_rc[t]);

   /* traverse the instructions backwards */
   int idx;
   for (idx = block->instructions.size() - 1; idx >= 0; idx--) {
      Instruction* insn = block->instructions[idx].get();
      if (is_phi(insn))
         break;

      ctx.program->needs_vcc |= instr_needs_vcc(insn);
      insn->register_demand = RegisterDemand(new_demand.vgpr, new_demand.sgpr);

      bool has_vgpr_def = false;

      /* KILL */
      for (Definition& definition : insn->definitions) {
         has_vgpr_def |= definition.regClass().type() == RegType::vgpr &&
                         !definition.regClass().is_linear_vgpr();

         if (!definition.isTemp()) {
            continue;
         }
         if (definition.isFixed() && definition.physReg() == vcc)
            ctx.program->needs_vcc = true;

         const Temp temp = definition.getTemp();
         const size_t n = live.erase(temp.id());

         if (n) {
            new_demand -= temp;
            definition.setKill(false);
         } else {
            insn->register_demand += temp;
            definition.setKill(true);
         }
      }

      if (ctx.program->gfx_level >= GFX10 && insn->isVALU() &&
          insn->definitions.back().regClass() == s2) {
         /* RDNA2 ISA doc, 6.2.4. Wave64 Destination Restrictions:
          * The first pass of a wave64 VALU instruction may not overwrite a scalar value used by
          * the second half.
          */
         bool carry_in = insn->opcode == aco_opcode::v_addc_co_u32 ||
                         insn->opcode == aco_opcode::v_subb_co_u32 ||
                         insn->opcode == aco_opcode::v_subbrev_co_u32;
         for (unsigned op_idx = 0; op_idx < (carry_in ? 2 : insn->operands.size()); op_idx++) {
            if (insn->operands[op_idx].isOfType(RegType::sgpr))
               insn->operands[op_idx].setLateKill(true);
         }
      } else if (insn->opcode == aco_opcode::p_bpermute_readlane ||
                 insn->opcode == aco_opcode::p_bpermute_permlane ||
                 insn->opcode == aco_opcode::p_bpermute_shared_vgpr ||
                 insn->opcode == aco_opcode::p_dual_src_export_gfx11 ||
                 insn->opcode == aco_opcode::v_mqsad_u32_u8) {
         for (Operand& op : insn->operands)
            op.setLateKill(true);
      } else if (insn->opcode == aco_opcode::p_interp_gfx11 && insn->operands.size() == 7) {
         insn->operands[5].setLateKill(true); /* we re-use the destination reg in the middle */
      } else if (insn->opcode == aco_opcode::v_interp_p1_f32 && ctx.program->dev.has_16bank_lds) {
         insn->operands[0].setLateKill(true);
      } else if (insn->opcode == aco_opcode::p_init_scratch) {
         insn->operands.back().setLateKill(true);
      } else if (instr_info.classes[(int)insn->opcode] == instr_class::wmma) {
         insn->operands[0].setLateKill(true);
         insn->operands[1].setLateKill(true);
      }

      /* Check if a definition clobbers some operand */
      int op_idx = get_op_fixed_to_def(insn);
      if (op_idx != -1)
         insn->operands[op_idx].setClobbered(true);

      /* we need to do this in a separate loop because the next one can
       * setKill() for several operands at once and we don't want to
       * overwrite that in a later iteration */
      for (Operand& op : insn->operands) {
         op.setKill(false);
         /* Linear vgprs must be late kill: this is to ensure linear VGPR operands and
          * normal VGPR definitions don't try to use the same register, which is problematic
          * because of assignment restrictions.
          */
         if (op.hasRegClass() && op.regClass().is_linear_vgpr() && !op.isUndefined() &&
             has_vgpr_def)
            op.setLateKill(true);
      }

      /* GEN */
      RegisterDemand operand_demand;
      for (unsigned i = 0; i < insn->operands.size(); ++i) {
         Operand& operand = insn->operands[i];
         if (!operand.isTemp())
            continue;

         const Temp temp = operand.getTemp();
         if (operand.isPrecolored()) {
            assert(!operand.isLateKill());
            ctx.program->needs_vcc |= operand.physReg() == vcc;

            /* Check if this operand gets overwritten by a precolored definition. */
            if (std::any_of(insn->definitions.begin(), insn->definitions.end(),
                            [=](Definition def)
                            {
                               return def.isFixed() &&
                                      def.physReg() + def.size() > operand.physReg() &&
                                      operand.physReg() + operand.size() > def.physReg();
                            }))
               operand.setClobbered(true);

            /* Check if another precolored operand uses the same temporary.
             * This assumes that operands of one instruction are not precolored twice to
             * the same register. In this case, register pressure might be overestimated.
             */
            for (unsigned j = i + 1; !operand.isCopyKill() && j < insn->operands.size(); ++j) {
               if (insn->operands[j].isPrecolored() && insn->operands[j].getTemp() == temp) {
                  operand_demand += temp;
                  insn->operands[j].setCopyKill(true);
               }
            }
         }

         if (operand.isKill())
            continue;

         if (live.insert(temp.id()).second) {
            operand.setFirstKill(true);
            for (unsigned j = i + 1; j < insn->operands.size(); ++j) {
               if (insn->operands[j].isTemp() && insn->operands[j].getTemp() == temp)
                  insn->operands[j].setKill(true);
            }
            if (operand.isLateKill())
               insn->register_demand += temp;
            new_demand += temp;
         } else if (operand.isClobbered()) {
            operand_demand += temp;
         }
      }

      operand_demand += new_demand;
      insn->register_demand.update(operand_demand);
      block->register_demand.update(insn->register_demand);
   }

   /* handle phi definitions */
   for (int phi_idx = 0; phi_idx <= idx; phi_idx++) {
      Instruction* insn = block->instructions[phi_idx].get();
      insn->register_demand = new_demand;

      assert(is_phi(insn) && insn->definitions.size() == 1);
      if (!insn->definitions[0].isTemp()) {
         assert(insn->definitions[0].isFixed() && insn->definitions[0].physReg() == exec);
         continue;
      }
      Definition& definition = insn->definitions[0];
      ctx.program->needs_vcc |= definition.isFixed() && definition.physReg() == vcc;
      const size_t n = live.erase(definition.tempId());
      if (n && (definition.isKill() || ctx.handled_once > block->index)) {
         Block::edge_vec& preds =
            insn->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
         for (unsigned i = 0; i < preds.size(); i++) {
            if (insn->operands[i].isTemp())
               ctx.worklist = std::max<int>(ctx.worklist, preds[i]);
         }
      }
      definition.setKill(!n);
   }

   /* handle phi operands */
   for (int phi_idx = 0; phi_idx <= idx; phi_idx++) {
      Instruction* insn = block->instructions[phi_idx].get();
      assert(is_phi(insn));
      /* Ignore dead phis. */
      if (insn->definitions[0].isKill())
         continue;
      for (Operand& operand : insn->operands) {
         if (!operand.isTemp())
            continue;

         /* set if the operand is killed by this (or another) phi instruction */
         operand.setKill(!live.count(operand.tempId()));
      }
   }

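   /* If the live-in set of this block grew, its predecessors have to be
    * (re-)processed. A block without predecessors must not have any live-in
    * temporaries; in that case a value is used before its definition and
    * validate_ir() is expected to flag the IR as invalid.
    */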
   if (ctx.program->live.live_in[block->index].insert(live)) {
      if (block->linear_preds.size()) {
         assert(block->logical_preds.empty() ||
                block->logical_preds.back() <= block->linear_preds.back());
         ctx.worklist = std::max<int>(ctx.worklist, block->linear_preds.back());
      } else {
         ASSERTED bool is_valid = validate_ir(ctx.program);
         assert(!is_valid);
      }
   }

   block->live_in_demand = new_demand;
   block->register_demand.update(block->live_in_demand);
   ctx.program->max_reg_demand.update(block->register_demand);
   ctx.handled_once = std::min(ctx.handled_once, block->index);

   assert(!block->linear_preds.empty() || (new_demand == RegisterDemand() && live.empty()));
}

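/* Number of waves needed to execute one workgroup of this program. */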
unsigned
calc_waves_per_workgroup(Program* program)
{
   /* When workgroup size is not known, just go with wave_size */
   unsigned workgroup_size =
      program->workgroup_size == UINT_MAX ? program->wave_size : program->workgroup_size;

   return align(workgroup_size, program->wave_size) / program->wave_size;
}
} /* end namespace */

bool
uses_scratch(Program* program)
{
   /* RT uses scratch but we don't yet know how much. */
   return program->config->scratch_bytes_per_wave || program->stage == raytracing_cs;
}

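/* SGPRs that have to be allocated in addition to the addressable ones, to hold
 * VCC, FLAT_SCRATCH and the XNACK mask depending on generation and feature use.
 */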
uint16_t
get_extra_sgprs(Program* program)
{
   /* We don't use this register on GFX6-8 and it's removed on GFX10+. */
   bool needs_flat_scr = uses_scratch(program) && program->gfx_level == GFX9;

   if (program->gfx_level >= GFX10) {
      assert(!program->dev.xnack_enabled);
      return 0;
   } else if (program->gfx_level >= GFX8) {
      if (needs_flat_scr)
         return 6;
      else if (program->dev.xnack_enabled)
         return 4;
      else if (program->needs_vcc)
         return 2;
      else
         return 0;
   } else {
      assert(!program->dev.xnack_enabled);
      if (needs_flat_scr)
         return 4;
      else if (program->needs_vcc)
         return 2;
      else
         return 0;
   }
}

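/* SGPR allocation size for the given number of addressable SGPRs, including the
 * extra SGPRs and rounded up to the allocation granule.
 */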
uint16_t
get_sgpr_alloc(Program* program, uint16_t addressable_sgprs)
{
   uint16_t sgprs = addressable_sgprs + get_extra_sgprs(program);
   uint16_t granule = program->dev.sgpr_alloc_granule;
   return ALIGN_NPOT(std::max(sgprs, granule), granule);
}

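/* VGPR allocation size for the given number of addressable VGPRs, rounded up to
 * the allocation granule.
 */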
uint16_t
get_vgpr_alloc(Program* program, uint16_t addressable_vgprs)
{
   assert(addressable_vgprs <= program->dev.vgpr_limit);
   uint16_t granule = program->dev.vgpr_alloc_granule;
   return ALIGN_NPOT(std::max(addressable_vgprs, granule), granule);
}

unsigned
round_down(unsigned a, unsigned b)
{
   return a - (a % b);
}

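/* Maximum number of addressable SGPRs per wave for the given wave count. */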
uint16_t
get_addr_sgpr_from_waves(Program* program, uint16_t waves)
{
   /* it's not possible to allocate more than 128 SGPRs */
   uint16_t sgprs = std::min(program->dev.physical_sgprs / waves, 128);
   sgprs = round_down(sgprs, program->dev.sgpr_alloc_granule);
   sgprs -= get_extra_sgprs(program);
   return std::min(sgprs, program->dev.sgpr_limit);
}

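/* Maximum number of addressable VGPRs per wave for the given wave count,
 * accounting for the allocation granule and shared VGPRs.
 */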
uint16_t
get_addr_vgpr_from_waves(Program* program, uint16_t waves)
{
   uint16_t vgprs = program->dev.physical_vgprs / waves;
   vgprs = vgprs / program->dev.vgpr_alloc_granule * program->dev.vgpr_alloc_granule;
   vgprs -= program->config->num_shared_vgprs / 2;
   return std::min(vgprs, program->dev.vgpr_limit);
}

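/* Minimum number of waves per SIMD so that a single workgroup fits on one CU/WGP. */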
void
calc_min_waves(Program* program)
{
   unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
   unsigned simd_per_cu_wgp = program->dev.simd_per_cu * (program->wgp_mode ? 2 : 1);
   program->min_waves = DIV_ROUND_UP(waves_per_workgroup, simd_per_cu_wgp);
}

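/* Caps the given number of waves per SIMD by limits not covered by register
 * demand: LDS usage per workgroup and the hardware limit on concurrent
 * workgroups, then adjusts the result to whole workgroup multiples.
 */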
uint16_t
max_suitable_waves(Program* program, uint16_t waves)
{
   unsigned num_simd = program->dev.simd_per_cu * (program->wgp_mode ? 2 : 1);
   unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
   unsigned num_workgroups = waves * num_simd / waves_per_workgroup;

   /* Adjust #workgroups for LDS */
   unsigned lds_per_workgroup = align(program->config->lds_size * program->dev.lds_encoding_granule,
                                      program->dev.lds_alloc_granule);

   if (program->stage == fragment_fs) {
      /* PS inputs are moved from PC (parameter cache) to LDS before PS waves are launched.
       * Each PS input occupies 3x vec4 of LDS space. See Figure 10.3 in GCN3 ISA manual.
       * These limit occupancy the same way as other stages' LDS usage does.
       */
      unsigned lds_bytes_per_interp = 3 * 16;
      unsigned lds_param_bytes = lds_bytes_per_interp * program->info.ps.num_inputs;
      lds_per_workgroup += align(lds_param_bytes, program->dev.lds_alloc_granule);
   }
   unsigned lds_limit = program->wgp_mode ? program->dev.lds_limit * 2 : program->dev.lds_limit;
   if (lds_per_workgroup)
      num_workgroups = std::min(num_workgroups, lds_limit / lds_per_workgroup);

   /* Hardware limitation */
   if (waves_per_workgroup > 1)
      num_workgroups = std::min(num_workgroups, program->wgp_mode ? 32u : 16u);

   /* Adjust #waves for workgroup multiples:
    * In cases like waves_per_workgroup=3 or lds=65536 and
    * waves_per_workgroup=1, we want the maximum possible number of waves per
    * SIMD and not the minimum, so DIV_ROUND_UP is used.
    */
   unsigned workgroup_waves = num_workgroups * waves_per_workgroup;
   return DIV_ROUND_UP(workgroup_waves, num_simd);
}

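/* Recomputes the program's wave count and maximum register budget from the given
 * register demand. A demand above the limit at min_waves sets num_waves to 0,
 * signalling that register pressure must be reduced before the program can compile.
 */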
void
update_vgpr_sgpr_demand(Program* program, const RegisterDemand new_demand)
{
   assert(program->min_waves >= 1);
   uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);

   /* this won't compile, register pressure reduction necessary */
   if (new_demand.vgpr > vgpr_limit || new_demand.sgpr > sgpr_limit) {
      program->num_waves = 0;
      program->max_reg_demand = new_demand;
   } else {
      program->num_waves = program->dev.physical_sgprs / get_sgpr_alloc(program, new_demand.sgpr);
      uint16_t vgpr_demand =
         get_vgpr_alloc(program, new_demand.vgpr) + program->config->num_shared_vgprs / 2;
      program->num_waves =
         std::min<uint16_t>(program->num_waves, program->dev.physical_vgprs / vgpr_demand);
      program->num_waves = std::min(program->num_waves, program->dev.max_waves_per_simd);

      /* Adjust for LDS and workgroup multiples and calculate max_reg_demand */
      program->num_waves = max_suitable_waves(program, program->num_waves);
      program->max_reg_demand.vgpr = get_addr_vgpr_from_waves(program, program->num_waves);
      program->max_reg_demand.sgpr = get_addr_sgpr_from_waves(program, program->num_waves);
   }
}

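/* Entry point of the analysis: recomputes live-in sets and per-instruction as
 * well as per-block register demand for the whole program by iterating the
 * blocks in reverse order until a fixed point is reached.
 */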
void
live_var_analysis(Program* program)
{
   program->live.live_in.clear();
   program->live.memory.release();
   program->live.live_in.resize(program->blocks.size(), IDSet(program->live.memory));
   program->max_reg_demand = RegisterDemand();
   program->needs_vcc = program->gfx_level >= GFX10;

   live_ctx ctx;
   ctx.program = program;
   ctx.worklist = program->blocks.size() - 1;
   ctx.handled_once = program->blocks.size();

   /* this implementation assumes that the block idx corresponds to the block's position in
    * program->blocks vector */
   while (ctx.worklist >= 0) {
      process_live_temps_per_block(ctx, &program->blocks[ctx.worklist--]);
   }

   /* calculate the program's register demand and number of waves */
   if (program->progress < CompilationProgress::after_ra)
      update_vgpr_sgpr_demand(program, program->max_reg_demand);
}

} // namespace aco