1 /*
2  * Copyright © 2018 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include "aco_builder.h"
26 #include "aco_ir.h"
27 
28 #include "common/amdgfxregs.h"
29 
30 #include <algorithm>
31 #include <unordered_set>
32 #include <vector>
33 
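/* Note (illustrative reading of the macros below): most of these move windows and
 * budgets shrink as ctx.num_waves grows, since a higher wave count already hides
 * memory latency and aggressive code motion mostly costs registers. */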
34 #define SMEM_WINDOW_SIZE    (350 - ctx.num_waves * 35)
35 #define VMEM_WINDOW_SIZE    (1024 - ctx.num_waves * 64)
36 #define POS_EXP_WINDOW_SIZE 512
37 #define SMEM_MAX_MOVES      (64 - ctx.num_waves * 4)
38 #define VMEM_MAX_MOVES      (256 - ctx.num_waves * 16)
39 /* creating clauses decreases def-to-use distances, so be less aggressive when num_waves is low */
40 #define VMEM_CLAUSE_MAX_GRAB_DIST (ctx.num_waves * 2)
41 #define POS_EXP_MAX_MOVES         512
42 
43 namespace aco {
44 
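/* Result of trying to move a single candidate instruction:
 * - move_fail_ssa: the move would place a use of a temporary before its definition.
 * - move_fail_rar: the move would reorder a read of a temporary relative to the
 *   read that kills it, changing live ranges (read-after-read hazard).
 * - move_fail_pressure: the move would push register demand over the limit. */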
45 enum MoveResult {
46    move_success,
47    move_fail_ssa,
48    move_fail_rar,
49    move_fail_pressure,
50 };
51 
52 /**
53  * Cursor for downwards moves, where a single instruction is moved towards
54  * or below a group of instructions that the hardware can execute as a clause.
55  */
56 struct DownwardsCursor {
57    int source_idx; /* Current instruction to consider for moving */
58 
59    int insert_idx_clause; /* First clause instruction */
60    int insert_idx;        /* First instruction *after* the clause */
61 
62    /* Maximum demand of all clause instructions,
63     * i.e. from insert_idx_clause (inclusive) to insert_idx (exclusive) */
64    RegisterDemand clause_demand;
65    /* Maximum demand of instructions from source_idx to insert_idx_clause (both exclusive) */
66    RegisterDemand total_demand;
67 
68    DownwardsCursor(int current_idx, RegisterDemand initial_clause_demand)
69        : source_idx(current_idx - 1), insert_idx_clause(current_idx), insert_idx(current_idx + 1),
70          clause_demand(initial_clause_demand)
71    {}
72 
73    void verify_invariants(const RegisterDemand* register_demand);
74 };
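/* Illustrative cursor state mid-scan (indices are hypothetical):
 *   source_idx < skipped instructions < insert_idx_clause <= clause < insert_idx
 * total_demand covers the skipped instructions between source_idx and
 * insert_idx_clause (both exclusive); clause_demand covers [insert_idx_clause,
 * insert_idx). A successful downwards move places the candidate either at the
 * front of the clause (extending it) or immediately after the clause. */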
75 
76 /**
77  * Cursor for upwards moves, where a single instruction is moved below
78  * another instruction.
79  */
80 struct UpwardsCursor {
81    int source_idx; /* Current instruction to consider for moving */
82    int insert_idx; /* Instruction to move in front of */
83 
84    /* Maximum demand of instructions from insert_idx (inclusive) to source_idx (exclusive) */
85    RegisterDemand total_demand;
86 
87    UpwardsCursor(int source_idx_) : source_idx(source_idx_)
88    {
89       insert_idx = -1; /* to be initialized later */
90    }
91 
92    bool has_insert_idx() const { return insert_idx != -1; }
93    void verify_invariants(const RegisterDemand* register_demand);
94 };
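/* The upwards scan works in two phases: instructions after the current one are
 * skipped until the first one that depends on it is found (this fixes insert_idx),
 * then later independent instructions may be moved up to that point. */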
95 
96 struct MoveState {
97    RegisterDemand max_registers;
98 
99    Block* block;
100    Instruction* current;
101    RegisterDemand* register_demand; /* demand per instruction */
102    bool improved_rar;
103 
104    std::vector<bool> depends_on;
105    /* Two are needed because, for downwards VMEM scheduling, one needs to
106     * exclude the instructions in the clause, since new instructions in the
107     * clause are not moved past any other instructions in the clause. */
108    std::vector<bool> RAR_dependencies;
109    std::vector<bool> RAR_dependencies_clause;
110 
111    /* for moving instructions before the current instruction to after it */
112    DownwardsCursor downwards_init(int current_idx, bool improved_rar, bool may_form_clauses);
113    MoveResult downwards_move(DownwardsCursor&, bool clause);
114    void downwards_skip(DownwardsCursor&);
115 
116    /* for moving instructions after the first use of the current instruction upwards */
117    UpwardsCursor upwards_init(int source_idx, bool improved_rar);
118    bool upwards_check_deps(UpwardsCursor&);
119    void upwards_update_insert_idx(UpwardsCursor&);
120    MoveResult upwards_move(UpwardsCursor&);
121    void upwards_skip(UpwardsCursor&);
122 };
123 
124 struct sched_ctx {
125    int16_t num_waves;
126    int16_t last_SMEM_stall;
127    int last_SMEM_dep_idx;
128    MoveState mv;
129    bool schedule_pos_exports = true;
130    unsigned schedule_pos_export_div = 1;
131 };
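/* last_SMEM_dep_idx records where the first user of the most recent SMEM load ended
 * up, and last_SMEM_stall is a rough heuristic for how close that load is to
 * stalling its user (non-negative means at risk); the schedule_* functions use them
 * to avoid pulling too many instructions out from between the load and its user. */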
132 
133 /* This scheduler is a simple bottom-up pass based on ideas from
134  * "A Novel Lightweight Instruction Scheduling Algorithm for Just-In-Time Compiler"
135  * by Xiaohua Shi and Peng Guo.
136  * The basic approach is to iterate over all instructions. When a memory instruction
137  * is encountered, it tries to move independent instructions from above and below
138  * to between the memory instruction and its first user.
139  * The novelty is that this scheduler accounts for the current register pressure:
140  * instructions are only moved if the register pressure stays within a certain bound.
141  */
142 
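/* Moves the element at index `idx` so that it ends up directly before the element
 * currently at index `before`, shifting everything in between by one position.
 * For example (illustrative): with instructions [A, B, C, D], moving idx 3 before
 * index 1 yields [A, D, B, C]; the relative order of the other elements is kept. */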
143 template <typename T>
144 void
145 move_element(T begin_it, size_t idx, size_t before)
146 {
147    if (idx < before) {
148       auto begin = std::next(begin_it, idx);
149       auto end = std::next(begin_it, before);
150       std::rotate(begin, begin + 1, end);
151    } else if (idx > before) {
152       auto begin = std::next(begin_it, before);
153       auto end = std::next(begin_it, idx + 1);
154       std::rotate(begin, end - 1, end);
155    }
156 }
157 
158 void
159 DownwardsCursor::verify_invariants(const RegisterDemand* register_demand)
160 {
161    assert(source_idx < insert_idx_clause);
162    assert(insert_idx_clause < insert_idx);
163 
164 #ifndef NDEBUG
165    RegisterDemand reference_demand;
166    for (int i = source_idx + 1; i < insert_idx_clause; ++i) {
167       reference_demand.update(register_demand[i]);
168    }
169    assert(total_demand == reference_demand);
170 
171    reference_demand = {};
172    for (int i = insert_idx_clause; i < insert_idx; ++i) {
173       reference_demand.update(register_demand[i]);
174    }
175    assert(clause_demand == reference_demand);
176 #endif
177 }
178 
179 DownwardsCursor
180 MoveState::downwards_init(int current_idx, bool improved_rar_, bool may_form_clauses)
181 {
182    improved_rar = improved_rar_;
183 
184    std::fill(depends_on.begin(), depends_on.end(), false);
185    if (improved_rar) {
186       std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
187       if (may_form_clauses)
188          std::fill(RAR_dependencies_clause.begin(), RAR_dependencies_clause.end(), false);
189    }
190 
191    for (const Operand& op : current->operands) {
192       if (op.isTemp()) {
193          depends_on[op.tempId()] = true;
194          if (improved_rar && op.isFirstKill())
195             RAR_dependencies[op.tempId()] = true;
196       }
197    }
198 
199    DownwardsCursor cursor(current_idx, register_demand[current_idx]);
200    cursor.verify_invariants(register_demand);
201    return cursor;
202 }
203 
204 /* If add_to_clause is true, the current clause is extended by moving the
205  * instruction at source_idx in front of the clause. Otherwise, the instruction
206  * is moved past the end of the clause without extending it */
207 MoveResult
208 MoveState::downwards_move(DownwardsCursor& cursor, bool add_to_clause)
209 {
210    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
211 
212    for (const Definition& def : instr->definitions)
213       if (def.isTemp() && depends_on[def.tempId()])
214          return move_fail_ssa;
215 
216    /* check if one of the candidate's operands is killed by a depending instruction */
217    std::vector<bool>& RAR_deps =
218       improved_rar ? (add_to_clause ? RAR_dependencies_clause : RAR_dependencies) : depends_on;
219    for (const Operand& op : instr->operands) {
220       if (op.isTemp() && RAR_deps[op.tempId()]) {
221          // FIXME: account for difference in register pressure
222          return move_fail_rar;
223       }
224    }
225 
226    if (add_to_clause) {
227       for (const Operand& op : instr->operands) {
228          if (op.isTemp()) {
229             depends_on[op.tempId()] = true;
230             if (op.isFirstKill())
231                RAR_dependencies[op.tempId()] = true;
232          }
233       }
234    }
235 
236    const int dest_insert_idx = add_to_clause ? cursor.insert_idx_clause : cursor.insert_idx;
237    RegisterDemand register_pressure = cursor.total_demand;
238    if (!add_to_clause) {
239       register_pressure.update(cursor.clause_demand);
240    }
241 
242    /* Check the new demand of the instructions being moved over */
243    const RegisterDemand candidate_diff = get_live_changes(instr);
244    if (RegisterDemand(register_pressure - candidate_diff).exceeds(max_registers))
245       return move_fail_pressure;
246 
247    /* New demand for the moved instruction */
248    const RegisterDemand temp = get_temp_registers(instr);
249    const RegisterDemand temp2 = get_temp_registers(block->instructions[dest_insert_idx - 1]);
250    const RegisterDemand new_demand = register_demand[dest_insert_idx - 1] - temp2 + temp;
251    if (new_demand.exceeds(max_registers))
252       return move_fail_pressure;
253 
254    /* move the candidate below the memory load */
255    move_element(block->instructions.begin(), cursor.source_idx, dest_insert_idx);
256 
257    /* update register pressure */
258    move_element(register_demand, cursor.source_idx, dest_insert_idx);
259    for (int i = cursor.source_idx; i < dest_insert_idx - 1; i++)
260       register_demand[i] -= candidate_diff;
261    register_demand[dest_insert_idx - 1] = new_demand;
262    cursor.insert_idx_clause--;
263    if (cursor.source_idx != cursor.insert_idx_clause) {
264       /* Update demand if we moved over any instructions before the clause */
265       cursor.total_demand -= candidate_diff;
266    } else {
267       assert(cursor.total_demand == RegisterDemand{});
268    }
269    if (add_to_clause) {
270       cursor.clause_demand.update(new_demand);
271    } else {
272       cursor.clause_demand -= candidate_diff;
273       cursor.insert_idx--;
274    }
275 
276    cursor.source_idx--;
277    cursor.verify_invariants(register_demand);
278    return move_success;
279 }
280 
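/* The candidate at source_idx stays where it is: its operands become dependencies
 * (an earlier instruction defining one of them must not be moved past this one),
 * and its register demand is folded into total_demand. */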
281 void
282 MoveState::downwards_skip(DownwardsCursor& cursor)
283 {
284    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
285 
286    for (const Operand& op : instr->operands) {
287       if (op.isTemp()) {
288          depends_on[op.tempId()] = true;
289          if (improved_rar && op.isFirstKill()) {
290             RAR_dependencies[op.tempId()] = true;
291             RAR_dependencies_clause[op.tempId()] = true;
292          }
293       }
294    }
295    cursor.total_demand.update(register_demand[cursor.source_idx]);
296    cursor.source_idx--;
297    cursor.verify_invariants(register_demand);
298 }
299 
300 void
301 UpwardsCursor::verify_invariants(const RegisterDemand* register_demand)
302 {
303 #ifndef NDEBUG
304    if (!has_insert_idx()) {
305       return;
306    }
307 
308    assert(insert_idx < source_idx);
309 
310    RegisterDemand reference_demand;
311    for (int i = insert_idx; i < source_idx; ++i) {
312       reference_demand.update(register_demand[i]);
313    }
314    assert(total_demand == reference_demand);
315 #endif
316 }
317 
318 UpwardsCursor
319 MoveState::upwards_init(int source_idx, bool improved_rar_)
320 {
321    improved_rar = improved_rar_;
322 
323    std::fill(depends_on.begin(), depends_on.end(), false);
324    std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
325 
326    for (const Definition& def : current->definitions) {
327       if (def.isTemp())
328          depends_on[def.tempId()] = true;
329    }
330 
331    return UpwardsCursor(source_idx);
332 }
333 
334 bool
335 MoveState::upwards_check_deps(UpwardsCursor& cursor)
336 {
337    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
338    for (const Operand& op : instr->operands) {
339       if (op.isTemp() && depends_on[op.tempId()])
340          return false;
341    }
342    return true;
343 }
344 
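/* Called when the first instruction depending on the current one is found: later
 * independent instructions can be moved up to, but not past, this point. */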
345 void
346 MoveState::upwards_update_insert_idx(UpwardsCursor& cursor)
347 {
348    cursor.insert_idx = cursor.source_idx;
349    cursor.total_demand = register_demand[cursor.insert_idx];
350 }
351 
352 MoveResult
353 MoveState::upwards_move(UpwardsCursor& cursor)
354 {
355    assert(cursor.has_insert_idx());
356 
357    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
358    for (const Operand& op : instr->operands) {
359       if (op.isTemp() && depends_on[op.tempId()])
360          return move_fail_ssa;
361    }
362 
363    /* check if candidate uses/kills an operand which is used by a dependency */
364    for (const Operand& op : instr->operands) {
365       if (op.isTemp() && (!improved_rar || op.isFirstKill()) && RAR_dependencies[op.tempId()])
366          return move_fail_rar;
367    }
368 
369    /* check if register pressure is low enough: the diff is negative if register pressure is
370     * decreased */
371    const RegisterDemand candidate_diff = get_live_changes(instr);
372    const RegisterDemand temp = get_temp_registers(instr);
373    if (RegisterDemand(cursor.total_demand + candidate_diff).exceeds(max_registers))
374       return move_fail_pressure;
375    const RegisterDemand temp2 = get_temp_registers(block->instructions[cursor.insert_idx - 1]);
376    const RegisterDemand new_demand =
377       register_demand[cursor.insert_idx - 1] - temp2 + candidate_diff + temp;
378    if (new_demand.exceeds(max_registers))
379       return move_fail_pressure;
380 
381    /* move the candidate above the insert_idx */
382    move_element(block->instructions.begin(), cursor.source_idx, cursor.insert_idx);
383 
384    /* update register pressure */
385    move_element(register_demand, cursor.source_idx, cursor.insert_idx);
386    register_demand[cursor.insert_idx] = new_demand;
387    for (int i = cursor.insert_idx + 1; i <= cursor.source_idx; i++)
388       register_demand[i] += candidate_diff;
389    cursor.total_demand += candidate_diff;
390 
391    cursor.total_demand.update(register_demand[cursor.source_idx]);
392 
393    cursor.insert_idx++;
394    cursor.source_idx++;
395 
396    cursor.verify_invariants(register_demand);
397 
398    return move_success;
399 }
400 
401 void
402 MoveState::upwards_skip(UpwardsCursor& cursor)
403 {
404    if (cursor.has_insert_idx()) {
405       aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
406       for (const Definition& def : instr->definitions) {
407          if (def.isTemp())
408             depends_on[def.tempId()] = true;
409       }
410       for (const Operand& op : instr->operands) {
411          if (op.isTemp())
412             RAR_dependencies[op.tempId()] = true;
413       }
414       cursor.total_demand.update(register_demand[cursor.source_idx]);
415    }
416 
417    cursor.source_idx++;
418 
419    cursor.verify_invariants(register_demand);
420 }
421 
422 bool
423 is_gs_or_done_sendmsg(const Instruction* instr)
424 {
425    if (instr->opcode == aco_opcode::s_sendmsg) {
426       uint16_t imm = instr->sopp().imm;
427       return (imm & sendmsg_id_mask) == _sendmsg_gs || (imm & sendmsg_id_mask) == _sendmsg_gs_done;
428    }
429    return false;
430 }
431 
432 bool
433 is_done_sendmsg(const Instruction* instr)
434 {
435    if (instr->opcode == aco_opcode::s_sendmsg)
436       return (instr->sopp().imm & sendmsg_id_mask) == _sendmsg_gs_done;
437    return false;
438 }
439 
440 memory_sync_info
441 get_sync_info_with_hack(const Instruction* instr)
442 {
443    memory_sync_info sync = get_sync_info(instr);
444    if (instr->isSMEM() && !instr->operands.empty() && instr->operands[0].bytes() == 16) {
445       // FIXME: currently, it doesn't seem beneficial to omit this due to how our scheduler works
446       sync.storage = (storage_class)(sync.storage | storage_buffer);
447       sync.semantics =
448          (memory_semantics)((sync.semantics | semantic_private) & ~semantic_can_reorder);
449    }
450    return sync;
451 }
452 
453 struct memory_event_set {
454    bool has_control_barrier;
455 
456    unsigned bar_acquire;
457    unsigned bar_release;
458    unsigned bar_classes;
459 
460    unsigned access_acquire;
461    unsigned access_release;
462    unsigned access_relaxed;
463    unsigned access_atomic;
464 };
465 
466 struct hazard_query {
467    bool contains_spill;
468    bool contains_sendmsg;
469    bool uses_exec;
470    memory_event_set mem_events;
471    unsigned aliasing_storage;      /* storage classes which are accessed (non-SMEM) */
472    unsigned aliasing_storage_smem; /* storage classes which are accessed (SMEM) */
473 };
474 
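/* Typical usage: init_hazard_query(), then add_to_hazard_query() for every
 * instruction a candidate would have to be moved across, and perform_hazard_query()
 * for each new candidate to check whether the reordering is safe. */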
475 void
476 init_hazard_query(hazard_query* query)
477 {
478    query->contains_spill = false;
479    query->contains_sendmsg = false;
480    query->uses_exec = false;
481    memset(&query->mem_events, 0, sizeof(query->mem_events));
482    query->aliasing_storage = 0;
483    query->aliasing_storage_smem = 0;
484 }
485 
486 void
487 add_memory_event(memory_event_set* set, Instruction* instr, memory_sync_info* sync)
488 {
489    set->has_control_barrier |= is_done_sendmsg(instr);
490    if (instr->opcode == aco_opcode::p_barrier) {
491       Pseudo_barrier_instruction& bar = instr->barrier();
492       if (bar.sync.semantics & semantic_acquire)
493          set->bar_acquire |= bar.sync.storage;
494       if (bar.sync.semantics & semantic_release)
495          set->bar_release |= bar.sync.storage;
496       set->bar_classes |= bar.sync.storage;
497 
498       set->has_control_barrier |= bar.exec_scope > scope_invocation;
499    }
500 
501    if (!sync->storage)
502       return;
503 
504    if (sync->semantics & semantic_acquire)
505       set->access_acquire |= sync->storage;
506    if (sync->semantics & semantic_release)
507       set->access_release |= sync->storage;
508 
509    if (!(sync->semantics & semantic_private)) {
510       if (sync->semantics & semantic_atomic)
511          set->access_atomic |= sync->storage;
512       else
513          set->access_relaxed |= sync->storage;
514    }
515 }
516 
517 void
518 add_to_hazard_query(hazard_query* query, Instruction* instr)
519 {
520    if (instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload)
521       query->contains_spill = true;
522    query->contains_sendmsg |= instr->opcode == aco_opcode::s_sendmsg;
523    query->uses_exec |= needs_exec_mask(instr);
524 
525    memory_sync_info sync = get_sync_info_with_hack(instr);
526 
527    add_memory_event(&query->mem_events, instr, &sync);
528 
529    if (!(sync.semantics & semantic_can_reorder)) {
530       unsigned storage = sync.storage;
531       /* images and buffer/global memory can alias */ // TODO: more precisely, buffer images and
532                                                       // buffer/global memory can alias
533       if (storage & (storage_buffer | storage_image))
534          storage |= storage_buffer | storage_image;
535       if (instr->isSMEM())
536          query->aliasing_storage_smem |= storage;
537       else
538          query->aliasing_storage |= storage;
539    }
540 }
541 
542 enum HazardResult {
543    hazard_success,
544    hazard_fail_reorder_vmem_smem,
545    hazard_fail_reorder_ds,
546    hazard_fail_reorder_sendmsg,
547    hazard_fail_spill,
548    hazard_fail_export,
549    hazard_fail_barrier,
550    /* Must stop at these failures. The hazard query code doesn't consider them
551     * when added. */
552    hazard_fail_exec,
553    hazard_fail_unreorderable,
554 };
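/* The failures before hazard_fail_exec are handled per call site: the candidate is
 * usually skipped and added to the hazard query, but some call sites stop the scan
 * instead. */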
555 
556 HazardResult
557 perform_hazard_query(hazard_query* query, Instruction* instr, bool upwards)
558 {
559    /* don't schedule discards downwards */
560    if (!upwards && instr->opcode == aco_opcode::p_exit_early_if)
561       return hazard_fail_unreorderable;
562 
563    if (query->uses_exec) {
564       for (const Definition& def : instr->definitions) {
565          if (def.isFixed() && def.physReg() == exec)
566             return hazard_fail_exec;
567       }
568    }
569 
570    /* don't move exports so that they stay closer together */
571    if (instr->isEXP())
572       return hazard_fail_export;
573 
574    /* don't move non-reorderable instructions */
575    if (instr->opcode == aco_opcode::s_memtime || instr->opcode == aco_opcode::s_memrealtime ||
576        instr->opcode == aco_opcode::s_setprio || instr->opcode == aco_opcode::s_getreg_b32 ||
577        instr->opcode == aco_opcode::p_init_scratch || instr->opcode == aco_opcode::p_jump_to_epilog)
578       return hazard_fail_unreorderable;
579 
580    memory_event_set instr_set;
581    memset(&instr_set, 0, sizeof(instr_set));
582    memory_sync_info sync = get_sync_info_with_hack(instr);
583    add_memory_event(&instr_set, instr, &sync);
584 
585    memory_event_set* first = &instr_set;
586    memory_event_set* second = &query->mem_events;
587    if (upwards)
588       std::swap(first, second);
589 
590    /* everything after barrier(acquire) happens after the atomics/control_barriers before it;
591     * everything after load(acquire) happens after the load
592     */
593    if ((first->has_control_barrier || first->access_atomic) && second->bar_acquire)
594       return hazard_fail_barrier;
595    if (((first->access_acquire || first->bar_acquire) && second->bar_classes) ||
596        ((first->access_acquire | first->bar_acquire) &
597         (second->access_relaxed | second->access_atomic)))
598       return hazard_fail_barrier;
599 
600    /* everything before barrier(release) happens before the atomics/control_barriers after it;
601     * everything before store(release) happens before the store
602     */
603    if (first->bar_release && (second->has_control_barrier || second->access_atomic))
604       return hazard_fail_barrier;
605    if ((first->bar_classes && (second->bar_release || second->access_release)) ||
606        ((first->access_relaxed | first->access_atomic) &
607         (second->bar_release | second->access_release)))
608       return hazard_fail_barrier;
609 
610    /* don't move memory barriers around other memory barriers */
611    if (first->bar_classes && second->bar_classes)
612       return hazard_fail_barrier;
613 
614    /* Don't move memory accesses to before control barriers. I don't think
615     * this is necessary for the Vulkan memory model, but it might be for GLSL450. */
616    unsigned control_classes =
617       storage_buffer | storage_atomic_counter | storage_image | storage_shared |
618       storage_task_payload;
619    if (first->has_control_barrier &&
620        ((second->access_atomic | second->access_relaxed) & control_classes))
621       return hazard_fail_barrier;
622 
623    /* don't move memory loads/stores past potentially aliasing loads/stores */
624    unsigned aliasing_storage =
625       instr->isSMEM() ? query->aliasing_storage_smem : query->aliasing_storage;
626    if ((sync.storage & aliasing_storage) && !(sync.semantics & semantic_can_reorder)) {
627       unsigned intersect = sync.storage & aliasing_storage;
628       if (intersect & storage_shared)
629          return hazard_fail_reorder_ds;
630       return hazard_fail_reorder_vmem_smem;
631    }
632 
633    if ((instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload) &&
634        query->contains_spill)
635       return hazard_fail_spill;
636 
637    if (instr->opcode == aco_opcode::s_sendmsg && query->contains_sendmsg)
638       return hazard_fail_reorder_sendmsg;
639 
640    return hazard_success;
641 }
642 
643 void
644 schedule_SMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& register_demand,
645               Instruction* current, int idx)
646 {
647    assert(idx != 0);
648    int window_size = SMEM_WINDOW_SIZE;
649    int max_moves = SMEM_MAX_MOVES;
650    int16_t k = 0;
651 
652    /* don't move s_memtime/s_memrealtime */
653    if (current->opcode == aco_opcode::s_memtime || current->opcode == aco_opcode::s_memrealtime)
654       return;
655 
656    /* first, check if we have instructions before current to move down */
657    hazard_query hq;
658    init_hazard_query(&hq);
659    add_to_hazard_query(&hq, current);
660 
661    DownwardsCursor cursor = ctx.mv.downwards_init(idx, false, false);
662 
663    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
664         candidate_idx--) {
665       assert(candidate_idx >= 0);
666       assert(candidate_idx == cursor.source_idx);
667       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
668 
669       /* break if we'd make the previous SMEM instruction stall */
670       bool can_stall_prev_smem =
671          idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
672       if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
673          break;
674 
675       /* break when encountering another MEM instruction, logical_start or barriers */
676       if (candidate->opcode == aco_opcode::p_logical_start)
677          break;
678       /* only move VMEM instructions below descriptor loads. be more aggressive at higher num_waves
679        * to help create more vmem clauses */
680       if ((candidate->isVMEM() || candidate->isFlatLike()) &&
681           (cursor.insert_idx - cursor.source_idx > (ctx.num_waves * 4) ||
682            current->operands[0].size() == 4))
683          break;
684       /* don't move descriptor loads below buffer loads */
685       if (candidate->format == Format::SMEM && current->operands[0].size() == 4 &&
686           candidate->operands[0].size() == 2)
687          break;
688 
689       bool can_move_down = true;
690 
691       HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
692       if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
693           haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
694           haz == hazard_fail_export)
695          can_move_down = false;
696       else if (haz != hazard_success)
697          break;
698 
699       /* don't use LDS/GDS instructions to hide latency, since that can
700        * significantly worsen LDS scheduling */
701       if (candidate->isDS() || !can_move_down) {
702          add_to_hazard_query(&hq, candidate.get());
703          ctx.mv.downwards_skip(cursor);
704          continue;
705       }
706 
707       MoveResult res = ctx.mv.downwards_move(cursor, false);
708       if (res == move_fail_ssa || res == move_fail_rar) {
709          add_to_hazard_query(&hq, candidate.get());
710          ctx.mv.downwards_skip(cursor);
711          continue;
712       } else if (res == move_fail_pressure) {
713          break;
714       }
715 
716       if (candidate_idx < ctx.last_SMEM_dep_idx)
717          ctx.last_SMEM_stall++;
718       k++;
719    }
720 
721    /* find the first instruction depending on current or find another MEM */
722    UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, false);
723 
724    bool found_dependency = false;
725    /* second, check if we have instructions after current to move up */
726    for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int)idx + window_size;
727         candidate_idx++) {
728       assert(candidate_idx == up_cursor.source_idx);
729       assert(candidate_idx < (int)block->instructions.size());
730       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
731 
732       if (candidate->opcode == aco_opcode::p_logical_end)
733          break;
734 
735       /* check if candidate depends on current */
736       bool is_dependency = !found_dependency && !ctx.mv.upwards_check_deps(up_cursor);
737       /* no need to steal from following VMEM instructions */
738       if (is_dependency && (candidate->isVMEM() || candidate->isFlatLike()))
739          break;
740 
741       if (found_dependency) {
742          HazardResult haz = perform_hazard_query(&hq, candidate.get(), true);
743          if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
744              haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
745              haz == hazard_fail_export)
746             is_dependency = true;
747          else if (haz != hazard_success)
748             break;
749       }
750 
751       if (is_dependency) {
752          if (!found_dependency) {
753             ctx.mv.upwards_update_insert_idx(up_cursor);
754             init_hazard_query(&hq);
755             found_dependency = true;
756          }
757       }
758 
759       if (is_dependency || !found_dependency) {
760          if (found_dependency)
761             add_to_hazard_query(&hq, candidate.get());
762          else
763             k++;
764          ctx.mv.upwards_skip(up_cursor);
765          continue;
766       }
767 
768       MoveResult res = ctx.mv.upwards_move(up_cursor);
769       if (res == move_fail_ssa || res == move_fail_rar) {
770          /* no need to steal from following VMEM instructions */
771          if (res == move_fail_ssa && (candidate->isVMEM() || candidate->isFlatLike()))
772             break;
773          add_to_hazard_query(&hq, candidate.get());
774          ctx.mv.upwards_skip(up_cursor);
775          continue;
776       } else if (res == move_fail_pressure) {
777          break;
778       }
779       k++;
780    }
781 
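   /* Record where this load's first user ended up and a rough stall heuristic
    * (10 - num_waves - k): the more waves or the more instructions already placed
    * in between, the safer it is for later scheduling to move code out of this gap. */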
782    ctx.last_SMEM_dep_idx = found_dependency ? up_cursor.insert_idx : 0;
783    ctx.last_SMEM_stall = 10 - ctx.num_waves - k;
784 }
785 
786 void
787 schedule_VMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& register_demand,
788               Instruction* current, int idx)
789 {
790    assert(idx != 0);
791    int window_size = VMEM_WINDOW_SIZE;
792    int max_moves = VMEM_MAX_MOVES;
793    int clause_max_grab_dist = VMEM_CLAUSE_MAX_GRAB_DIST;
794    bool only_clauses = false;
795    int16_t k = 0;
796 
797    /* first, check if we have instructions before current to move down */
798    hazard_query indep_hq;
799    hazard_query clause_hq;
800    init_hazard_query(&indep_hq);
801    init_hazard_query(&clause_hq);
802    add_to_hazard_query(&indep_hq, current);
803 
804    DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, true);
805 
806    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
807         candidate_idx--) {
808       assert(candidate_idx == cursor.source_idx);
809       assert(candidate_idx >= 0);
810       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
811       bool is_vmem = candidate->isVMEM() || candidate->isFlatLike();
812 
813       /* break when encountering another VMEM instruction, logical_start or barriers */
814       if (candidate->opcode == aco_opcode::p_logical_start)
815          break;
816 
817       /* break if we'd make the previous SMEM instruction stall */
818       bool can_stall_prev_smem =
819          idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
820       if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
821          break;
822 
823       bool part_of_clause = false;
824       if (current->isVMEM() == candidate->isVMEM()) {
825          int grab_dist = cursor.insert_idx_clause - candidate_idx;
826          /* We can't easily tell how much this will decrease the def-to-use
827           * distances, so just use how far it will be moved as a heuristic. */
828          part_of_clause =
829             grab_dist < clause_max_grab_dist + k && should_form_clause(current, candidate.get());
830       }
831 
832       /* if current depends on candidate, add additional dependencies and continue */
833       bool can_move_down = !is_vmem || part_of_clause || candidate->definitions.empty();
834       if (only_clauses) {
835          /* In case of high register pressure, only try to form clauses,
836           * and only if the previous clause is not larger
837           * than the current one will be.
838           */
839          if (part_of_clause) {
840             int clause_size = cursor.insert_idx - cursor.insert_idx_clause;
841             int prev_clause_size = 1;
842             while (should_form_clause(current,
843                                       block->instructions[candidate_idx - prev_clause_size].get()))
844                prev_clause_size++;
845             if (prev_clause_size > clause_size + 1)
846                break;
847          } else {
848             can_move_down = false;
849          }
850       }
851       HazardResult haz =
852          perform_hazard_query(part_of_clause ? &clause_hq : &indep_hq, candidate.get(), false);
853       if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
854           haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
855           haz == hazard_fail_export)
856          can_move_down = false;
857       else if (haz != hazard_success)
858          break;
859 
860       if (!can_move_down) {
861          if (part_of_clause)
862             break;
863          add_to_hazard_query(&indep_hq, candidate.get());
864          add_to_hazard_query(&clause_hq, candidate.get());
865          ctx.mv.downwards_skip(cursor);
866          continue;
867       }
868 
869       Instruction* candidate_ptr = candidate.get();
870       MoveResult res = ctx.mv.downwards_move(cursor, part_of_clause);
871       if (res == move_fail_ssa || res == move_fail_rar) {
872          if (part_of_clause)
873             break;
874          add_to_hazard_query(&indep_hq, candidate.get());
875          add_to_hazard_query(&clause_hq, candidate.get());
876          ctx.mv.downwards_skip(cursor);
877          continue;
878       } else if (res == move_fail_pressure) {
879          only_clauses = true;
880          if (part_of_clause)
881             break;
882          add_to_hazard_query(&indep_hq, candidate.get());
883          add_to_hazard_query(&clause_hq, candidate.get());
884          ctx.mv.downwards_skip(cursor);
885          continue;
886       }
887       if (part_of_clause)
888          add_to_hazard_query(&indep_hq, candidate_ptr);
889       else
890          k++;
891       if (candidate_idx < ctx.last_SMEM_dep_idx)
892          ctx.last_SMEM_stall++;
893    }
894 
895    /* find the first instruction depending on current or find another VMEM */
896    UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, true);
897 
898    bool found_dependency = false;
899    /* second, check if we have instructions after current to move up */
900    for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int)idx + window_size;
901         candidate_idx++) {
902       assert(candidate_idx == up_cursor.source_idx);
903       assert(candidate_idx < (int)block->instructions.size());
904       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
905       bool is_vmem = candidate->isVMEM() || candidate->isFlatLike();
906 
907       if (candidate->opcode == aco_opcode::p_logical_end)
908          break;
909 
910       /* check if candidate depends on current */
911       bool is_dependency = false;
912       if (found_dependency) {
913          HazardResult haz = perform_hazard_query(&indep_hq, candidate.get(), true);
914          if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
915              haz == hazard_fail_reorder_vmem_smem || haz == hazard_fail_reorder_sendmsg ||
916              haz == hazard_fail_barrier || haz == hazard_fail_export)
917             is_dependency = true;
918          else if (haz != hazard_success)
919             break;
920       }
921 
922       is_dependency |= !found_dependency && !ctx.mv.upwards_check_deps(up_cursor);
923       if (is_dependency) {
924          if (!found_dependency) {
925             ctx.mv.upwards_update_insert_idx(up_cursor);
926             init_hazard_query(&indep_hq);
927             found_dependency = true;
928          }
929       } else if (is_vmem) {
930          /* don't move up dependencies of other VMEM instructions */
931          for (const Definition& def : candidate->definitions) {
932             if (def.isTemp())
933                ctx.mv.depends_on[def.tempId()] = true;
934          }
935       }
936 
937       if (is_dependency || !found_dependency) {
938          if (found_dependency)
939             add_to_hazard_query(&indep_hq, candidate.get());
940          else
941             k++;
942          ctx.mv.upwards_skip(up_cursor);
943          continue;
944       }
945 
946       MoveResult res = ctx.mv.upwards_move(up_cursor);
947       if (res == move_fail_ssa || res == move_fail_rar) {
948          add_to_hazard_query(&indep_hq, candidate.get());
949          ctx.mv.upwards_skip(up_cursor);
950          continue;
951       } else if (res == move_fail_pressure) {
952          break;
953       }
954       k++;
955    }
956 }
957 
958 void
959 schedule_position_export(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& register_demand,
960                          Instruction* current, int idx)
961 {
962    assert(idx != 0);
963    int window_size = POS_EXP_WINDOW_SIZE / ctx.schedule_pos_export_div;
964    int max_moves = POS_EXP_MAX_MOVES / ctx.schedule_pos_export_div;
965    int16_t k = 0;
966 
967    DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, false);
968 
969    hazard_query hq;
970    init_hazard_query(&hq);
971    add_to_hazard_query(&hq, current);
972 
973    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
974         candidate_idx--) {
975       assert(candidate_idx >= 0);
976       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
977 
978       if (candidate->opcode == aco_opcode::p_logical_start)
979          break;
980       if (candidate->isVMEM() || candidate->isSMEM() || candidate->isFlatLike())
981          break;
982 
983       HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
984       if (haz == hazard_fail_exec || haz == hazard_fail_unreorderable)
985          break;
986 
987       if (haz != hazard_success) {
988          add_to_hazard_query(&hq, candidate.get());
989          ctx.mv.downwards_skip(cursor);
990          continue;
991       }
992 
993       MoveResult res = ctx.mv.downwards_move(cursor, false);
994       if (res == move_fail_ssa || res == move_fail_rar) {
995          add_to_hazard_query(&hq, candidate.get());
996          ctx.mv.downwards_skip(cursor);
997          continue;
998       } else if (res == move_fail_pressure) {
999          break;
1000       }
1001       k++;
1002    }
1003 }
1004 
1005 void
1006 schedule_block(sched_ctx& ctx, Program* program, Block* block, live& live_vars)
1007 {
1008    ctx.last_SMEM_dep_idx = 0;
1009    ctx.last_SMEM_stall = INT16_MIN;
1010    ctx.mv.block = block;
1011    ctx.mv.register_demand = live_vars.register_demand[block->index].data();
1012 
1013    /* go through all instructions and find memory loads */
1014    for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
1015       Instruction* current = block->instructions[idx].get();
1016 
1017       if (block->kind & block_kind_export_end && current->isEXP() && ctx.schedule_pos_exports) {
1018          unsigned target = current->exp().dest;
1019          if (target >= V_008DFC_SQ_EXP_POS && target < V_008DFC_SQ_EXP_PRIM) {
1020             ctx.mv.current = current;
1021             schedule_position_export(ctx, block, live_vars.register_demand[block->index], current,
1022                                      idx);
1023          }
1024       }
1025 
1026       if (current->definitions.empty())
1027          continue;
1028 
1029       if (current->isVMEM() || current->isFlatLike()) {
1030          ctx.mv.current = current;
1031          schedule_VMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
1032       }
1033 
1034       if (current->isSMEM()) {
1035          ctx.mv.current = current;
1036          schedule_SMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
1037       }
1038    }
1039 
1040    /* resummarize the block's register demand */
1041    block->register_demand = RegisterDemand();
1042    for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
1043       block->register_demand.update(live_vars.register_demand[block->index][idx]);
1044    }
1045 }
1046 
1047 void
1048 schedule_program(Program* program, live& live_vars)
1049 {
1050    /* don't use program->max_reg_demand because that is affected by max_waves_per_simd */
1051    RegisterDemand demand;
1052    for (Block& block : program->blocks)
1053       demand.update(block.register_demand);
1054    demand.vgpr += program->config->num_shared_vgprs / 2;
1055 
1056    sched_ctx ctx;
1057    ctx.mv.depends_on.resize(program->peekAllocationId());
1058    ctx.mv.RAR_dependencies.resize(program->peekAllocationId());
1059    ctx.mv.RAR_dependencies_clause.resize(program->peekAllocationId());
1060    /* Allowing the scheduler to reduce the number of waves to as low as 5
1061     * improves performance of Thrones of Britannia significantly and doesn't
1062     * seem to hurt anything else. */
1063    // TODO: account for possible uneven num_waves on GFX10+
1064    unsigned wave_fac = program->dev.physical_vgprs / 256;
1065    if (program->num_waves <= 5 * wave_fac)
1066       ctx.num_waves = program->num_waves;
1067    else if (demand.vgpr >= 29)
1068       ctx.num_waves = 5 * wave_fac;
1069    else if (demand.vgpr >= 25)
1070       ctx.num_waves = 6 * wave_fac;
1071    else
1072       ctx.num_waves = 7 * wave_fac;
1073    ctx.num_waves = std::max<uint16_t>(ctx.num_waves, program->min_waves);
1074    ctx.num_waves = std::min<uint16_t>(ctx.num_waves, program->num_waves);
1075    ctx.num_waves = max_suitable_waves(program, ctx.num_waves);
1076 
1077    /* VMEM_MAX_MOVES and such assume pre-GFX10 wave count */
1078    ctx.num_waves = std::max<uint16_t>(ctx.num_waves / wave_fac, 1);
1079 
1080    assert(ctx.num_waves > 0);
1081    ctx.mv.max_registers = {int16_t(get_addr_vgpr_from_waves(program, ctx.num_waves * wave_fac) - 2),
1082                            int16_t(get_addr_sgpr_from_waves(program, ctx.num_waves * wave_fac))};
1083 
1084    /* NGG culling shaders are very sensitive to position export scheduling.
1085     * Schedule less aggressively when early primitive export is used, and
1086     * keep the position export at the very bottom when late primitive export is used.
1087     */
1088    if (program->info.has_ngg_culling && program->stage.num_sw_stages() == 1) {
1089       if (!program->info.has_ngg_early_prim_export)
1090          ctx.schedule_pos_exports = false;
1091       else
1092          ctx.schedule_pos_export_div = 4;
1093    }
1094 
1095    for (Block& block : program->blocks)
1096       schedule_block(ctx, program, &block, live_vars);
1097 
1098    /* update max_reg_demand and num_waves */
1099    RegisterDemand new_demand;
1100    for (Block& block : program->blocks) {
1101       new_demand.update(block.register_demand);
1102    }
1103    update_vgpr_sgpr_demand(program, new_demand);
1104 
1105 /* if enabled, this code asserts that register_demand is updated correctly */
1106 #if 0
1107    int prev_num_waves = program->num_waves;
1108    const RegisterDemand prev_max_demand = program->max_reg_demand;
1109 
1110    std::vector<RegisterDemand> demands(program->blocks.size());
1111    for (unsigned j = 0; j < program->blocks.size(); j++) {
1112       demands[j] = program->blocks[j].register_demand;
1113    }
1114 
1115    live live_vars2 = aco::live_var_analysis(program);
1116 
1117    for (unsigned j = 0; j < program->blocks.size(); j++) {
1118       Block &b = program->blocks[j];
1119       for (unsigned i = 0; i < b.instructions.size(); i++)
1120          assert(live_vars.register_demand[b.index][i] == live_vars2.register_demand[b.index][i]);
1121       assert(b.register_demand == demands[j]);
1122    }
1123 
1124    assert(program->max_reg_demand == prev_max_demand);
1125    assert(program->num_waves == prev_num_waves);
1126 #endif
1127 }
1128 
1129 } // namespace aco
1130