1 /*
2  * Copyright © 2015 Thomas Helland
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "nir.h"
25 #include "nir_constant_expressions.h"
26 #include "nir_loop_analyze.h"
27 
28 typedef enum {
29    undefined,
30    invariant,
31    not_invariant,
32    basic_induction
33 } nir_loop_variable_type;
34 
35 typedef struct nir_basic_induction_var {
36    nir_alu_instr *alu;                      /* The def of the alu-operation */
37    nir_ssa_def *def_outside_loop;           /* The phi-src outside the loop */
38 } nir_basic_induction_var;
39 
40 typedef struct {
41    /* A link for the work list */
42    struct list_head process_link;
43 
44    bool in_loop;
45 
46    /* The ssa_def associated with this info */
47    nir_ssa_def *def;
48 
49    /* The type of this ssa_def */
50    nir_loop_variable_type type;
51 
52    /* If this is of type basic_induction */
53    struct nir_basic_induction_var *ind;
54 
55    /* True if variable is in an if branch */
56    bool in_if_branch;
57 
58    /* True if variable is in a nested loop */
59    bool in_nested_loop;
60 
61 } nir_loop_variable;
62 
63 typedef struct {
64    /* The loop we store information for */
65    nir_loop *loop;
66 
67    /* Loop_variable for all ssa_defs in function */
68    nir_loop_variable *loop_vars;
69 
70    /* A list of the loop_vars to analyze */
71    struct list_head process_list;
72 
73    nir_variable_mode indirect_mask;
74 
75 } loop_info_state;
76 
77 static nir_loop_variable *
78 get_loop_var(nir_ssa_def *value, loop_info_state *state)
79 {
80    return &(state->loop_vars[value->index]);
81 }
82 
83 typedef struct {
84    loop_info_state *state;
85    bool in_if_branch;
86    bool in_nested_loop;
87 } init_loop_state;
88 
89 static bool
90 init_loop_def(nir_ssa_def *def, void *void_init_loop_state)
91 {
92    init_loop_state *loop_init_state = void_init_loop_state;
93    nir_loop_variable *var = get_loop_var(def, loop_init_state->state);
94 
95    if (loop_init_state->in_nested_loop) {
96       var->in_nested_loop = true;
97    } else if (loop_init_state->in_if_branch) {
98       var->in_if_branch = true;
99    } else {
100       /* Add to the tail of the list. That way we start at the beginning of
101        * the defs in the loop instead of the end when walking the list. This
102        * means fewer recursive calls. Only add defs that are not in nested
103        * loops or conditional blocks.
104        */
105       list_addtail(&var->process_link, &loop_init_state->state->process_list);
106    }
107 
108    var->in_loop = true;
109 
110    return true;
111 }
112 
113 /** Calculate an estimated cost in number of instructions
114  *
115  * We do this so that we don't unroll loops which will later get massively
116  * inflated due to int64 or fp64 lowering.  The estimates provided here don't
117  * have to be massively accurate; they just have to be good enough that loop
118  * unrolling doesn't cause things to blow up too much.
119  */
120 static unsigned
121 instr_cost(nir_instr *instr, const nir_shader_compiler_options *options)
122 {
123    if (instr->type == nir_instr_type_intrinsic ||
124        instr->type == nir_instr_type_tex)
125       return 1;
126 
127    if (instr->type != nir_instr_type_alu)
128       return 0;
129 
130    nir_alu_instr *alu = nir_instr_as_alu(instr);
131    const nir_op_info *info = &nir_op_infos[alu->op];
132 
133    /* Assume everything 16 or 32-bit is cheap.
134     *
135     * There are no 64-bit ops that don't have a 64-bit thing as their
136     * destination or first source.
137     */
138    if (nir_dest_bit_size(alu->dest.dest) < 64 &&
139        nir_src_bit_size(alu->src[0].src) < 64)
140       return 1;
141 
142    bool is_fp64 = nir_dest_bit_size(alu->dest.dest) == 64 &&
143       nir_alu_type_get_base_type(info->output_type) == nir_type_float;
144    for (unsigned i = 0; i < info->num_inputs; i++) {
145       if (nir_src_bit_size(alu->src[i].src) == 64 &&
146           nir_alu_type_get_base_type(info->input_types[i]) == nir_type_float)
147          is_fp64 = true;
148    }
149 
150    if (is_fp64) {
151       /* If it's something lowered normally, it's expensive. */
152       unsigned cost = 1;
153       if (options->lower_doubles_options &
154           nir_lower_doubles_op_to_options_mask(alu->op))
155          cost *= 20;
156 
157       /* If it's full software, it's even more expensive */
158       if (options->lower_doubles_options & nir_lower_fp64_full_software)
159          cost *= 100;
160 
161       return cost;
162    } else {
163       if (options->lower_int64_options &
164           nir_lower_int64_op_to_options_mask(alu->op)) {
165          /* These require doing the division algorithm. */
166          if (alu->op == nir_op_idiv || alu->op == nir_op_udiv ||
167              alu->op == nir_op_imod || alu->op == nir_op_umod ||
168              alu->op == nir_op_irem)
169             return 100;
170 
171          /* Other int64 lowering isn't usually all that expensive */
172          return 5;
173       }
174 
175       return 1;
176    }
177 }
178 
179 static bool
180 init_loop_block(nir_block *block, loop_info_state *state,
181                 bool in_if_branch, bool in_nested_loop,
182                 const nir_shader_compiler_options *options)
183 {
184    init_loop_state init_state = {.in_if_branch = in_if_branch,
185                                  .in_nested_loop = in_nested_loop,
186                                  .state = state };
187 
188    nir_foreach_instr(instr, block) {
189       state->loop->info->instr_cost += instr_cost(instr, options);
190       nir_foreach_ssa_def(instr, init_loop_def, &init_state);
191    }
192 
193    return true;
194 }
195 
196 static inline bool
197 is_var_alu(nir_loop_variable *var)
198 {
199    return var->def->parent_instr->type == nir_instr_type_alu;
200 }
201 
202 static inline bool
203 is_var_phi(nir_loop_variable *var)
204 {
205    return var->def->parent_instr->type == nir_instr_type_phi;
206 }
207 
208 static inline bool
209 mark_invariant(nir_ssa_def *def, loop_info_state *state)
210 {
211    nir_loop_variable *var = get_loop_var(def, state);
212 
213    if (var->type == invariant)
214       return true;
215 
216    if (!var->in_loop) {
217       var->type = invariant;
218       return true;
219    }
220 
221    if (var->type == not_invariant)
222       return false;
223 
224    if (is_var_alu(var)) {
225       nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
226 
227       for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
228          if (!mark_invariant(alu->src[i].src.ssa, state)) {
229             var->type = not_invariant;
230             return false;
231          }
232       }
233       var->type = invariant;
234       return true;
235    }
236 
237    /* Phis shouldn't be invariant except when one operand is invariant and the
238     * other is the phi itself; those should be removed by opt_remove_phis.
239     * load_consts are already set to invariant and constant during init,
240     * and so have returned earlier. Everything else is marked not invariant.
241     */
242    var->type = not_invariant;
243    return false;
244 }
245 
246 static void
247 compute_invariance_information(loop_info_state *state)
248 {
249    /* An expression is invariant in a loop L if:
250     *  (base cases)
251     *    – it’s a constant
252     *    – it’s a variable use, all of whose single defs are outside of L
253     *  (inductive cases)
254     *    – it’s a pure computation all of whose args are loop invariant
255     *    – it’s a variable use whose single reaching def has a
256     *      loop-invariant rhs
257     */
258    list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
259                             process_link) {
260       assert(!var->in_if_branch && !var->in_nested_loop);
261 
262       if (mark_invariant(var->def, state))
263          list_del(&var->process_link);
264    }
265 }
266 
267 /* If all of the instruction sources point to identical ALU instructions (as
268  * per nir_instrs_equal), return one of the ALU instructions.  Otherwise,
269  * return NULL.
270  */
271 static nir_alu_instr *
272 phi_instr_as_alu(nir_phi_instr *phi)
273 {
274    nir_alu_instr *first = NULL;
275    nir_foreach_phi_src(src, phi) {
276       assert(src->src.is_ssa);
277       if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
278          return NULL;
279 
280       nir_alu_instr *alu = nir_instr_as_alu(src->src.ssa->parent_instr);
281       if (first == NULL) {
282          first = alu;
283       } else {
284          if (!nir_instrs_equal(&first->instr, &alu->instr))
285             return NULL;
286       }
287    }
288 
289    return first;
290 }
291 
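/* Check that an unsized ALU source reads its components in order, i.e. that
 * swizzle channel i is i for every component written by the destination.
 */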
292 static bool
293 alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
294 {
295    assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
296    assert(alu->dest.dest.is_ssa);
297    for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
298       if (alu->src[src_idx].swizzle[i] != i)
299          return false;
300    }
301 
302    return true;
303 }
304 
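/* Walk the remaining candidates and look for the basic induction pattern: a
 * phi with one source defined outside the loop (a load_const) and one source
 * that is a two-input ALU of the phi itself and a constant. Returns true if
 * at least one basic induction variable was found.
 */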
305 static bool
306 compute_induction_information(loop_info_state *state)
307 {
308    bool found_induction_var = false;
309    list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
310                             process_link) {
311 
312       /* It can't be an induction variable if it is invariant. Invariants and
313        * things in nested loops or conditionals should have been removed from
314        * the list by compute_invariance_information().
315        */
316       assert(!var->in_if_branch && !var->in_nested_loop &&
317              var->type != invariant);
318 
319       /* We are only interested in checking phis for the basic induction
320        * variable case as it's simple to detect. All basic induction variables
321        * have a phi node.
322        */
323       if (!is_var_phi(var))
324          continue;
325 
326       nir_phi_instr *phi = nir_instr_as_phi(var->def->parent_instr);
327       nir_basic_induction_var *biv = rzalloc(state, nir_basic_induction_var);
328 
329       nir_loop_variable *alu_src_var = NULL;
330       nir_foreach_phi_src(src, phi) {
331          nir_loop_variable *src_var = get_loop_var(src->src.ssa, state);
332 
333          /* If one of the sources is in an if branch or nested loop then don't
334           * attempt to go any further.
335           */
336          if (src_var->in_if_branch || src_var->in_nested_loop)
337             break;
338 
339          /* Detect induction variables that are incremented in both branches
340           * of an unnested if rather than in a loop block.
341           */
342          if (is_var_phi(src_var)) {
343             nir_phi_instr *src_phi =
344                nir_instr_as_phi(src_var->def->parent_instr);
345             nir_alu_instr *src_phi_alu = phi_instr_as_alu(src_phi);
346             if (src_phi_alu) {
347                src_var = get_loop_var(&src_phi_alu->dest.dest.ssa, state);
348                if (!src_var->in_if_branch)
349                   break;
350             }
351          }
352 
353          if (!src_var->in_loop && !biv->def_outside_loop) {
354             biv->def_outside_loop = src_var->def;
355          } else if (is_var_alu(src_var) && !biv->alu) {
356             alu_src_var = src_var;
357             nir_alu_instr *alu = nir_instr_as_alu(src_var->def->parent_instr);
358 
359             if (nir_op_infos[alu->op].num_inputs == 2) {
360                for (unsigned i = 0; i < 2; i++) {
361                   /* Is one of the operands const and the other the phi?  The
362                    * phi source can't be swizzled in any way.
363                    */
364                   if (nir_src_is_const(alu->src[i].src) &&
365                       alu->src[1-i].src.ssa == &phi->dest.ssa &&
366                       alu_src_has_identity_swizzle(alu, 1 - i))
367                      biv->alu = alu;
368                }
369             }
370 
371             if (!biv->alu)
372                break;
373          } else {
374             biv->alu = NULL;
375             break;
376          }
377       }
378 
379       if (biv->alu && biv->def_outside_loop &&
380           biv->def_outside_loop->parent_instr->type == nir_instr_type_load_const) {
381          alu_src_var->type = basic_induction;
382          alu_src_var->ind = biv;
383          var->type = basic_induction;
384          var->ind = biv;
385 
386          found_induction_var = true;
387       } else {
388          ralloc_free(biv);
389       }
390    }
391    return found_induction_var;
392 }
393 
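/* Per-def initializer: every def starts out as being outside the loop, and
 * load_const defs are marked invariant right away.
 */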
394 static bool
395 initialize_ssa_def(nir_ssa_def *def, void *void_state)
396 {
397    loop_info_state *state = void_state;
398    nir_loop_variable *var = get_loop_var(def, state);
399 
400    var->in_loop = false;
401    var->def = def;
402 
403    if (def->parent_instr->type == nir_instr_type_load_const) {
404       var->type = invariant;
405    } else {
406       var->type = undefined;
407    }
408 
409    return true;
410 }
411 
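/* Look through the top-level ifs of the loop body for simple terminators,
 * i.e. ifs where one branch ends in a break. Returns true if at least one
 * terminator was found; marks the loop as complex and gives up if the
 * control flow is too complicated to analyze.
 */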
412 static bool
413 find_loop_terminators(loop_info_state *state)
414 {
415    bool success = false;
416    foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
417       if (node->type == nir_cf_node_if) {
418          nir_if *nif = nir_cf_node_as_if(node);
419 
420          nir_block *break_blk = NULL;
421          nir_block *continue_from_blk = NULL;
422          bool continue_from_then = true;
423 
424          nir_block *last_then = nir_if_last_then_block(nif);
425          nir_block *last_else = nir_if_last_else_block(nif);
426          if (nir_block_ends_in_break(last_then)) {
427             break_blk = last_then;
428             continue_from_blk = last_else;
429             continue_from_then = false;
430          } else if (nir_block_ends_in_break(last_else)) {
431             break_blk = last_else;
432             continue_from_blk = last_then;
433          }
434 
435          /* If there is a break then we should find a terminator. If we
436           * cannot find a loop terminator but there is a break statement,
437           * we should return false so that we do not try to find a trip-count.
438           */
439          if (!nir_is_trivial_loop_if(nif, break_blk)) {
440             state->loop->info->complex_loop = true;
441             return false;
442          }
443 
444          /* Continue if the if contained no jumps at all */
445          if (!break_blk)
446             continue;
447 
448          if (nif->condition.ssa->parent_instr->type == nir_instr_type_phi) {
449             state->loop->info->complex_loop = true;
450             return false;
451          }
452 
453          nir_loop_terminator *terminator =
454             rzalloc(state->loop->info, nir_loop_terminator);
455 
456          list_addtail(&terminator->loop_terminator_link,
457                       &state->loop->info->loop_terminator_list);
458 
459          terminator->nif = nif;
460          terminator->break_block = break_blk;
461          terminator->continue_from_block = continue_from_blk;
462          terminator->continue_from_then = continue_from_then;
463          terminator->conditional_instr = nif->condition.ssa->parent_instr;
464 
465          success = true;
466       }
467    }
468 
469    return success;
470 }
471 
472 /* This function looks for an array access within a loop that uses an
473  * induction variable for the array index. If found, it returns the size of the
474  * array; otherwise 0 is returned. If we find an induction var, we pass it back
475  * to the caller via array_index_out.
476  */
477 static unsigned
478 find_array_access_via_induction(loop_info_state *state,
479                                 nir_deref_instr *deref,
480                                 nir_loop_variable **array_index_out)
481 {
482    for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
483       if (d->deref_type != nir_deref_type_array)
484          continue;
485 
486       assert(d->arr.index.is_ssa);
487       nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);
488 
489       if (array_index->type != basic_induction)
490          continue;
491 
492       if (array_index_out)
493          *array_index_out = array_index;
494 
495       nir_deref_instr *parent = nir_deref_instr_parent(d);
496       if (glsl_type_is_array_or_matrix(parent->type)) {
497          return glsl_get_length(parent->type);
498       } else {
499          assert(glsl_type_is_vector(parent->type));
500          return glsl_get_vector_elements(parent->type);
501       }
502    }
503 
504    return 0;
505 }
506 
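/* When no constant limit is known, guess one from the smallest array in the
 * loop that is indexed by the induction variable.
 */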
507 static bool
508 guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
509                  nir_ssa_scalar basic_ind)
510 {
511    unsigned min_array_size = 0;
512 
513    nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
514       nir_foreach_instr(instr, block) {
515          if (instr->type != nir_instr_type_intrinsic)
516             continue;
517 
518          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
519 
520          /* Check for arrays variably-indexed by a loop induction variable. */
521          if (intrin->intrinsic == nir_intrinsic_load_deref ||
522              intrin->intrinsic == nir_intrinsic_store_deref ||
523              intrin->intrinsic == nir_intrinsic_copy_deref) {
524 
525             nir_loop_variable *array_idx = NULL;
526             unsigned array_size =
527                find_array_access_via_induction(state,
528                                                nir_src_as_deref(intrin->src[0]),
529                                                &array_idx);
530             if (array_idx && basic_ind.def == array_idx->def &&
531                 (min_array_size == 0 || min_array_size > array_size)) {
532                /* Array indices are scalars */
533                assert(basic_ind.def->num_components == 1);
534                min_array_size = array_size;
535             }
536 
537             if (intrin->intrinsic != nir_intrinsic_copy_deref)
538                continue;
539 
540             array_size =
541                find_array_access_via_induction(state,
542                                                nir_src_as_deref(intrin->src[1]),
543                                                &array_idx);
544             if (array_idx && basic_ind.def == array_idx->def &&
545                 (min_array_size == 0 || min_array_size > array_size)) {
546                /* Array indices are scalars */
547                assert(basic_ind.def->num_components == 1);
548                min_array_size = array_size;
549             }
550          }
551       }
552    }
553 
554    if (min_array_size) {
555       *limit_val = nir_const_value_for_uint(min_array_size,
556                                             basic_ind.def->bit_size);
557       return true;
558    }
559 
560    return false;
561 }
562 
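/* If the limit is an imin/fmin with a constant operand, use that constant as
 * a conservative limit and mark the exact trip count as unknown.
 */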
563 static bool
564 try_find_limit_of_alu(nir_ssa_scalar limit, nir_const_value *limit_val,
565                       nir_loop_terminator *terminator, loop_info_state *state)
566 {
567    if (!nir_ssa_scalar_is_alu(limit))
568       return false;
569 
570    nir_op limit_op = nir_ssa_scalar_alu_op(limit);
571    if (limit_op == nir_op_imin || limit_op == nir_op_fmin) {
572       for (unsigned i = 0; i < 2; i++) {
573          nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(limit, i);
574          if (nir_ssa_scalar_is_const(src)) {
575             *limit_val = nir_ssa_scalar_as_const_value(src);
576             terminator->exact_trip_count_unknown = true;
577             return true;
578          }
579       }
580    }
581 
582    return false;
583 }
584 
585 static nir_const_value
586 eval_const_unop(nir_op op, unsigned bit_size, nir_const_value src0,
587                 unsigned execution_mode)
588 {
589    assert(nir_op_infos[op].num_inputs == 1);
590    nir_const_value dest;
591    nir_const_value *src[1] = { &src0 };
592    nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
593    return dest;
594 }
595 
596 static nir_const_value
597 eval_const_binop(nir_op op, unsigned bit_size,
598                  nir_const_value src0, nir_const_value src1,
599                  unsigned execution_mode)
600 {
601    assert(nir_op_infos[op].num_inputs == 2);
602    nir_const_value dest;
603    nir_const_value *src[2] = { &src0, &src1 };
604    nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
605    return dest;
606 }
607 
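/* Compute (limit - initial) / step for the given comparison type, i.e. the
 * number of iterations needed for the induction variable to reach the limit.
 * Returns -1 for unsupported comparisons or counts that do not fit in an int.
 */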
608 static int32_t
609 get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step,
610               nir_const_value limit, unsigned bit_size,
611               unsigned execution_mode)
612 {
613    nir_const_value span, iter;
614 
615    switch (cond_op) {
616    case nir_op_ige:
617    case nir_op_ilt:
618    case nir_op_ieq:
619    case nir_op_ine:
620       span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
621                               execution_mode);
622       iter = eval_const_binop(nir_op_idiv, bit_size, span, step,
623                               execution_mode);
624       break;
625 
626    case nir_op_uge:
627    case nir_op_ult:
628       span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
629                               execution_mode);
630       iter = eval_const_binop(nir_op_udiv, bit_size, span, step,
631                               execution_mode);
632       break;
633 
634    case nir_op_fge:
635    case nir_op_flt:
636    case nir_op_feq:
637    case nir_op_fneu:
638       span = eval_const_binop(nir_op_fsub, bit_size, limit, initial,
639                               execution_mode);
640       iter = eval_const_binop(nir_op_fdiv, bit_size, span,
641                               step, execution_mode);
642       iter = eval_const_unop(nir_op_f2i64, bit_size, iter, execution_mode);
643       break;
644 
645    default:
646       return -1;
647    }
648 
649    uint64_t iter_u64 = nir_const_value_as_uint(iter, bit_size);
650    return iter_u64 > INT_MAX ? -1 : (int)iter_u64;
651 }
652 
653 static bool
654 will_break_on_first_iteration(nir_const_value step,
655                               nir_alu_type induction_base_type,
656                               unsigned trip_offset,
657                               nir_op cond_op, unsigned bit_size,
658                               nir_const_value initial,
659                               nir_const_value limit,
660                               bool limit_rhs, bool invert_cond,
661                               unsigned execution_mode)
662 {
663    if (trip_offset == 1) {
664       nir_op add_op;
665       switch (induction_base_type) {
666       case nir_type_float:
667          add_op = nir_op_fadd;
668          break;
669       case nir_type_int:
670       case nir_type_uint:
671          add_op = nir_op_iadd;
672          break;
673       default:
674          unreachable("Unhandled induction variable base type!");
675       }
676 
677       initial = eval_const_binop(add_op, bit_size, initial, step,
678                                  execution_mode);
679    }
680 
681    nir_const_value *src[2];
682    src[limit_rhs ? 0 : 1] = &initial;
683    src[limit_rhs ? 1 : 0] = &limit;
684 
685    /* Evaluate the loop exit condition */
686    nir_const_value result;
687    nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);
688 
689    return invert_cond ? !result.b : result.b;
690 }
691 
692 static bool
693 test_iterations(int32_t iter_int, nir_const_value step,
694                 nir_const_value limit, nir_op cond_op, unsigned bit_size,
695                 nir_alu_type induction_base_type,
696                 nir_const_value initial, bool limit_rhs, bool invert_cond,
697                 unsigned execution_mode)
698 {
699    assert(nir_op_infos[cond_op].num_inputs == 2);
700 
701    nir_const_value iter_src;
702    nir_op mul_op;
703    nir_op add_op;
704    switch (induction_base_type) {
705    case nir_type_float:
706       iter_src = nir_const_value_for_float(iter_int, bit_size);
707       mul_op = nir_op_fmul;
708       add_op = nir_op_fadd;
709       break;
710    case nir_type_int:
711    case nir_type_uint:
712       iter_src = nir_const_value_for_int(iter_int, bit_size);
713       mul_op = nir_op_imul;
714       add_op = nir_op_iadd;
715       break;
716    default:
717       unreachable("Unhandled induction variable base type!");
718    }
719 
720    /* Multiply the iteration count we are testing by the number of times we
721     * step the induction variable each iteration.
722     */
723    nir_const_value mul_result =
724       eval_const_binop(mul_op, bit_size, iter_src, step, execution_mode);
725 
726    /* Add the initial value to the accumulated induction variable total */
727    nir_const_value add_result =
728       eval_const_binop(add_op, bit_size, mul_result, initial, execution_mode);
729 
730    nir_const_value *src[2];
731    src[limit_rhs ? 0 : 1] = &add_result;
732    src[limit_rhs ? 1 : 0] = &limit;
733 
734    /* Evaluate the loop exit condition */
735    nir_const_value result;
736    nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);
737 
738    return invert_cond ? !result.b : result.b;
739 }
740 
741 static int
742 calculate_iterations(nir_const_value initial, nir_const_value step,
743                      nir_const_value limit, nir_alu_instr *alu,
744                      nir_ssa_scalar cond, nir_op alu_op, bool limit_rhs,
745                      bool invert_cond, unsigned execution_mode)
746 {
747    /* nir_op_isub should have been lowered away by this point */
748    assert(alu->op != nir_op_isub);
749 
750    /* Make sure the alu type for our induction variable is compatible with the
751     * conditional alu's input type. If it's not, something has gone really wrong.
752     */
753    nir_alu_type induction_base_type =
754       nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
755    if (induction_base_type == nir_type_int || induction_base_type == nir_type_uint) {
756       assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_int ||
757              nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_uint);
758    } else {
759       assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[0]) ==
760              induction_base_type);
761    }
762 
763    /* Check for unsupported alu operations */
764    if (alu->op != nir_op_iadd && alu->op != nir_op_fadd)
765       return -1;
766 
767    /* do-while loops can increment the starting value before the condition is
768     * checked. e.g.
769     *
770     *    do {
771     *        ndx++;
772     *     } while (ndx < 3);
773     *
774     * Here we check if the induction variable is used directly by the loop
775     * condition and if so we assume we need to step the initial value.
776     */
777    unsigned trip_offset = 0;
778    nir_alu_instr *cond_alu = nir_instr_as_alu(cond.def->parent_instr);
779    if (cond_alu->src[0].src.ssa == &alu->dest.dest.ssa ||
780        cond_alu->src[1].src.ssa == &alu->dest.dest.ssa) {
781       trip_offset = 1;
782    }
783 
784    assert(nir_src_bit_size(alu->src[0].src) ==
785           nir_src_bit_size(alu->src[1].src));
786    unsigned bit_size = nir_src_bit_size(alu->src[0].src);
787 
788    /* get_iteration works under the assumption that the iterator will be
789     * incremented or decremented until it hits the limit; however, if the
790     * loop condition is false on the first iteration, get_iteration's
791     * assumption is broken. Handle such loops first.
792     */
793    if (will_break_on_first_iteration(step, induction_base_type, trip_offset,
794                                      alu_op, bit_size, initial,
795                                      limit, limit_rhs, invert_cond,
796                                      execution_mode)) {
797       return 0;
798    }
799 
800    int iter_int = get_iteration(alu_op, initial, step, limit, bit_size,
801                                 execution_mode);
802 
803    /* If iter_int is negative the loop is ill-formed or the conditional is
804     * unsigned with a huge iteration count, so don't bother going any further.
805     */
806    if (iter_int < 0)
807       return -1;
808 
809    /* An explanation from the GLSL unrolling pass:
810     *
811     * Make sure that the calculated number of iterations satisfies the exit
812     * condition.  This is needed to catch off-by-one errors and some types of
813     * ill-formed loops.  For example, we need to detect that the following
814     * loop does not have a maximum iteration count.
815     *
816     *    for (float x = 0.0; x != 0.9; x += 0.2);
817     */
818    for (int bias = -1; bias <= 1; bias++) {
819       const int iter_bias = iter_int + bias;
820 
821       if (test_iterations(iter_bias, step, limit, alu_op, bit_size,
822                           induction_base_type, initial,
823                           limit_rhs, invert_cond, execution_mode)) {
824          return iter_bias > 0 ? iter_bias - trip_offset : iter_bias;
825       }
826    }
827 
828    return -1;
829 }
830 
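/* Return the comparison with the opposite meaning, e.g. fge <-> flt. */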
831 static nir_op
832 inverse_comparison(nir_op alu_op)
833 {
834    switch (alu_op) {
835    case nir_op_fge:
836       return nir_op_flt;
837    case nir_op_ige:
838       return nir_op_ilt;
839    case nir_op_uge:
840       return nir_op_ult;
841    case nir_op_flt:
842       return nir_op_fge;
843    case nir_op_ilt:
844       return nir_op_ige;
845    case nir_op_ult:
846       return nir_op_uge;
847    case nir_op_feq:
848       return nir_op_fneu;
849    case nir_op_ieq:
850       return nir_op_ine;
851    case nir_op_fneu:
852       return nir_op_feq;
853    case nir_op_ine:
854       return nir_op_ieq;
855    default:
856       unreachable("Unsupported comparison!");
857    }
858 }
859 
860 static bool
861 is_supported_terminator_condition(nir_ssa_scalar cond)
862 {
863    if (!nir_ssa_scalar_is_alu(cond))
864       return false;
865 
866    nir_alu_instr *alu = nir_instr_as_alu(cond.def->parent_instr);
867    return nir_alu_instr_is_comparison(alu) &&
868           nir_op_infos[alu->op].num_inputs == 2;
869 }
870 
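/* For a two-source comparison, work out which side is the basic induction
 * variable and which is the limit, and record whether the limit is the
 * right-hand source.
 */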
871 static bool
872 get_induction_and_limit_vars(nir_ssa_scalar cond,
873                              nir_ssa_scalar *ind,
874                              nir_ssa_scalar *limit,
875                              bool *limit_rhs,
876                              loop_info_state *state)
877 {
878    nir_ssa_scalar rhs, lhs;
879    lhs = nir_ssa_scalar_chase_alu_src(cond, 0);
880    rhs = nir_ssa_scalar_chase_alu_src(cond, 1);
881 
882    if (get_loop_var(lhs.def, state)->type == basic_induction) {
883       *ind = lhs;
884       *limit = rhs;
885       *limit_rhs = true;
886       return true;
887    } else if (get_loop_var(rhs.def, state)->type == basic_induction) {
888       *ind = rhs;
889       *limit = lhs;
890       *limit_rhs = false;
891       return true;
892    } else {
893       return false;
894    }
895 }
896 
897 static bool
898 try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
899                                  nir_ssa_scalar *ind,
900                                  nir_ssa_scalar *limit,
901                                  bool *limit_rhs,
902                                  loop_info_state *state)
903 {
904    const nir_op alu_op = nir_ssa_scalar_alu_op(*cond);
905    assert(alu_op == nir_op_ieq || alu_op == nir_op_inot);
906 
907    nir_ssa_scalar iand = nir_ssa_scalar_chase_alu_src(*cond, 0);
908 
909    if (alu_op == nir_op_ieq) {
910       nir_ssa_scalar zero = nir_ssa_scalar_chase_alu_src(*cond, 1);
911 
912       if (!nir_ssa_scalar_is_alu(iand) || !nir_ssa_scalar_is_const(zero)) {
913          /* Maybe we had it the wrong way; flip things around */
914          nir_ssa_scalar tmp = zero;
915          zero = iand;
916          iand = tmp;
917 
918          /* If we still didn't find what we need then return */
919          if (!nir_ssa_scalar_is_const(zero))
920             return false;
921       }
922 
923       /* If the loop is not breaking on (x && y) == 0 then return */
924       if (nir_ssa_scalar_as_uint(zero) != 0)
925          return false;
926    }
927 
928    if (!nir_ssa_scalar_is_alu(iand))
929       return false;
930 
931    if (nir_ssa_scalar_alu_op(iand) != nir_op_iand)
932       return false;
933 
934    /* Check if an iand src is a terminator condition and try to get the
935     * induction var and trip limit var.
936     */
937    bool found_induction_var = false;
938    for (unsigned i = 0; i < 2; i++) {
939       nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(iand, i);
940       if (is_supported_terminator_condition(src) &&
941           get_induction_and_limit_vars(src, ind, limit, limit_rhs, state)) {
942          *cond = src;
943          found_induction_var = true;
944 
945          /* If we've found one with a constant limit, stop. */
946          if (nir_ssa_scalar_is_const(*limit))
947             return true;
948       }
949    }
950 
951    return found_induction_var;
952 }
953 
954 /* Run through each of the terminators of the loop and try to infer a possible
955  * trip-count. We need to check them all, and set the lowest trip-count as the
956  * trip-count of our loop. If one of the terminators has an undecidable
957  * trip-count we cannot safely assume anything about the duration of the
958  * loop.
959  */
960 static void
961 find_trip_count(loop_info_state *state, unsigned execution_mode)
962 {
963    bool trip_count_known = true;
964    bool guessed_trip_count = false;
965    nir_loop_terminator *limiting_terminator = NULL;
966    int max_trip_count = -1;
967 
968    list_for_each_entry(nir_loop_terminator, terminator,
969                        &state->loop->info->loop_terminator_list,
970                        loop_terminator_link) {
971       assert(terminator->nif->condition.is_ssa);
972       nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };
973 
974       if (!nir_ssa_scalar_is_alu(cond)) {
975          /* If we get here the loop is dead and will get cleaned up by the
976           * nir_opt_dead_cf pass.
977           */
978          trip_count_known = false;
979          continue;
980       }
981 
982       nir_op alu_op = nir_ssa_scalar_alu_op(cond);
983 
984       bool limit_rhs;
985       nir_ssa_scalar basic_ind = { NULL, 0 };
986       nir_ssa_scalar limit;
987       if ((alu_op == nir_op_inot || alu_op == nir_op_ieq) &&
988           try_find_trip_count_vars_in_iand(&cond, &basic_ind, &limit,
989                                            &limit_rhs, state)) {
990 
991          /* The loop is exiting on (x && y) == 0 so we need to get the
992           * inverse of x or y (i.e. whichever contained the induction var) in
993           * order to compute the trip count.
994           */
995          alu_op = inverse_comparison(nir_ssa_scalar_alu_op(cond));
996          trip_count_known = false;
997          terminator->exact_trip_count_unknown = true;
998       }
999 
1000       if (!basic_ind.def) {
1001          if (is_supported_terminator_condition(cond)) {
1002             get_induction_and_limit_vars(cond, &basic_ind,
1003                                          &limit, &limit_rhs, state);
1004          }
1005       }
1006 
1007       /* The comparison has to have a basic induction variable for us to be
1008        * able to find trip counts.
1009        */
1010       if (!basic_ind.def) {
1011          trip_count_known = false;
1012          continue;
1013       }
1014 
1015       terminator->induction_rhs = !limit_rhs;
1016 
1017       /* Attempt to find a constant limit for the loop */
1018       nir_const_value limit_val;
1019       if (nir_ssa_scalar_is_const(limit)) {
1020          limit_val = nir_ssa_scalar_as_const_value(limit);
1021       } else {
1022          trip_count_known = false;
1023 
1024          if (!try_find_limit_of_alu(limit, &limit_val, terminator, state)) {
1025             /* Guess loop limit based on array access */
1026             if (!guess_loop_limit(state, &limit_val, basic_ind)) {
1027                continue;
1028             }
1029 
1030             guessed_trip_count = true;
1031          }
1032       }
1033 
1034       /* We have determined that we have the following constants:
1035        * (With the typical int i = 0; i < x; i++; as an example)
1036        *    - Upper limit.
1037        *    - Starting value
1038        *    - Step / iteration size
1039        * That's all that's needed to calculate the trip-count.
1040        */
1041 
1042       nir_basic_induction_var *ind_var =
1043          get_loop_var(basic_ind.def, state)->ind;
1044 
1045       /* The basic induction var might be a vector but, because we guarantee
1046        * earlier that the phi source has a scalar swizzle, we can take the
1047        * component from basic_ind.
1048        */
1049       nir_ssa_scalar initial_s = { ind_var->def_outside_loop, basic_ind.comp };
1050       nir_ssa_scalar alu_s = { &ind_var->alu->dest.dest.ssa, basic_ind.comp };
1051 
1052       nir_const_value initial_val = nir_ssa_scalar_as_const_value(initial_s);
1053 
1054       /* We are guaranteed by earlier code that at least one of these sources
1055        * is a constant but we don't know which.
1056        */
1057       nir_const_value step_val;
1058       memset(&step_val, 0, sizeof(step_val));
1059       UNUSED bool found_step_value = false;
1060       assert(nir_op_infos[ind_var->alu->op].num_inputs == 2);
1061       for (unsigned i = 0; i < 2; i++) {
1062          nir_ssa_scalar alu_src = nir_ssa_scalar_chase_alu_src(alu_s, i);
1063          if (nir_ssa_scalar_is_const(alu_src)) {
1064             found_step_value = true;
1065             step_val = nir_ssa_scalar_as_const_value(alu_src);
1066             break;
1067          }
1068       }
1069       assert(found_step_value);
1070 
1071       int iterations = calculate_iterations(initial_val, step_val, limit_val,
1072                                             ind_var->alu, cond,
1073                                             alu_op, limit_rhs,
1074                                             terminator->continue_from_then,
1075                                             execution_mode);
1076 
1077       /* If we were not able to calculate the iteration count */
1078       if (iterations == -1) {
1079          trip_count_known = false;
1080          guessed_trip_count = false;
1081          continue;
1082       }
1083 
1084       if (guessed_trip_count) {
1085          guessed_trip_count = false;
1086          if (state->loop->info->guessed_trip_count == 0 ||
1087              state->loop->info->guessed_trip_count > iterations)
1088             state->loop->info->guessed_trip_count = iterations;
1089 
1090          continue;
1091       }
1092 
1093       /* If this is the first run, or we have found a smaller number of
1094        * iterations than previously (i.e. we have identified a more limiting
1095        * terminator), set the trip count and limiting terminator.
1096        */
1097       if (max_trip_count == -1 || iterations < max_trip_count) {
1098          max_trip_count = iterations;
1099          limiting_terminator = terminator;
1100       }
1101    }
1102 
1103    state->loop->info->exact_trip_count_known = trip_count_known;
1104    if (max_trip_count > -1)
1105       state->loop->info->max_trip_count = max_trip_count;
1106    state->loop->info->limiting_terminator = limiting_terminator;
1107 }
1108 
1109 static bool
1110 force_unroll_array_access(loop_info_state *state, nir_deref_instr *deref)
1111 {
1112    unsigned array_size = find_array_access_via_induction(state, deref, NULL);
1113    if (array_size) {
1114       if ((array_size == state->loop->info->max_trip_count) &&
1115           nir_deref_mode_must_be(deref, nir_var_shader_in |
1116                                         nir_var_shader_out |
1117                                         nir_var_shader_temp |
1118                                         nir_var_function_temp))
1119          return true;
1120 
1121       if (nir_deref_mode_must_be(deref, state->indirect_mask))
1122          return true;
1123    }
1124 
1125    return false;
1126 }
1127 
1128 static bool
1129 force_unroll_heuristics(loop_info_state *state, nir_block *block)
1130 {
1131    nir_foreach_instr(instr, block) {
1132       if (instr->type != nir_instr_type_intrinsic)
1133          continue;
1134 
1135       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1136 
1137       /* Check for arrays variably-indexed by a loop induction variable.
1138        * Unrolling the loop may convert that access into constant-indexing.
1139        */
1140       if (intrin->intrinsic == nir_intrinsic_load_deref ||
1141           intrin->intrinsic == nir_intrinsic_store_deref ||
1142           intrin->intrinsic == nir_intrinsic_copy_deref) {
1143          if (force_unroll_array_access(state,
1144                                        nir_src_as_deref(intrin->src[0])))
1145             return true;
1146 
1147          if (intrin->intrinsic == nir_intrinsic_copy_deref &&
1148              force_unroll_array_access(state,
1149                                        nir_src_as_deref(intrin->src[1])))
1150             return true;
1151       }
1152    }
1153 
1154    return false;
1155 }
1156 
1157 static void
1158 get_loop_info(loop_info_state *state, nir_function_impl *impl)
1159 {
1160    nir_shader *shader = impl->function->shader;
1161    const nir_shader_compiler_options *options = shader->options;
1162 
1163    /* Initialize all variables to "outside_loop". This also marks defs
1164     * invariant and constant if they are nir_instr_type_load_const.
1165     */
1166    nir_foreach_block(block, impl) {
1167       nir_foreach_instr(instr, block)
1168          nir_foreach_ssa_def(instr, initialize_ssa_def, state);
1169    }
1170 
1171    /* Add all entries in the outermost part of the loop to the processing list.
1172     * Mark the entries in conditionals or in nested loops accordingly.
1173     */
1174    foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
1175       switch (node->type) {
1176 
1177       case nir_cf_node_block:
1178          init_loop_block(nir_cf_node_as_block(node), state,
1179                          false, false, options);
1180          break;
1181 
1182       case nir_cf_node_if:
1183          nir_foreach_block_in_cf_node(block, node)
1184             init_loop_block(block, state, true, false, options);
1185          break;
1186 
1187       case nir_cf_node_loop:
1188          nir_foreach_block_in_cf_node(block, node) {
1189             init_loop_block(block, state, false, true, options);
1190          }
1191          break;
1192 
1193       case nir_cf_node_function:
1194          break;
1195       }
1196    }
1197 
1198    /* Try to find all simple terminators of the loop. If we can't find any,
1199     * or we find possible terminators that have side effects, then bail.
1200     */
1201    if (!find_loop_terminators(state)) {
1202       list_for_each_entry_safe(nir_loop_terminator, terminator,
1203                                &state->loop->info->loop_terminator_list,
1204                                loop_terminator_link) {
1205          list_del(&terminator->loop_terminator_link);
1206          ralloc_free(terminator);
1207       }
1208       return;
1209    }
1210 
1211    /* Induction analysis needs invariance information so get that first */
1212    compute_invariance_information(state);
1213 
1214    /* We have invariance information so try to find induction variables */
1215    if (!compute_induction_information(state))
1216       return;
1217 
1218    /* Run through each of the terminators and try to compute a trip-count */
1219    find_trip_count(state, impl->function->shader->info.float_controls_execution_mode);
1220 
1221    nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
1222       if (force_unroll_heuristics(state, block)) {
1223          state->loop->info->force_unroll = true;
1224          break;
1225       }
1226    }
1227 }
1228 
1229 static loop_info_state *
1230 initialize_loop_info_state(nir_loop *loop, void *mem_ctx,
1231                            nir_function_impl *impl)
1232 {
1233    loop_info_state *state = rzalloc(mem_ctx, loop_info_state);
1234    state->loop_vars = rzalloc_array(mem_ctx, nir_loop_variable,
1235                                     impl->ssa_alloc);
1236    state->loop = loop;
1237 
1238    list_inithead(&state->process_list);
1239 
1240    if (loop->info)
1241      ralloc_free(loop->info);
1242 
1243    loop->info = rzalloc(loop, nir_loop_info);
1244 
1245    list_inithead(&loop->info->loop_terminator_list);
1246 
1247    return state;
1248 }
1249 
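/* Walk the control-flow tree, recursing into nested control flow and running
 * the loop analysis on every loop that is found, innermost loops first.
 */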
1250 static void
1251 process_loops(nir_cf_node *cf_node, nir_variable_mode indirect_mask)
1252 {
1253    switch (cf_node->type) {
1254    case nir_cf_node_block:
1255       return;
1256    case nir_cf_node_if: {
1257       nir_if *if_stmt = nir_cf_node_as_if(cf_node);
1258       foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list)
1259          process_loops(nested_node, indirect_mask);
1260       foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list)
1261          process_loops(nested_node, indirect_mask);
1262       return;
1263    }
1264    case nir_cf_node_loop: {
1265       nir_loop *loop = nir_cf_node_as_loop(cf_node);
1266       foreach_list_typed(nir_cf_node, nested_node, node, &loop->body)
1267          process_loops(nested_node, indirect_mask);
1268       break;
1269    }
1270    default:
1271       unreachable("unknown cf node type");
1272    }
1273 
1274    nir_loop *loop = nir_cf_node_as_loop(cf_node);
1275    nir_function_impl *impl = nir_cf_node_get_function(cf_node);
1276    void *mem_ctx = ralloc_context(NULL);
1277 
1278    loop_info_state *state = initialize_loop_info_state(loop, mem_ctx, impl);
1279    state->indirect_mask = indirect_mask;
1280 
1281    get_loop_info(state, impl);
1282 
1283    ralloc_free(mem_ctx);
1284 }
1285 
1286 void
1287 nir_loop_analyze_impl(nir_function_impl *impl,
1288                       nir_variable_mode indirect_mask)
1289 {
1290    nir_index_ssa_defs(impl);
1291    foreach_list_typed(nir_cf_node, node, node, &impl->body)
1292       process_loops(node, indirect_mask);
1293 }
1294