1 /*
2 * Copyright © 2015 Thomas Helland
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_constant_expressions.h"
26 #include "nir_loop_analyze.h"
27 #include "util/bitset.h"
28
/* Classification of an ssa_def relative to the loop under analysis. */
typedef enum {
   undefined,       /* Not yet classified */
   invariant,       /* Value does not change across iterations of the loop */
   not_invariant,   /* Value may change across iterations */
   basic_induction  /* phi + iadd/fadd update pair (see
                     * compute_induction_information) */
} nir_loop_variable_type;

/* The two halves of a basic induction variable: the in-loop update ALU and
 * the value the phi takes from outside the loop.
 */
typedef struct nir_basic_induction_var {
   nir_alu_instr *alu;                      /* The def of the alu-operation */
   nir_ssa_def *def_outside_loop;           /* The phi-src outside the loop */
} nir_basic_induction_var;

/* Per-ssa_def analysis record; one exists for every def in the function
 * (see loop_info_state::loop_vars), lazily initialized by get_loop_var().
 */
typedef struct {
   /* A link for the work list */
   struct list_head process_link;

   /* True if the def is inside the loop body (set by init_loop_def) */
   bool in_loop;

   /* The ssa_def associated with this info */
   nir_ssa_def *def;

   /* The type of this ssa_def */
   nir_loop_variable_type type;

   /* If this is of type basic_induction */
   struct nir_basic_induction_var *ind;

   /* True if variable is in an if branch */
   bool in_if_branch;

   /* True if variable is in a nested loop */
   bool in_nested_loop;

   /* Could be a basic_induction if following uniforms are inlined */
   nir_src *init_src;      /* Initial value of the would-be induction var */
   nir_alu_src *update_src; /* Uniform/const operand of the update ALU */
} nir_loop_variable;

/* Top-level analysis context for a single loop. */
typedef struct {
   /* The loop we store information for */
   nir_loop *loop;

   /* Loop_variable for all ssa_defs in function */
   nir_loop_variable *loop_vars;
   /* Bitset tracking which entries of loop_vars have been initialized */
   BITSET_WORD *loop_vars_init;

   /* A list of the loop_vars to analyze */
   struct list_head process_list;

   /* Modes for which indirect access is allowed — presumably affects
    * force-unroll heuristics elsewhere in the file; not used in this chunk.
    */
   nir_variable_mode indirect_mask;

   bool force_unroll_sampler_indirect;
} loop_info_state;
82
83 static nir_loop_variable *
get_loop_var(nir_ssa_def * value,loop_info_state * state)84 get_loop_var(nir_ssa_def *value, loop_info_state *state)
85 {
86 nir_loop_variable *var = &(state->loop_vars[value->index]);
87
88 if (!BITSET_TEST(state->loop_vars_init, value->index)) {
89 var->in_loop = false;
90 var->def = value;
91 var->in_if_branch = false;
92 var->in_nested_loop = false;
93 var->init_src = NULL;
94 var->update_src = NULL;
95 if (value->parent_instr->type == nir_instr_type_load_const)
96 var->type = invariant;
97 else
98 var->type = undefined;
99
100 BITSET_SET(state->loop_vars_init, value->index);
101 }
102
103 return var;
104 }
105
/* Context threaded through nir_foreach_ssa_def() by init_loop_block(),
 * recording where in the loop's control flow the current block sits.
 */
typedef struct {
   loop_info_state *state;
   bool in_if_branch;   /* Current block is inside an if inside the loop */
   bool in_nested_loop; /* Current block is inside a nested loop */
} init_loop_state;
111
112 static bool
init_loop_def(nir_ssa_def * def,void * void_init_loop_state)113 init_loop_def(nir_ssa_def *def, void *void_init_loop_state)
114 {
115 init_loop_state *loop_init_state = void_init_loop_state;
116 nir_loop_variable *var = get_loop_var(def, loop_init_state->state);
117
118 if (loop_init_state->in_nested_loop) {
119 var->in_nested_loop = true;
120 } else if (loop_init_state->in_if_branch) {
121 var->in_if_branch = true;
122 } else {
123 /* Add to the tail of the list. That way we start at the beginning of
124 * the defs in the loop instead of the end when walking the list. This
125 * means less recursive calls. Only add defs that are not in nested
126 * loops or conditional blocks.
127 */
128 list_addtail(&var->process_link, &loop_init_state->state->process_list);
129 }
130
131 var->in_loop = true;
132
133 return true;
134 }
135
136 /** Calculate an estimated cost in number of instructions
137 *
138 * We do this so that we don't unroll loops which will later get massively
139 * inflated due to int64 or fp64 lowering. The estimates provided here don't
140 * have to be massively accurate; they just have to be good enough that loop
141 * unrolling doesn't cause things to blow up too much.
142 */
143 static unsigned
instr_cost(nir_instr * instr,const nir_shader_compiler_options * options)144 instr_cost(nir_instr *instr, const nir_shader_compiler_options *options)
145 {
146 if (instr->type == nir_instr_type_intrinsic ||
147 instr->type == nir_instr_type_tex)
148 return 1;
149
150 if (instr->type != nir_instr_type_alu)
151 return 0;
152
153 nir_alu_instr *alu = nir_instr_as_alu(instr);
154 const nir_op_info *info = &nir_op_infos[alu->op];
155 unsigned cost = 1;
156
157 if (alu->op == nir_op_flrp) {
158 if ((options->lower_flrp16 && nir_dest_bit_size(alu->dest.dest) == 16) ||
159 (options->lower_flrp32 && nir_dest_bit_size(alu->dest.dest) == 32) ||
160 (options->lower_flrp64 && nir_dest_bit_size(alu->dest.dest) == 64))
161 cost *= 3;
162 }
163
164 /* Assume everything 16 or 32-bit is cheap.
165 *
166 * There are no 64-bit ops that don't have a 64-bit thing as their
167 * destination or first source.
168 */
169 if (nir_dest_bit_size(alu->dest.dest) < 64 &&
170 nir_src_bit_size(alu->src[0].src) < 64)
171 return cost;
172
173 bool is_fp64 = nir_dest_bit_size(alu->dest.dest) == 64 &&
174 nir_alu_type_get_base_type(info->output_type) == nir_type_float;
175 for (unsigned i = 0; i < info->num_inputs; i++) {
176 if (nir_src_bit_size(alu->src[i].src) == 64 &&
177 nir_alu_type_get_base_type(info->input_types[i]) == nir_type_float)
178 is_fp64 = true;
179 }
180
181 if (is_fp64) {
182 /* If it's something lowered normally, it's expensive. */
183 if (options->lower_doubles_options &
184 nir_lower_doubles_op_to_options_mask(alu->op))
185 cost *= 20;
186
187 /* If it's full software, it's even more expensive */
188 if (options->lower_doubles_options & nir_lower_fp64_full_software)
189 cost *= 100;
190
191 return cost;
192 } else {
193 if (options->lower_int64_options &
194 nir_lower_int64_op_to_options_mask(alu->op)) {
195 /* These require a doing the division algorithm. */
196 if (alu->op == nir_op_idiv || alu->op == nir_op_udiv ||
197 alu->op == nir_op_imod || alu->op == nir_op_umod ||
198 alu->op == nir_op_irem)
199 return cost * 100;
200
201 /* Other int64 lowering isn't usually all that expensive */
202 return cost * 5;
203 }
204
205 return cost;
206 }
207 }
208
209 static bool
init_loop_block(nir_block * block,loop_info_state * state,bool in_if_branch,bool in_nested_loop,const nir_shader_compiler_options * options)210 init_loop_block(nir_block *block, loop_info_state *state,
211 bool in_if_branch, bool in_nested_loop,
212 const nir_shader_compiler_options *options)
213 {
214 init_loop_state init_state = {.in_if_branch = in_if_branch,
215 .in_nested_loop = in_nested_loop,
216 .state = state };
217
218 nir_foreach_instr(instr, block) {
219 state->loop->info->instr_cost += instr_cost(instr, options);
220 nir_foreach_ssa_def(instr, init_loop_def, &init_state);
221 }
222
223 return true;
224 }
225
226 static inline bool
is_var_alu(nir_loop_variable * var)227 is_var_alu(nir_loop_variable *var)
228 {
229 return var->def->parent_instr->type == nir_instr_type_alu;
230 }
231
232 static inline bool
is_var_phi(nir_loop_variable * var)233 is_var_phi(nir_loop_variable *var)
234 {
235 return var->def->parent_instr->type == nir_instr_type_phi;
236 }
237
/* Recursively classify @def as invariant or not_invariant, caching the
 * result in its nir_loop_variable. Returns true iff the def is invariant.
 */
static inline bool
mark_invariant(nir_ssa_def *def, loop_info_state *state)
{
   nir_loop_variable *var = get_loop_var(def, state);

   /* Already decided (includes load_consts, pre-marked in get_loop_var). */
   if (var->type == invariant)
      return true;

   /* Defined outside the loop: invariant by definition. */
   if (!var->in_loop) {
      var->type = invariant;
      return true;
   }

   if (var->type == not_invariant)
      return false;

   if (is_var_alu(var)) {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);

      /* An ALU result is invariant iff every source is invariant. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!mark_invariant(alu->src[i].src.ssa, state)) {
            var->type = not_invariant;
            return false;
         }
      }
      var->type = invariant;
      return true;
   }

   /* Phis shouldn't be invariant except if one operand is invariant, and the
    * other is the phi itself. These should be removed by opt_remove_phis.
    * load_consts are already set to invariant and constant during init,
    * and so should return earlier. Remaining op_codes are set undefined.
    */
   var->type = not_invariant;
   return false;
}
275
/* Classify every queued loop variable as invariant or not; variables that
 * settle as invariant are dropped from the work list so later induction
 * analysis only sees candidates.
 */
static void
compute_invariance_information(loop_info_state *state)
{
   /* An expression is invariant in a loop L if:
    * (base cases)
    * – it’s a constant
    * – it’s a variable use, all of whose single defs are outside of L
    * (inductive cases)
    * – it’s a pure computation all of whose args are loop invariant
    * – it’s a variable use whose single reaching def, and the
    * rhs of that def is loop-invariant
    */
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {
      /* Only defs outside ifs/nested loops were queued by init_loop_def. */
      assert(!var->in_if_branch && !var->in_nested_loop);

      if (mark_invariant(var->def, state))
         list_del(&var->process_link);
   }
}
296
297 /* If all of the instruction sources point to identical ALU instructions (as
298 * per nir_instrs_equal), return one of the ALU instructions. Otherwise,
299 * return NULL.
300 */
301 static nir_alu_instr *
phi_instr_as_alu(nir_phi_instr * phi)302 phi_instr_as_alu(nir_phi_instr *phi)
303 {
304 nir_alu_instr *first = NULL;
305 nir_foreach_phi_src(src, phi) {
306 assert(src->src.is_ssa);
307 if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
308 return NULL;
309
310 nir_alu_instr *alu = nir_instr_as_alu(src->src.ssa->parent_instr);
311 if (first == NULL) {
312 first = alu;
313 } else {
314 if (!nir_instrs_equal(&first->instr, &alu->instr))
315 return NULL;
316 }
317 }
318
319 return first;
320 }
321
322 static bool
alu_src_has_identity_swizzle(nir_alu_instr * alu,unsigned src_idx)323 alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
324 {
325 assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
326 assert(alu->dest.dest.is_ssa);
327 for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
328 if (alu->src[src_idx].swizzle[i] != i)
329 return false;
330 }
331
332 return true;
333 }
334
335 static bool
is_only_uniform_src(nir_src * src)336 is_only_uniform_src(nir_src *src)
337 {
338 if (!src->is_ssa)
339 return false;
340
341 nir_instr *instr = src->ssa->parent_instr;
342
343 switch (instr->type) {
344 case nir_instr_type_alu: {
345 /* Return true if all sources return true. */
346 nir_alu_instr *alu = nir_instr_as_alu(instr);
347 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
348 if (!is_only_uniform_src(&alu->src[i].src))
349 return false;
350 }
351 return true;
352 }
353
354 case nir_instr_type_intrinsic: {
355 nir_intrinsic_instr *inst = nir_instr_as_intrinsic(instr);
356 /* current uniform inline only support load ubo */
357 return inst->intrinsic == nir_intrinsic_load_ubo;
358 }
359
360 case nir_instr_type_load_const:
361 /* Always return true for constants. */
362 return true;
363
364 default:
365 return false;
366 }
367 }
368
/* Scan the remaining (non-invariant) work-list entries for basic induction
 * variables: a phi with one source from outside the loop (the initial
 * value) and one in-loop iadd/fadd of the phi with a constant or
 * uniform-only step.  Found pairs are recorded in state->loop->info.
 * Returns true if at least one fully-constant induction variable was found.
 */
static bool
compute_induction_information(loop_info_state *state)
{
   bool found_induction_var = false;
   unsigned num_induction_vars = 0;

   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {

      /* It can't be an induction variable if it is invariant. Invariants and
       * things in nested loops or conditionals should have been removed from
       * the list by compute_invariance_information().
       */
      assert(!var->in_if_branch && !var->in_nested_loop &&
             var->type != invariant);

      /* We are only interested in checking phis for the basic induction
       * variable case as its simple to detect. All basic induction variables
       * have a phi node
       */
      if (!is_var_phi(var))
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(var->def->parent_instr);
      /* biv is ralloc'd off state and freed below on every bail-out path. */
      nir_basic_induction_var *biv = rzalloc(state, nir_basic_induction_var);

      nir_src *init_src = NULL;
      nir_loop_variable *alu_src_var = NULL;
      nir_foreach_phi_src(src, phi) {
         nir_loop_variable *src_var = get_loop_var(src->src.ssa, state);

         /* If one of the sources is in an if branch or nested loop then don't
          * attempt to go any further.
          */
         if (src_var->in_if_branch || src_var->in_nested_loop)
            break;

         /* Detect inductions variables that are incremented in both branches
          * of an unnested if rather than in a loop block.
          */
         if (is_var_phi(src_var)) {
            nir_phi_instr *src_phi =
               nir_instr_as_phi(src_var->def->parent_instr);
            nir_alu_instr *src_phi_alu = phi_instr_as_alu(src_phi);
            if (src_phi_alu) {
               src_var = get_loop_var(&src_phi_alu->dest.dest.ssa, state);
               if (!src_var->in_if_branch)
                  break;
            }
         }

         /* First source from outside the loop is the initial value. */
         if (!src_var->in_loop && !biv->def_outside_loop) {
            biv->def_outside_loop = src_var->def;
            init_src = &src->src;
         } else if (is_var_alu(src_var) && !biv->alu) {
            alu_src_var = src_var;
            nir_alu_instr *alu = nir_instr_as_alu(src_var->def->parent_instr);

            /* Check for unsupported alu operations */
            if (alu->op != nir_op_iadd && alu->op != nir_op_fadd)
               break;

            if (nir_op_infos[alu->op].num_inputs == 2) {
               for (unsigned i = 0; i < 2; i++) {
                  /* Is one of the operands const or uniform, and the other the phi.
                   * The phi source can't be swizzled in any way.
                   */
                  if (alu->src[1-i].src.ssa == &phi->dest.ssa &&
                      alu_src_has_identity_swizzle(alu, 1 - i)) {
                     nir_src *src = &alu->src[i].src;
                     if (nir_src_is_const(*src))
                        biv->alu = alu;
                     else if (is_only_uniform_src(src)) {
                        /* Update value of induction variable is a statement
                         * contains only uniform and constant
                         */
                        var->update_src = alu->src + i;
                        biv->alu = alu;
                     }
                  }
               }
            }

            if (!biv->alu)
               break;
         } else {
            biv->alu = NULL;
            break;
         }
      }

      if (biv->alu && biv->def_outside_loop) {
         nir_instr *inst = biv->def_outside_loop->parent_instr;
         if (inst->type == nir_instr_type_load_const) {
            /* Initial value of induction variable is a constant */
            if (var->update_src) {
               /* Const init but uniform step: only record the pair, the
                * variable is not usable for trip counting yet.
                */
               alu_src_var->update_src = var->update_src;
               ralloc_free(biv);
            } else {
               /* Fully constant: mark both phi and update ALU as induction. */
               alu_src_var->type = basic_induction;
               alu_src_var->ind = biv;
               var->type = basic_induction;
               var->ind = biv;

               found_induction_var = true;
            }
            num_induction_vars += 2;
         } else if (is_only_uniform_src(init_src)) {
            /* Initial value of induction variable is a uniform */
            var->init_src = init_src;

            alu_src_var->init_src = var->init_src;
            alu_src_var->update_src = var->update_src;

            num_induction_vars += 2;
            ralloc_free(biv);
         } else {
            var->update_src = NULL;
            ralloc_free(biv);
         }
      } else {
         var->update_src = NULL;
         ralloc_free(biv);
      }
   }

   nir_loop_info *info = state->loop->info;
   ralloc_free(info->induction_vars);
   info->num_induction_vars = 0;

   /* record induction variables into nir_loop_info */
   if (num_induction_vars) {
      info->induction_vars = ralloc_array(info, nir_loop_induction_variable,
                                          num_induction_vars);

      list_for_each_entry(nir_loop_variable, var, &state->process_list,
                          process_link) {
         if (var->type == basic_induction || var->init_src || var->update_src) {
            nir_loop_induction_variable *ivar =
               &info->induction_vars[info->num_induction_vars++];
            ivar->def = var->def;
            ivar->init_src = var->init_src;
            ivar->update_src = var->update_src;
         }
      }
      /* don't overflow */
      assert(info->num_induction_vars <= num_induction_vars);
   }

   return found_induction_var;
}
520
/* Collect the loop's terminators: top-level ifs whose then- or else-branch
 * ends in a break.  Returns true if at least one terminator was found;
 * returns false (and flags the loop complex) when a non-trivial break or a
 * phi-based condition makes trip counting impossible.
 */
static bool
find_loop_terminators(loop_info_state *state)
{
   bool success = false;
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      if (node->type == nir_cf_node_if) {
         nir_if *nif = nir_cf_node_as_if(node);

         nir_block *break_blk = NULL;
         nir_block *continue_from_blk = NULL;
         bool continue_from_then = true;

         /* Work out which branch breaks out of the loop, if either. */
         nir_block *last_then = nir_if_last_then_block(nif);
         nir_block *last_else = nir_if_last_else_block(nif);
         if (nir_block_ends_in_break(last_then)) {
            break_blk = last_then;
            continue_from_blk = last_else;
            continue_from_then = false;
         } else if (nir_block_ends_in_break(last_else)) {
            break_blk = last_else;
            continue_from_blk = last_then;
         }

         /* If there is a break then we should find a terminator. If we can
          * not find a loop terminator, but there is a break-statement then
          * we should return false so that we do not try to find trip-count
          */
         if (!nir_is_trivial_loop_if(nif, break_blk)) {
            state->loop->info->complex_loop = true;
            return false;
         }

         /* Continue if the if contained no jumps at all */
         if (!break_blk)
            continue;

         /* A phi condition can't be chased back to an induction variable. */
         if (nif->condition.ssa->parent_instr->type == nir_instr_type_phi) {
            state->loop->info->complex_loop = true;
            return false;
         }

         nir_loop_terminator *terminator =
            rzalloc(state->loop->info, nir_loop_terminator);

         list_addtail(&terminator->loop_terminator_link,
                      &state->loop->info->loop_terminator_list);

         terminator->nif = nif;
         terminator->break_block = break_blk;
         terminator->continue_from_block = continue_from_blk;
         terminator->continue_from_then = continue_from_then;
         terminator->conditional_instr = nif->condition.ssa->parent_instr;

         success = true;
      }
   }

   return success;
}
580
/* This function looks for an array access within a loop that uses an
 * induction variable for the array index. If found it returns the size of the
 * array, otherwise 0 is returned. If we find an induction var we pass it back
 * to the caller via array_index_out.
 */
static unsigned
find_array_access_via_induction(loop_info_state *state,
                                nir_deref_instr *deref,
                                nir_loop_variable **array_index_out)
{
   /* Walk up the deref chain looking for array derefs. */
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      assert(d->arr.index.is_ssa);
      nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);

      if (array_index->type != basic_induction)
         continue;

      if (array_index_out)
         *array_index_out = array_index;

      /* The parent deref carries the type being indexed into. */
      nir_deref_instr *parent = nir_deref_instr_parent(d);

      if (glsl_type_is_array_or_matrix(parent->type)) {
         return glsl_get_length(parent->type);
      } else {
         assert(glsl_type_is_vector(parent->type));
         return glsl_get_vector_elements(parent->type);
      }
   }

   return 0;
}
616
617 static bool
guess_loop_limit(loop_info_state * state,nir_const_value * limit_val,nir_ssa_scalar basic_ind)618 guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
619 nir_ssa_scalar basic_ind)
620 {
621 unsigned min_array_size = 0;
622
623 nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
624 nir_foreach_instr(instr, block) {
625 if (instr->type != nir_instr_type_intrinsic)
626 continue;
627
628 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
629
630 /* Check for arrays variably-indexed by a loop induction variable. */
631 if (intrin->intrinsic == nir_intrinsic_load_deref ||
632 intrin->intrinsic == nir_intrinsic_store_deref ||
633 intrin->intrinsic == nir_intrinsic_copy_deref) {
634
635 nir_loop_variable *array_idx = NULL;
636 unsigned array_size =
637 find_array_access_via_induction(state,
638 nir_src_as_deref(intrin->src[0]),
639 &array_idx);
640 if (array_idx && basic_ind.def == array_idx->def &&
641 (min_array_size == 0 || min_array_size > array_size)) {
642 /* Array indices are scalars */
643 assert(basic_ind.def->num_components == 1);
644 min_array_size = array_size;
645 }
646
647 if (intrin->intrinsic != nir_intrinsic_copy_deref)
648 continue;
649
650 array_size =
651 find_array_access_via_induction(state,
652 nir_src_as_deref(intrin->src[1]),
653 &array_idx);
654 if (array_idx && basic_ind.def == array_idx->def &&
655 (min_array_size == 0 || min_array_size > array_size)) {
656 /* Array indices are scalars */
657 assert(basic_ind.def->num_components == 1);
658 min_array_size = array_size;
659 }
660 }
661 }
662 }
663
664 if (min_array_size) {
665 *limit_val = nir_const_value_for_uint(min_array_size,
666 basic_ind.def->bit_size);
667 return true;
668 }
669
670 return false;
671 }
672
673 static bool
try_find_limit_of_alu(nir_ssa_scalar limit,nir_const_value * limit_val,nir_loop_terminator * terminator,loop_info_state * state)674 try_find_limit_of_alu(nir_ssa_scalar limit, nir_const_value *limit_val,
675 nir_loop_terminator *terminator, loop_info_state *state)
676 {
677 if (!nir_ssa_scalar_is_alu(limit))
678 return false;
679
680 nir_op limit_op = nir_ssa_scalar_alu_op(limit);
681 if (limit_op == nir_op_imin || limit_op == nir_op_fmin) {
682 for (unsigned i = 0; i < 2; i++) {
683 nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(limit, i);
684 if (nir_ssa_scalar_is_const(src)) {
685 *limit_val = nir_ssa_scalar_as_const_value(src);
686 terminator->exact_trip_count_unknown = true;
687 return true;
688 }
689 }
690 }
691
692 return false;
693 }
694
695 static nir_const_value
eval_const_unop(nir_op op,unsigned bit_size,nir_const_value src0,unsigned execution_mode)696 eval_const_unop(nir_op op, unsigned bit_size, nir_const_value src0,
697 unsigned execution_mode)
698 {
699 assert(nir_op_infos[op].num_inputs == 1);
700 nir_const_value dest;
701 nir_const_value *src[1] = { &src0 };
702 nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
703 return dest;
704 }
705
706 static nir_const_value
eval_const_binop(nir_op op,unsigned bit_size,nir_const_value src0,nir_const_value src1,unsigned execution_mode)707 eval_const_binop(nir_op op, unsigned bit_size,
708 nir_const_value src0, nir_const_value src1,
709 unsigned execution_mode)
710 {
711 assert(nir_op_infos[op].num_inputs == 2);
712 nir_const_value dest;
713 nir_const_value *src[2] = { &src0, &src1 };
714 nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
715 return dest;
716 }
717
/* Estimate the iteration count as (limit - initial) / step, using integer,
 * unsigned, or float arithmetic depending on the comparison op.  Returns -1
 * for unhandled comparison ops or counts that don't fit in an int.
 */
static int32_t
get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step,
              nir_const_value limit, unsigned bit_size,
              unsigned execution_mode)
{
   nir_const_value span, iter;

   switch (cond_op) {
   case nir_op_ige:
   case nir_op_ilt:
   case nir_op_ieq:
   case nir_op_ine:
      /* Signed integer induction. */
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_idiv, bit_size, span, step,
                              execution_mode);
      break;

   case nir_op_uge:
   case nir_op_ult:
      /* Unsigned comparison; span still uses isub (wrapping subtraction). */
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_udiv, bit_size, span, step,
                              execution_mode);
      break;

   case nir_op_fge:
   case nir_op_flt:
   case nir_op_feq:
   case nir_op_fneu:
      /* Float induction: divide then truncate to an integer count. */
      span = eval_const_binop(nir_op_fsub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_fdiv, bit_size, span,
                              step, execution_mode);
      iter = eval_const_unop(nir_op_f2i64, bit_size, iter, execution_mode);
      break;

   default:
      return -1;
   }

   uint64_t iter_u64 = nir_const_value_as_uint(iter, bit_size);
   /* Counts beyond INT_MAX are treated as unknown. */
   return iter_u64 > INT_MAX ? -1 : (int)iter_u64;
}
762
/* Evaluate the loop's exit condition for the very first iteration (stepping
 * the initial value once first when trip_offset == 1, i.e. do-while style
 * loops).  Returns true if the loop would break immediately.
 */
static bool
will_break_on_first_iteration(nir_const_value step,
                              nir_alu_type induction_base_type,
                              unsigned trip_offset,
                              nir_op cond_op, unsigned bit_size,
                              nir_const_value initial,
                              nir_const_value limit,
                              bool limit_rhs, bool invert_cond,
                              unsigned execution_mode)
{
   if (trip_offset == 1) {
      /* The induction variable is stepped before the condition is checked,
       * so test against initial + step instead.
       */
      nir_op add_op;
      switch (induction_base_type) {
      case nir_type_float:
         add_op = nir_op_fadd;
         break;
      case nir_type_int:
      case nir_type_uint:
         add_op = nir_op_iadd;
         break;
      default:
         unreachable("Unhandled induction variable base type!");
      }

      initial = eval_const_binop(add_op, bit_size, initial, step,
                                 execution_mode);
   }

   /* Place the induction value and the limit on the correct sides of the
    * comparison.
    */
   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &initial;
   src[limit_rhs ? 1 : 0] = &limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);

   return invert_cond ? !result.b : result.b;
}
801
/* Check whether @iter_int iterations would satisfy the loop's exit
 * condition: compute initial + iter_int * step and evaluate the condition
 * with it.  Used to validate/adjust the count from get_iteration().
 */
static bool
test_iterations(int32_t iter_int, nir_const_value step,
                nir_const_value limit, nir_op cond_op, unsigned bit_size,
                nir_alu_type induction_base_type,
                nir_const_value initial, bool limit_rhs, bool invert_cond,
                unsigned execution_mode)
{
   assert(nir_op_infos[cond_op].num_inputs == 2);

   /* Pick the arithmetic ops matching the induction variable's type. */
   nir_const_value iter_src;
   nir_op mul_op;
   nir_op add_op;
   switch (induction_base_type) {
   case nir_type_float:
      iter_src = nir_const_value_for_float(iter_int, bit_size);
      mul_op = nir_op_fmul;
      add_op = nir_op_fadd;
      break;
   case nir_type_int:
   case nir_type_uint:
      iter_src = nir_const_value_for_int(iter_int, bit_size);
      mul_op = nir_op_imul;
      add_op = nir_op_iadd;
      break;
   default:
      unreachable("Unhandled induction variable base type!");
   }

   /* Multiply the iteration count we are testing by the number of times we
    * step the induction variable each iteration.
    */
   nir_const_value mul_result =
      eval_const_binop(mul_op, bit_size, iter_src, step, execution_mode);

   /* Add the initial value to the accumulated induction variable total */
   nir_const_value add_result =
      eval_const_binop(add_op, bit_size, mul_result, initial, execution_mode);

   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &add_result;
   src[limit_rhs ? 1 : 0] = &limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);

   return invert_cond ? !result.b : result.b;
}
850
/* Compute the trip count of a loop whose induction variable has constant
 * initial value, step, and limit.  @alu is the induction update, @cond the
 * terminator condition, @alu_op the comparison op.  Returns the trip count
 * or -1 if it cannot be determined.
 */
static int
calculate_iterations(nir_const_value initial, nir_const_value step,
                     nir_const_value limit, nir_alu_instr *alu,
                     nir_ssa_scalar cond, nir_op alu_op, bool limit_rhs,
                     bool invert_cond, unsigned execution_mode)
{
   /* nir_op_isub should have been lowered away by this point */
   assert(alu->op != nir_op_isub);

   /* Make sure the alu type for our induction variable is compatible with the
    * conditional alus input type. If its not something has gone really wrong.
    */
   nir_alu_type induction_base_type =
      nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
   if (induction_base_type == nir_type_int || induction_base_type == nir_type_uint) {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_int ||
             nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_uint);
   } else {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[0]) ==
             induction_base_type);
   }

   /* Only variable with these update ops were marked as induction. */
   assert(alu->op == nir_op_iadd || alu->op == nir_op_fadd);

   /* do-while loops can increment the starting value before the condition is
    * checked. e.g.
    *
    *    do {
    *        ndx++;
    *     } while (ndx < 3);
    *
    * Here we check if the induction variable is used directly by the loop
    * condition and if so we assume we need to step the initial value.
    */
   unsigned trip_offset = 0;
   nir_alu_instr *cond_alu = nir_instr_as_alu(cond.def->parent_instr);
   if (cond_alu->src[0].src.ssa == &alu->dest.dest.ssa ||
       cond_alu->src[1].src.ssa == &alu->dest.dest.ssa) {
      trip_offset = 1;
   }

   assert(nir_src_bit_size(alu->src[0].src) ==
          nir_src_bit_size(alu->src[1].src));
   unsigned bit_size = nir_src_bit_size(alu->src[0].src);

   /* get_iteration works under assumption that iterator will be
    * incremented or decremented until it hits the limit,
    * however if the loop condition is false on the first iteration
    * get_iteration's assumption is broken. Handle such loops first.
    */
   if (will_break_on_first_iteration(step, induction_base_type, trip_offset,
                                     alu_op, bit_size, initial,
                                     limit, limit_rhs, invert_cond,
                                     execution_mode)) {
      return 0;
   }

   int iter_int = get_iteration(alu_op, initial, step, limit, bit_size,
                                execution_mode);

   /* If iter_int is negative the loop is ill-formed or is the conditional is
    * unsigned with a huge iteration count so don't bother going any further.
    */
   if (iter_int < 0)
      return -1;

   /* An explanation from the GLSL unrolling pass:
    *
    * Make sure that the calculated number of iterations satisfies the exit
    * condition.  This is needed to catch off-by-one errors and some types of
    * ill-formed loops.  For example, we need to detect that the following
    * loop does not have a maximum iteration count.
    *
    *    for (float x = 0.0; x != 0.9; x += 0.2);
    */
   for (int bias = -1; bias <= 1; bias++) {
      const int iter_bias = iter_int + bias;

      /* First biased count that satisfies the exit condition wins. */
      if (test_iterations(iter_bias, step, limit, alu_op, bit_size,
                          induction_base_type, initial,
                          limit_rhs, invert_cond, execution_mode)) {
         return iter_bias > 0 ? iter_bias - trip_offset : iter_bias;
      }
   }

   return -1;
}
939
940 static nir_op
inverse_comparison(nir_op alu_op)941 inverse_comparison(nir_op alu_op)
942 {
943 switch (alu_op) {
944 case nir_op_fge:
945 return nir_op_flt;
946 case nir_op_ige:
947 return nir_op_ilt;
948 case nir_op_uge:
949 return nir_op_ult;
950 case nir_op_flt:
951 return nir_op_fge;
952 case nir_op_ilt:
953 return nir_op_ige;
954 case nir_op_ult:
955 return nir_op_uge;
956 case nir_op_feq:
957 return nir_op_fneu;
958 case nir_op_ieq:
959 return nir_op_ine;
960 case nir_op_fneu:
961 return nir_op_feq;
962 case nir_op_ine:
963 return nir_op_ieq;
964 default:
965 unreachable("Unsuported comparison!");
966 }
967 }
968
969 static bool
get_induction_and_limit_vars(nir_ssa_scalar cond,nir_ssa_scalar * ind,nir_ssa_scalar * limit,bool * limit_rhs,loop_info_state * state)970 get_induction_and_limit_vars(nir_ssa_scalar cond,
971 nir_ssa_scalar *ind,
972 nir_ssa_scalar *limit,
973 bool *limit_rhs,
974 loop_info_state *state)
975 {
976 nir_ssa_scalar rhs, lhs;
977 lhs = nir_ssa_scalar_chase_alu_src(cond, 0);
978 rhs = nir_ssa_scalar_chase_alu_src(cond, 1);
979
980 if (get_loop_var(lhs.def, state)->type == basic_induction) {
981 *ind = lhs;
982 *limit = rhs;
983 *limit_rhs = true;
984 return true;
985 } else if (get_loop_var(rhs.def, state)->type == basic_induction) {
986 *ind = rhs;
987 *limit = lhs;
988 *limit_rhs = false;
989 return true;
990 } else {
991 return false;
992 }
993 }
994
995 static bool
try_find_trip_count_vars_in_iand(nir_ssa_scalar * cond,nir_ssa_scalar * ind,nir_ssa_scalar * limit,bool * limit_rhs,loop_info_state * state)996 try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
997 nir_ssa_scalar *ind,
998 nir_ssa_scalar *limit,
999 bool *limit_rhs,
1000 loop_info_state *state)
1001 {
1002 const nir_op alu_op = nir_ssa_scalar_alu_op(*cond);
1003 assert(alu_op == nir_op_ieq || alu_op == nir_op_inot);
1004
1005 nir_ssa_scalar iand = nir_ssa_scalar_chase_alu_src(*cond, 0);
1006
1007 if (alu_op == nir_op_ieq) {
1008 nir_ssa_scalar zero = nir_ssa_scalar_chase_alu_src(*cond, 1);
1009
1010 if (!nir_ssa_scalar_is_alu(iand) || !nir_ssa_scalar_is_const(zero)) {
1011 /* Maybe we had it the wrong way, flip things around */
1012 nir_ssa_scalar tmp = zero;
1013 zero = iand;
1014 iand = tmp;
1015
1016 /* If we still didn't find what we need then return */
1017 if (!nir_ssa_scalar_is_const(zero))
1018 return false;
1019 }
1020
1021 /* If the loop is not breaking on (x && y) == 0 then return */
1022 if (nir_ssa_scalar_as_uint(zero) != 0)
1023 return false;
1024 }
1025
1026 if (!nir_ssa_scalar_is_alu(iand))
1027 return false;
1028
1029 if (nir_ssa_scalar_alu_op(iand) != nir_op_iand)
1030 return false;
1031
1032 /* Check if iand src is a terminator condition and try get induction var
1033 * and trip limit var.
1034 */
1035 bool found_induction_var = false;
1036 for (unsigned i = 0; i < 2; i++) {
1037 nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(iand, i);
1038 if (nir_is_supported_terminator_condition(src) &&
1039 get_induction_and_limit_vars(src, ind, limit, limit_rhs, state)) {
1040 *cond = src;
1041 found_induction_var = true;
1042
1043 /* If we've found one with a constant limit, stop. */
1044 if (nir_ssa_scalar_is_const(*limit))
1045 return true;
1046 }
1047 }
1048
1049 return found_induction_var;
1050 }
1051
1052 /* Run through each of the terminators of the loop and try to infer a possible
1053 * trip-count. We need to check them all, and set the lowest trip-count as the
1054 * trip-count of our loop. If one of the terminators has an undecidable
1055 * trip-count we can not safely assume anything about the duration of the
1056 * loop.
1057 */
static void
find_trip_count(loop_info_state *state, unsigned execution_mode)
{
   /* execution_mode carries the shader's float-controls bits and is passed
    * through to the constant-folding helpers below.
    */
   bool trip_count_known = true;
   bool guessed_trip_count = false;
   nir_loop_terminator *limiting_terminator = NULL;
   int max_trip_count = -1;

   list_for_each_entry(nir_loop_terminator, terminator,
                       &state->loop->info->loop_terminator_list,
                       loop_terminator_link) {
      assert(terminator->nif->condition.is_ssa);
      nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };

      if (!nir_ssa_scalar_is_alu(cond)) {
         /* If we get here the loop is dead and will get cleaned up by the
          * nir_opt_dead_cf pass.
          */
         trip_count_known = false;
         terminator->exact_trip_count_unknown = true;
         continue;
      }

      nir_op alu_op = nir_ssa_scalar_alu_op(cond);

      bool limit_rhs;
      nir_ssa_scalar basic_ind = { NULL, 0 };
      nir_ssa_scalar limit;
      if ((alu_op == nir_op_inot || alu_op == nir_op_ieq) &&
          try_find_trip_count_vars_in_iand(&cond, &basic_ind, &limit,
                                           &limit_rhs, state)) {

         /* The loop is exiting on (x && y) == 0 so we need to get the
          * inverse of x or y (i.e. which ever contained the induction var) in
          * order to compute the trip count.
          */
         alu_op = inverse_comparison(nir_ssa_scalar_alu_op(cond));
         trip_count_known = false;
         terminator->exact_trip_count_unknown = true;
      }

      /* No induction variable found through the iand path; try the
       * condition directly as a plain comparison.
       */
      if (!basic_ind.def) {
         if (nir_is_supported_terminator_condition(cond)) {
            get_induction_and_limit_vars(cond, &basic_ind,
                                         &limit, &limit_rhs, state);
         }
      }

      /* The comparison has to have a basic induction variable for us to be
       * able to find trip counts.
       */
      if (!basic_ind.def) {
         trip_count_known = false;
         terminator->exact_trip_count_unknown = true;
         continue;
      }

      /* Record which side of the comparison held the induction variable. */
      terminator->induction_rhs = !limit_rhs;

      /* Attempt to find a constant limit for the loop */
      nir_const_value limit_val;
      if (nir_ssa_scalar_is_const(limit)) {
         limit_val = nir_ssa_scalar_as_const_value(limit);
      } else {
         /* Non-constant limit: the exact count can never be known, but a
          * bound derived from try_find_limit_of_alu or an array access may
          * still feed the unroll heuristics.
          */
         trip_count_known = false;

         if (!try_find_limit_of_alu(limit, &limit_val, terminator, state)) {
            /* Guess loop limit based on array access */
            if (!guess_loop_limit(state, &limit_val, basic_ind)) {
               terminator->exact_trip_count_unknown = true;
               continue;
            }

            guessed_trip_count = true;
         }
      }

      /* We have determined that we have the following constants:
       * (With the typical int i = 0; i < x; i++; as an example)
       *    - Upper limit.
       *    - Starting value
       *    - Step / iteration size
       * That's all that's needed to calculate the trip-count
       */

      nir_basic_induction_var *ind_var =
         get_loop_var(basic_ind.def, state)->ind;

      /* The basic induction var might be a vector but, because we guarantee
       * earlier that the phi source has a scalar swizzle, we can take the
       * component from basic_ind.
       */
      nir_ssa_scalar initial_s = { ind_var->def_outside_loop, basic_ind.comp };
      nir_ssa_scalar alu_s = { &ind_var->alu->dest.dest.ssa, basic_ind.comp };

      nir_const_value initial_val = nir_ssa_scalar_as_const_value(initial_s);

      /* We are guaranteed by earlier code that at least one of these sources
       * is a constant but we don't know which.
       */
      nir_const_value step_val;
      memset(&step_val, 0, sizeof(step_val));
      UNUSED bool found_step_value = false;
      assert(nir_op_infos[ind_var->alu->op].num_inputs == 2);
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar alu_src = nir_ssa_scalar_chase_alu_src(alu_s, i);
         if (nir_ssa_scalar_is_const(alu_src)) {
            found_step_value = true;
            step_val = nir_ssa_scalar_as_const_value(alu_src);
            break;
         }
      }
      assert(found_step_value);

      int iterations = calculate_iterations(initial_val, step_val, limit_val,
                                            ind_var->alu, cond,
                                            alu_op, limit_rhs,
                                            terminator->continue_from_then,
                                            execution_mode);

      /* Were we not able to calculate the iteration count */
      if (iterations == -1) {
         trip_count_known = false;
         guessed_trip_count = false;
         terminator->exact_trip_count_unknown = true;
         continue;
      }

      if (guessed_trip_count) {
         /* Keep only the smallest guessed count seen across terminators;
          * a guess never becomes the exact trip count.
          */
         guessed_trip_count = false;
         terminator->exact_trip_count_unknown = true;
         if (state->loop->info->guessed_trip_count == 0 ||
             state->loop->info->guessed_trip_count > iterations)
            state->loop->info->guessed_trip_count = iterations;

         continue;
      }

      /* If this is the first run or we have found a smaller amount of
       * iterations than previously (we have identified a more limiting
       * terminator) set the trip count and limiting terminator.
       */
      if (max_trip_count == -1 || iterations < max_trip_count) {
         max_trip_count = iterations;
         limiting_terminator = terminator;
      }
   }

   state->loop->info->exact_trip_count_known = trip_count_known;
   if (max_trip_count > -1)
      state->loop->info->max_trip_count = max_trip_count;
   state->loop->info->limiting_terminator = limiting_terminator;
}
1211
1212 static bool
force_unroll_array_access(loop_info_state * state,nir_deref_instr * deref,bool contains_sampler)1213 force_unroll_array_access(loop_info_state *state, nir_deref_instr *deref,
1214 bool contains_sampler)
1215 {
1216 unsigned array_size = find_array_access_via_induction(state, deref, NULL);
1217 if (array_size) {
1218 if ((array_size == state->loop->info->max_trip_count) &&
1219 nir_deref_mode_must_be(deref, nir_var_shader_in |
1220 nir_var_shader_out |
1221 nir_var_shader_temp |
1222 nir_var_function_temp))
1223 return true;
1224
1225 if (nir_deref_mode_must_be(deref, state->indirect_mask))
1226 return true;
1227
1228 if (contains_sampler && state->force_unroll_sampler_indirect)
1229 return true;
1230 }
1231
1232 return false;
1233 }
1234
1235 static bool
force_unroll_heuristics(loop_info_state * state,nir_block * block)1236 force_unroll_heuristics(loop_info_state *state, nir_block *block)
1237 {
1238 nir_foreach_instr(instr, block) {
1239 if (instr->type == nir_instr_type_tex) {
1240 nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
1241 int sampler_idx =
1242 nir_tex_instr_src_index(tex_instr,
1243 nir_tex_src_sampler_deref);
1244
1245
1246 if (sampler_idx >= 0) {
1247 nir_deref_instr *deref =
1248 nir_instr_as_deref(tex_instr->src[sampler_idx].src.ssa->parent_instr);
1249 if (force_unroll_array_access(state, deref, true))
1250 return true;
1251 }
1252 }
1253
1254
1255 if (instr->type != nir_instr_type_intrinsic)
1256 continue;
1257
1258 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1259
1260 /* Check for arrays variably-indexed by a loop induction variable.
1261 * Unrolling the loop may convert that access into constant-indexing.
1262 */
1263 if (intrin->intrinsic == nir_intrinsic_load_deref ||
1264 intrin->intrinsic == nir_intrinsic_store_deref ||
1265 intrin->intrinsic == nir_intrinsic_copy_deref) {
1266 if (force_unroll_array_access(state,
1267 nir_src_as_deref(intrin->src[0]),
1268 false))
1269 return true;
1270
1271 if (intrin->intrinsic == nir_intrinsic_copy_deref &&
1272 force_unroll_array_access(state,
1273 nir_src_as_deref(intrin->src[1]),
1274 false))
1275 return true;
1276 }
1277 }
1278
1279 return false;
1280 }
1281
1282 static void
get_loop_info(loop_info_state * state,nir_function_impl * impl)1283 get_loop_info(loop_info_state *state, nir_function_impl *impl)
1284 {
1285 nir_shader *shader = impl->function->shader;
1286 const nir_shader_compiler_options *options = shader->options;
1287
1288 /* Add all entries in the outermost part of the loop to the processing list
1289 * Mark the entries in conditionals or in nested loops accordingly
1290 */
1291 foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
1292 switch (node->type) {
1293
1294 case nir_cf_node_block:
1295 init_loop_block(nir_cf_node_as_block(node), state,
1296 false, false, options);
1297 break;
1298
1299 case nir_cf_node_if:
1300 nir_foreach_block_in_cf_node(block, node)
1301 init_loop_block(block, state, true, false, options);
1302 break;
1303
1304 case nir_cf_node_loop:
1305 nir_foreach_block_in_cf_node(block, node) {
1306 init_loop_block(block, state, false, true, options);
1307 }
1308 break;
1309
1310 case nir_cf_node_function:
1311 break;
1312 }
1313 }
1314
1315 /* Try to find all simple terminators of the loop. If we can't find any,
1316 * or we find possible terminators that have side effects then bail.
1317 */
1318 if (!find_loop_terminators(state)) {
1319 list_for_each_entry_safe(nir_loop_terminator, terminator,
1320 &state->loop->info->loop_terminator_list,
1321 loop_terminator_link) {
1322 list_del(&terminator->loop_terminator_link);
1323 ralloc_free(terminator);
1324 }
1325 return;
1326 }
1327
1328 /* Induction analysis needs invariance information so get that first */
1329 compute_invariance_information(state);
1330
1331 /* We have invariance information so try to find induction variables */
1332 if (!compute_induction_information(state))
1333 return;
1334
1335 /* Run through each of the terminators and try to compute a trip-count */
1336 find_trip_count(state, impl->function->shader->info.float_controls_execution_mode);
1337
1338 nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
1339 if (force_unroll_heuristics(state, block)) {
1340 state->loop->info->force_unroll = true;
1341 break;
1342 }
1343 }
1344 }
1345
1346 static loop_info_state *
initialize_loop_info_state(nir_loop * loop,void * mem_ctx,nir_function_impl * impl)1347 initialize_loop_info_state(nir_loop *loop, void *mem_ctx,
1348 nir_function_impl *impl)
1349 {
1350 loop_info_state *state = rzalloc(mem_ctx, loop_info_state);
1351 state->loop_vars = ralloc_array(mem_ctx, nir_loop_variable,
1352 impl->ssa_alloc);
1353 state->loop_vars_init = rzalloc_array(mem_ctx, BITSET_WORD,
1354 BITSET_WORDS(impl->ssa_alloc));
1355 state->loop = loop;
1356
1357 list_inithead(&state->process_list);
1358
1359 if (loop->info)
1360 ralloc_free(loop->info);
1361
1362 loop->info = rzalloc(loop, nir_loop_info);
1363
1364 list_inithead(&loop->info->loop_terminator_list);
1365
1366 return state;
1367 }
1368
1369 static void
process_loops(nir_cf_node * cf_node,nir_variable_mode indirect_mask,bool force_unroll_sampler_indirect)1370 process_loops(nir_cf_node *cf_node, nir_variable_mode indirect_mask,
1371 bool force_unroll_sampler_indirect)
1372 {
1373 switch (cf_node->type) {
1374 case nir_cf_node_block:
1375 return;
1376 case nir_cf_node_if: {
1377 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
1378 foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list)
1379 process_loops(nested_node, indirect_mask, force_unroll_sampler_indirect);
1380 foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list)
1381 process_loops(nested_node, indirect_mask, force_unroll_sampler_indirect);
1382 return;
1383 }
1384 case nir_cf_node_loop: {
1385 nir_loop *loop = nir_cf_node_as_loop(cf_node);
1386 foreach_list_typed(nir_cf_node, nested_node, node, &loop->body)
1387 process_loops(nested_node, indirect_mask, force_unroll_sampler_indirect);
1388 break;
1389 }
1390 default:
1391 unreachable("unknown cf node type");
1392 }
1393
1394 nir_loop *loop = nir_cf_node_as_loop(cf_node);
1395 nir_function_impl *impl = nir_cf_node_get_function(cf_node);
1396 void *mem_ctx = ralloc_context(NULL);
1397
1398 loop_info_state *state = initialize_loop_info_state(loop, mem_ctx, impl);
1399 state->indirect_mask = indirect_mask;
1400 state->force_unroll_sampler_indirect = force_unroll_sampler_indirect;
1401
1402 get_loop_info(state, impl);
1403
1404 ralloc_free(mem_ctx);
1405 }
1406
1407 void
nir_loop_analyze_impl(nir_function_impl * impl,nir_variable_mode indirect_mask,bool force_unroll_sampler_indirect)1408 nir_loop_analyze_impl(nir_function_impl *impl,
1409 nir_variable_mode indirect_mask,
1410 bool force_unroll_sampler_indirect)
1411 {
1412 nir_index_ssa_defs(impl);
1413 foreach_list_typed(nir_cf_node, node, node, &impl->body)
1414 process_loops(node, indirect_mask, force_unroll_sampler_indirect);
1415 }
1416