/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_instr_set.h"

/*
 * Implements Global Code Motion.  A description of GCM can be found in
 * "Global Code Motion/Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways.  The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and
 * verify correctness.
 */
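
/*
 * The pass runs in four phases (see opt_gcm_impl):
 *
 *  1. gcm_pin_instructions marks immovable instructions as pinned and pulls
 *     every non-pinned instruction out of the program into a side list.
 *  2. gcm_schedule_early_instr finds, for each instruction, the earliest
 *     block in the dominance tree where all of its sources are available.
 *  3. gcm_schedule_late_instr computes the LCA of the blocks of all uses and
 *     chooses a final block between the early block and that LCA, preferring
 *     blocks at a lower loop depth.
 *  4. gcm_place_instr inserts the instructions back into their chosen blocks
 *     in a valid order.
 */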

struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block.  This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};
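
/* Per-instruction state.  early_block is the earliest block the instruction
 * may legally be scheduled in, as computed by gcm_schedule_early_instr();
 * the block finally chosen by late scheduling is stored in nir_instr::block.
 */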
struct gcm_instr_info {
   nir_block *early_block;
};

/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED =                (1 << 0),
   GCM_INSTR_SCHEDULE_EARLIER_ONLY = (1 << 1),
   GCM_INSTR_SCHEDULED_EARLY =       (1 << 2),
   GCM_INSTR_SCHEDULED_LATE =        (1 << 3),
   GCM_INSTR_PLACED =                (1 << 4),
};
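
/* GCM_INSTR_PINNED marks instructions that must stay in their original block.
 * GCM_INSTR_SCHEDULE_EARLIER_ONLY marks instructions (derivative ALU ops and
 * texture ops with implicit derivatives) that may be hoisted but never sunk
 * below their original block.  The remaining three flags record which of the
 * scheduling and placement passes have already visited an instruction.
 */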

struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   bool progress;

   /* The list of non-pinned instructions.  As we pin instructions, we pull
    * the non-pinned ones out of their blocks and place them in this list.
    * This saves us from having linked-list problems when we go to put
    * instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;

   unsigned num_instrs;
   struct gcm_instr_info *instr_infos;
};

/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}
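
/* Heuristic: returns true if the instruction producing this source is one we
 * consider scalarizable.
 *
 * Used by gcm_pin_instructions() below: a nir_op_mov whose source is not
 * scalarizable is pinned rather than moved.
 */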
static bool
is_src_scalarizable(nir_src *src)
{
   assert(src->is_ssa);

   nir_instr *src_instr = src->ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_ssa_undef:
      return true;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         /* Don't scalarize if we see a load of a local variable because it
          * might turn into one of the things we can't scalarize.
          */
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return !nir_deref_mode_may_be(deref, (nir_var_function_temp |
                                               nir_var_shader_temp));
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
         return true;
      default:
         break;
      }

      return false;
   }

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/* Walks the instruction list and marks immovable instructions as pinned
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to GCM_INSTR_PINNED, GCM_INSTR_SCHEDULE_EARLIER_ONLY, or 0, and all
 * non-pinned instructions will have been moved onto state->instrs.
 */
static void
gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
{
   state->num_instrs = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* Index the instructions for use in gcm_state::instrs */
         instr->index = state->num_instrs++;

         switch (instr->type) {
         case nir_instr_type_alu:
            switch (nir_instr_as_alu(instr)->op) {
            case nir_op_fddx:
            case nir_op_fddy:
            case nir_op_fddx_fine:
            case nir_op_fddy_fine:
            case nir_op_fddx_coarse:
            case nir_op_fddy_coarse:
               /* These can only go in uniform control flow */
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
               break;

            case nir_op_mov:
               if (!is_src_scalarizable(&(nir_instr_as_alu(instr)->src[0].src))) {
                  instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               }
               /* fallthrough */

            default:
               instr->pass_flags = 0;
               break;
            }
            break;

         case nir_instr_type_tex:
            if (nir_tex_instr_has_implicit_derivative(nir_instr_as_tex(instr)))
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
            else
               instr->pass_flags = 0;
            break;

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
            instr->pass_flags = 0;
            break;

         case nir_instr_type_intrinsic: {
            if (nir_intrinsic_can_reorder(nir_instr_as_intrinsic(instr))) {
               instr->pass_flags = 0;
            } else {
               instr->pass_flags = GCM_INSTR_PINNED;
            }
            break;
         }

         case nir_instr_type_jump:
         case nir_instr_type_ssa_undef:
         case nir_instr_type_phi:
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            unreachable("Invalid instruction type in GCM");
         }

         if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
            /* If this is an unpinned instruction, go ahead and pull it out of
             * the program and put it on the instrs list.  This has a couple
             * of benefits.  First, it makes the scheduling algorithm more
             * efficient because we can avoid walking over basic blocks and
             * pinned instructions.  Second, it keeps us from causing linked
             * list confusion when we're trying to put everything in its
             * proper place at the end of the pass.
             *
             * Note that we don't use nir_instr_remove here because that also
             * cleans up uses and defs and we want to keep that information.
             */
            exec_node_remove(&instr->node);
            exec_list_push_tail(&state->instrs, &instr->node);
         }
      }
   }
}

static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);

/** Updates an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   assert(src->is_ssa);

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can just compare indices.
    */
   struct gcm_instr_info *src_info =
      &state->instr_infos[src->ssa->parent_instr->index];
   struct gcm_instr_info *info = &state->instr_infos[instr->index];
   if (info->early_block->index < src_info->early_block->index)
      info->early_block = src_info->early_block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above.  We may
    * still be iterating through the sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}

/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating the early_block field of
 * the corresponding gcm_instr_info entry.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned instructions always get scheduled in their original block so we
    * don't need to do anything.  Also, bailing here keeps us from ever
    * following the sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED) {
      state->instr_infos[instr->index].early_block = instr->block;
      return;
   }

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   state->instr_infos[instr->index].early_block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}

static nir_block *
gcm_choose_block_for_instr(nir_instr *instr, nir_block *early_block,
                           nir_block *late_block, struct gcm_state *state)
{
   assert(nir_block_dominates(early_block, late_block));
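
   /* Walk up the dominance tree from the late block (the LCA of the uses)
    * toward the early block, looking for a legal block at a shallower loop
    * depth.
    */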
   nir_block *best = late_block;
   for (nir_block *block = late_block; block != NULL; block = block->imm_dom) {
      /* Being too aggressive with how we pull instructions out of loops can
       * result in extra register pressure and spilling.  For example, it's
       * fairly common for loops in compute shaders to calculate SSBO offsets
       * using the workgroup id, subgroup id and subgroup invocation; pulling
       * all of these calculations outside the loop causes register pressure.
       *
       * To work around these issues, for now we only allow constant and
       * texture instructions to be moved outside their original loops.
       *
       * TODO: figure out some heuristics to allow more to be moved out of
       * loops.
       */
      if (state->blocks[block->index].loop_depth <
          state->blocks[best->index].loop_depth &&
          (nir_block_dominates(instr->block, block) ||
           instr->type == nir_instr_type_load_const ||
           instr->type == nir_instr_type_tex))
         best = block;
      else if (block == instr->block)
         best = block;

      if (block == early_block)
         break;
   }

   return best;
}

static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);

/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source.  We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = use_src->parent_if;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   nir_block *early_block =
      state->instr_infos[def->parent_instr->index].early_block;

   /* Some instructions may never be used.  Flag them and the instruction
    * placement code will get rid of them for us.
    */
   if (lca == NULL) {
      def->parent_instr->block = NULL;
      return true;
   }

   if (def->parent_instr->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
       lca != def->parent_instr->block &&
       nir_block_dominates(def->parent_instr->block, lca)) {
      lca = def->parent_instr->block;
   }

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best_block =
      gcm_choose_block_for_instr(def->parent_instr, early_block, lca, state);

   if (def->parent_instr->block != best_block)
      state->progress = true;

   def->parent_instr->block = best_block;

   return true;
}

/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies.  Instead, it
 * first finds the latest possible place it can schedule the instruction and
 * then possibly schedules it earlier than that.  The actual location is as
 * far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}

static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);
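
/* Places every instruction that uses the given def.  gcm_place_instr() calls
 * this before inserting the defining instruction so that, within a block,
 * definitions end up ahead of their uses.
 */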
static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   nir_foreach_use(use_src, def)
      gcm_place_instr(use_src->parent_instr, state);

   return false;
}
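
/* If the given def still has any uses, rewrites them to use a fresh
 * ssa_undef inserted at the top of the impl.  This lets the placement code
 * below remove instructions that were never assigned a block.
 */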
static bool
gcm_replace_def_with_undef(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   if (list_is_empty(&def->uses) && list_is_empty(&def->if_uses))
      return true;

   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(state->impl->function->shader,
                                 def->num_components, def->bit_size);
   nir_instr_insert(nir_before_cf_list(&state->impl->body), &undef->instr);
   nir_ssa_def_rewrite_uses(def, nir_src_for_ssa(&undef->def));

   return true;
}

/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm.  We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction.  Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array.  When
 * we insert an instruction in a block we insert it before the last
 * instruction inserted in that block rather than the last instruction
 * inserted globally.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   if (instr->block == NULL) {
      nir_foreach_ssa_def(instr, gcm_replace_def_with_undef, state);
      nir_instr_remove(instr);
      return;
   }

   /* Phi nodes are our one source of back-edges.  Since right now we are
    * only doing scheduling within blocks, we don't need to worry about
    * them since they are always at the top.  Just skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(instr->pass_flags & GCM_INSTR_PINNED);
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);

   if (instr->pass_flags & GCM_INSTR_PINNED) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block.  Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (after->pass_flags & GCM_INSTR_PINNED) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}

static bool
opt_gcm_impl(nir_function_impl *impl, bool value_number)
{
   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   struct gcm_state state;

   state.impl = impl;
   state.instr = NULL;
   state.progress = false;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   gcm_build_block_info(&impl->body, &state, 0);

   gcm_pin_instructions(impl, &state);

   state.instr_infos =
      rzalloc_array(NULL, struct gcm_instr_info, state.num_instrs);

   if (value_number) {
      struct set *gvn_set = nir_instr_set_create(NULL);
      foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
         if (nir_instr_set_add_or_rewrite(gvn_set, instr)) {
            nir_instr_remove(instr);
            state.progress = true;
         }
      }
      nir_instr_set_destroy(gvn_set);
   }

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);
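
   /* Place instructions starting from the tail of the list.  gcm_place_instr()
    * recursively places the users of each instruction first and then inserts
    * the instruction before the last one placed in its chosen block, so
    * definitions end up ahead of their uses.
    */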
   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);
   ralloc_free(state.instr_infos);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return state.progress;
}
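
/* Example (illustrative only): a driver's optimization loop would typically
 * invoke this pass via the NIR_PASS macro, e.g.
 *
 *    NIR_PASS(progress, shader, nir_opt_gcm, true);
 *
 * where the boolean selects whether global value numbering is performed
 * before code motion.
 */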
bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= opt_gcm_impl(function->impl, value_number);
   }

   return progress;
}