/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow.h"
#include "nir_vla.h"

void nir_inline_function_impl(struct nir_builder *b,
                              const nir_function_impl *impl,
                              nir_ssa_def **params,
                              struct hash_table *shader_var_remap)
{
   nir_function_impl *copy = nir_function_impl_clone(b->shader, impl);

   /* Insert a nop at the cursor so we can keep track of where things are as
    * we add/remove stuff from the CFG.
    */
   nir_intrinsic_instr *nop =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_nop);
   nir_builder_instr_insert(b, &nop->instr);

   exec_list_append(&b->impl->locals, &copy->locals);
   exec_list_append(&b->impl->registers, &copy->registers);

   nir_foreach_block(block, copy) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->deref_type != nir_deref_type_var)
               break;

            /* We don't need to remap function variables.  We already cloned
             * them as part of nir_function_impl_clone and appended them to
             * b->impl->locals.
             */
            if (deref->var->data.mode == nir_var_function_temp)
               break;

            /* If no map is provided, we assume that there are either no
             * shader variables or they already live in b->shader (this is
             * the case for function inlining within a single shader).
             */
            if (shader_var_remap == NULL)
               break;

            struct hash_entry *entry =
               _mesa_hash_table_search(shader_var_remap, deref->var);
            if (entry == NULL) {
               nir_variable *nvar = nir_variable_clone(deref->var, b->shader);
               nir_shader_add_variable(b->shader, nvar);
               entry = _mesa_hash_table_insert(shader_var_remap,
                                               deref->var, nvar);
            }
            deref->var = entry->data;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
            if (load->intrinsic != nir_intrinsic_load_param)
               break;

            unsigned param_idx = nir_intrinsic_param_idx(load);
            assert(param_idx < impl->function->num_params);
            assert(load->dest.is_ssa);
            nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                     nir_src_for_ssa(params[param_idx]));

            /* Remove any left-over load_param intrinsics because they're soon
             * to be in another function and therefore no longer valid.
             */
            nir_instr_remove(&load->instr);
            break;
         }

         case nir_instr_type_jump:
            /* Returns have to be lowered for this to work */
            assert(nir_instr_as_jump(instr)->type != nir_jump_return);
            break;

         default:
            break;
         }
      }
   }

   /* Pluck the body out of the function and place it here */
   nir_cf_list body;
   nir_cf_list_extract(&body, &copy->body);
   nir_cf_reinsert(&body, nir_before_instr(&nop->instr));

   b->cursor = nir_instr_remove(&nop->instr);
}
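
/* A minimal usage sketch for direct callers of nir_inline_function_impl
 * (hypothetical names, for illustration only): assuming "b" is positioned
 * where the inlined body should land, the callee's returns have already
 * been lowered, and "arg0" is an SSA value matching its single parameter:
 *
 *    nir_ssa_def *args[] = { arg0 };
 *    nir_inline_function_impl(b, callee->impl, args, NULL);
 */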

static bool inline_function_impl(nir_function_impl *impl, struct set *inlined);

static bool
inline_functions_block(nir_block *block, nir_builder *b,
                       struct set *inlined)
{
   bool progress = false;
   /* This is tricky.  We're iterating over instructions in a block but, as
    * we go, the block and its instruction list are being split into
    * pieces.  However, this *should* be safe since foreach_safe always
    * stashes the next thing in the iteration.  That next thing will
    * properly get moved to the next block when it gets split, and we
    * continue iterating there.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_call)
         continue;

      progress = true;

      nir_call_instr *call = nir_instr_as_call(instr);
      assert(call->callee->impl);

      /* Make sure that the function we're calling is already inlined */
      inline_function_impl(call->callee->impl, inlined);

      b->cursor = nir_instr_remove(&call->instr);

      /* Rewrite all of the uses of the callee's parameters to use the call
       * instruction's sources.  In order to ensure that the "load" happens
       * here and not later (for register sources), we make sure to convert it
       * to an SSA value first.
       */
      const unsigned num_params = call->num_params;
      NIR_VLA(nir_ssa_def *, params, num_params);
      for (unsigned i = 0; i < num_params; i++) {
         params[i] = nir_ssa_for_src(b, call->params[i],
                                     call->callee->params[i].num_components);
      }

      nir_inline_function_impl(b, call->callee->impl, params, NULL);
   }

   return progress;
}

static bool
inline_function_impl(nir_function_impl *impl, struct set *inlined)
{
   if (_mesa_set_search(inlined, impl))
      return false; /* Already inlined */

   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      progress |= inline_functions_block(block, &b, inlined);
   }

   if (progress) {
      /* SSA and register indices are completely messed up now */
      nir_index_ssa_defs(impl);
      nir_index_local_regs(impl);

      nir_metadata_preserve(impl, nir_metadata_none);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   _mesa_set_add(inlined, impl);

   return progress;
}

/** A pass to inline all functions in a shader into their callers
 *
 * For most use-cases, function inlining is a multi-step process.  The general
 * pattern employed by SPIR-V consumers and others is as follows:
 *
 *  1. nir_lower_variable_initializers(shader, nir_var_function_temp)
 *
 *     This is needed because local variables from the callee are simply added
 *     to the locals list for the caller and the information about where the
 *     constant initializer logically happens is lost.  If the callee is
 *     called in a loop, this can cause the variable to go from being
 *     initialized once per loop iteration to being initialized once at the
 *     top of the caller and values to persist from one invocation of the
 *     callee to the next.  The simple solution to this problem is to get rid
 *     of constant initializers before function inlining.
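 *
 *     As an illustration (a hypothetical callee, in GLSL-like pseudocode):
 *
 *        void accumulate() {
 *           int x = 0;     // constant initializer
 *           x += fetch_next();
 *           store_result(x);
 *        }
 *
 *     If accumulate() is called from inside a loop and inlined without this
 *     step, "x = 0" effectively moves to the top of the caller, so x is
 *     zeroed once overall instead of once per loop iteration.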
 *
 *  2. nir_lower_returns(shader)
 *
 *     nir_inline_functions assumes that all functions end "naturally" by
 *     execution reaching the end of the function without any return
 *     instructions causing instant jumps to the end.  Thanks to NIR being
 *     structured, we can't represent arbitrary jumps to various points in the
 *     program, which is what an early return in the callee would have to turn
 *     into when we inline it into the caller.  Instead, we require returns to
 *     be lowered, which lets us just copy+paste the callee directly into the
 *     caller.
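 *
 *     Conceptually (a simplified sketch, not the exact lowering NIR
 *     performs), an early return such as:
 *
 *        if (cond)
 *           return;
 *        do_stuff();
 *
 *     is rewritten so that execution always falls off the end:
 *
 *        if (!cond) {
 *           do_stuff();
 *        }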
 *
 *  3. nir_inline_functions(shader)
 *
 *     This does the actual function inlining and the resulting shader will
 *     contain no call instructions.
 *
 *  4. nir_opt_deref(shader)
 *
 *     Most functions contain pointer parameters where the result of a deref
 *     instruction is passed in as a parameter, loaded via a load_param
 *     intrinsic, and then turned back into a deref via a cast.  Function
 *     inlining will get rid of the load_param but we are still left with a
 *     cast.  Running nir_opt_deref gets rid of the intermediate cast and
 *     results in a whole deref chain again.  This is currently required by a
 *     number of optimizations and lowering passes at least for certain
 *     variable modes.
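 *
 *     Schematically (an illustration, not exact NIR syntax), the inlined
 *     parameter access goes from:
 *
 *        deref_cast(load_param) -> deref_struct -> load
 *
 *     back to a direct chain rooted at the variable:
 *
 *        deref_var -> deref_struct -> load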
 *
 *  5. Loop over the functions and delete all but the main entrypoint.
 *
 *     In the Intel Vulkan driver this looks like this:
 *
 *        foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
 *           if (func != entry_point)
 *              exec_node_remove(&func->node);
 *        }
 *        assert(exec_list_length(&nir->functions) == 1);
 *
 *     While nir_inline_functions does get rid of all call instructions, it
 *     doesn't get rid of any functions because it doesn't know what the "root
 *     function" is.  Instead, it's up to the individual driver to know how to
 *     decide on a root function and delete the rest.  With SPIR-V,
 *     spirv_to_nir returns the root function and so we can just use ==,
 *     whereas with GL, you may have to look for a function named "main".
 *
 *  6. nir_lower_variable_initializers(shader, ~nir_var_function_temp)
 *
 *     Lowering constant initializers on inputs, outputs, global variables,
 *     etc. requires that we know the main entrypoint so that we know where to
 *     initialize them.  Otherwise, we would have to assume that anything
 *     could be a main entrypoint and initialize them at the start of every
 *     function, but that would clearly be wrong if any of those functions
 *     were ever called within another function.  Simply requiring a
 *     single-entrypoint function shader is the best way to make it
 *     well-defined.
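 *
 * Putting the steps together, a caller might look like this (a sketch; the
 * exact pass order and cleanup vary by driver, and "entry_point" is assumed
 * to be known as described in step 5):
 *
 *    nir_lower_variable_initializers(shader, nir_var_function_temp);
 *    nir_lower_returns(shader);
 *    nir_inline_functions(shader);
 *    nir_opt_deref(shader);
 *    foreach_list_typed_safe(nir_function, func, node, &shader->functions) {
 *       if (func != entry_point)
 *          exec_node_remove(&func->node);
 *    }
 *    assert(exec_list_length(&shader->functions) == 1);
 *    nir_lower_variable_initializers(shader, ~nir_var_function_temp);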
 */
bool
nir_inline_functions(nir_shader *shader)
{
   struct set *inlined = _mesa_pointer_set_create(NULL);
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = inline_function_impl(function->impl, inlined) || progress;
   }

   _mesa_set_destroy(inlined, NULL);

   return progress;
}