/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow.h"
#include "nir_vla.h"

static bool function_ends_in_jump(nir_function_impl *impl)
{
   nir_block *last_block = nir_impl_last_block(impl);
   return nir_block_ends_in_jump(last_block);
}

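/* Inline the given function impl at the builder's cursor.
 *
 * The impl is cloned into b->shader; its locals and registers are appended
 * to the caller's, load_param intrinsics are rewritten to the SSA defs in
 * params, and non-local variable derefs are remapped via shader_var_remap
 * (cloning variables on demand when the callee comes from another shader).
 * A typical call site might look like this (a sketch, assuming the callee's
 * returns have already been lowered; see inline_functions_block below):
 *
 *    b->cursor = nir_instr_remove(&call->instr);
 *    nir_inline_function_impl(b, call->callee->impl, params, NULL);
 */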
void nir_inline_function_impl(struct nir_builder *b,
                              const nir_function_impl *impl,
                              nir_ssa_def **params,
                              struct hash_table *shader_var_remap)
{
   nir_function_impl *copy = nir_function_impl_clone(b->shader, impl);

   exec_list_append(&b->impl->locals, &copy->locals);
   exec_list_append(&b->impl->registers, &copy->registers);

   nir_foreach_block(block, copy) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->deref_type != nir_deref_type_var)
               break;

            /* We don't need to remap function variables. We already cloned
             * them as part of nir_function_impl_clone and appended them to
             * b->impl->locals.
             */
            if (deref->var->data.mode == nir_var_function_temp)
               break;
            /* If no map is provided, we assume that there are either no
             * shader variables or they already live in b->shader (this is
             * the case for function inlining within a single shader).
             */
            if (shader_var_remap == NULL)
               break;

            struct hash_entry *entry =
               _mesa_hash_table_search(shader_var_remap, deref->var);
            if (entry == NULL) {
               nir_variable *nvar = nir_variable_clone(deref->var, b->shader);
               nir_shader_add_variable(b->shader, nvar);
               entry = _mesa_hash_table_insert(shader_var_remap,
                                               deref->var, nvar);
            }
            deref->var = entry->data;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
            if (load->intrinsic != nir_intrinsic_load_param)
               break;

            unsigned param_idx = nir_intrinsic_param_idx(load);
            assert(param_idx < impl->function->num_params);
            assert(load->dest.is_ssa);
            nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                     params[param_idx]);

            /* Remove any left-over load_param intrinsics because they're
             * soon to be in another function and therefore no longer valid.
             */
            nir_instr_remove(&load->instr);
            break;
         }

         case nir_instr_type_jump:
            /* Returns have to be lowered for this to work */
            assert(nir_instr_as_jump(instr)->type != nir_jump_return);
            break;

         default:
            break;
         }
      }
   }

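   /* If the callee's body ends in a jump (returns are already lowered, so
    * this would be a break or continue), we can't simply leave the cursor
    * after the spliced body: no instruction may follow a jump within a
    * block. Wrapping the body in an if (true) is one way to keep the CFG
    * well-formed and leave a valid cursor after the inlined code.
    */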
   bool nest_if = function_ends_in_jump(copy);

   /* Pluck the body out of the function and place it here */
   nir_cf_list body;
   nir_cf_list_extract(&body, &copy->body);

   if (nest_if) {
      nir_if *cf = nir_push_if(b, nir_imm_bool(b, true));
      nir_cf_reinsert(&body, nir_after_cf_list(&cf->then_list));
      nir_pop_if(b, cf);
   } else {
      /* Insert a nop at the cursor so we can keep track of where things are
       * as we add/remove stuff from the CFG.
       */
      nir_intrinsic_instr *nop = nir_nop(b);
      nir_cf_reinsert(&body, nir_before_instr(&nop->instr));
      b->cursor = nir_instr_remove(&nop->instr);
   }
}

static bool inline_function_impl(nir_function_impl *impl, struct set *inlined);

static bool
inline_functions_block(nir_block *block, nir_builder *b,
                       struct set *inlined)
{
   bool progress = false;
   /* This is tricky. We're iterating over instructions in a block but, as
    * we go, the block and its instruction list are being split into
    * pieces. However, this *should* be safe since foreach_safe always
    * stashes the next thing in the iteration. That next thing will
    * properly get moved to the next block when it gets split, and we
    * continue iterating there.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_call)
         continue;

      progress = true;

      nir_call_instr *call = nir_instr_as_call(instr);
      assert(call->callee->impl);

      /* Make sure that the function we're calling is already inlined */
      inline_function_impl(call->callee->impl, inlined);

      b->cursor = nir_instr_remove(&call->instr);
      /* Rewrite all of the uses of the callee's parameters to use the call
       * instruction's sources. In order to ensure that the "load" happens
       * here and not later (for register sources), we make sure to convert
       * it to an SSA value first.
       */
      const unsigned num_params = call->num_params;
      NIR_VLA(nir_ssa_def *, params, num_params);
      for (unsigned i = 0; i < num_params; i++) {
         params[i] = nir_ssa_for_src(b, call->params[i],
                                     call->callee->params[i].num_components);
      }

      nir_inline_function_impl(b, call->callee->impl, params, NULL);
   }

   return progress;
}

static bool
inline_function_impl(nir_function_impl *impl, struct set *inlined)
{
   if (_mesa_set_search(inlined, impl))
      return false; /* Already inlined */

   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      progress |= inline_functions_block(block, &b, inlined);
   }

   if (progress) {
      /* SSA and register indices are completely messed up now */
      nir_index_ssa_defs(impl);
      nir_index_local_regs(impl);

      nir_metadata_preserve(impl, nir_metadata_none);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   _mesa_set_add(inlined, impl);

   return progress;
}

/** A pass to inline all functions in a shader into their callers
 *
 * For most use-cases, function inlining is a multi-step process. The general
 * pattern employed by SPIR-V consumers and others is as follows:
 *
 * 1. nir_lower_variable_initializers(shader, nir_var_function_temp)
 *
 *    This is needed because local variables from the callee are simply added
 *    to the locals list for the caller and the information about where the
 *    constant initializer logically happens is lost. If the callee is
 *    called in a loop, this can cause the variable to go from being
 *    initialized once per loop iteration to being initialized once at the
 *    top of the caller and values to persist from one invocation of the
 *    callee to the next. The simple solution to this problem is to get rid
 *    of constant initializers before function inlining.
 *
 * 2. nir_lower_returns(shader)
 *
 *    nir_inline_functions assumes that all functions end "naturally" by
 *    execution reaching the end of the function without any return
 *    instructions causing instant jumps to the end. Thanks to NIR being
 *    structured, we can't represent arbitrary jumps to various points in the
 *    program, which is what an early return in the callee would have to turn
 *    into when we inline it into the caller. Instead, we require returns to
 *    be lowered, which lets us just copy+paste the callee directly into the
 *    caller.
 *
 * 3. nir_inline_functions(shader)
 *
 *    This does the actual function inlining and the resulting shader will
 *    contain no call instructions.
 *
 * 4. nir_opt_deref(shader)
 *
 *    Most functions contain pointer parameters where the result of a deref
 *    instruction is passed in as a parameter, loaded via a load_param
 *    intrinsic, and then turned back into a deref via a cast. Function
 *    inlining will get rid of the load_param but we are still left with a
 *    cast. Running nir_opt_deref gets rid of the intermediate cast and
 *    results in a whole deref chain again. This is currently required by a
 *    number of optimizations and lowering passes, at least for certain
 *    variable modes.
 *
 * 5. Loop over the functions and delete all but the main entrypoint.
 *
 *    In the Intel Vulkan driver this looks like this:
 *
 *       foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
 *          if (func != entry_point)
 *             exec_node_remove(&func->node);
 *       }
 *       assert(exec_list_length(&nir->functions) == 1);
 *
 *    While nir_inline_functions does get rid of all call instructions, it
 *    doesn't get rid of any functions because it doesn't know what the "root
 *    function" is. Instead, it's up to the individual driver to know how to
 *    decide on a root function and delete the rest. With SPIR-V,
 *    spirv_to_nir returns the root function and so we can just use == whereas
 *    with GL, you may have to look for a function named "main".
 *
 * 6. nir_lower_variable_initializers(shader, ~nir_var_function_temp)
 *
 *    Lowering constant initializers on inputs, outputs, global variables,
 *    etc. requires that we know the main entrypoint so that we know where to
 *    initialize them. Otherwise, we would have to assume that anything
 *    could be a main entrypoint and initialize them at the start of every
 *    function, but that would clearly be wrong if any of those functions were
 *    ever called within another function. Simply requiring a single-
 *    entrypoint function shader is the best way to make it well-defined.
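 *
 * Put together, caller code might look something like the following (a
 * condensed sketch of the steps above; how entry_point is found is
 * driver-specific):
 *
 *    nir_lower_variable_initializers(shader, nir_var_function_temp);
 *    nir_lower_returns(shader);
 *    nir_inline_functions(shader);
 *    nir_opt_deref(shader);
 *
 *    foreach_list_typed_safe(nir_function, func, node, &shader->functions) {
 *       if (func != entry_point)
 *          exec_node_remove(&func->node);
 *    }
 *
 *    nir_lower_variable_initializers(shader, ~nir_var_function_temp);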
 */
bool
nir_inline_functions(nir_shader *shader)
{
   struct set *inlined = _mesa_pointer_set_create(NULL);
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = inline_function_impl(function->impl, inlined) || progress;
   }

   _mesa_set_destroy(inlined, NULL);

   return progress;
}