/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow.h"
#include "nir_vla.h"
/*
 * TODO: write a proper inliner for GPUs.
 * For now, this heuristic only inlines small functions; tail calls and
 * functions containing a barrier are always inlined regardless of size.
 */
static bool
nir_function_can_inline(nir_function *function)
{
   bool can_inline = true;
   if (!function->should_inline) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
               /* Functions containing a barrier must always be inlined. */
               if (intr->intrinsic == nir_intrinsic_barrier)
                  return true;
            }
         }

         if (function->impl->num_blocks > 2)
            can_inline = false;
         if (function->impl->ssa_alloc > 45)
            can_inline = false;
      }
   }
   return can_inline;
}

static bool
function_ends_in_jump(nir_function_impl *impl)
{
   nir_block *last_block = nir_impl_last_block(impl);
   return nir_block_ends_in_jump(last_block);
}

/* A cast is used to deref function in/out params. However, the bindless
 * textures spec allows both uniforms and function temps to be passed to a
 * function param that is defined the same way. To deal with this, we update
 * the mode of the cast when we inline and know which variable mode we are
 * dealing with.
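 *
 * For example (an illustrative sketch of the rewrite below), a cast whose
 * parent deref turns out to be a uniform:
 *
 *    deref_cast (function_temp sampler2D *)  ->  deref_cast (uniform sampler2D *)
 *
 * The new mode is then propagated recursively into any child derefs.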
 */
static void
fixup_cast_deref_mode(nir_deref_instr *deref)
{
   nir_deref_instr *parent = nir_src_as_deref(deref->parent);
   if (parent && deref->modes & nir_var_function_temp) {
      if (parent->modes & nir_var_uniform) {
         deref->modes |= nir_var_uniform;
      } else if (parent->modes & nir_var_image) {
         deref->modes |= nir_var_image;
      } else if (parent->modes & nir_var_mem_ubo) {
         deref->modes |= nir_var_mem_ubo;
      } else if (parent->modes & nir_var_mem_ssbo) {
         deref->modes |= nir_var_mem_ssbo;
      } else
         return;

      deref->modes ^= nir_var_function_temp;

      nir_foreach_use(use, &deref->def) {
         if (nir_src_parent_instr(use)->type != nir_instr_type_deref)
            continue;

         /* Recurse into children */
         fixup_cast_deref_mode(nir_instr_as_deref(nir_src_parent_instr(use)));
      }
   }
}

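/* Inline the given nir_function_impl at the builder's cursor.
 *
 * A minimal usage sketch (assuming the cursor already points at the removed
 * call site and `params` holds one SSA def per parameter, as done in
 * inline_functions_pass below):
 *
 *    b->cursor = nir_instr_remove(&call->instr);
 *    nir_inline_function_impl(b, call->callee->impl, params, NULL);
 */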
void
nir_inline_function_impl(struct nir_builder *b,
                         const nir_function_impl *impl,
                         nir_def **params,
                         struct hash_table *shader_var_remap)
{
   nir_function_impl *copy = nir_function_impl_clone(b->shader, impl);

   exec_list_append(&b->impl->locals, &copy->locals);

   nir_foreach_block(block, copy) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);

            /* Note: This shouldn't change the mode of anything but the
             * replaced nir_intrinsic_load_param intrinsics handled later in
             * this switch statement. Any incorrect modes should have already
             * been detected by previous nir_validate calls.
             */
            if (deref->deref_type == nir_deref_type_cast) {
               fixup_cast_deref_mode(deref);
               break;
            }

            if (deref->deref_type != nir_deref_type_var)
               break;

            /* We don't need to remap function variables. We already cloned
             * them as part of nir_function_impl_clone and appended them to
             * b->impl->locals.
             */
            if (deref->var->data.mode == nir_var_function_temp)
               break;

            /* If no map is provided, we assume that there are either no
             * shader variables or they already live in b->shader (this is
             * the case for function inlining within a single shader).
             */
            if (shader_var_remap == NULL)
               break;

            struct hash_entry *entry =
               _mesa_hash_table_search(shader_var_remap, deref->var);
            if (entry == NULL) {
               nir_variable *nvar = nir_variable_clone(deref->var, b->shader);
               nir_shader_add_variable(b->shader, nvar);
               entry = _mesa_hash_table_insert(shader_var_remap,
                                               deref->var, nvar);
            }
            deref->var = entry->data;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
            if (load->intrinsic != nir_intrinsic_load_param)
               break;

            unsigned param_idx = nir_intrinsic_param_idx(load);
            assert(param_idx < impl->function->num_params);
            nir_def_replace(&load->def, params[param_idx]);
            break;
         }

         case nir_instr_type_jump:
            /* Returns have to be lowered for this to work */
            assert(nir_instr_as_jump(instr)->type != nir_jump_return);
            break;

         default:
            break;
         }
      }
   }

   bool nest_if = function_ends_in_jump(copy);

   /* Pluck the body out of the function and place it here */
   nir_cf_list body;
   nir_cf_list_extract(&body, &copy->body);

   if (nest_if) {
      /* The body ends in a jump, so there is no valid cursor position after
       * it. Wrap the body in a trivial if so the cursor can be placed after
       * the if instead.
       */
      nir_if *cf = nir_push_if(b, nir_imm_true(b));
      nir_cf_reinsert(&body, nir_after_cf_list(&cf->then_list));
      nir_pop_if(b, cf);
   } else {
      /* Insert a nop at the cursor so we can keep track of where things are
       * as we add/remove stuff from the CFG.
       */
      nir_intrinsic_instr *nop = nir_nop(b);
      nir_cf_reinsert(&body, nir_before_instr(&nop->instr));
      b->cursor = nir_instr_remove(&nop->instr);
   }
}

static bool inline_function_impl(nir_function_impl *impl, struct set *inlined);

static bool
inline_functions_pass(nir_builder *b,
                      nir_instr *instr,
                      void *cb_data)
{
   struct set *inlined = cb_data;
   if (instr->type != nir_instr_type_call)
      return false;

   nir_call_instr *call = nir_instr_as_call(instr);
   if (!call->callee->impl)
      return false;

   assert(!call->indirect_callee.ssa);

   if (b->shader->options->driver_functions &&
       b->shader->info.stage == MESA_SHADER_KERNEL) {
      bool last_instr = (instr == nir_block_last_instr(instr->block));
      if (!nir_function_can_inline(call->callee) && !last_instr) {
         return false;
      }
   }

   /* Make sure that the function we're calling is already inlined */
   inline_function_impl(call->callee->impl, inlined);

   b->cursor = nir_instr_remove(&call->instr);

   /* Rewrite all of the uses of the callee's parameters to use the call
    * instruction's sources. In order to ensure that the "load" happens
    * here and not later (for register sources), we make sure to convert it
    * to an SSA value first.
    */
   const unsigned num_params = call->num_params;
   NIR_VLA(nir_def *, params, num_params);
   for (unsigned i = 0; i < num_params; i++) {
      params[i] = call->params[i].ssa;
   }

   nir_inline_function_impl(b, call->callee->impl, params, NULL);
   return true;
}

static bool
inline_function_impl(nir_function_impl *impl, struct set *inlined)
{
   if (_mesa_set_search(inlined, impl))
      return false; /* Already inlined */

   bool progress;
   progress = nir_function_instructions_pass(impl, inline_functions_pass,
                                             nir_metadata_none, inlined);
   if (progress) {
      /* Indices are completely messed up now */
      nir_index_ssa_defs(impl);
   }

   _mesa_set_add(inlined, impl);

   return progress;
}

/** A pass to inline all functions in a shader into their callers
 *
 * For most use-cases, function inlining is a multi-step process. The general
 * pattern employed by SPIR-V consumers and others is as follows:
 *
 * 1. nir_lower_variable_initializers(shader, nir_var_function_temp)
 *
 *    This is needed because local variables from the callee are simply added
 *    to the locals list for the caller and the information about where the
 *    constant initializer logically happens is lost. If the callee is
 *    called in a loop, this can cause the variable to go from being
 *    initialized once per loop iteration to being initialized once at the
 *    top of the caller and values to persist from one invocation of the
 *    callee to the next. The simple solution to this problem is to get rid
 *    of constant initializers before function inlining.
 *
 * 2. nir_lower_returns(shader)
 *
 *    nir_inline_functions assumes that all functions end "naturally" by
 *    execution reaching the end of the function without any return
 *    instructions causing instant jumps to the end. Thanks to NIR being
 *    structured, we can't represent arbitrary jumps to various points in the
 *    program which is what an early return in the callee would have to turn
 *    into when we inline it into the caller. Instead, we require returns to
 *    be lowered which lets us just copy+paste the callee directly into the
 *    caller.
 *
 * 3. nir_inline_functions(shader)
 *
 *    This does the actual function inlining and the resulting shader will
 *    contain no call instructions.
 *
 * 4. nir_opt_deref(shader)
 *
 *    Most functions contain pointer parameters where the result of a deref
 *    instruction is passed in as a parameter, loaded via a load_param
 *    intrinsic, and then turned back into a deref via a cast. Function
 *    inlining will get rid of the load_param but we are still left with a
 *    cast. Running nir_opt_deref gets rid of the intermediate cast and
 *    results in a whole deref chain again. This is currently required by a
 *    number of optimizations and lowering passes at least for certain
 *    variable modes.
 *
 * 5. Loop over the functions and delete all but the main entrypoint.
 *
 *    In the Intel Vulkan driver this looks like this:
 *
 *       nir_remove_non_entrypoints(nir);
 *
 *    While nir_inline_functions does get rid of all call instructions, it
 *    doesn't get rid of any functions because it doesn't know what the "root
 *    function" is. Instead, it's up to the individual driver to know how to
 *    decide on a root function and delete the rest. With SPIR-V,
 *    spirv_to_nir returns the root function and so we can just use == whereas
 *    with GL, you may have to look for a function named "main".
 *
 * 6. nir_lower_variable_initializers(shader, ~nir_var_function_temp)
 *
 *    Lowering constant initializers on inputs, outputs, global variables,
 *    etc. requires that we know the main entrypoint so that we know where to
 *    initialize them. Otherwise, we would have to assume that anything
 *    could be a main entrypoint and initialize them at the start of every
 *    function but that would clearly be wrong if any of those functions were
 *    ever called within another function. Simply requiring a
 *    single-entrypoint function shader is the best way to make it
 *    well-defined.
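 *
 * Putting these together, a typical consumer might run the following
 * sequence (a sketch assembled from the steps above; real drivers often
 * interleave additional passes):
 *
 *    nir_lower_variable_initializers(shader, nir_var_function_temp);
 *    nir_lower_returns(shader);
 *    nir_inline_functions(shader);
 *    nir_opt_deref(shader);
 *    nir_remove_non_entrypoints(shader);
 *    nir_lower_variable_initializers(shader, ~nir_var_function_temp);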
 */
bool
nir_inline_functions(nir_shader *shader)
{
   struct set *inlined = _mesa_pointer_set_create(NULL);
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress = inline_function_impl(impl, inlined) || progress;
   }

   _mesa_set_destroy(inlined, NULL);

   return progress;
}

struct lower_link_state {
   /* Maps variables cloned from link_shader to their copies in the shader
    * being linked into.
    */
   struct hash_table *shader_var_remap;
   /* The shader to pull function definitions from. */
   const nir_shader *link_shader;
   /* Number of printfs already in the destination shader; see
    * lower_calls_vars_instr.
    */
   unsigned printf_index_offset;
};

static bool
lower_calls_vars_instr(struct nir_builder *b,
                       nir_instr *instr,
                       void *cb_data)
{
   struct lower_link_state *state = cb_data;

   switch (instr->type) {
   case nir_instr_type_deref: {
      nir_deref_instr *deref = nir_instr_as_deref(instr);
      if (deref->deref_type != nir_deref_type_var)
         return false;
      if (deref->var->data.mode == nir_var_function_temp)
         return false;

      assert(state->shader_var_remap);
      struct hash_entry *entry =
         _mesa_hash_table_search(state->shader_var_remap, deref->var);
      if (entry == NULL) {
         nir_variable *nvar = nir_variable_clone(deref->var, b->shader);
         nir_shader_add_variable(b->shader, nvar);
         entry = _mesa_hash_table_insert(state->shader_var_remap,
                                         deref->var, nvar);
      }
      deref->var = entry->data;
      break;
   }
   case nir_instr_type_call: {
      nir_call_instr *ncall = nir_instr_as_call(instr);
      if (!ncall->callee->name)
         return false;

      nir_function *func = nir_shader_get_function_for_name(b->shader, ncall->callee->name);
      if (func) {
         ncall->callee = func;
         break;
      }

      nir_function *new_func;
      new_func = nir_shader_get_function_for_name(state->link_shader, ncall->callee->name);
      if (new_func)
         ncall->callee = nir_function_clone(b->shader, new_func);
      break;
   }
   case nir_instr_type_intrinsic: {
      /* Offset the format-string index of each printf intrinsic by the
       * number of printfs already present in the shader that functions are
       * being linked into (e.g. with printf_index_offset == 4, a printf
       * that used index 1 in the linked shader now uses index 5).
       */
      if (state->printf_index_offset == 0)
         return false;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (intrin->intrinsic != nir_intrinsic_printf)
         return false;

      b->cursor = nir_before_instr(instr);
      nir_src_rewrite(&intrin->src[0],
                      nir_iadd_imm(b, intrin->src[0].ssa,
                                   state->printf_index_offset));
      break;
   }
   default:
      break;
   }
   return true;
}

static bool
lower_call_function_impl(struct nir_builder *b,
                         nir_function *callee,
                         const nir_function_impl *impl,
                         struct lower_link_state *state)
{
   nir_function_impl *copy = nir_function_impl_clone(b->shader, impl);
   copy->function = callee;
   callee->impl = copy;

   return nir_function_instructions_pass(copy,
                                         lower_calls_vars_instr,
                                         nir_metadata_none,
                                         state);
}

static bool
function_link_pass(struct nir_builder *b,
                   nir_instr *instr,
                   void *cb_data)
{
   struct lower_link_state *state = cb_data;

   if (instr->type != nir_instr_type_call)
      return false;

   nir_call_instr *call = nir_instr_as_call(instr);
   nir_function *func = NULL;

   if (!call->callee->name)
      return false;

   if (call->callee->impl)
      return false;

   func = nir_shader_get_function_for_name(state->link_shader, call->callee->name);
   if (!func || !func->impl) {
      return false;
   }
   return lower_call_function_impl(b, call->callee,
                                   func->impl,
                                   state);
}

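/* Example usage (a sketch; assumes a hypothetical library shader `lib` that
 * provides definitions for functions `shader` merely declares):
 *
 *    nir_link_shader_functions(shader, lib);
 *    nir_inline_functions(shader);
 */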
bool
nir_link_shader_functions(nir_shader *shader,
                          const nir_shader *link_shader)
{
   void *ra_ctx = ralloc_context(NULL);
   struct hash_table *copy_vars = _mesa_pointer_hash_table_create(ra_ctx);
   bool progress = false, overall_progress = false;

   struct lower_link_state state = {
      .shader_var_remap = copy_vars,
      .link_shader = link_shader,
      .printf_index_offset = shader->printf_info_count,
   };
   /* Iterate until a fixed point: newly cloned functions may themselves
    * contain calls that still need to be resolved against link_shader.
    */
   do {
      progress = false;
      nir_foreach_function_impl(impl, shader) {
         bool this_progress = nir_function_instructions_pass(impl,
                                                             function_link_pass,
                                                             nir_metadata_none,
                                                             &state);
         if (this_progress)
            nir_index_ssa_defs(impl);
         progress |= this_progress;
      }
      overall_progress |= progress;
   } while (progress);

   if (overall_progress && link_shader->printf_info_count > 0) {
      shader->printf_info = reralloc(shader, shader->printf_info,
                                     u_printf_info,
                                     shader->printf_info_count +
                                     link_shader->printf_info_count);

      for (unsigned i = 0; i < link_shader->printf_info_count; i++) {
         const u_printf_info *src_info = &link_shader->printf_info[i];
         u_printf_info *dst_info = &shader->printf_info[shader->printf_info_count++];

         dst_info->num_args = src_info->num_args;
         dst_info->arg_sizes = ralloc_array(shader, unsigned, dst_info->num_args);
         memcpy(dst_info->arg_sizes, src_info->arg_sizes,
                sizeof(dst_info->arg_sizes[0]) * dst_info->num_args);

         dst_info->string_size = src_info->string_size;
         dst_info->strings = ralloc_memdup(shader, src_info->strings,
                                           dst_info->string_size);
      }
   }

   ralloc_free(ra_ctx);

   return overall_progress;
}

static void
nir_mark_used_functions(struct nir_function *func, struct set *used_funcs);

static bool
mark_used_pass_cb(struct nir_builder *b,
                  nir_instr *instr, void *data)
{
   struct set *used_funcs = data;
   if (instr->type != nir_instr_type_call)
      return false;
   nir_call_instr *call = nir_instr_as_call(instr);

   _mesa_set_add(used_funcs, call->callee);

   nir_mark_used_functions(call->callee, used_funcs);
   return true;
}

static void
nir_mark_used_functions(struct nir_function *func, struct set *used_funcs)
{
   if (func->impl) {
      nir_function_instructions_pass(func->impl,
                                     mark_used_pass_cb,
                                     nir_metadata_none,
                                     used_funcs);
   }
}

void
nir_cleanup_functions(nir_shader *nir)
{
   if (!nir->options->driver_functions) {
      nir_remove_non_entrypoints(nir);
      return;
   }

   struct set *used_funcs = _mesa_set_create(NULL, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func->is_entrypoint) {
         _mesa_set_add(used_funcs, func);
         nir_mark_used_functions(func, used_funcs);
      }
   }
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!_mesa_set_search(used_funcs, func))
         exec_node_remove(&func->node);
   }
   _mesa_set_destroy(used_funcs, NULL);
}