/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir/nir_vla.h"
#include "util/u_debug.h"

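/* Count how many flattened NIR function parameters a GLSL type expands to:
 * one per scalar/vector leaf, recursing through arrays, matrices, and
 * structs.
 */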
static unsigned
glsl_type_count_function_params(const struct glsl_type *type)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      return 1;
   } else if (glsl_type_is_array_or_matrix(type)) {
      return glsl_get_length(type) *
             glsl_type_count_function_params(glsl_get_array_element(type));
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned count = 0;
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         count += glsl_type_count_function_params(elem_type);
      }
      return count;
   }
}

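/* Append one nir_parameter for each scalar/vector leaf of the given GLSL
 * type, advancing *param_idx past the entries written.
 */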
static void
glsl_type_add_to_function_params(const struct glsl_type *type,
                                 nir_function *func,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      func->params[(*param_idx)++] = (nir_parameter) {
         .num_components = glsl_get_vector_elements(type),
         .bit_size = glsl_get_bit_size(type),
      };
   } else if (glsl_type_is_array_or_matrix(type)) {
      unsigned elems = glsl_get_length(type);
      const struct glsl_type *elem_type = glsl_get_array_element(type);
      for (unsigned i = 0; i < elems; i++)
         glsl_type_add_to_function_params(elem_type, func, param_idx);
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         glsl_type_add_to_function_params(elem_type, func, param_idx);
      }
   }
}

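/* Flatten a (possibly aggregate) vtn_ssa_value into consecutive call
 * sources, mirroring the parameter layout built by
 * glsl_type_add_to_function_params().
 */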
static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
                                 struct vtn_ssa_value *value,
                                 nir_call_instr *call,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          call, param_idx);
      }
   }
}

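/* Inverse of the above: fill in a vtn_ssa_value by loading each
 * scalar/vector leaf from the corresponding NIR function parameter.
 */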
static void
vtn_ssa_value_load_function_param(struct vtn_builder *b,
                                  struct vtn_ssa_value *value,
                                  unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      value->def = nir_load_param(&b->nb, (*param_idx)++);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_ssa_value_load_function_param(b, value->elems[i], param_idx);
   }
}

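/* Handle OpFunctionCall: build a nir_call_instr whose first parameter is a
 * pointer to a "return_tmp" local (for non-void callees) followed by the
 * flattened arguments, then load the result back out of the temporary.
 */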
void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader,
                                                vtn_callee->nir_func);

   unsigned param_idx = 0;

   nir_deref_instr *ret_deref = NULL;
   struct vtn_type *ret_type = vtn_callee->type->return_type;
   if (ret_type->base_type != vtn_base_type_void) {
      nir_variable *ret_tmp =
         nir_local_variable_create(b->nb.impl,
                                   glsl_get_bare_type(ret_type->type),
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->def);
   }

   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
      vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, w[4 + i]),
                                       call, &param_idx);
   }
   assert(param_idx == call->num_params);

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (ret_type->base_type == vtn_base_type_void) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0));
   }
}

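/* Decoration callback for OpFunction values.  Only LinkageAttributes is
 * handled; it records the function's linkage type.
 */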
static void
function_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                       const struct vtn_decoration *dec, void *void_func)
{
   struct vtn_function *func = void_func;

   switch (dec->decoration) {
   case SpvDecorationLinkageAttributes: {
      unsigned name_words;
      const char *name =
         vtn_string_literal(b, dec->operands, dec->num_operands, &name_words);
      vtn_fail_if(name_words >= dec->num_operands,
                  "Malformed LinkageAttributes decoration");
      (void)name; /* TODO: What is this? */
      func->linkage = dec->operands[name_words];
      break;
   }

   default:
      break;
   }
}

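/* First pass over a function: creates the vtn_function and vtn_block
 * records, builds the NIR function signature, loads OpFunctionParameter
 * values, and records each block's merge and branch words for the later
 * control-flow passes.
 */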
bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = vtn_zalloc(b, struct vtn_function);

      list_inithead(&b->func->body);
      b->func->linkage = SpvLinkageTypeMax;
      b->func->control = w[3];
      list_inithead(&b->func->constructs);

      UNUSED const struct glsl_type *result_type = vtn_get_type(b, w[1])->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      vtn_foreach_decoration(b, val, function_decoration_cb, b->func);

      b->func->type = vtn_get_type(b, w[4]);
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      unsigned num_params = 0;
      for (unsigned i = 0; i < func_type->length; i++)
         num_params += glsl_type_count_function_params(func_type->params[i]->type);

      /* Add one parameter for the function return value */
      if (func_type->return_type->base_type != vtn_base_type_void)
         num_params++;

      func->should_inline = b->func->control & SpvFunctionControlInlineMask;
      func->dont_inline = b->func->control & SpvFunctionControlDontInlineMask;
      func->is_exported = b->func->linkage == SpvLinkageTypeExport;

      func->num_params = num_params;
      func->params = ralloc_array(b->shader, nir_parameter, num_params);

      unsigned idx = 0;
      if (func_type->return_type->base_type != vtn_base_type_void) {
         nir_address_format addr_format =
            vtn_mode_to_address_format(b, vtn_variable_mode_function);
         /* The return value is a regular pointer */
         func->params[idx++] = (nir_parameter) {
            .num_components = nir_address_format_num_components(addr_format),
            .bit_size = nir_address_format_bit_size(addr_format),
         };
      }

      for (unsigned i = 0; i < func_type->length; i++)
         glsl_type_add_to_function_params(func_type->params[i]->type, func, &idx);
      assert(idx == num_params);

      b->func->nir_func = func;

      /* Set up a nir_function_impl and the builder so we can load arguments
       * directly in our OpFunctionParameter handler.
       */
      nir_function_impl *impl = nir_function_impl_create(func);
      b->nb = nir_builder_at(nir_before_impl(impl));
      b->nb.exact = b->exact;

      b->func_param_idx = 0;

      /* The return value is the first parameter */
      if (func_type->return_type->base_type != vtn_base_type_void)
         b->func_param_idx++;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      if (b->func->start_block == NULL) {
         vtn_fail_if(b->func->linkage != SpvLinkageTypeImport,
                     "A function declaration (an OpFunction with no basic "
                     "blocks), must have a Linkage Attributes Decoration "
                     "with the Import Linkage Type.");

         /* In this case, the function didn't have any actual blocks. It's
          * just a prototype so delete the function_impl.
          */
         b->func->nir_func->impl = NULL;
      } else {
         vtn_fail_if(b->func->linkage == SpvLinkageTypeImport,
                     "A function definition (an OpFunction with basic blocks) "
                     "cannot be decorated with the Import Linkage Type.");
      }
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      vtn_assert(b->func_param_idx < b->func->nir_func->num_params);
      struct vtn_type *type = vtn_get_type(b, w[1]);
      struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
      vtn_ssa_value_load_function_param(b, value, &b->func_param_idx);
      vtn_push_ssa_value(b, w[2], value);
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = vtn_zalloc(b, struct vtn_block);
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      b->func->block_count++;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function. In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         list_addtail(&b->func->link, &b->functions);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpTerminateInvocation:
   case SpvOpIgnoreIntersectionKHR:
   case SpvOpTerminateRayKHR:
   case SpvOpEmitMeshTasksEXT:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      if (b->wa_ignore_return_after_emit_mesh_tasks &&
          opcode == SpvOpReturn && !b->block) {
         /* At this point block was already reset by
          * SpvOpEmitMeshTasksEXT. */
         break;
      }
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}

/* Parse the (literal, label) operand pairs of an OpSwitch into a list of
 * vtn_case entries on case_list; the first target parsed becomes the
 * default case.
 */
void
vtn_parse_switch(struct vtn_builder *b,
                 const uint32_t *branch,
                 struct list_head *case_list)
{
   const uint32_t *branch_end = branch + (branch[0] >> SpvWordCountShift);

   struct vtn_value *sel_val = vtn_untyped_value(b, branch[1]);
   vtn_fail_if(!sel_val->type ||
               sel_val->type->base_type != vtn_base_type_scalar,
               "Selector of OpSwitch must have a type of OpTypeInt");

   nir_alu_type sel_type =
      nir_get_nir_type_for_glsl_type(sel_val->type->type);
   vtn_fail_if(nir_alu_type_get_base_type(sel_type) != nir_type_int &&
               nir_alu_type_get_base_type(sel_type) != nir_type_uint,
               "Selector of OpSwitch must have a type of OpTypeInt");

   struct hash_table *block_to_case = _mesa_pointer_hash_table_create(b);

   bool is_default = true;
   const unsigned bitsize = nir_alu_type_get_type_size(sel_type);
   for (const uint32_t *w = branch + 2; w < branch_end;) {
      uint64_t literal = 0;
      if (!is_default) {
         if (bitsize <= 32) {
            literal = *(w++);
         } else {
            assert(bitsize == 64);
            literal = vtn_u64_literal(w);
            w += 2;
         }
      }
      struct vtn_block *case_block = vtn_block(b, *(w++));

      struct hash_entry *case_entry =
         _mesa_hash_table_search(block_to_case, case_block);

      struct vtn_case *cse;
      if (case_entry) {
         cse = case_entry->data;
      } else {
         cse = vtn_zalloc(b, struct vtn_case);
         cse->block = case_block;
         cse->block->switch_case = cse;
         util_dynarray_init(&cse->values, b);

         list_addtail(&cse->link, case_list);
         _mesa_hash_table_insert(block_to_case, case_block, cse);
      }

      if (is_default) {
         cse->is_default = true;
      } else {
         util_dynarray_append(&cse->values, uint64_t, literal);
      }

      is_default = false;
   }

   _mesa_hash_table_destroy(block_to_case, NULL);
}

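/* Walk the SPIR-V function section once to build the CFG skeleton.  Kernels
 * keep unstructured control flow; all other stages additionally run the
 * structured-CFG analysis.
 */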
void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   if (b->shader->info.stage == MESA_SHADER_KERNEL)
      return;

   vtn_build_structured_cfg(b, words, end);
}

bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-SSA on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable. Then, in a second pass, we add
    * stores to that variable in each of the predecessor blocks.
    *
    * We could do something more intelligent here. However, in order to
    * handle loops and things properly, we really need dominance
    * information. It would end up basically being the into-SSA
    * algorithm all over again. It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_get_type(b, w[1]);
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");

   struct vtn_value *phi_val = vtn_untyped_value(b, w[2]);
   if (vtn_value_is_relaxed_precision(b, phi_val))
      phi_var->data.precision = GLSL_PRECISION_MEDIUM;

   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa_value(b, w[2],
      vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));

   return true;
}

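/* Second pass for phis: at the end of each predecessor block, store the
 * corresponding source into the phi's temporary variable.
 */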
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);

   /* It's possible that this phi is in an unreachable block in which case it
    * may never have been emitted and therefore may not be in the hash table.
    * In this case, there's no var for it and it's safe to just bail.
    */
   if (phi_entry == NULL)
      return true;

   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred = vtn_block(b, w[i + 1]);

      /* If the block has no end_nop, it is unreachable and therefore not
       * worth handling.
       */
      if (!pred->end_nop)
         continue;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
   }

   return true;
}

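/* For a block ending in OpReturnValue, store the returned value through the
 * return pointer passed as the function's first parameter.
 */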
void
vtn_emit_ret_store(struct vtn_builder *b, const struct vtn_block *block)
{
   if ((*block->branch & SpvOpCodeMask) != SpvOpReturnValue)
      return;

   vtn_fail_if(b->func->type->return_type->base_type == vtn_base_type_void,
               "Return with a value from a function returning void");
   struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
   const struct glsl_type *ret_type =
      glsl_get_bare_type(b->func->type->return_type->type);
   nir_deref_instr *ret_deref =
      nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                           nir_var_function_temp, ret_type, 0);
   vtn_local_store(b, src, ret_deref, 0);
}

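/* Create a bare nir_block and append it to the function body for use with
 * unstructured (goto-based) control flow.
 */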
static struct nir_block *
vtn_new_unstructured_block(struct vtn_builder *b, struct vtn_function *func)
{
   struct nir_block *n = nir_block_create(b->shader);
   exec_list_push_tail(&func->nir_func->impl->body, &n->cf_node.node);
   n->cf_node.parent = &func->nir_func->impl->cf_node;
   return n;
}

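/* If the vtn_block has no NIR block yet, create one and queue the block on
 * the work list so its contents get emitted.
 */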
static void
vtn_add_unstructured_block(struct vtn_builder *b,
                           struct vtn_function *func,
                           struct list_head *work_list,
                           struct vtn_block *block)
{
   if (!block->block) {
      block->block = vtn_new_unstructured_block(b, func);
      list_addtail(&block->link, work_list);
   }
}

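/* Emit a function as unstructured control flow: walk the blocks reachable
 * from the start block via a work list, emit each block's instructions, and
 * wire blocks together with nir_goto/nir_goto_if according to the SPIR-V
 * block terminator.
 */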
static void
vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
                              vtn_instruction_handler handler)
{
   struct list_head work_list;
   list_inithead(&work_list);

   func->start_block->block = nir_start_block(func->nir_func->impl);
   list_addtail(&func->start_block->link, &work_list);
   while (!list_is_empty(&work_list)) {
      struct vtn_block *block =
         list_first_entry(&work_list, struct vtn_block, link);
      list_del(&block->link);

      vtn_assert(block->block);

      const uint32_t *block_start = block->label;
      const uint32_t *block_end = block->branch;

      b->nb.cursor = nir_after_block(block->block);
      block_start = vtn_foreach_instruction(b, block_start, block_end,
                                            vtn_handle_phis_first_pass);
      vtn_foreach_instruction(b, block_start, block_end, handler);
      block->end_nop = nir_nop(&b->nb);

      SpvOp op = *block_end & SpvOpCodeMask;
      switch (op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block = vtn_block(b, block->branch[1]);
         vtn_add_unstructured_block(b, func, &work_list, branch_block);
         nir_goto(&b->nb, branch_block->block);
         break;
      }

      case SpvOpBranchConditional: {
         nir_def *cond = vtn_ssa_value(b, block->branch[1])->def;
         struct vtn_block *then_block = vtn_block(b, block->branch[2]);
         struct vtn_block *else_block = vtn_block(b, block->branch[3]);

         vtn_add_unstructured_block(b, func, &work_list, then_block);
         if (then_block == else_block) {
            nir_goto(&b->nb, then_block->block);
         } else {
            vtn_add_unstructured_block(b, func, &work_list, else_block);
            nir_goto_if(&b->nb, then_block->block, nir_src_for_ssa(cond),
                        else_block->block);
         }

         break;
      }

      case SpvOpSwitch: {
         struct list_head cases;
         list_inithead(&cases);
         vtn_parse_switch(b, block->branch, &cases);

         nir_def *sel = vtn_get_nir_ssa(b, block->branch[1]);

         struct vtn_case *def = NULL;
         vtn_foreach_case(cse, &cases) {
            if (cse->is_default) {
               assert(def == NULL);
               def = cse;
               continue;
            }

            nir_def *cond = nir_imm_false(&b->nb);
            util_dynarray_foreach(&cse->values, uint64_t, val)
               cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));

            /* block for the next check */
            nir_block *e = vtn_new_unstructured_block(b, func);
            vtn_add_unstructured_block(b, func, &work_list, cse->block);

            /* add branching */
            nir_goto_if(&b->nb, cse->block->block, nir_src_for_ssa(cond), e);
            b->nb.cursor = nir_after_block(e);
         }

         vtn_assert(def != NULL);
         vtn_add_unstructured_block(b, func, &work_list, def->block);

         /* now that all cases are handled, branch into the default block */
         nir_goto(&b->nb, def->block->block);
         break;
      }

      case SpvOpKill: {
         nir_discard(&b->nb);
         nir_goto(&b->nb, b->func->nir_func->impl->end_block);
         break;
      }

      case SpvOpUnreachable:
      case SpvOpReturn:
      case SpvOpReturnValue: {
         vtn_emit_ret_store(b, block);
         nir_goto(&b->nb, b->func->nir_func->impl->end_block);
         break;
      }

      default:
         vtn_fail("Unhandled opcode %s", spirv_op_to_string(op));
      }
   }
}

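/* Entry point for emitting a function body into NIR.  Picks structured or
 * unstructured emission, resolves phis with the two-pass variable scheme,
 * and runs the cleanup and SSA-repair passes described below.
 */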
void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   static int force_unstructured = -1;
   if (force_unstructured < 0) {
      force_unstructured =
         debug_get_bool_option("MESA_SPIRV_FORCE_UNSTRUCTURED", false);
   }

   nir_function_impl *impl = func->nir_func->impl;
   b->nb = nir_builder_at(nir_after_impl(impl));
   b->func = func;
   b->nb.exact = b->exact;
   b->phi_table = _mesa_pointer_hash_table_create(b);

   if (b->shader->info.stage == MESA_SHADER_KERNEL || force_unstructured) {
      impl->structured = false;
      vtn_emit_cf_func_unstructured(b, func, instruction_handler);
   } else {
      vtn_emit_cf_func_structured(b, func, instruction_handler);
   }

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   if (func->nir_func->impl->structured)
      nir_copy_prop_impl(impl);
   nir_rematerialize_derefs_in_use_blocks_impl(impl);

   /*
    * There are some cases where we need to repair SSA to insert
    * the needed phi nodes:
    *
    * - Early termination instructions such as OpKill and
    *   OpTerminateInvocation are represented in NIR by regular intrinsics
    *   with no control-flow semantics. This means that the SSA form from
    *   the SPIR-V may not 100% match NIR.
    *
    * - Switches with only a default case may also define SSA values that
    *   are subsequently used outside of the switch.
    */
   if (func->nir_func->impl->structured)
      nir_repair_ssa_impl(impl);

   func->emitted = true;
}
669