/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir/nir_vla.h"
#include "util/debug.h"

static struct vtn_block *
vtn_block(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_block)->block;
}

static unsigned
glsl_type_count_function_params(const struct glsl_type *type)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      return 1;
   } else if (glsl_type_is_array_or_matrix(type)) {
      return glsl_get_length(type) *
             glsl_type_count_function_params(glsl_get_array_element(type));
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned count = 0;
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         count += glsl_type_count_function_params(elem_type);
      }
      return count;
   }
}
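
/* As a quick illustration of the counting above (hypothetical types, not
 * from any particular shader): a struct { vec4 a; float b[2]; mat2 c; }
 * counts as 1 (the vec4) + 2 (two array elements) + 2 (two vec2 matrix
 * columns) = 5 parameters, since every vector or scalar leaf becomes
 * exactly one nir_parameter.
 */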

static void
glsl_type_add_to_function_params(const struct glsl_type *type,
                                 nir_function *func,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      func->params[(*param_idx)++] = (nir_parameter) {
         .num_components = glsl_get_vector_elements(type),
         .bit_size = glsl_get_bit_size(type),
      };
   } else if (glsl_type_is_array_or_matrix(type)) {
      unsigned elems = glsl_get_length(type);
      const struct glsl_type *elem_type = glsl_get_array_element(type);
      for (unsigned i = 0; i < elems; i++)
         glsl_type_add_to_function_params(elem_type, func, param_idx);
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         glsl_type_add_to_function_params(elem_type, func, param_idx);
      }
   }
}

static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
                                 struct vtn_ssa_value *value,
                                 nir_call_instr *call,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          call, param_idx);
      }
   }
}

static void
vtn_ssa_value_load_function_param(struct vtn_builder *b,
                                  struct vtn_ssa_value *value,
                                  unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      value->def = nir_load_param(&b->nb, (*param_idx)++);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_ssa_value_load_function_param(b, value->elems[i], param_idx);
   }
}

void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader,
                                                vtn_callee->nir_func);

   unsigned param_idx = 0;

   nir_deref_instr *ret_deref = NULL;
   struct vtn_type *ret_type = vtn_callee->type->return_type;
   if (ret_type->base_type != vtn_base_type_void) {
      nir_variable *ret_tmp =
         nir_local_variable_create(b->nb.impl,
                                   glsl_get_bare_type(ret_type->type),
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
      vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, w[4 + i]),
                                       call, &param_idx);
   }
   assert(param_idx == call->num_params);

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (ret_type->base_type == vtn_base_type_void) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0));
   }
}
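
/* A descriptive note on the call convention used above: when the callee
 * returns a value, params[0] is a pointer to the "return_tmp" temporary
 * that the callee stores its result through, and the flattened SPIR-V
 * arguments follow it; for a void callee, the flattened arguments start
 * at params[0].
 */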

static void
function_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                       const struct vtn_decoration *dec, void *void_func)
{
   struct vtn_function *func = void_func;

   switch (dec->decoration) {
   case SpvDecorationLinkageAttributes: {
      unsigned name_words;
      const char *name =
         vtn_string_literal(b, dec->operands, dec->num_operands, &name_words);
      vtn_fail_if(name_words >= dec->num_operands,
                  "Malformed LinkageAttributes decoration");
      (void)name; /* TODO: What is this? */
      func->linkage = dec->operands[name_words];
      break;
   }

   default:
      break;
   }
}

static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      b->func->node.type = vtn_cf_node_type_function;
      b->func->node.parent = NULL;
      list_inithead(&b->func->body);
      b->func->linkage = SpvLinkageTypeMax;
      b->func->control = w[3];

      UNUSED const struct glsl_type *result_type = vtn_get_type(b, w[1])->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      vtn_foreach_decoration(b, val, function_decoration_cb, b->func);

      b->func->type = vtn_get_type(b, w[4]);
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      unsigned num_params = 0;
      for (unsigned i = 0; i < func_type->length; i++)
         num_params += glsl_type_count_function_params(func_type->params[i]->type);

      /* Add one parameter for the function return value */
      if (func_type->return_type->base_type != vtn_base_type_void)
         num_params++;

      func->num_params = num_params;
      func->params = ralloc_array(b->shader, nir_parameter, num_params);

      unsigned idx = 0;
      if (func_type->return_type->base_type != vtn_base_type_void) {
         nir_address_format addr_format =
            vtn_mode_to_address_format(b, vtn_variable_mode_function);
         /* The return value is a regular pointer */
         func->params[idx++] = (nir_parameter) {
            .num_components = nir_address_format_num_components(addr_format),
            .bit_size = nir_address_format_bit_size(addr_format),
         };
      }

      for (unsigned i = 0; i < func_type->length; i++)
         glsl_type_add_to_function_params(func_type->params[i]->type, func, &idx);
      assert(idx == num_params);

      b->func->nir_func = func;

      /* Set up a nir_function_impl and the builder so we can load arguments
       * directly in our OpFunctionParameter handler.
       */
      nir_function_impl *impl = nir_function_impl_create(func);
      nir_builder_init(&b->nb, impl);
      b->nb.cursor = nir_before_cf_list(&impl->body);
      b->nb.exact = b->exact;

      b->func_param_idx = 0;

      /* The return value is the first parameter */
      if (func_type->return_type->base_type != vtn_base_type_void)
         b->func_param_idx++;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      if (b->func->start_block == NULL) {
         vtn_fail_if(b->func->linkage != SpvLinkageTypeImport,
                     "A function declaration (an OpFunction with no basic "
                     "blocks) must have a Linkage Attributes Decoration "
                     "with the Import Linkage Type.");

         /* In this case, the function didn't have any actual blocks. It's
          * just a prototype so delete the function_impl.
          */
         b->func->nir_func->impl = NULL;
      } else {
         vtn_fail_if(b->func->linkage == SpvLinkageTypeImport,
                     "A function definition (an OpFunction with basic blocks) "
                     "cannot be decorated with the Import Linkage Type.");
      }
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      vtn_assert(b->func_param_idx < b->func->nir_func->num_params);
      struct vtn_type *type = vtn_get_type(b, w[1]);
      struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
      vtn_ssa_value_load_function_param(b, value, &b->func_param_idx);
      vtn_push_ssa_value(b, w[2], value);
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function. In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         list_addtail(&b->func->node.link, &b->functions);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpTerminateInvocation:
   case SpvOpIgnoreIntersectionKHR:
   case SpvOpTerminateRayKHR:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}
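
/* For reference, the prepass above only records the skeleton of each
 * function; a typical SPIR-V function has roughly this shape (illustrative
 * disassembly, not from any particular module):
 *
 *    OpFunction %ret_type None %fn_type
 *    OpFunctionParameter ...            ; one per parameter
 *    OpLabel                            ; starts each block
 *    OpLoopMerge / OpSelectionMerge     ; optional, saved in block->merge
 *    OpBranch / OpReturn / ...          ; terminator, saved in block->branch
 *    OpFunctionEnd
 *
 * Everything between the label and the merge/terminator is left for the
 * later instruction handlers.
 */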

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->node.link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to. This ensures that fallthroughs come one after
       * the other. These two can never get separated because that would
       * imply something else falling through to the same case. Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->node.link, &cse->fallthrough->node.link);
   } else {
      list_add(&cse->node.link, &swtch->cases);
   }
}

static void
vtn_switch_order_cases(struct vtn_switch *swtch)
{
   struct list_head cases;
   list_replace(&swtch->cases, &cases);
   list_inithead(&swtch->cases);
   while (!list_is_empty(&cases)) {
      struct vtn_case *cse =
         list_first_entry(&cases, struct vtn_case, node.link);
      vtn_order_case(swtch, cse);
   }
}
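
/* A small worked example (hypothetical cases): given cases in source order
 * A, B, C where A falls through to C, ordering A first pulls in C (DFS) and
 * places A immediately in front of it; B is then prepended, giving B, A, C.
 * The only invariant that matters is that A ends up directly before C.
 */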

static void
vtn_block_set_merge_cf_node(struct vtn_builder *b, struct vtn_block *block,
                            struct vtn_cf_node *cf_node)
{
   vtn_fail_if(block->merge_cf_node != NULL,
               "The merge block declared by a header block cannot be a "
               "merge block declared by any other header block.");

   block->merge_cf_node = cf_node;
}

#define VTN_DECL_CF_NODE_FIND(_type)                        \
static inline struct vtn_##_type *                          \
vtn_cf_node_find_##_type(struct vtn_cf_node *node)          \
{                                                           \
   while (node && node->type != vtn_cf_node_type_##_type)   \
      node = node->parent;                                  \
   return (struct vtn_##_type *)node;                       \
}

VTN_DECL_CF_NODE_FIND(if)
VTN_DECL_CF_NODE_FIND(loop)
VTN_DECL_CF_NODE_FIND(case)
VTN_DECL_CF_NODE_FIND(switch)
VTN_DECL_CF_NODE_FIND(function)
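
/* As an illustration, VTN_DECL_CF_NODE_FIND(loop) expands to a helper that
 * walks up the parent chain until it finds a loop node (or runs off the
 * root):
 *
 *    static inline struct vtn_loop *
 *    vtn_cf_node_find_loop(struct vtn_cf_node *node)
 *    {
 *       while (node && node->type != vtn_cf_node_type_loop)
 *          node = node->parent;
 *       return (struct vtn_loop *)node;
 *    }
 *
 * so vtn_cf_node_find_loop(cf_parent) below yields the innermost enclosing
 * loop, or NULL when the branch is not inside one.
 */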

static enum vtn_branch_type
vtn_handle_branch(struct vtn_builder *b,
                  struct vtn_cf_node *cf_parent,
                  struct vtn_block *target_block)
{
   struct vtn_loop *loop = vtn_cf_node_find_loop(cf_parent);

   /* Detect a loop back-edge first. That way none of the code below
    * accidentally operates on a loop back-edge.
    */
   if (loop && target_block == loop->header_block)
      return vtn_branch_type_loop_back_edge;

   /* Try to detect fall-through */
   if (target_block->switch_case) {
      /* When it comes to handling switch cases, we can break calls to
       * vtn_handle_branch into two cases: calls from within a case construct
       * and calls for the jump to each case construct. In the second case,
       * cf_parent is the vtn_switch itself and vtn_cf_node_find_case() will
       * return the outer switch case in which this switch is contained. It's
       * fine if the target block is a switch case from an outer switch as
       * long as it is also the switch break for this switch.
       */
      struct vtn_case *switch_case = vtn_cf_node_find_case(cf_parent);

      /* This doesn't get called for the OpSwitch */
      vtn_fail_if(switch_case == NULL,
                  "A switch case can only be entered through an OpSwitch or "
                  "falling through from another switch case.");

      /* Because block->switch_case is only set on the entry block for a given
       * switch case, we only ever get here if we're jumping to the start of a
       * switch case. It's possible, however, that a switch case could jump
       * to itself via a back-edge. That *should* get caught by the loop
       * handling case above but if we have a back edge without a loop merge,
       * we could end up here.
       */
      vtn_fail_if(target_block->switch_case == switch_case,
                  "A switch cannot fall-through to itself. Likely, there is "
                  "a back-edge which is not to a loop header.");

      vtn_fail_if(target_block->switch_case->node.parent !=
                  switch_case->node.parent,
                  "A switch case fall-through must come from the same "
                  "OpSwitch construct");

      vtn_fail_if(switch_case->fallthrough != NULL &&
                  switch_case->fallthrough != target_block->switch_case,
                  "Each case construct can have at most one branch to "
                  "another case construct");

      switch_case->fallthrough = target_block->switch_case;

      /* We don't immediately return vtn_branch_type_switch_fallthrough
       * because it may also be a loop or switch break for an inner loop or
       * switch and that takes precedence.
       */
   }

   if (loop && target_block == loop->cont_block)
      return vtn_branch_type_loop_continue;

   /* We walk blocks as a breadth-first search on the control-flow construct
    * tree where, when we find a construct, we add the vtn_cf_node for that
    * construct and continue iterating at the merge target block (if any).
    * Therefore, we want merges whose parent == cf_parent to be treated as
    * regular branches. We only want to consider merges if they break out
    * of the current CF construct.
    */
   if (target_block->merge_cf_node != NULL &&
       target_block->merge_cf_node->parent != cf_parent) {
      switch (target_block->merge_cf_node->type) {
      case vtn_cf_node_type_if:
         for (struct vtn_cf_node *node = cf_parent;
              node != target_block->merge_cf_node; node = node->parent) {
            vtn_fail_if(node == NULL || node->type != vtn_cf_node_type_if,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of a "
                        "selection construct");

            struct vtn_if *if_stmt = vtn_cf_node_as_if(node);

            /* This should be guaranteed by our iteration */
            assert(if_stmt->merge_block != target_block);

            vtn_fail_if(if_stmt->merge_block != NULL,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of the "
                        "innermost nested selection level");
         }
         return vtn_branch_type_if_merge;

      case vtn_cf_node_type_loop:
         vtn_fail_if(target_block->merge_cf_node != &loop->node,
                     "Loop breaks can only break out of the innermost "
                     "nested loop level");
         return vtn_branch_type_loop_break;

      case vtn_cf_node_type_switch: {
         struct vtn_switch *swtch = vtn_cf_node_find_switch(cf_parent);
         vtn_fail_if(target_block->merge_cf_node != &swtch->node,
                     "Switch breaks can only break out of the innermost "
                     "nested switch level");
         return vtn_branch_type_switch_break;
      }

      default:
         unreachable("Invalid CF node type for a merge");
      }
   }

   if (target_block->switch_case)
      return vtn_branch_type_switch_fallthrough;

   return vtn_branch_type_none;
}

struct vtn_cfg_work_item {
   struct list_head link;

   struct vtn_cf_node *cf_parent;
   struct list_head *cf_list;
   struct vtn_block *start_block;
};

static void
vtn_add_cfg_work_item(struct vtn_builder *b,
                      struct list_head *work_list,
                      struct vtn_cf_node *cf_parent,
                      struct list_head *cf_list,
                      struct vtn_block *start_block)
{
   struct vtn_cfg_work_item *work = ralloc(b, struct vtn_cfg_work_item);
   work->cf_parent = cf_parent;
   work->cf_list = cf_list;
   work->start_block = start_block;
   list_addtail(&work->link, work_list);
}

/* Parses an OpSwitch instruction and adds a vtn_case to case_list for the
 * default target and for each distinct case target.
 */
static void
vtn_parse_switch(struct vtn_builder *b,
                 struct vtn_switch *swtch,
                 const uint32_t *branch,
                 struct list_head *case_list)
{
   const uint32_t *branch_end = branch + (branch[0] >> SpvWordCountShift);

   struct vtn_value *sel_val = vtn_untyped_value(b, branch[1]);
   vtn_fail_if(!sel_val->type ||
               sel_val->type->base_type != vtn_base_type_scalar,
               "Selector of OpSwitch must have a type of OpTypeInt");

   nir_alu_type sel_type =
      nir_get_nir_type_for_glsl_type(sel_val->type->type);
   vtn_fail_if(nir_alu_type_get_base_type(sel_type) != nir_type_int &&
               nir_alu_type_get_base_type(sel_type) != nir_type_uint,
               "Selector of OpSwitch must have a type of OpTypeInt");

   struct hash_table *block_to_case = _mesa_pointer_hash_table_create(b);

   bool is_default = true;
   const unsigned bitsize = nir_alu_type_get_type_size(sel_type);
   for (const uint32_t *w = branch + 2; w < branch_end;) {
      uint64_t literal = 0;
      if (!is_default) {
         if (bitsize <= 32) {
            literal = *(w++);
         } else {
            assert(bitsize == 64);
            literal = vtn_u64_literal(w);
            w += 2;
         }
      }
      struct vtn_block *case_block = vtn_block(b, *(w++));

      struct hash_entry *case_entry =
         _mesa_hash_table_search(block_to_case, case_block);

      struct vtn_case *cse;
      if (case_entry) {
         cse = case_entry->data;
      } else {
         cse = rzalloc(b, struct vtn_case);

         cse->node.type = vtn_cf_node_type_case;
         cse->node.parent = swtch ? &swtch->node : NULL;
         cse->block = case_block;
         list_inithead(&cse->body);
         util_dynarray_init(&cse->values, b);

         list_addtail(&cse->node.link, case_list);
         _mesa_hash_table_insert(block_to_case, case_block, cse);
      }

      if (is_default) {
         cse->is_default = true;
      } else {
         util_dynarray_append(&cse->values, uint64_t, literal);
      }

      is_default = false;
   }

   _mesa_hash_table_destroy(block_to_case, NULL);
}
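
/* For reference, the word layout parsed above is, per the SPIR-V spec:
 *
 *    OpSwitch <selector> <default-label> [<literal> <label>]...
 *
 * where each <literal> is one word for selectors of 32 bits or fewer and
 * two words for 64-bit selectors. This is why the loop consumes the default
 * label first (is_default) and literal/label pairs afterwards.
 */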

/* Processes a block and returns the next block to process or NULL if we've
 * reached the end of the construct.
 */
static struct vtn_block *
vtn_process_block(struct vtn_builder *b,
                  struct list_head *work_list,
                  struct vtn_cf_node *cf_parent,
                  struct list_head *cf_list,
                  struct vtn_block *block)
{
   if (!list_is_empty(cf_list)) {
      /* vtn_process_block() acts like an iterator: it processes the given
       * block and then returns the next block to process. For a given
       * control-flow construct, vtn_build_cfg() calls vtn_process_block()
       * repeatedly until it finally returns NULL. Therefore, we know that
       * the only blocks on which vtn_process_block() can be called are either
       * the first block in a construct or a block that vtn_process_block()
       * returned for the current construct. If cf_list is empty then we know
       * that we're processing the first block in the construct and we have to
       * add it to the list.
       *
       * If cf_list is not empty, then it must be the block returned by the
       * previous call to vtn_process_block(). We know a priori that
       * vtn_process_block only returns either normal branches
       * (vtn_branch_type_none) or merge target blocks.
       */
      switch (vtn_handle_branch(b, cf_parent, block)) {
      case vtn_branch_type_none:
         /* For normal branches, we want to process them and add them to the
          * current construct. Merge target blocks also look like normal
          * branches from the perspective of this construct. See also
          * vtn_handle_branch().
          */
         break;

      case vtn_branch_type_loop_continue:
      case vtn_branch_type_switch_fallthrough:
         /* The two cases where we can get early exits from a construct that
          * are not to that construct's merge target are loop continues and
          * switch fall-throughs. In these cases, we need to break out of the
          * current construct by returning NULL.
          */
         return NULL;

      default:
         /* The only way we can get here is if something was used as two kinds
          * of merges at the same time and that's illegal.
          */
         vtn_fail("A block was used as a merge target from two or more "
                  "structured control-flow constructs");
      }
   }

   /* Once a block has been processed, it is placed into a CF list and its
    * node's parent pointer will be non-NULL. If we see a node we've already
    * processed here, it either exists in multiple functions or it's an
    * invalid back-edge.
    */
   if (block->node.parent != NULL) {
      vtn_fail_if(vtn_cf_node_find_function(&block->node) !=
                  vtn_cf_node_find_function(cf_parent),
                  "A block cannot exist in two functions at the "
                  "same time");

      vtn_fail("Invalid back or cross-edge in the CFG");
   }

   if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
       block->loop == NULL) {
      vtn_fail_if((*block->branch & SpvOpCodeMask) != SpvOpBranch &&
                  (*block->branch & SpvOpCodeMask) != SpvOpBranchConditional,
                  "An OpLoopMerge instruction must immediately precede "
                  "either an OpBranch or OpBranchConditional instruction.");

      struct vtn_loop *loop = rzalloc(b, struct vtn_loop);

      loop->node.type = vtn_cf_node_type_loop;
      loop->node.parent = cf_parent;
      list_inithead(&loop->body);
      list_inithead(&loop->cont_body);
      loop->header_block = block;
      loop->break_block = vtn_block(b, block->merge[1]);
      loop->cont_block = vtn_block(b, block->merge[2]);
      loop->control = block->merge[3];

      list_addtail(&loop->node.link, cf_list);
      block->loop = loop;

      /* Note: The work item for the main loop body will start with the
       * current block as its start block. If we weren't careful, we would
       * get here again and end up in an infinite loop. This is why we set
       * block->loop above and check for it before creating one. This way,
       * we only create the loop once and the second iteration that tries to
       * handle this loop goes to the cases below and gets handled as a
       * regular block.
       */
      vtn_add_cfg_work_item(b, work_list, &loop->node,
                            &loop->body, loop->header_block);

      /* For continue targets, SPIR-V guarantees the following:
       *
       *  - the Continue Target must dominate the back-edge block
       *  - the back-edge block must post-dominate the Continue Target
       *
       * If the header block is the same as the continue target, this
       * condition is trivially satisfied and there is no real continue
       * section.
       */
      if (loop->cont_block != loop->header_block) {
         vtn_add_cfg_work_item(b, work_list, &loop->node,
                               &loop->cont_body, loop->cont_block);
      }

      vtn_block_set_merge_cf_node(b, loop->break_block, &loop->node);

      return loop->break_block;
   }

   /* Add the block to the CF list */
   block->node.parent = cf_parent;
   list_addtail(&block->node.link, cf_list);

   switch (*block->branch & SpvOpCodeMask) {
   case SpvOpBranch: {
      struct vtn_block *branch_block = vtn_block(b, block->branch[1]);

      block->branch_type = vtn_handle_branch(b, cf_parent, branch_block);

      if (block->branch_type == vtn_branch_type_none)
         return branch_block;
      else
         return NULL;
   }

   case SpvOpReturn:
   case SpvOpReturnValue:
      block->branch_type = vtn_branch_type_return;
      return NULL;

   case SpvOpKill:
      block->branch_type = vtn_branch_type_discard;
      return NULL;

   case SpvOpTerminateInvocation:
      block->branch_type = vtn_branch_type_terminate_invocation;
      return NULL;

   case SpvOpIgnoreIntersectionKHR:
      block->branch_type = vtn_branch_type_ignore_intersection;
      return NULL;

   case SpvOpTerminateRayKHR:
      block->branch_type = vtn_branch_type_terminate_ray;
      return NULL;

   case SpvOpBranchConditional: {
      struct vtn_value *cond_val = vtn_untyped_value(b, block->branch[1]);
      vtn_fail_if(!cond_val->type ||
                  cond_val->type->base_type != vtn_base_type_scalar ||
                  cond_val->type->type != glsl_bool_type(),
                  "Condition must be a Boolean type scalar");

      struct vtn_if *if_stmt = rzalloc(b, struct vtn_if);

      if_stmt->node.type = vtn_cf_node_type_if;
      if_stmt->node.parent = cf_parent;
      if_stmt->header_block = block;
      list_inithead(&if_stmt->then_body);
      list_inithead(&if_stmt->else_body);

      list_addtail(&if_stmt->node.link, cf_list);

      if (block->merge &&
          (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
         /* We may not always have a merge block and that merge doesn't
          * technically have to be an OpSelectionMerge. We could have a block
          * with an OpLoopMerge which ends in an OpBranchConditional.
          */
         if_stmt->merge_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, if_stmt->merge_block, &if_stmt->node);

         if_stmt->control = block->merge[2];
      }

      struct vtn_block *then_block = vtn_block(b, block->branch[2]);
      if_stmt->then_type = vtn_handle_branch(b, &if_stmt->node, then_block);
      if (if_stmt->then_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->then_body, then_block);
      }

      struct vtn_block *else_block = vtn_block(b, block->branch[3]);
      if (then_block != else_block) {
         if_stmt->else_type = vtn_handle_branch(b, &if_stmt->node, else_block);
         if (if_stmt->else_type == vtn_branch_type_none) {
            vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                                  &if_stmt->else_body, else_block);
         }
      }

      return if_stmt->merge_block;
   }

   case SpvOpSwitch: {
      struct vtn_switch *swtch = rzalloc(b, struct vtn_switch);

      swtch->node.type = vtn_cf_node_type_switch;
      swtch->node.parent = cf_parent;
      swtch->selector = block->branch[1];
      list_inithead(&swtch->cases);

      list_addtail(&swtch->node.link, cf_list);

      /* We may not always have a merge block */
      if (block->merge) {
         vtn_fail_if((*block->merge & SpvOpCodeMask) != SpvOpSelectionMerge,
                     "The merge instruction declared by the header block "
                     "of a switch construct must be an OpSelectionMerge "
                     "instruction.");
         swtch->break_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, swtch->break_block, &swtch->node);
      }

      /* First, we go through and record all of the cases. */
      vtn_parse_switch(b, swtch, block->branch, &swtch->cases);

      /* Gather the branch types for the switch */
      vtn_foreach_cf_node(case_node, &swtch->cases) {
         struct vtn_case *cse = vtn_cf_node_as_case(case_node);

         cse->type = vtn_handle_branch(b, &swtch->node, cse->block);
         switch (cse->type) {
         case vtn_branch_type_none:
            /* This is a "real" case which has stuff in it */
            vtn_fail_if(cse->block->switch_case != NULL,
                        "OpSwitch has a case which is also in another "
                        "OpSwitch construct");
            cse->block->switch_case = cse;
            vtn_add_cfg_work_item(b, work_list, &cse->node,
                                  &cse->body, cse->block);
            break;

         case vtn_branch_type_switch_break:
         case vtn_branch_type_loop_break:
         case vtn_branch_type_loop_continue:
            /* Switch breaks as well as loop breaks and continues can be
             * used to break out of a switch construct or as direct targets
             * of the OpSwitch.
             */
            break;

         default:
            vtn_fail("Target of OpSwitch is not a valid structured exit "
                     "from the switch construct.");
         }
      }

      return swtch->break_block;
   }

   case SpvOpUnreachable:
      return NULL;

   default:
      vtn_fail("Block did not end with a valid branch instruction");
   }
}
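
/* To make the iterator contract above concrete, a simple selection such as
 * (illustrative SPIR-V, labels abbreviated):
 *
 *    %hdr:  OpSelectionMerge %merge None
 *           OpBranchConditional %cond %then %else
 *
 * creates one vtn_if node, queues %then and %else as work items, and
 * returns %merge so the caller continues walking the enclosing construct
 * at the merge point.
 */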

void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   if (b->shader->info.stage == MESA_SHADER_KERNEL)
      return;

   vtn_foreach_cf_node(func_node, &b->functions) {
      struct vtn_function *func = vtn_cf_node_as_function(func_node);

      /* We build the CFG for each function by doing a breadth-first search on
       * the control-flow graph. We keep track of our state using a worklist.
       * Doing a BFS ensures that we visit each structured control-flow
       * construct and its merge node before we visit the stuff inside the
       * construct.
       */
      struct list_head work_list;
      list_inithead(&work_list);
      vtn_add_cfg_work_item(b, &work_list, &func->node, &func->body,
                            func->start_block);

      while (!list_is_empty(&work_list)) {
         struct vtn_cfg_work_item *work =
            list_first_entry(&work_list, struct vtn_cfg_work_item, link);
         list_del(&work->link);

         for (struct vtn_block *block = work->start_block; block;) {
            block = vtn_process_block(b, &work_list, work->cf_parent,
                                      work->cf_list, block);
         }
      }
   }
}

static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable. Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here. However, in order to
    * handle loops and things properly, we really need dominance
    * information. It would end up basically being the into-SSA
    * algorithm all over again. It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_get_type(b, w[1]);
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");

   struct vtn_value *phi_val = vtn_untyped_value(b, w[2]);
   if (vtn_value_is_relaxed_precision(b, phi_val))
      phi_var->data.precision = GLSL_PRECISION_MEDIUM;

   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa_value(b, w[2],
                      vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));

   return true;
}
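
/* A sketch of the two-pass phi lowering (illustrative, names hypothetical):
 * "%x = OpPhi %float %a %blkA %b %blkB" becomes
 *
 *    first pass (above):   %x = load_deref(&phi)
 *    second pass (below):  store_deref(&phi, %a) at the end of blkA
 *                          store_deref(&phi, %b) at the end of blkB
 *
 * and nir_lower_vars_to_ssa later turns the "phi" variable back into real
 * NIR phis using proper dominance information.
 */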

static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);

   /* It's possible that this phi is in an unreachable block in which case it
    * may never have been emitted and therefore may not be in the hash table.
    * In this case, there's no var for it and it's safe to just bail.
    */
   if (phi_entry == NULL)
      return true;

   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred = vtn_block(b, w[i + 1]);

      /* If the predecessor block has no end_nop, it's an unreachable block
       * that was never emitted, so there's nothing to store into.
       */
      if (!pred->end_nop)
         continue;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
   }

   return true;
}

static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_if_merge:
      break; /* Nothing to do */
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_false(&b->nb), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_loop_back_edge:
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard:
      if (b->convert_discard_to_demote)
         nir_demote(&b->nb);
      else
         nir_discard(&b->nb);
      break;
   case vtn_branch_type_terminate_invocation:
      nir_terminate(&b->nb);
      break;
   case vtn_branch_type_ignore_intersection:
      nir_ignore_ray_intersection(&b->nb);
      nir_jump(&b->nb, nir_jump_halt);
      break;
   case vtn_branch_type_terminate_ray:
      nir_terminate_ray(&b->nb);
      nir_jump(&b->nb, nir_jump_halt);
      break;
   default:
      vtn_fail("Invalid branch type");
   }
}

static nir_ssa_def *
vtn_switch_case_condition(struct vtn_builder *b, struct vtn_switch *swtch,
                          nir_ssa_def *sel, struct vtn_case *cse)
{
   if (cse->is_default) {
      nir_ssa_def *any = nir_imm_false(&b->nb);
      vtn_foreach_cf_node(other_node, &swtch->cases) {
         struct vtn_case *other = vtn_cf_node_as_case(other_node);
         if (other->is_default)
            continue;

         any = nir_ior(&b->nb, any,
                       vtn_switch_case_condition(b, swtch, sel, other));
      }
      return nir_inot(&b->nb, any);
   } else {
      nir_ssa_def *cond = nir_imm_false(&b->nb);
      util_dynarray_foreach(&cse->values, uint64_t, val)
         cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));
      return cond;
   }
}
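
/* In other words: a labeled case with values {1, 5} yields the condition
 * (sel == 1) || (sel == 5), and the default case gets the negation of every
 * labeled case's condition, i.e. "none of the labeled cases matched".
 */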

static nir_loop_control
vtn_loop_control(struct vtn_builder *b, struct vtn_loop *vtn_loop)
{
   if (vtn_loop->control == SpvLoopControlMaskNone)
      return nir_loop_control_none;
   else if (vtn_loop->control & SpvLoopControlDontUnrollMask)
      return nir_loop_control_dont_unroll;
   else if (vtn_loop->control & SpvLoopControlUnrollMask)
      return nir_loop_control_unroll;
   else if (vtn_loop->control & SpvLoopControlDependencyInfiniteMask ||
            vtn_loop->control & SpvLoopControlDependencyLengthMask ||
            vtn_loop->control & SpvLoopControlMinIterationsMask ||
            vtn_loop->control & SpvLoopControlMaxIterationsMask ||
            vtn_loop->control & SpvLoopControlIterationMultipleMask ||
            vtn_loop->control & SpvLoopControlPeelCountMask ||
            vtn_loop->control & SpvLoopControlPartialCountMask) {
      /* We do not do anything special with these yet. */
      return nir_loop_control_none;
   } else {
      vtn_fail("Invalid loop control");
   }
}

static nir_selection_control
vtn_selection_control(struct vtn_builder *b, struct vtn_if *vtn_if)
{
   if (vtn_if->control == SpvSelectionControlMaskNone)
      return nir_selection_control_none;
   else if (vtn_if->control & SpvSelectionControlDontFlattenMask)
      return nir_selection_control_dont_flatten;
   else if (vtn_if->control & SpvSelectionControlFlattenMask)
      return nir_selection_control_flatten;
   else
      vtn_fail("Invalid selection control");
}

static void
vtn_emit_ret_store(struct vtn_builder *b, struct vtn_block *block)
{
   if ((*block->branch & SpvOpCodeMask) != SpvOpReturnValue)
      return;

   vtn_fail_if(b->func->type->return_type->base_type == vtn_base_type_void,
               "Return with a value from a function returning void");
   struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
   const struct glsl_type *ret_type =
      glsl_get_bare_type(b->func->type->return_type->type);
   nir_deref_instr *ret_deref =
      nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                           nir_var_function_temp, ret_type, 0);
   vtn_local_store(b, src, ret_deref, 0);
}

static void
vtn_emit_cf_list_structured(struct vtn_builder *b, struct list_head *cf_list,
                            nir_variable *switch_fall_var,
                            bool *has_switch_break,
                            vtn_instruction_handler handler)
{
   vtn_foreach_cf_node(node, cf_list) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = vtn_cf_node_as_block(node);

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_nop(&b->nb);

         vtn_emit_ret_store(b, block);

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
            return;
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = vtn_cf_node_as_if(node);
         const uint32_t *branch = vtn_if->header_block->branch;
         vtn_assert((branch[0] & SpvOpCodeMask) == SpvOpBranchConditional);

         bool sw_break = false;
         /* If both branches are the same, just emit the first block, which is
          * the only one we filled when building the CFG.
          */
         if (branch[2] == branch[3]) {
            if (vtn_if->then_type == vtn_branch_type_none) {
               vtn_emit_cf_list_structured(b, &vtn_if->then_body,
                                           switch_fall_var, &sw_break, handler);
            } else {
               vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
            }
            break;
         }

         nir_if *nif =
            nir_push_if(&b->nb, vtn_get_nir_ssa(b, branch[1]));

         nif->control = vtn_selection_control(b, vtn_if);

         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list_structured(b, &vtn_if->then_body,
                                        switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         nir_push_else(&b->nb, nif);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list_structured(b, &vtn_if->else_body,
                                        switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         nir_pop_if(&b->nb, nif);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior. However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;
            nir_push_if(&b->nb, nir_load_var(&b->nb, switch_fall_var));
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = vtn_cf_node_as_loop(node);

         nir_loop *loop = nir_push_loop(&b->nb);
         loop->control = vtn_loop_control(b, vtn_loop);

         vtn_emit_cf_list_structured(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_is_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_false(&b->nb), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);

            nir_if *cont_if =
               nir_push_if(&b->nb, nir_load_var(&b->nb, do_cont));

            vtn_emit_cf_list_structured(b, &vtn_loop->cont_body, NULL, NULL,
                                        handler);

            nir_pop_if(&b->nb, cont_if);

            nir_store_var(&b->nb, do_cont, nir_imm_true(&b->nb), 1);
         }

         nir_pop_loop(&b->nb, loop);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = vtn_cf_node_as_switch(node);

         /* Before we can emit anything, we need to sort the list of cases in
          * fall-through order.
          */
         vtn_switch_order_cases(vtn_switch);

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point. Any switch breaks
          * will set this variable to false.
          */
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_false(&b->nb), 1);

         nir_ssa_def *sel = vtn_get_nir_ssa(b, vtn_switch->selector);

         /* Now we can walk the list of cases and actually emit code */
         vtn_foreach_cf_node(case_node, &vtn_switch->cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);

            /* If this case jumps directly to the break block, we don't have
             * to handle the case as the body is empty and doesn't fall
             * through.
             */
            if (cse->block == vtn_switch->break_block)
               continue;

            /* Figure out the condition */
            nir_ssa_def *cond =
               vtn_switch_case_condition(b, vtn_switch, sel, cse);
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_push_if(&b->nb, cond);

            bool has_break = false;
            nir_store_var(&b->nb, fall_var, nir_imm_true(&b->nb), 1);
            vtn_emit_cf_list_structured(b, &cse->body, fall_var, &has_break,
                                        handler);
            (void)has_break; /* We don't care */

            nir_pop_if(&b->nb, case_if);
         }

         break;
      }

      default:
         vtn_fail("Invalid CF node type");
      }
   }
}
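
/* The switch lowering above produces NIR of roughly this shape (illustrative
 * pseudo-NIR for two cases already sorted into fall-through order):
 *
 *    fall = false
 *    if ((sel == v0) || fall) { fall = true; ...case 0 body... }
 *    if ((sel == v1) || fall) { fall = true; ...case 1 body... }
 *
 * with a switch break implemented as "fall = false" inside a case body.
 */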

static struct nir_block *
vtn_new_unstructured_block(struct vtn_builder *b, struct vtn_function *func)
{
   struct nir_block *n = nir_block_create(b->shader);
   exec_list_push_tail(&func->nir_func->impl->body, &n->cf_node.node);
   n->cf_node.parent = &func->nir_func->impl->cf_node;
   return n;
}

static void
vtn_add_unstructured_block(struct vtn_builder *b,
                           struct vtn_function *func,
                           struct list_head *work_list,
                           struct vtn_block *block)
{
   if (!block->block) {
      block->block = vtn_new_unstructured_block(b, func);
      list_addtail(&block->node.link, work_list);
   }
}

static void
vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
                              vtn_instruction_handler handler)
{
   struct list_head work_list;
   list_inithead(&work_list);

   func->start_block->block = nir_start_block(func->nir_func->impl);
   list_addtail(&func->start_block->node.link, &work_list);
   while (!list_is_empty(&work_list)) {
      struct vtn_block *block =
         list_first_entry(&work_list, struct vtn_block, node.link);
      list_del(&block->node.link);

      vtn_assert(block->block);

      const uint32_t *block_start = block->label;
      const uint32_t *block_end = block->branch;

      b->nb.cursor = nir_after_block(block->block);
      block_start = vtn_foreach_instruction(b, block_start, block_end,
                                            vtn_handle_phis_first_pass);
      vtn_foreach_instruction(b, block_start, block_end, handler);
      block->end_nop = nir_nop(&b->nb);

      SpvOp op = *block_end & SpvOpCodeMask;
      switch (op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block = vtn_block(b, block->branch[1]);
         vtn_add_unstructured_block(b, func, &work_list, branch_block);
         nir_goto(&b->nb, branch_block->block);
         break;
      }

      case SpvOpBranchConditional: {
         nir_ssa_def *cond = vtn_ssa_value(b, block->branch[1])->def;
         struct vtn_block *then_block = vtn_block(b, block->branch[2]);
         struct vtn_block *else_block = vtn_block(b, block->branch[3]);

         vtn_add_unstructured_block(b, func, &work_list, then_block);
         if (then_block == else_block) {
            nir_goto(&b->nb, then_block->block);
         } else {
            vtn_add_unstructured_block(b, func, &work_list, else_block);
            nir_goto_if(&b->nb, then_block->block, nir_src_for_ssa(cond),
                        else_block->block);
         }

         break;
      }

      case SpvOpSwitch: {
         struct list_head cases;
         list_inithead(&cases);
         vtn_parse_switch(b, NULL, block->branch, &cases);

         nir_ssa_def *sel = vtn_get_nir_ssa(b, block->branch[1]);

         struct vtn_case *def = NULL;
         vtn_foreach_cf_node(case_node, &cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);
            if (cse->is_default) {
               assert(def == NULL);
               def = cse;
               continue;
            }

            nir_ssa_def *cond = nir_imm_false(&b->nb);
            util_dynarray_foreach(&cse->values, uint64_t, val)
               cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));

            /* block for the next check */
            nir_block *e = vtn_new_unstructured_block(b, func);
            vtn_add_unstructured_block(b, func, &work_list, cse->block);

            /* add branching */
            nir_goto_if(&b->nb, cse->block->block, nir_src_for_ssa(cond), e);
            b->nb.cursor = nir_after_block(e);
         }

         vtn_assert(def != NULL);
         vtn_add_unstructured_block(b, func, &work_list, def->block);

         /* now that all cases are handled, branch into the default block */
         nir_goto(&b->nb, def->block->block);
         break;
      }

      case SpvOpKill: {
         nir_discard(&b->nb);
         nir_goto(&b->nb, b->func->nir_func->impl->end_block);
         break;
      }

      case SpvOpUnreachable:
      case SpvOpReturn:
      case SpvOpReturnValue: {
         vtn_emit_ret_store(b, block);
         nir_goto(&b->nb, b->func->nir_func->impl->end_block);
         break;
      }

      default:
         vtn_fail("Unhandled opcode %s", spirv_op_to_string(op));
      }
   }
}

void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   static int force_unstructured = -1;
   if (force_unstructured < 0) {
      force_unstructured =
         env_var_as_boolean("MESA_SPIRV_FORCE_UNSTRUCTURED", false);
   }

   nir_function_impl *impl = func->nir_func->impl;
   nir_builder_init(&b->nb, impl);
   b->func = func;
   b->nb.cursor = nir_after_cf_list(&impl->body);
   b->nb.exact = b->exact;
   b->phi_table = _mesa_pointer_hash_table_create(b);

   if (b->shader->info.stage == MESA_SHADER_KERNEL || force_unstructured) {
      impl->structured = false;
      vtn_emit_cf_func_unstructured(b, func, instruction_handler);
   } else {
      vtn_emit_cf_list_structured(b, &func->body, NULL, NULL,
                                  instruction_handler);
   }

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   if (func->nir_func->impl->structured)
      nir_copy_prop_impl(impl);
   nir_rematerialize_derefs_in_use_blocks_impl(impl);

   /*
    * There are some cases where we need to repair SSA to insert
    * the needed phi nodes:
    *
    * - Continue blocks for loops get inserted before the body of the loop
    *   but instructions in the continue may use SSA defs in the loop body.
    *
    * - Early termination instructions `OpKill` and `OpTerminateInvocation`
    *   are represented in NIR by regular intrinsics with no control-flow
    *   semantics. This means that the SSA form from the SPIR-V may not
    *   100% match NIR.
    *
    * - Switches with only a default case may also define SSA which may
    *   subsequently be used out of the switch.
    */
   if (func->nir_func->impl->structured)
      nir_repair_ssa_impl(impl);

   func->emitted = true;
}