/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir/nir_vla.h"
#include "util/debug.h"

static struct vtn_block *
vtn_block(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_block)->block;
}

static unsigned
glsl_type_count_function_params(const struct glsl_type *type)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      return 1;
   } else if (glsl_type_is_array_or_matrix(type)) {
      return glsl_get_length(type) *
             glsl_type_count_function_params(glsl_get_array_element(type));
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned count = 0;
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         count += glsl_type_count_function_params(elem_type);
      }
      return count;
   }
}
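
/* For example (illustrative): a parameter whose type is
 *
 *    struct { vec4 a; float b[3]; mat2 c; }
 *
 * counts as 1 (the vec4) + 3 (three floats) + 2 (two vec2 matrix columns)
 * = 6 flattened vector/scalar parameters.
 */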

static void
glsl_type_add_to_function_params(const struct glsl_type *type,
                                 nir_function *func,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      func->params[(*param_idx)++] = (nir_parameter) {
         .num_components = glsl_get_vector_elements(type),
         .bit_size = glsl_get_bit_size(type),
      };
   } else if (glsl_type_is_array_or_matrix(type)) {
      unsigned elems = glsl_get_length(type);
      const struct glsl_type *elem_type = glsl_get_array_element(type);
      for (unsigned i = 0; i < elems; i++)
         glsl_type_add_to_function_params(elem_type, func, param_idx);
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         glsl_type_add_to_function_params(elem_type, func, param_idx);
      }
   }
}

static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
                                 struct vtn_ssa_value *value,
                                 nir_call_instr *call,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          call, param_idx);
      }
   }
}

static void
vtn_ssa_value_load_function_param(struct vtn_builder *b,
                                  struct vtn_ssa_value *value,
                                  unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      value->def = nir_load_param(&b->nb, (*param_idx)++);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_ssa_value_load_function_param(b, value->elems[i], param_idx);
   }
}

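/* For example (illustrative): a call to "vec4 f(float a, vec2 b[2])" yields
 * a nir_call_instr with four parameters: the return_tmp deref plus the
 * flattened a, b[0], and b[1].
 */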
void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);

   unsigned param_idx = 0;

   nir_deref_instr *ret_deref = NULL;
   struct vtn_type *ret_type = vtn_callee->type->return_type;
   if (ret_type->base_type != vtn_base_type_void) {
      nir_variable *ret_tmp =
         nir_local_variable_create(b->nb.impl,
                                   glsl_get_bare_type(ret_type->type),
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
      vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, w[4 + i]),
                                       call, &param_idx);
   }
   assert(param_idx == call->num_params);

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (ret_type->base_type == vtn_base_type_void) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0));
   }
}

static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      b->func->node.type = vtn_cf_node_type_function;
      b->func->node.parent = NULL;
      list_inithead(&b->func->body);
      b->func->control = w[3];

      UNUSED const struct glsl_type *result_type = vtn_get_type(b, w[1])->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      b->func->type = vtn_get_type(b, w[4]);
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      unsigned num_params = 0;
      for (unsigned i = 0; i < func_type->length; i++)
         num_params += glsl_type_count_function_params(func_type->params[i]->type);

      /* Add one parameter for the function return value */
      if (func_type->return_type->base_type != vtn_base_type_void)
         num_params++;

      func->num_params = num_params;
      func->params = ralloc_array(b->shader, nir_parameter, num_params);

      unsigned idx = 0;
      if (func_type->return_type->base_type != vtn_base_type_void) {
         nir_address_format addr_format =
            vtn_mode_to_address_format(b, vtn_variable_mode_function);
         /* The return value is a regular pointer */
         func->params[idx++] = (nir_parameter) {
            .num_components = nir_address_format_num_components(addr_format),
            .bit_size = nir_address_format_bit_size(addr_format),
         };
      }

      for (unsigned i = 0; i < func_type->length; i++)
         glsl_type_add_to_function_params(func_type->params[i]->type, func, &idx);
      assert(idx == num_params);

      b->func->impl = nir_function_impl_create(func);
      nir_builder_init(&b->nb, func->impl);
      b->nb.cursor = nir_before_cf_list(&b->func->impl->body);
      b->nb.exact = b->exact;

      b->func_param_idx = 0;

      /* The return value is the first parameter */
      if (func_type->return_type->base_type != vtn_base_type_void)
         b->func_param_idx++;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      vtn_assert(b->func_param_idx < b->func->impl->function->num_params);
      struct vtn_type *type = vtn_get_type(b, w[1]);
      struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
      vtn_ssa_value_load_function_param(b, value, &b->func_param_idx);
      vtn_push_ssa_value(b, w[2], value);
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         list_addtail(&b->func->node.link, &b->functions);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpTerminateInvocation:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
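/* For example (illustrative): if case A falls through to B and B falls
 * through to C, then regardless of the initial list order, each case is
 * re-inserted immediately before its fall-through target, yielding the
 * order A, B, C.
 */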
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->node.link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to.  This ensures that fallthroughs come one after
       * the other.  These two can never get separated because that would
       * imply something else falling through to the same case.  Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->node.link, &cse->fallthrough->node.link);
   } else {
      list_add(&cse->node.link, &swtch->cases);
   }
}

static void
vtn_switch_order_cases(struct vtn_switch *swtch)
{
   struct list_head cases;
   list_replace(&swtch->cases, &cases);
   list_inithead(&swtch->cases);
   while (!list_is_empty(&cases)) {
      struct vtn_case *cse =
         list_first_entry(&cases, struct vtn_case, node.link);
      vtn_order_case(swtch, cse);
   }
}

static void
vtn_block_set_merge_cf_node(struct vtn_builder *b, struct vtn_block *block,
                            struct vtn_cf_node *cf_node)
{
   vtn_fail_if(block->merge_cf_node != NULL,
               "The merge block declared by a header block cannot be a "
               "merge block declared by any other header block.");

   block->merge_cf_node = cf_node;
}

#define VTN_DECL_CF_NODE_FIND(_type)                        \
static inline struct vtn_##_type *                          \
vtn_cf_node_find_##_type(struct vtn_cf_node *node)          \
{                                                           \
   while (node && node->type != vtn_cf_node_type_##_type)   \
      node = node->parent;                                  \
   return (struct vtn_##_type *)node;                       \
}

VTN_DECL_CF_NODE_FIND(if)
VTN_DECL_CF_NODE_FIND(loop)
VTN_DECL_CF_NODE_FIND(case)
VTN_DECL_CF_NODE_FIND(switch)
VTN_DECL_CF_NODE_FIND(function)

static enum vtn_branch_type
vtn_handle_branch(struct vtn_builder *b,
                  struct vtn_cf_node *cf_parent,
                  struct vtn_block *target_block)
{
   struct vtn_loop *loop = vtn_cf_node_find_loop(cf_parent);

   /* Detect a loop back-edge first.  That way none of the code below
    * accidentally operates on a loop back-edge.
    */
   if (loop && target_block == loop->header_block)
      return vtn_branch_type_loop_back_edge;

   /* Try to detect fall-through */
   if (target_block->switch_case) {
      /* When it comes to handling switch cases, we can break calls to
       * vtn_handle_branch into two cases: calls from within a case construct
       * and calls for the jump to each case construct.  In the second case,
       * cf_parent is the vtn_switch itself and vtn_cf_node_find_case() will
       * return the outer switch case in which this switch is contained.  It's
       * fine if the target block is a switch case from an outer switch as
       * long as it is also the switch break for this switch.
       */
      struct vtn_case *switch_case = vtn_cf_node_find_case(cf_parent);

      /* This doesn't get called for the OpSwitch */
      vtn_fail_if(switch_case == NULL,
                  "A switch case can only be entered through an OpSwitch or "
                  "falling through from another switch case.");

      /* Because block->switch_case is only set on the entry block for a given
       * switch case, we only ever get here if we're jumping to the start of a
       * switch case.  It's possible, however, that a switch case could jump
       * to itself via a back-edge.  That *should* get caught by the loop
       * handling case above but if we have a back edge without a loop merge,
       * we could end up here.
       */
      vtn_fail_if(target_block->switch_case == switch_case,
                  "A switch cannot fall-through to itself.  Likely, there is "
                  "a back-edge which is not to a loop header.");

      vtn_fail_if(target_block->switch_case->node.parent !=
                     switch_case->node.parent,
                  "A switch case fall-through must come from the same "
                  "OpSwitch construct");

      vtn_fail_if(switch_case->fallthrough != NULL &&
                  switch_case->fallthrough != target_block->switch_case,
                  "Each case construct can have at most one branch to "
                  "another case construct");

      switch_case->fallthrough = target_block->switch_case;

      /* We don't immediately return vtn_branch_type_switch_fallthrough
       * because it may also be a loop or switch break for an inner loop or
       * switch and that takes precedence.
       */
   }

   if (loop && target_block == loop->cont_block)
      return vtn_branch_type_loop_continue;

   /* We walk blocks as a breadth-first search on the control-flow construct
    * tree where, when we find a construct, we add the vtn_cf_node for that
    * construct and continue iterating at the merge target block (if any).
    * Therefore, we want merges whose parent == cf_parent to be treated as
    * regular branches.  We only want to consider merges if they break out
    * of the current CF construct.
    */
   if (target_block->merge_cf_node != NULL &&
       target_block->merge_cf_node->parent != cf_parent) {
      switch (target_block->merge_cf_node->type) {
      case vtn_cf_node_type_if:
         for (struct vtn_cf_node *node = cf_parent;
              node != target_block->merge_cf_node; node = node->parent) {
            vtn_fail_if(node == NULL || node->type != vtn_cf_node_type_if,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of a "
                        "selection construct");

            struct vtn_if *if_stmt = vtn_cf_node_as_if(node);

            /* This should be guaranteed by our iteration */
            assert(if_stmt->merge_block != target_block);

            vtn_fail_if(if_stmt->merge_block != NULL,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of the "
                        "innermost nested selection level");
         }
         return vtn_branch_type_if_merge;

      case vtn_cf_node_type_loop:
         vtn_fail_if(target_block->merge_cf_node != &loop->node,
                     "Loop breaks can only break out of the innermost "
                     "nested loop level");
         return vtn_branch_type_loop_break;

      case vtn_cf_node_type_switch: {
         struct vtn_switch *swtch = vtn_cf_node_find_switch(cf_parent);
         vtn_fail_if(target_block->merge_cf_node != &swtch->node,
                     "Switch breaks can only break out of the innermost "
                     "nested switch level");
         return vtn_branch_type_switch_break;
      }

      default:
         unreachable("Invalid CF node type for a merge");
      }
   }

   if (target_block->switch_case)
      return vtn_branch_type_switch_fallthrough;

   return vtn_branch_type_none;
}

struct vtn_cfg_work_item {
   struct list_head link;

   struct vtn_cf_node *cf_parent;
   struct list_head *cf_list;
   struct vtn_block *start_block;
};

static void
vtn_add_cfg_work_item(struct vtn_builder *b,
                      struct list_head *work_list,
                      struct vtn_cf_node *cf_parent,
                      struct list_head *cf_list,
                      struct vtn_block *start_block)
{
   struct vtn_cfg_work_item *work = ralloc(b, struct vtn_cfg_work_item);
   work->cf_parent = cf_parent;
   work->cf_list = cf_list;
   work->start_block = start_block;
   list_addtail(&work->link, work_list);
}

/* Parses an OpSwitch instruction and adds a vtn_case for each unique case
 * target to case_list.
 */
static void
vtn_parse_switch(struct vtn_builder *b,
                 struct vtn_switch *swtch,
                 const uint32_t *branch,
                 struct list_head *case_list)
{
   const uint32_t *branch_end = branch + (branch[0] >> SpvWordCountShift);

   struct vtn_value *sel_val = vtn_untyped_value(b, branch[1]);
   vtn_fail_if(!sel_val->type ||
               sel_val->type->base_type != vtn_base_type_scalar,
               "Selector of OpSwitch must have a type of OpTypeInt");

   nir_alu_type sel_type =
      nir_get_nir_type_for_glsl_type(sel_val->type->type);
   vtn_fail_if(nir_alu_type_get_base_type(sel_type) != nir_type_int &&
               nir_alu_type_get_base_type(sel_type) != nir_type_uint,
               "Selector of OpSwitch must have a type of OpTypeInt");

   struct hash_table *block_to_case = _mesa_pointer_hash_table_create(b);

   bool is_default = true;
   const unsigned bitsize = nir_alu_type_get_type_size(sel_type);
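
   /* The remaining words are (literal, label) pairs.  Per the SPIR-V spec,
    * each literal takes one word when the selector is at most 32 bits wide
    * and two words (low-order word first) when it is 64 bits wide.  For
    * example (illustrative):
    *
    *    OpSwitch %sel %default 0 %case_a 5 %case_b
    *
    * encodes two literal/label pairs after the default label.
    */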
   for (const uint32_t *w = branch + 2; w < branch_end;) {
      uint64_t literal = 0;
      if (!is_default) {
         if (bitsize <= 32) {
            literal = *(w++);
         } else {
            assert(bitsize == 64);
            literal = vtn_u64_literal(w);
            w += 2;
         }
      }
      struct vtn_block *case_block = vtn_block(b, *(w++));

      struct hash_entry *case_entry =
         _mesa_hash_table_search(block_to_case, case_block);

      struct vtn_case *cse;
      if (case_entry) {
         cse = case_entry->data;
      } else {
         cse = rzalloc(b, struct vtn_case);

         cse->node.type = vtn_cf_node_type_case;
         cse->node.parent = swtch ? &swtch->node : NULL;
         cse->block = case_block;
         list_inithead(&cse->body);
         util_dynarray_init(&cse->values, b);

         list_addtail(&cse->node.link, case_list);
         _mesa_hash_table_insert(block_to_case, case_block, cse);
      }

      if (is_default) {
         cse->is_default = true;
      } else {
         util_dynarray_append(&cse->values, uint64_t, literal);
      }

      is_default = false;
   }

   _mesa_hash_table_destroy(block_to_case, NULL);
}

/* Processes a block and returns the next block to process or NULL if we've
 * reached the end of the construct.
 */
static struct vtn_block *
vtn_process_block(struct vtn_builder *b,
                  struct list_head *work_list,
                  struct vtn_cf_node *cf_parent,
                  struct list_head *cf_list,
                  struct vtn_block *block)
{
   if (!list_is_empty(cf_list)) {
      /* vtn_process_block() acts like an iterator: it processes the given
       * block and then returns the next block to process.  For a given
       * control-flow construct, vtn_build_cfg() calls vtn_process_block()
       * repeatedly until it finally returns NULL.  Therefore, we know that
       * the only blocks on which vtn_process_block() can be called are either
       * the first block in a construct or a block that vtn_process_block()
       * returned for the current construct.  If cf_list is empty then we know
       * that we're processing the first block in the construct and we have to
       * add it to the list.
       *
       * If cf_list is not empty, then it must be the block returned by the
       * previous call to vtn_process_block().  We know a priori that
       * vtn_process_block only returns either normal branches
       * (vtn_branch_type_none) or merge target blocks.
       */
      switch (vtn_handle_branch(b, cf_parent, block)) {
      case vtn_branch_type_none:
         /* For normal branches, we want to process them and add them to the
          * current construct.  Merge target blocks also look like normal
          * branches from the perspective of this construct.  See also
          * vtn_handle_branch().
          */
         break;

      case vtn_branch_type_loop_continue:
      case vtn_branch_type_switch_fallthrough:
         /* The two cases where we can get early exits from a construct that
          * are not to that construct's merge target are loop continues and
          * switch fall-throughs.  In these cases, we need to break out of the
          * current construct by returning NULL.
          */
         return NULL;

      default:
         /* The only way we can get here is if something was used as two kinds
          * of merges at the same time and that's illegal.
          */
         vtn_fail("A block was used as a merge target from two or more "
                  "structured control-flow constructs");
      }
   }

   /* Once a block has been processed, it is placed into a CF list and its
    * parent pointer is set to something non-NULL.  If we see a node we've
    * already processed here, it either exists in multiple functions or
    * it's an invalid back-edge.
    */
   if (block->node.parent != NULL) {
      vtn_fail_if(vtn_cf_node_find_function(&block->node) !=
                  vtn_cf_node_find_function(cf_parent),
                  "A block cannot exist in two functions at the "
                  "same time");

      vtn_fail("Invalid back or cross-edge in the CFG");
   }

   if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
       block->loop == NULL) {
      vtn_fail_if((*block->branch & SpvOpCodeMask) != SpvOpBranch &&
                  (*block->branch & SpvOpCodeMask) != SpvOpBranchConditional,
                  "An OpLoopMerge instruction must immediately precede "
                  "either an OpBranch or OpBranchConditional instruction.");

      struct vtn_loop *loop = rzalloc(b, struct vtn_loop);

      loop->node.type = vtn_cf_node_type_loop;
      loop->node.parent = cf_parent;
      list_inithead(&loop->body);
      list_inithead(&loop->cont_body);
      loop->header_block = block;
      loop->break_block = vtn_block(b, block->merge[1]);
      loop->cont_block = vtn_block(b, block->merge[2]);
      loop->control = block->merge[3];

      list_addtail(&loop->node.link, cf_list);
      block->loop = loop;

      /* Note: The work item for the main loop body will start with the
       * current block as its start block.  If we weren't careful, we would
       * get here again and end up in an infinite loop.  This is why we set
       * block->loop above and check for it before creating one.  This way,
       * we only create the loop once and the second iteration that tries to
       * handle this loop goes to the cases below and gets handled as a
       * regular block.
       */
      vtn_add_cfg_work_item(b, work_list, &loop->node,
                            &loop->body, loop->header_block);

      /* For continue targets, SPIR-V guarantees the following:
       *
       *  - the Continue Target must dominate the back-edge block
       *  - the back-edge block must post-dominate the Continue Target
       *
       * If the header block is the same as the continue target, this
       * condition is trivially satisfied and there is no real continue
       * section.
       */
      if (loop->cont_block != loop->header_block) {
         vtn_add_cfg_work_item(b, work_list, &loop->node,
                               &loop->cont_body, loop->cont_block);
      }

      vtn_block_set_merge_cf_node(b, loop->break_block, &loop->node);

      return loop->break_block;
   }

   /* Add the block to the CF list */
   block->node.parent = cf_parent;
   list_addtail(&block->node.link, cf_list);

   switch (*block->branch & SpvOpCodeMask) {
   case SpvOpBranch: {
      struct vtn_block *branch_block = vtn_block(b, block->branch[1]);

      block->branch_type = vtn_handle_branch(b, cf_parent, branch_block);

      if (block->branch_type == vtn_branch_type_none)
         return branch_block;
      else
         return NULL;
   }

   case SpvOpReturn:
   case SpvOpReturnValue:
      block->branch_type = vtn_branch_type_return;
      return NULL;

   case SpvOpKill:
      b->has_early_terminate = true;
      block->branch_type = vtn_branch_type_discard;
      return NULL;

   case SpvOpTerminateInvocation:
      b->has_early_terminate = true;
      block->branch_type = vtn_branch_type_terminate;
      return NULL;

   case SpvOpBranchConditional: {
      struct vtn_value *cond_val = vtn_untyped_value(b, block->branch[1]);
      vtn_fail_if(!cond_val->type ||
                  cond_val->type->base_type != vtn_base_type_scalar ||
                  cond_val->type->type != glsl_bool_type(),
                  "Condition must be a Boolean type scalar");

      struct vtn_block *then_block = vtn_block(b, block->branch[2]);
      struct vtn_block *else_block = vtn_block(b, block->branch[3]);

      if (then_block == else_block) {
         /* This is uncommon but it can happen.  We treat this the same way as
          * an unconditional branch.
          */
         block->branch_type = vtn_handle_branch(b, cf_parent, then_block);

         if (block->branch_type == vtn_branch_type_none)
            return then_block;
         else
            return NULL;
      }

      struct vtn_if *if_stmt = rzalloc(b, struct vtn_if);

      if_stmt->node.type = vtn_cf_node_type_if;
      if_stmt->node.parent = cf_parent;
      if_stmt->condition = block->branch[1];
      list_inithead(&if_stmt->then_body);
      list_inithead(&if_stmt->else_body);

      list_addtail(&if_stmt->node.link, cf_list);

      if (block->merge &&
          (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
         /* We may not always have a merge block and that merge doesn't
          * technically have to be an OpSelectionMerge.  We could have a block
          * with an OpLoopMerge which ends in an OpBranchConditional.
          */
         if_stmt->merge_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, if_stmt->merge_block, &if_stmt->node);

         if_stmt->control = block->merge[2];
      }

      if_stmt->then_type = vtn_handle_branch(b, &if_stmt->node, then_block);
      if (if_stmt->then_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->then_body, then_block);
      }

      if_stmt->else_type = vtn_handle_branch(b, &if_stmt->node, else_block);
      if (if_stmt->else_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->else_body, else_block);
      }

      return if_stmt->merge_block;
   }

   case SpvOpSwitch: {
      struct vtn_switch *swtch = rzalloc(b, struct vtn_switch);

      swtch->node.type = vtn_cf_node_type_switch;
      swtch->node.parent = cf_parent;
      swtch->selector = block->branch[1];
      list_inithead(&swtch->cases);

      list_addtail(&swtch->node.link, cf_list);

      /* We may not always have a merge block */
      if (block->merge) {
         vtn_fail_if((*block->merge & SpvOpCodeMask) != SpvOpSelectionMerge,
                     "An OpLoopMerge instruction must immediately precede "
                     "either an OpBranch or OpBranchConditional "
                     "instruction.");
         swtch->break_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, swtch->break_block, &swtch->node);
      }

      /* First, we go through and record all of the cases. */
      vtn_parse_switch(b, swtch, block->branch, &swtch->cases);

      /* Gather the branch types for the switch */
      vtn_foreach_cf_node(case_node, &swtch->cases) {
         struct vtn_case *cse = vtn_cf_node_as_case(case_node);

         cse->type = vtn_handle_branch(b, &swtch->node, cse->block);
         switch (cse->type) {
         case vtn_branch_type_none:
            /* This is a "real" case which has stuff in it */
            vtn_fail_if(cse->block->switch_case != NULL,
                        "OpSwitch has a case which is also in another "
                        "OpSwitch construct");
            cse->block->switch_case = cse;
            vtn_add_cfg_work_item(b, work_list, &cse->node,
                                  &cse->body, cse->block);
            break;

         case vtn_branch_type_switch_break:
         case vtn_branch_type_loop_break:
         case vtn_branch_type_loop_continue:
            /* Switch breaks as well as loop breaks and continues can be
             * used to break out of a switch construct or as direct targets
             * of the OpSwitch.
             */
            break;

         default:
            vtn_fail("Target of OpSwitch is not a valid structured exit "
                     "from the switch construct.");
         }
      }

      return swtch->break_block;
   }

   case SpvOpUnreachable:
      return NULL;

   default:
      vtn_fail("Block did not end with a valid branch instruction");
   }
}

void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   if (b->shader->info.stage == MESA_SHADER_KERNEL)
      return;

   vtn_foreach_cf_node(func_node, &b->functions) {
      struct vtn_function *func = vtn_cf_node_as_function(func_node);

      /* We build the CFG for each function by doing a breadth-first search on
       * the control-flow graph.  We keep track of our state using a worklist.
       * Doing a BFS ensures that we visit each structured control-flow
       * construct and its merge node before we visit the stuff inside the
       * construct.
       */
      struct list_head work_list;
      list_inithead(&work_list);
      vtn_add_cfg_work_item(b, &work_list, &func->node, &func->body,
                            func->start_block);

      while (!list_is_empty(&work_list)) {
         struct vtn_cfg_work_item *work =
            list_first_entry(&work_list, struct vtn_cfg_work_item, link);
         list_del(&work->link);

         for (struct vtn_block *block = work->start_block; block; ) {
            block = vtn_process_block(b, &work_list, work->cf_parent,
                                      work->cf_list, block);
         }
      }
   }
}

static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable.  Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here.  However, in order to
    * handle loops and things properly, we really need dominance
    * information.  It would end up basically being the into-SSA
    * algorithm all over again.  It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_get_type(b, w[1]);
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa_value(b, w[2],
      vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));

   return true;
}

static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);

   /* It's possible that this phi is in an unreachable block in which case it
    * may never have been emitted and therefore may not be in the hash table.
    * In this case, there's no var for it and it's safe to just bail.
    */
   if (phi_entry == NULL)
      return true;

   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred = vtn_block(b, w[i + 1]);

      /* If the predecessor block has no end_nop, it is unreachable and was
       * never emitted, so there is nothing to do for it.
       */
      if (!pred->end_nop)
         continue;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
   }

   return true;
}

static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_if_merge:
      break; /* Nothing to do */
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_false(&b->nb), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_loop_back_edge:
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_op op =
         b->convert_discard_to_demote ? nir_intrinsic_demote : nir_intrinsic_discard;
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, op);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   case vtn_branch_type_terminate: {
      nir_intrinsic_instr *terminate =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_terminate);
      nir_builder_instr_insert(&b->nb, &terminate->instr);
      break;
   }
   default:
      vtn_fail("Invalid branch type");
   }
}

static nir_ssa_def *
vtn_switch_case_condition(struct vtn_builder *b, struct vtn_switch *swtch,
                          nir_ssa_def *sel, struct vtn_case *cse)
{
   if (cse->is_default) {
      nir_ssa_def *any = nir_imm_false(&b->nb);
      vtn_foreach_cf_node(other_node, &swtch->cases) {
         struct vtn_case *other = vtn_cf_node_as_case(other_node);
         if (other->is_default)
            continue;

         any = nir_ior(&b->nb, any,
                       vtn_switch_case_condition(b, swtch, sel, other));
      }
      return nir_inot(&b->nb, any);
   } else {
      nir_ssa_def *cond = nir_imm_false(&b->nb);
      util_dynarray_foreach(&cse->values, uint64_t, val)
         cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));
      return cond;
   }
}
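
/* Example for vtn_switch_case_condition() (illustrative): with cases for
 * values {0} and {1, 2}, the default case's condition is computed as
 * !((sel == 0) || ((sel == 1) || (sel == 2))).
 */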

static nir_loop_control
vtn_loop_control(struct vtn_builder *b, struct vtn_loop *vtn_loop)
{
   if (vtn_loop->control == SpvLoopControlMaskNone)
      return nir_loop_control_none;
   else if (vtn_loop->control & SpvLoopControlDontUnrollMask)
      return nir_loop_control_dont_unroll;
   else if (vtn_loop->control & SpvLoopControlUnrollMask)
      return nir_loop_control_unroll;
   else if (vtn_loop->control & SpvLoopControlDependencyInfiniteMask ||
            vtn_loop->control & SpvLoopControlDependencyLengthMask ||
            vtn_loop->control & SpvLoopControlMinIterationsMask ||
            vtn_loop->control & SpvLoopControlMaxIterationsMask ||
            vtn_loop->control & SpvLoopControlIterationMultipleMask ||
            vtn_loop->control & SpvLoopControlPeelCountMask ||
            vtn_loop->control & SpvLoopControlPartialCountMask) {
      /* We do not do anything special with these yet. */
      return nir_loop_control_none;
   } else {
      vtn_fail("Invalid loop control");
   }
}

static nir_selection_control
vtn_selection_control(struct vtn_builder *b, struct vtn_if *vtn_if)
{
   if (vtn_if->control == SpvSelectionControlMaskNone)
      return nir_selection_control_none;
   else if (vtn_if->control & SpvSelectionControlDontFlattenMask)
      return nir_selection_control_dont_flatten;
   else if (vtn_if->control & SpvSelectionControlFlattenMask)
      return nir_selection_control_flatten;
   else
      vtn_fail("Invalid selection control");
}

static void
vtn_emit_ret_store(struct vtn_builder *b, struct vtn_block *block)
{
   if ((*block->branch & SpvOpCodeMask) != SpvOpReturnValue)
      return;

   vtn_fail_if(b->func->type->return_type->base_type == vtn_base_type_void,
               "Return with a value from a function returning void");
   struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
   const struct glsl_type *ret_type =
      glsl_get_bare_type(b->func->type->return_type->type);
   nir_deref_instr *ret_deref =
      nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                           nir_var_function_temp, ret_type, 0);
   vtn_local_store(b, src, ret_deref, 0);
}

static void
vtn_emit_cf_list_structured(struct vtn_builder *b, struct list_head *cf_list,
                            nir_variable *switch_fall_var,
                            bool *has_switch_break,
                            vtn_instruction_handler handler)
{
   vtn_foreach_cf_node(node, cf_list) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = vtn_cf_node_as_block(node);

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                     nir_intrinsic_nop);
         nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

         vtn_emit_ret_store(b, block);

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
            return;
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = vtn_cf_node_as_if(node);
         bool sw_break = false;

         nir_if *nif =
            nir_push_if(&b->nb, vtn_get_nir_ssa(b, vtn_if->condition));

         nif->control = vtn_selection_control(b, vtn_if);

         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list_structured(b, &vtn_if->then_body,
                                        switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         nir_push_else(&b->nb, nif);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list_structured(b, &vtn_if->else_body,
                                        switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         nir_pop_if(&b->nb, nif);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior.  However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;
            nir_push_if(&b->nb, nir_load_var(&b->nb, switch_fall_var));
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = vtn_cf_node_as_loop(node);

         nir_loop *loop = nir_push_loop(&b->nb);
         loop->control = vtn_loop_control(b, vtn_loop);

         vtn_emit_cf_list_structured(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_is_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
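            /* The emitted loop then has roughly this shape (illustrative):
             *
             *    cont = false;
             *    loop {
             *       if (cont) { <continue body> }
             *       cont = true;
             *       <loop body>
             *    }
             */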
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_false(&b->nb), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);

            nir_if *cont_if =
               nir_push_if(&b->nb, nir_load_var(&b->nb, do_cont));

            vtn_emit_cf_list_structured(b, &vtn_loop->cont_body, NULL, NULL,
                                        handler);

            nir_pop_if(&b->nb, cont_if);

            nir_store_var(&b->nb, do_cont, nir_imm_true(&b->nb), 1);

            b->has_loop_continue = true;
         }

         nir_pop_loop(&b->nb, loop);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = vtn_cf_node_as_switch(node);

         /* Before we can emit anything, we need to sort the list of cases in
          * fall-through order.
          */
         vtn_switch_order_cases(vtn_switch);

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point.  Any switch breaks
          * will set this variable to false.
          */
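         /* The switch is lowered to a chain of ifs, one per case in
          * fall-through order.  For example (illustrative), a switch where
          * case 0 falls through into case 1 becomes roughly:
          *
          *    fall = false;
          *    if (sel == 0 || fall) { fall = true; <case 0 body> }
          *    if (sel == 1 || fall) { fall = true; <case 1 body> }
          *
          * A switch break inside a body stores false to "fall" so that the
          * remaining cases are skipped.
          */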
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_false(&b->nb), 1);

         nir_ssa_def *sel = vtn_get_nir_ssa(b, vtn_switch->selector);

         /* Now we can walk the list of cases and actually emit code */
         vtn_foreach_cf_node(case_node, &vtn_switch->cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);

            /* If this case jumps directly to the break block, we don't have
             * to handle the case as the body is empty and doesn't fall
             * through.
             */
            if (cse->block == vtn_switch->break_block)
               continue;

            /* Figure out the condition */
            nir_ssa_def *cond =
               vtn_switch_case_condition(b, vtn_switch, sel, cse);
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_push_if(&b->nb, cond);

            bool has_break = false;
            nir_store_var(&b->nb, fall_var, nir_imm_true(&b->nb), 1);
            vtn_emit_cf_list_structured(b, &cse->body, fall_var, &has_break,
                                        handler);
            (void)has_break; /* We don't care */

            nir_pop_if(&b->nb, case_if);
         }

         break;
      }

      default:
         vtn_fail("Invalid CF node type");
      }
   }
}

static struct nir_block *
vtn_new_unstructured_block(struct vtn_builder *b, struct vtn_function *func)
{
   struct nir_block *n = nir_block_create(b->shader);
   exec_list_push_tail(&func->impl->body, &n->cf_node.node);
   n->cf_node.parent = &func->impl->cf_node;
   return n;
}

static void
vtn_add_unstructured_block(struct vtn_builder *b,
                           struct vtn_function *func,
                           struct list_head *work_list,
                           struct vtn_block *block)
{
   if (!block->block) {
      block->block = vtn_new_unstructured_block(b, func);
      list_addtail(&block->node.link, work_list);
   }
}

static void
vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
                              vtn_instruction_handler handler)
{
   struct list_head work_list;
   list_inithead(&work_list);

   func->start_block->block = nir_start_block(func->impl);
   list_addtail(&func->start_block->node.link, &work_list);
   while (!list_is_empty(&work_list)) {
      struct vtn_block *block =
         list_first_entry(&work_list, struct vtn_block, node.link);
      list_del(&block->node.link);

      vtn_assert(block->block);

      const uint32_t *block_start = block->label;
      const uint32_t *block_end = block->branch;

      b->nb.cursor = nir_after_block(block->block);
      block_start = vtn_foreach_instruction(b, block_start, block_end,
                                            vtn_handle_phis_first_pass);
      vtn_foreach_instruction(b, block_start, block_end, handler);
      block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                  nir_intrinsic_nop);
      nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

      SpvOp op = *block_end & SpvOpCodeMask;
      switch (op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block = vtn_block(b, block->branch[1]);
         vtn_add_unstructured_block(b, func, &work_list, branch_block);
         nir_goto(&b->nb, branch_block->block);
         break;
      }

      case SpvOpBranchConditional: {
         nir_ssa_def *cond = vtn_ssa_value(b, block->branch[1])->def;
         struct vtn_block *then_block = vtn_block(b, block->branch[2]);
         struct vtn_block *else_block = vtn_block(b, block->branch[3]);

         vtn_add_unstructured_block(b, func, &work_list, then_block);
         if (then_block == else_block) {
            nir_goto(&b->nb, then_block->block);
         } else {
            vtn_add_unstructured_block(b, func, &work_list, else_block);
            nir_goto_if(&b->nb, then_block->block, nir_src_for_ssa(cond),
                        else_block->block);
         }

         break;
      }

      case SpvOpSwitch: {
         struct list_head cases;
         list_inithead(&cases);
         vtn_parse_switch(b, NULL, block->branch, &cases);

         nir_ssa_def *sel = vtn_get_nir_ssa(b, block->branch[1]);

         struct vtn_case *def = NULL;
         vtn_foreach_cf_node(case_node, &cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);
            if (cse->is_default) {
               assert(def == NULL);
               def = cse;
               continue;
            }

            nir_ssa_def *cond = nir_imm_false(&b->nb);
            util_dynarray_foreach(&cse->values, uint64_t, val)
               cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));

            /* block for the next check */
            nir_block *e = vtn_new_unstructured_block(b, func);
            vtn_add_unstructured_block(b, func, &work_list, cse->block);

            /* add branching */
            nir_goto_if(&b->nb, cse->block->block, nir_src_for_ssa(cond), e);
            b->nb.cursor = nir_after_block(e);
         }

         vtn_assert(def != NULL);
         vtn_add_unstructured_block(b, func, &work_list, def->block);

         /* now that all cases are handled, branch into the default block */
         nir_goto(&b->nb, def->block->block);
         break;
      }

      case SpvOpKill: {
         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
         nir_builder_instr_insert(&b->nb, &discard->instr);
         nir_goto(&b->nb, b->func->impl->end_block);
         break;
      }

      case SpvOpUnreachable:
      case SpvOpReturn:
      case SpvOpReturnValue: {
         vtn_emit_ret_store(b, block);
         nir_goto(&b->nb, b->func->impl->end_block);
         break;
      }

      default:
         vtn_fail("Unhandled opcode %s", spirv_op_to_string(op));
      }
   }
}

void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   static int force_unstructured = -1;
   if (force_unstructured < 0) {
      force_unstructured =
         env_var_as_boolean("MESA_SPIRV_FORCE_UNSTRUCTURED", false);
   }

   nir_builder_init(&b->nb, func->impl);
   b->func = func;
   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->nb.exact = b->exact;
   b->has_loop_continue = false;
   b->phi_table = _mesa_pointer_hash_table_create(b);

   if (b->shader->info.stage == MESA_SHADER_KERNEL || force_unstructured) {
      b->func->impl->structured = false;
      vtn_emit_cf_func_unstructured(b, func, instruction_handler);
   } else {
      vtn_emit_cf_list_structured(b, &func->body, NULL, NULL,
                                  instruction_handler);
   }

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   nir_rematerialize_derefs_in_use_blocks_impl(func->impl);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->func->impl->structured &&
       (b->has_loop_continue || b->has_early_terminate))
      nir_repair_ssa_impl(func->impl);

   func->emitted = true;
}