/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include <assert.h>
nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}
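
/* Editor's note: a minimal usage sketch, not part of the upstream file.  It
 * assumes the caller supplies a ralloc memory context and the driver's
 * compiler options; passing NULL for shader_info makes nir_shader_create()
 * record the stage itself, as seen above.
 */
static inline nir_shader *
example_create_fragment_shader(void *mem_ctx,
                               const nir_shader_compiler_options *options)
{
   return nir_shader_create(mem_ctx, MESA_SHADER_FRAGMENT, options, NULL);
}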

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}
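
/* Editor's note: usage sketch, not part of the upstream file.  It shows how
 * a register is typically configured after creation; reg_create() leaves
 * num_components at 0, so callers must set it themselves.
 */
static inline nir_register *
example_create_vec4_global_reg(nir_shader *shader)
{
   nir_register *reg = nir_global_reg_create(shader);
   reg->num_components = 4;
   reg->bit_size = 32; /* already the default from reg_create() */
   return reg;
}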

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_param:
      assert(!"nir_shader_add_variable cannot be used for function parameters");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_shader_storage:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->info.stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}
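
/* Editor's note: usage sketch, not part of the upstream file.  Uniforms are
 * marked read-only by nir_variable_create() above; the variable name here is
 * purely illustrative.
 */
static inline nir_variable *
example_create_color_uniform(nir_shader *shader)
{
   return nir_variable_create(shader, nir_var_uniform,
                              glsl_vec4_type(), "u_color");
}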

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->return_type = glsl_void_type();
   func->impl = NULL;

   return func;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < 4; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->num_params = 0;
   impl->params = NULL;
   impl->return_var = NULL;
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   impl->num_params = function->num_params;
   impl->params = ralloc_array(function->shader,
                               nir_variable *, impl->num_params);

   for (unsigned i = 0; i < impl->num_params; i++) {
      impl->params[i] = rzalloc(function->shader, nir_variable);
      impl->params[i]->type = function->params[i].type;
      impl->params[i]->data.mode = nir_var_param;
      impl->params[i]->data.location = i;
   }

   if (!glsl_type_is_void(function->return_type)) {
      impl->return_var = rzalloc(function->shader, nir_variable);
      impl->return_var->type = function->return_type;
      impl->return_var->data.mode = nir_var_param;
      impl->return_var->data.location = -1;
   } else {
      impl->return_var = NULL;
   }

   return impl;
}
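
/* Editor's note: usage sketch, not part of the upstream file, tying the two
 * functions above together: a "main" entry point whose empty body already
 * consists of a start block wired to the end block.
 */
static inline nir_function_impl *
example_create_main_impl(nir_shader *shader)
{
   nir_function *main_func = nir_function_create(shader, "main");
   return nir_function_impl_create(main_func);
}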

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This way it
    * doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, the state tracker creates an
    * initial IR, clones that, runs the appropriate lowering passes, and
    * passes it to the driver, which does common lowering/opt and then
    * stores a ref which is later used to do state-specific lowering and
    * further opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   src->swizzle[0] = 0;
   src->swizzle[1] = 1;
   src->swizzle[2] = 2;
   src->swizzle[3] = 3;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
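
/* Editor's note: usage sketch, not part of the upstream file.  It builds an
 * fadd with a single-component 32-bit SSA destination using
 * nir_ssa_dest_init() (defined later in this file); the two sources would
 * still need to be filled in before the instruction is inserted.
 */
static inline nir_alu_instr *
example_create_fadd(nir_shader *shader)
{
   nir_alu_instr *add = nir_alu_instr_create(shader, nir_op_fadd);
   nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, "sum");
   return add;
}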

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   nir_call_instr *instr = ralloc(shader, nir_call_instr);
   instr_init(&instr->instr, nir_instr_type_call);

   instr->callee = callee;
   instr->num_params = callee->num_params;
   instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
   instr->return_deref = NULL;

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->texture = NULL;
   instr->sampler_index = 0;
   instr->sampler = NULL;

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
   nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
   deref->deref.deref_type = nir_deref_type_var;
   deref->deref.child = NULL;
   deref->deref.type = var->type;
   deref->var = var;
   return deref;
}

nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
   nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
   deref->deref.deref_type = nir_deref_type_array;
   deref->deref.child = NULL;
   deref->deref_array_type = nir_deref_array_type_direct;
   src_init(&deref->indirect);
   deref->base_offset = 0;
   return deref;
}

nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
   nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
   deref->deref.deref_type = nir_deref_type_struct;
   deref->deref.child = NULL;
   deref->index = field_index;
   return deref;
}

nir_deref_var *
nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx)
{
   if (deref == NULL)
      return NULL;

   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_deref_clone(deref->deref.child, ret);
   return ret;
}

static nir_deref_array *
deref_array_clone(const nir_deref_array *deref, void *mem_ctx)
{
   nir_deref_array *ret = nir_deref_array_create(mem_ctx);
   ret->base_offset = deref->base_offset;
   ret->deref_array_type = deref->deref_array_type;
   if (deref->deref_array_type == nir_deref_array_type_indirect) {
      nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
   }
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_deref_clone(deref->deref.child, ret);
   return ret;
}

static nir_deref_struct *
deref_struct_clone(const nir_deref_struct *deref, void *mem_ctx)
{
   nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_deref_clone(deref->deref.child, ret);
   return ret;
}

nir_deref *
nir_deref_clone(const nir_deref *deref, void *mem_ctx)
{
   if (deref == NULL)
      return NULL;

   switch (deref->deref_type) {
   case nir_deref_type_var:
      return &nir_deref_var_clone(nir_deref_as_var(deref), mem_ctx)->deref;
   case nir_deref_type_array:
      return &deref_array_clone(nir_deref_as_array(deref), mem_ctx)->deref;
   case nir_deref_type_struct:
      return &deref_struct_clone(nir_deref_as_struct(deref), mem_ctx)->deref;
   default:
      unreachable("Invalid dereference type");
   }

   return NULL;
}

/* This is the second step in the recursion.  We've found the tail and made a
 * copy.  Now we need to iterate over all possible leaves and call the
 * callback on each one.
 */
static bool
deref_foreach_leaf_build_recur(nir_deref_var *deref, nir_deref *tail,
                               nir_deref_foreach_leaf_cb cb, void *state)
{
   unsigned length;
   union {
      nir_deref_array arr;
      nir_deref_struct str;
   } tmp;

   assert(tail->child == NULL);
   switch (glsl_get_base_type(tail->type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      if (glsl_type_is_vector_or_scalar(tail->type))
         return cb(deref, state);
      /* Fall Through */

   case GLSL_TYPE_ARRAY:
      tmp.arr.deref.deref_type = nir_deref_type_array;
      tmp.arr.deref.type = glsl_get_array_element(tail->type);
      tmp.arr.deref_array_type = nir_deref_array_type_direct;
      tmp.arr.indirect = NIR_SRC_INIT;
      tail->child = &tmp.arr.deref;

      length = glsl_get_length(tail->type);
      for (unsigned i = 0; i < length; i++) {
         tmp.arr.deref.child = NULL;
         tmp.arr.base_offset = i;
         if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
            return false;
      }
      return true;

   case GLSL_TYPE_STRUCT:
      tmp.str.deref.deref_type = nir_deref_type_struct;
      tail->child = &tmp.str.deref;

      length = glsl_get_length(tail->type);
      for (unsigned i = 0; i < length; i++) {
         tmp.arr.deref.child = NULL;
         tmp.str.deref.type = glsl_get_struct_field(tail->type, i);
         tmp.str.index = i;
         if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
            return false;
      }
      return true;

   default:
      unreachable("Invalid type for dereference");
   }
}

/* This is the first step of the foreach_leaf recursion.  In this step we are
 * walking to the end of the deref chain and making a copy in the stack as we
 * go.  This is because we don't want to mutate the deref chain that was
 * passed in by the caller.  The downside is that this deref chain is on the
 * stack and, if the caller wants to do anything with it, they will have to
 * make their own copy because this one will go away.
 */
static bool
deref_foreach_leaf_copy_recur(nir_deref_var *deref, nir_deref *tail,
                              nir_deref_foreach_leaf_cb cb, void *state)
{
   union {
      nir_deref_array arr;
      nir_deref_struct str;
   } c;

   if (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array:
         c.arr = *nir_deref_as_array(tail->child);
         tail->child = &c.arr.deref;
         return deref_foreach_leaf_copy_recur(deref, &c.arr.deref, cb, state);

      case nir_deref_type_struct:
         c.str = *nir_deref_as_struct(tail->child);
         tail->child = &c.str.deref;
         return deref_foreach_leaf_copy_recur(deref, &c.str.deref, cb, state);

      case nir_deref_type_var:
      default:
         unreachable("Invalid deref type for a child");
      }
   } else {
      /* We've gotten to the end of the original deref.  Time to start
       * building our own derefs.
       */
      return deref_foreach_leaf_build_recur(deref, tail, cb, state);
   }
}

/**
 * This function iterates over all of the possible derefs that can be created
 * with the given deref as the head.  It then calls the provided callback with
 * a full deref for each one.
 *
 * The deref passed to the callback will be allocated on the stack.  You will
 * need to make a copy if you want it to hang around.
 */
bool
nir_deref_foreach_leaf(nir_deref_var *deref,
                       nir_deref_foreach_leaf_cb cb, void *state)
{
   nir_deref_var copy = *deref;
   return deref_foreach_leaf_copy_recur(&copy, &copy.deref, cb, state);
}
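
/* Editor's note: usage sketch, not part of the upstream file, of the
 * iteration above: counting the vector/scalar leaves reachable from a deref
 * chain.
 */
static inline bool
example_count_leaf_cb(nir_deref_var *deref, void *state)
{
   unsigned *count = state;
   (void) deref;
   (*count)++;
   return true; /* keep iterating */
}

static inline unsigned
example_count_leaves(nir_deref_var *deref)
{
   unsigned count = 0;
   nir_deref_foreach_leaf(deref, example_count_leaf_cb, &count);
   return count;
}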

/* Returns a load_const instruction that represents the constant
 * initializer for the given deref chain.  The caller is responsible for
 * ensuring that there actually is a constant initializer.
 */
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
{
   nir_constant *constant = deref->var->constant_initializer;
   assert(constant);

   const nir_deref *tail = &deref->deref;
   unsigned matrix_col = 0;
   while (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail->child);
         assert(arr->deref_array_type == nir_deref_array_type_direct);
         if (glsl_type_is_matrix(tail->type)) {
            assert(arr->deref.child == NULL);
            matrix_col = arr->base_offset;
         } else {
            constant = constant->elements[arr->base_offset];
         }
         break;
      }

      case nir_deref_type_struct: {
         constant = constant->elements[nir_deref_as_struct(tail->child)->index];
         break;
      }

      default:
         unreachable("Invalid deref child type");
      }

      tail = tail->child;
   }

   unsigned bit_size = glsl_get_bit_size(tail->type);
   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type),
                                  bit_size);

   switch (glsl_get_base_type(tail->type)) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_BOOL:
      load->value = constant->values[matrix_col];
      break;
   default:
      unreachable("Invalid immediate type");
   }

   return load;
}

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
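
/* Editor's note: usage sketch, not part of the upstream file.  The cursor
 * helpers nir_before_instr()/nir_after_block() from nir.h pair with
 * nir_instr_insert() above; this one appends an instruction to a block.
 */
static inline void
example_append_instr(nir_block *block, nir_instr *instr)
{
   nir_instr_insert(nir_after_block(block), instr);
}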

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
                      void *state)
{
   if (deref->deref_array_type == nir_deref_array_type_indirect)
      return visit_src(&deref->indirect, cb, state);
   return true;
}

static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
   nir_deref *cur = &deref->deref;
   while (cur != NULL) {
      if (cur->deref_type == nir_deref_type_array) {
         if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
            return false;
      }

      cur = cur->child;
   }

   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   if (instr->texture != NULL) {
      if (!visit_deref_src(instr->texture, cb, state))
         return false;
   }

   if (instr->sampler != NULL) {
      if (!visit_deref_src(instr->sampler, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   unsigned num_vars =
      nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      if (!visit_deref_src(instr->variables[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      /* Call instructions have no regular sources */
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
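
/* Editor's note: usage sketch, not part of the upstream file: testing a
 * source against an immediate.  It relies on nir_src_as_const_value()
 * returning NULL for anything that is not an SSA def produced by a
 * load_const instruction, and assumes the per-channel u32[] layout of
 * nir_const_value from this era of NIR.
 */
static inline bool
example_src_is_const_zero_u32(nir_src src)
{
   const nir_const_value *val = nir_src_as_const_value(src);
   return val != NULL && val->u32[0] == 0;
}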

/**
 * Returns true if the source is known to be dynamically uniform.  Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but it can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

void
nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
                        nir_deref_var *new_deref)
{
   if (*deref)
      visit_deref_src(*deref, remove_use_cb, NULL);

   *deref = new_deref;

   if (*deref)
      visit_deref_src(*deref, add_use_cb, instr);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
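
/* Editor's note: usage sketch, not part of the upstream file, of the fixup
 * pattern described above: `fixup` is the last instruction of the fixup code
 * and reads `def`, so every use after it is redirected to the fixed-up value
 * while the fixup code itself is left alone.  This assumes fixup's
 * destination is SSA.
 */
static inline void
example_apply_fixup(nir_ssa_def *def, nir_alu_instr *fixup)
{
   nir_ssa_def_rewrite_uses_after(def, nir_src_for_ssa(&fixup->dest.dest.ssa),
                                  &fixup->instr);
}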
1651
1652 uint8_t
nir_ssa_def_components_read(const nir_ssa_def * def)1653 nir_ssa_def_components_read(const nir_ssa_def *def)
1654 {
1655 uint8_t read_mask = 0;
1656 nir_foreach_use(use, def) {
1657 if (use->parent_instr->type == nir_instr_type_alu) {
1658 nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
1659 nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
1660 int src_idx = alu_src - &alu->src[0];
1661 assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
1662
1663 for (unsigned c = 0; c < 4; c++) {
1664 if (!nir_alu_instr_channel_used(alu, src_idx, c))
1665 continue;
1666
1667 read_mask |= (1 << alu_src->swizzle[c]);
1668 }
1669 } else {
1670 return (1 << def->num_components) - 1;
1671 }
1672 }
1673
1674 return read_mask;
1675 }
1676
1677 nir_block *
nir_block_cf_tree_next(nir_block * block)1678 nir_block_cf_tree_next(nir_block *block)
1679 {
1680 if (block == NULL) {
1681 /* nir_foreach_block_safe() will call this function on a NULL block
1682 * after the last iteration, but it won't use the result so just return
1683 * NULL here.
1684 */
1685 return NULL;
1686 }
1687
1688 nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
1689 if (cf_next)
1690 return nir_cf_node_cf_tree_first(cf_next);
1691
1692 nir_cf_node *parent = block->cf_node.parent;
1693
1694 switch (parent->type) {
1695 case nir_cf_node_if: {
1696 /* Are we at the end of the if? Go to the beginning of the else */
1697 nir_if *if_stmt = nir_cf_node_as_if(parent);
1698 if (block == nir_if_last_then_block(if_stmt))
1699 return nir_if_first_else_block(if_stmt);
1700
1701 assert(block == nir_if_last_else_block(if_stmt));
1702 /* fall through */
1703 }
1704
1705 case nir_cf_node_loop:
1706 return nir_cf_node_as_block(nir_cf_node_next(parent));
1707
1708 case nir_cf_node_function:
1709 return NULL;
1710
1711 default:
1712 unreachable("unknown cf node type");
1713 }
1714 }
1715
1716 nir_block *
nir_block_cf_tree_prev(nir_block * block)1717 nir_block_cf_tree_prev(nir_block *block)
1718 {
1719 if (block == NULL) {
1720 /* do this for consistency with nir_block_cf_tree_next() */
1721 return NULL;
1722 }
1723
1724 nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
1725 if (cf_prev)
1726 return nir_cf_node_cf_tree_last(cf_prev);
1727
1728 nir_cf_node *parent = block->cf_node.parent;
1729
1730 switch (parent->type) {
1731 case nir_cf_node_if: {
1732 /* Are we at the beginning of the else? Go to the end of the if */
1733 nir_if *if_stmt = nir_cf_node_as_if(parent);
1734 if (block == nir_if_first_else_block(if_stmt))
1735 return nir_if_last_then_block(if_stmt);
1736
1737 assert(block == nir_if_first_then_block(if_stmt));
1738 /* fall through */
1739 }
1740
1741 case nir_cf_node_loop:
1742 return nir_cf_node_as_block(nir_cf_node_prev(parent));
1743
1744 case nir_cf_node_function:
1745 return NULL;
1746
1747 default:
1748 unreachable("unknown cf node type");
1749 }
1750 }
1751
nir_cf_node_cf_tree_first(nir_cf_node * node)1752 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
1753 {
1754 switch (node->type) {
1755 case nir_cf_node_function: {
1756 nir_function_impl *impl = nir_cf_node_as_function(node);
1757 return nir_start_block(impl);
1758 }
1759
1760 case nir_cf_node_if: {
1761 nir_if *if_stmt = nir_cf_node_as_if(node);
1762 return nir_if_first_then_block(if_stmt);
1763 }
1764
1765 case nir_cf_node_loop: {
1766 nir_loop *loop = nir_cf_node_as_loop(node);
1767 return nir_loop_first_block(loop);
1768 }
1769
1770 case nir_cf_node_block: {
1771 return nir_cf_node_as_block(node);
1772 }
1773
1774 default:
1775 unreachable("unknown node type");
1776 }
1777 }
1778
nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

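/**
 * Returns the next block in the control-flow tree walk that is not
 * contained within the given node, i.e. the first block after the node and
 * everything inside it, or NULL for a function node.
 */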
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

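/**
 * Returns the if statement that immediately follows the given block in the
 * control-flow list, or NULL if the block is the last node in its list or
 * is followed by something other than an if.
 *
 * Example (sketch):
 *
 *    nir_if *nif = nir_block_get_following_if(block);
 *    if (nif != NULL) {
 *       // the block falls through into an if; nif->condition selects
 *       // between the then- and else-branches
 *    }
 */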
nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

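/**
 * Same as nir_block_get_following_if(), but for loops: returns the loop
 * that immediately follows the given block, or NULL if there is none.
 */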
nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

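/**
 * Assigns each block in the impl an index in source order and records the
 * total in impl->num_blocks. Does nothing if nir_metadata_block_index is
 * already valid; passes normally request up-to-date indices via
 * nir_metadata_require(impl, nir_metadata_block_index) rather than calling
 * this directly.
 */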
void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   impl->num_blocks = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

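/* A common pattern (sketch) is to index the defs and then build a per-def
 * side table keyed by def->index:
 *
 *    nir_index_ssa_defs(impl);
 *    some_info *table = rzalloc_array(mem_ctx, some_info, impl->ssa_alloc);
 *    ...
 *    table[def->index] = ...;
 *
 * where "some_info" and "table" are hypothetical names for illustration.
 */
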
/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

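/**
 * Maps a GL system value to the NIR intrinsic that loads it; the inverse of
 * nir_system_value_from_intrinsic(). Calling this with a system value that
 * has no directly corresponding load intrinsic is invalid (unreachable()).
 */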
nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

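/**
 * The inverse of nir_intrinsic_from_system_value(): maps a load intrinsic
 * back to the GL system value it reads. Calling this with an intrinsic that
 * does not produce a system value is invalid (unreachable()).
 */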
gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}
