/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef NIR_BUILDER_H
#define NIR_BUILDER_H

#include "nir_control_flow.h"
#include "util/bitscan.h"
#include "util/half_float.h"

struct exec_list;

typedef struct nir_builder {
   nir_cursor cursor;

   /* Whether new ALU instructions will be marked "exact" */
   bool exact;

   /* Whether to run divergence analysis on inserted instructions (loop merge
    * and header phis are not updated). */
   bool update_divergence;

   nir_shader *shader;
   nir_function_impl *impl;
} nir_builder;

static inline void
nir_builder_init(nir_builder *build, nir_function_impl *impl)
{
   memset(build, 0, sizeof(*build));
   build->exact = false;
   build->impl = impl;
   build->shader = impl->function->shader;
}

static inline void
nir_builder_init_simple_shader(nir_builder *build, void *mem_ctx,
                               gl_shader_stage stage,
                               const nir_shader_compiler_options *options)
{
   memset(build, 0, sizeof(*build));
   build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
   nir_function *func = nir_function_create(build->shader, "main");
   func->is_entrypoint = true;
   build->exact = false;
   build->impl = nir_function_impl_create(func);
   build->cursor = nir_after_cf_list(&build->impl->body);
}

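/*
 * Example (illustrative sketch): set up a throwaway shader and emit a
 * couple of instructions.  nir_fadd() comes from the generated
 * nir_builder_opcodes.h included further down; `options` is assumed to be
 * a driver-provided nir_shader_compiler_options.
 *
 *    nir_builder b;
 *    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, options);
 *    nir_ssa_def *sum = nir_fadd(&b, nir_imm_float(&b, 1.0f),
 *                                    nir_imm_float(&b, 2.0f));
 */
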
typedef bool (*nir_instr_pass_cb)(struct nir_builder *, nir_instr *, void *);

/**
 * Iterates over all the instructions in a NIR shader and calls the given pass
 * on them.
 *
 * The pass should return true if it modified the shader.  In that case, only
 * the preserved metadata flags will be preserved in the function impl.
 *
 * The builder will be initialized to point at the function impl, but its
 * cursor is unset.
 */
static inline bool
nir_shader_instructions_pass(nir_shader *shader,
                             nir_instr_pass_cb pass,
                             nir_metadata preserved,
                             void *cb_data)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block_safe(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            progress |= pass(&b, instr, cb_data);
         }
      }

      if (progress) {
         nir_metadata_preserve(function->impl, preserved);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}

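/*
 * Example (illustrative sketch): a pass that rewrites every fneg into a
 * multiply by -1.0.  nir_instr_as_alu(), nir_ssa_def_rewrite_uses() and
 * nir_instr_remove() are assumed from nir.h (their exact signatures have
 * varied across Mesa versions).
 *
 *    static bool
 *    lower_fneg_instr(nir_builder *b, nir_instr *instr, void *data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return false;
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       if (alu->op != nir_op_fneg)
 *          return false;
 *
 *       b->cursor = nir_before_instr(instr);
 *       nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, 0);
 *       nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
 *                                nir_fmul_imm(b, src, -1.0));
 *       nir_instr_remove(instr);
 *       return true;
 *    }
 *
 *    bool progress =
 *       nir_shader_instructions_pass(shader, lower_fneg_instr,
 *                                    nir_metadata_block_index |
 *                                    nir_metadata_dominance, NULL);
 */
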
static inline void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
   nir_instr_insert(build->cursor, instr);

   if (build->update_divergence)
      nir_update_instr_divergence(build->shader, instr);

   /* Move the cursor forward. */
   build->cursor = nir_after_instr(instr);
}

static inline nir_instr *
nir_builder_last_instr(nir_builder *build)
{
   assert(build->cursor.option == nir_cursor_after_instr);
   return build->cursor.instr;
}

static inline void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}

static inline bool
nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
      if (n == cf_node)
         return true;
   }
   return false;
}

static inline nir_if *
nir_push_if_src(nir_builder *build, nir_src condition)
{
   nir_if *nif = nir_if_create(build->shader);
   nif->condition = condition;
   nir_builder_cf_insert(build, &nif->cf_node);
   build->cursor = nir_before_cf_list(&nif->then_list);
   return nif;
}

static inline nir_if *
nir_push_if(nir_builder *build, nir_ssa_def *condition)
{
   return nir_push_if_src(build, nir_src_for_ssa(condition));
}

static inline nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_before_cf_list(&nif->else_list);
   return nif;
}

static inline void
nir_pop_if(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&nif->cf_node);
}

static inline nir_ssa_def *
nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));

   nir_phi_instr *phi = nir_phi_instr_create(build->shader);

   nir_phi_src *src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_then_block(nif);
   src->src = nir_src_for_ssa(then_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_else_block(nif);
   src->src = nir_src_for_ssa(else_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
   nir_ssa_dest_init(&phi->instr, &phi->dest,
                     then_def->num_components, then_def->bit_size, NULL);

   nir_builder_instr_insert(build, &phi->instr);

   return &phi->dest.ssa;
}

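/*
 * Example (illustrative sketch): emit real control flow for a select and
 * merge the two sides with a phi.
 *
 *    nir_push_if(&b, cond);
 *    nir_ssa_def *then_val = nir_imm_float(&b, 1.0f);
 *    nir_push_else(&b, NULL);
 *    nir_ssa_def *else_val = nir_imm_float(&b, 0.0f);
 *    nir_pop_if(&b, NULL);
 *    nir_ssa_def *result = nir_if_phi(&b, then_val, else_val);
 */
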
static inline nir_loop *
nir_push_loop(nir_builder *build)
{
   nir_loop *loop = nir_loop_create(build->shader);
   nir_builder_cf_insert(build, &loop->cf_node);
   build->cursor = nir_before_cf_list(&loop->body);
   return loop;
}

static inline void
nir_pop_loop(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&loop->cf_node);
}

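/*
 * Example (illustrative sketch): NIR loops are infinite until broken, so a
 * conditional break forms the exit.  nir_jump() is assumed from the jump
 * helpers elsewhere in this header.
 *
 *    nir_loop *loop = nir_push_loop(&b);
 *    {
 *       nir_push_if(&b, exit_cond);
 *       nir_jump(&b, nir_jump_break);
 *       nir_pop_if(&b, NULL);
 *       // ... loop body ...
 *    }
 *    nir_pop_loop(&b, loop);
 */
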
static inline nir_ssa_def *
nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
   if (!undef)
      return NULL;

   nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);
   if (build->update_divergence)
      nir_update_instr_divergence(build->shader, &undef->instr);

   return &undef->def;
}

static inline nir_ssa_def *
nir_build_imm(nir_builder *build, unsigned num_components,
              unsigned bit_size, const nir_const_value *value)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);
   if (!load_const)
      return NULL;

   memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}

static inline nir_ssa_def *
nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);

   /* nir_load_const_instr_create uses rzalloc so it's already zero */

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}

static inline nir_ssa_def *
nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_bool(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_bool(nir_builder *build, bool x)
{
   return nir_imm_boolN_t(build, x, 1);
}

static inline nir_ssa_def *
nir_imm_true(nir_builder *build)
{
   return nir_imm_bool(build, true);
}

static inline nir_ssa_def *
nir_imm_false(nir_builder *build)
{
   return nir_imm_bool(build, false);
}

static inline nir_ssa_def *
nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_float(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_float16(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 16);
}

static inline nir_ssa_def *
nir_imm_float(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_double(nir_builder *build, double x)
{
   return nir_imm_floatN_t(build, x, 64);
}

static inline nir_ssa_def *
nir_imm_vec2(nir_builder *build, float x, float y)
{
   nir_const_value v[2] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
   };
   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
      nir_const_value_for_float(z, 32),
      nir_const_value_for_float(w, 32),
   };

   return nir_build_imm(build, 4, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 16),
      nir_const_value_for_float(y, 16),
      nir_const_value_for_float(z, 16),
      nir_const_value_for_float(w, 16),
   };

   return nir_build_imm(build, 4, 16, v);
}

static inline nir_ssa_def *
nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_int(nir_builder *build, int x)
{
   return nir_imm_intN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_int64(nir_builder *build, int64_t x)
{
   return nir_imm_intN_t(build, x, 64);
}

static inline nir_ssa_def *
nir_imm_ivec2(nir_builder *build, int x, int y)
{
   nir_const_value v[2] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
   };

   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
   nir_const_value v[4] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
      nir_const_value_for_int(z, 32),
      nir_const_value_for_int(w, 32),
   };

   return nir_build_imm(build, 4, 32, v);
}

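/*
 * Example (illustrative sketch): a few immediates of different shapes and
 * bit sizes.
 *
 *    nir_ssa_def *half = nir_imm_float(&b, 0.5f);
 *    nir_ssa_def *red  = nir_imm_vec4(&b, 1.0f, 0.0f, 0.0f, 1.0f);
 *    nir_ssa_def *byte = nir_imm_intN_t(&b, 0xff, 8);
 */
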
static inline nir_ssa_def *
nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
{
   const nir_op_info *op_info = &nir_op_infos[instr->op];

   instr->exact = build->exact;

   /* Guess the number of components the destination temporary should have
    * based on our input sizes, if it's not fixed for the op.
    */
   unsigned num_components = op_info->output_size;
   if (num_components == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         if (op_info->input_sizes[i] == 0)
            num_components = MAX2(num_components,
                                  instr->src[i].src.ssa->num_components);
      }
   }
   assert(num_components != 0);

   /* Figure out the bitwidth based on the source bitwidth if the instruction
    * is variable-width.
    */
   unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
   if (bit_size == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
         if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
            if (bit_size)
               assert(src_bit_size == bit_size);
            else
               bit_size = src_bit_size;
         } else {
            assert(src_bit_size ==
               nir_alu_type_get_type_size(op_info->input_types[i]));
         }
      }
   }

   /* When in doubt, assume 32. */
   if (bit_size == 0)
      bit_size = 32;

   /* Make sure we don't swizzle from outside of our source vector (like if a
    * scalar value was passed into a multiply with a vector).
    */
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      for (unsigned j = instr->src[i].src.ssa->num_components;
           j < NIR_MAX_VEC_COMPONENTS; j++) {
         instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
      }
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
                     bit_size, NULL);
   instr->dest.write_mask = (1 << num_components) - 1;

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->dest.dest.ssa;
}

static inline nir_ssa_def *
nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   if (src1)
      instr->src[1].src = nir_src_for_ssa(src1);
   if (src2)
      instr->src[2].src = nir_src_for_ssa(src2);
   if (src3)
      instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

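/*
 * Example (illustrative sketch): the per-opcode helpers generated into
 * nir_builder_opcodes.h reduce to calls like this, so an fadd is simply:
 *
 *    nir_ssa_def *sum = nir_build_alu(&b, nir_op_fadd, x, y, NULL, NULL);
 */
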
/* for the couple special cases with more than 4 src args: */
static inline nir_ssa_def *
nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
{
   const nir_op_info *op_info = &nir_op_infos[op];
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < op_info->num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(srcs[i]);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

#include "nir_builder_opcodes.h"

static inline nir_ssa_def *
nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
{
   return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
}

static inline nir_ssa_def *
nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
   assert(!src.abs && !src.negate);
   if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
      bool any_swizzles = false;
      for (unsigned i = 0; i < num_components; i++) {
         if (src.swizzle[i] != i)
            any_swizzles = true;
      }
      if (!any_swizzles)
         return src.src.ssa;
   }

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
                     nir_src_bit_size(src.src), NULL);
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;
   nir_builder_instr_insert(build, &mov->instr);

   return &mov->dest.dest.ssa;
}

/**
 * Construct a mov that reswizzles the source's components.
 */
static inline nir_ssa_def *
nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
            unsigned num_components)
{
   assert(num_components <= NIR_MAX_VEC_COMPONENTS);
   nir_alu_src alu_src = { NIR_SRC_INIT };
   alu_src.src = nir_src_for_ssa(src);

   bool is_identity_swizzle = true;
   for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++) {
      if (swiz[i] != i)
         is_identity_swizzle = false;
      alu_src.swizzle[i] = swiz[i];
   }

   if (num_components == src->num_components && is_identity_swizzle)
      return src;

   return nir_mov_alu(build, alu_src, num_components);
}

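/*
 * Example (illustrative sketch): extract .zyx from a vec4, yielding a
 * three-component value (no instruction is emitted for identity swizzles).
 *
 *    static const unsigned zyx[] = { 2, 1, 0 };
 *    nir_ssa_def *swizzled = nir_swizzle(&b, vec, zyx, 3);
 */
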
/* Selects the right fdot given the number of components in each source. */
static inline nir_ssa_def *
nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
{
   assert(src0->num_components == src1->num_components);
   switch (src0->num_components) {
   case 1: return nir_fmul(build, src0, src1);
   case 2: return nir_fdot2(build, src0, src1);
   case 3: return nir_fdot3(build, src0, src1);
   case 4: return nir_fdot4(build, src0, src1);
   case 8: return nir_fdot8(build, src0, src1);
   case 16: return nir_fdot16(build, src0, src1);
   default:
      unreachable("bad component size");
   }

   return NULL;
}

static inline nir_ssa_def *
nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ieq(b, src0, src1);
   case 2: return nir_ball_iequal2(b, src0, src1);
   case 3: return nir_ball_iequal3(b, src0, src1);
   case 4: return nir_ball_iequal4(b, src0, src1);
   case 8: return nir_ball_iequal8(b, src0, src1);
   case 16: return nir_ball_iequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_ball(nir_builder *b, nir_ssa_def *src)
{
   return nir_ball_iequal(b, src, nir_imm_true(b));
}

static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ine(b, src0, src1);
   case 2: return nir_bany_inequal2(b, src0, src1);
   case 3: return nir_bany_inequal3(b, src0, src1);
   case 4: return nir_bany_inequal4(b, src0, src1);
   case 8: return nir_bany_inequal8(b, src0, src1);
   case 16: return nir_bany_inequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_bany(nir_builder *b, nir_ssa_def *src)
{
   return nir_bany_inequal(b, src, nir_imm_false(b));
}

static inline nir_ssa_def *
nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
{
   return nir_swizzle(b, def, &c, 1);
}

static inline nir_ssa_def *
nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
{
   unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      if ((mask & (1 << i)) == 0)
         continue;
      swizzle[num_channels++] = i;
   }

   return nir_swizzle(b, def, swizzle, num_channels);
}

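/*
 * Example (illustrative sketch): pick out a single component or a mask of
 * them.
 *
 *    nir_ssa_def *x  = nir_channel(&b, vec, 0);
 *    nir_ssa_def *xy = nir_channels(&b, vec, 0x3);
 */
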
/* Recursive helper for nir_select_from_ssa_def_array(): emits a binary
 * tree of bcsels over the half-open range [start, end).
 */
static inline nir_ssa_def *
_nir_select_from_array_helper(nir_builder *b, nir_ssa_def **arr,
                              nir_ssa_def *idx,
                              unsigned start, unsigned end)
{
   if (start == end - 1) {
      return arr[start];
   } else {
      unsigned mid = start + (end - start) / 2;
      return nir_bcsel(b, nir_ilt(b, idx, nir_imm_intN_t(b, mid, idx->bit_size)),
                       _nir_select_from_array_helper(b, arr, idx, start, mid),
                       _nir_select_from_array_helper(b, arr, idx, mid, end));
   }
}

static inline nir_ssa_def *
nir_select_from_ssa_def_array(nir_builder *b, nir_ssa_def **arr,
                              unsigned arr_len, nir_ssa_def *idx)
{
   return _nir_select_from_array_helper(b, arr, idx, 0, arr_len);
}

static inline nir_ssa_def *
nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
{
   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_channel(b, vec, c_const);
      else
         return nir_ssa_undef(b, 1, vec->bit_size);
   } else {
      nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < vec->num_components; i++)
         comps[i] = nir_channel(b, vec, i);
      return nir_select_from_ssa_def_array(b, comps, vec->num_components, c);
   }
}

/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
                      nir_ssa_def *scalar, unsigned c)
{
   assert(scalar->num_components == 1);
   assert(c < vec->num_components);

   nir_op vec_op = nir_op_vec(vec->num_components);
   nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);

   for (unsigned i = 0; i < vec->num_components; i++) {
      if (i == c) {
         vec_instr->src[i].src = nir_src_for_ssa(scalar);
         vec_instr->src[i].swizzle[0] = 0;
      } else {
         vec_instr->src[i].src = nir_src_for_ssa(vec);
         vec_instr->src[i].swizzle[0] = i;
      }
   }

   return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
}

/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
                  nir_ssa_def *c)
{
   assert(scalar->num_components == 1);
   assert(c->num_components == 1);

   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_vector_insert_imm(b, vec, scalar, c_const);
      else
         return vec;
   } else {
      nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
      nir_ssa_def *per_comp_idx =
         nir_build_imm(b, vec->num_components,
                       c->bit_size, per_comp_idx_const);

      /* nir_builder will automatically splat out scalars to vectors so an
       * insert is as simple as "if I'm the channel, replace me with the
       * scalar."
       */
      return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
   }
}

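/*
 * Example (illustrative sketch): overwrite the third component of a vec4,
 * first with a known index and then with a dynamic one.
 *
 *    nir_ssa_def *v2 = nir_vector_insert_imm(&b, vec, scalar, 2);
 *    nir_ssa_def *v3 = nir_vector_insert(&b, vec, scalar, idx);
 */
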
static inline nir_ssa_def *
nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_i2i64(build, x);
   case 32: return nir_i2i32(build, x);
   case 16: return nir_i2i16(build, x);
   case 8:  return nir_i2i8(build, x);
   default: unreachable("Invalid bit size");
   }
}

static inline nir_ssa_def *
nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_u2u64(build, x);
   case 32: return nir_u2u32(build, x);
   case 16: return nir_u2u16(build, x);
   case 8:  return nir_u2u8(build, x);
   default: unreachable("Invalid bit size");
   }
}

static inline nir_ssa_def *
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return x;
   } else {
      return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_ieq_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return nir_ieq(build, x, nir_imm_intN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == 1) {
      return x;
   } else if (!build->shader->options->lower_bitops &&
              util_is_power_of_two_or_zero64(y)) {
      return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
   } else if (amul) {
      return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   } else {
      return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, false);
}

static inline nir_ssa_def *
nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, true);
}

static inline nir_ssa_def *
nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == BITFIELD64_MASK(x->bit_size)) {
      return x;
   } else {
      return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else {
      return nir_ishr(build, x, nir_imm_int(build, y));
   }
}

static inline nir_ssa_def *
nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else {
      return nir_ushr(build, x, nir_imm_int(build, y));
   }
}

static inline nir_ssa_def *
nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 1) {
      return x;
   } else if (util_is_power_of_two_nonzero(y)) {
      return nir_ushr_imm(build, x, ffsll(y) - 1);
   } else {
      return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

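/*
 * Example (illustrative sketch): the *_imm helpers fold trivial operands,
 * so these emit, respectively, nothing, an ishl by 3 (when bitops are
 * allowed), and a ushr by 4:
 *
 *    nir_iadd_imm(&b, x, 0);
 *    nir_imul_imm(&b, x, 8);
 *    nir_udiv_imm(&b, x, 16);
 */
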
static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components * src->bit_size == dest_bit_size);

   switch (dest_bit_size) {
   case 64:
      switch (src->bit_size) {
      case 32: return nir_pack_64_2x32(b, src);
      case 16: return nir_pack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (src->bit_size == 16)
         return nir_pack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated pack opcode. */
   nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
      val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
      dest = nir_ior(b, dest, val);
   }
   return dest;
}

static inline nir_ssa_def *
nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components == 1);
   assert(src->bit_size > dest_bit_size);
   const unsigned dest_num_components = src->bit_size / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   switch (src->bit_size) {
   case 64:
      switch (dest_bit_size) {
      case 32: return nir_unpack_64_2x32(b, src);
      case 16: return nir_unpack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (dest_bit_size == 16)
         return nir_unpack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated unpack opcode. */
   nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < dest_num_components; i++) {
      nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
      dest_comps[i] = nir_u2u(b, val, dest_bit_size);
   }
   return nir_vec(b, dest_comps, dest_num_components);
}

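/*
 * Example (illustrative sketch): round-trip a vec2 of 32-bit values
 * through a single 64-bit scalar.
 *
 *    nir_ssa_def *packed   = nir_pack_bits(&b, vec2_32, 64);
 *    nir_ssa_def *unpacked = nir_unpack_bits(&b, packed, 32);
 */
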
/**
 * Treats the sources as one big blob of bits and extracts the range of bits
 * given by
 *
 *       [first_bit, first_bit + dest_num_components * dest_bit_size)
 *
 * The range can have any alignment or size as long as it's an integer number
 * of destination components and fits inside the concatenated sources.
 *
 * TODO: The one caveat here is that we can't handle byte alignment if 64-bit
 * values are involved because that would require pack/unpack to/from a vec8
 * which NIR currently does not support.
 */
static inline nir_ssa_def *
nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
                 unsigned first_bit,
                 unsigned dest_num_components, unsigned dest_bit_size)
{
   const unsigned num_bits = dest_num_components * dest_bit_size;

   /* Figure out the common bit size */
   unsigned common_bit_size = dest_bit_size;
   for (unsigned i = 0; i < num_srcs; i++)
      common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
   if (first_bit > 0)
      common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));

   /* We don't want to have to deal with 1-bit values */
   assert(common_bit_size >= 8);

   nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
   assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));

   /* First, unpack to the common bit size and select the components from the
    * source.
    */
   int src_idx = -1;
   unsigned src_start_bit = 0;
   unsigned src_end_bit = 0;
   for (unsigned i = 0; i < num_bits / common_bit_size; i++) {
      const unsigned bit = first_bit + (i * common_bit_size);
      while (bit >= src_end_bit) {
         src_idx++;
         assert(src_idx < (int) num_srcs);
         src_start_bit = src_end_bit;
         src_end_bit += srcs[src_idx]->bit_size *
                        srcs[src_idx]->num_components;
      }
      assert(bit >= src_start_bit);
      assert(bit + common_bit_size <= src_end_bit);
      const unsigned rel_bit = bit - src_start_bit;
      const unsigned src_bit_size = srcs[src_idx]->bit_size;

      nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
                                      rel_bit / src_bit_size);
      if (srcs[src_idx]->bit_size > common_bit_size) {
         nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
         comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
                                         common_bit_size);
      }
      common_comps[i] = comp;
   }

   /* Now, re-pack the destination if we have to */
   if (dest_bit_size > common_bit_size) {
      unsigned common_per_dest = dest_bit_size / common_bit_size;
      nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < dest_num_components; i++) {
         nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
                                         common_per_dest);
         dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
      }
      return nir_vec(b, dest_comps, dest_num_components);
   } else {
      assert(dest_bit_size == common_bit_size);
      return nir_vec(b, common_comps, dest_num_components);
   }
}

static inline nir_ssa_def *
nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert((src->bit_size * src->num_components) % dest_bit_size == 0);
   const unsigned dest_num_components =
      (src->bit_size * src->num_components) / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
}

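/*
 * Example (illustrative sketch): reinterpret a vec4 of 16-bit values as a
 * vec2 of 32-bit values without changing any bits.
 *
 *    nir_ssa_def *v32 = nir_bitcast_vector(&b, v16, 32);
 */
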
/**
 * Turns a nir_src into a nir_ssa_def * so it can be passed to
 * nir_build_alu()-based builder calls.
 *
 * See nir_ssa_for_alu_src() for alu instructions.
 */
static inline nir_ssa_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
{
   if (src.is_ssa && src.ssa->num_components == num_components)
      return src.ssa;

   nir_alu_src alu = { NIR_SRC_INIT };
   alu.src = src;
   for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
      alu.swizzle[j] = j;

   return nir_mov_alu(build, alu, num_components);
}

/**
 * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
 * nir_alu_src's swizzle.
 */
static inline nir_ssa_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
   if (nir_alu_src_is_trivial_ssa(instr, srcn))
      return instr->src[srcn].src.ssa;

   nir_alu_src *src = &instr->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
   return nir_mov_alu(build, *src, num_components);
}

static inline unsigned
nir_get_ptr_bitsize(nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_KERNEL)
      return shader->info.cs.ptr_size;
   return 32;
}

static inline nir_deref_instr *
nir_build_deref_var(nir_builder *build, nir_variable *var)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_var);

   deref->modes = (nir_variable_mode)var->data.mode;
   deref->type = var->type;
   deref->var = var;

   nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
                     nir_get_ptr_bitsize(build->shader), NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
                      nir_ssa_def *index)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type) ||
          glsl_type_is_vector(parent->type));

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array);

   deref->modes = parent->modes;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
                          int64_t index)
{
   assert(parent->dest.is_ssa);
   nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
                                         parent->dest.ssa.bit_size);

   return nir_build_deref_array(build, parent, idx_ssa);
}

static inline nir_deref_instr *
nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
                             nir_ssa_def *index)
{
   assert(parent->deref_type == nir_deref_type_array ||
          parent->deref_type == nir_deref_type_ptr_as_array ||
          parent->deref_type == nir_deref_type_cast);

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);

   deref->modes = parent->modes;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array_wildcard);

   deref->modes = parent->modes;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
                       unsigned index)
{
   assert(glsl_type_is_struct_or_ifc(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_struct);

   deref->modes = parent->modes;
   deref->type = glsl_get_struct_field(parent->type, index);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->strct.index = index;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

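/*
 * Example (illustrative sketch): build the deref chain for "var.field[i]"
 * given a struct member index and an SSA array index.
 *
 *    nir_deref_instr *d = nir_build_deref_var(&b, var);
 *    d = nir_build_deref_struct(&b, d, field_index);
 *    d = nir_build_deref_array(&b, d,
 *                              nir_i2i(&b, i, d->dest.ssa.bit_size));
 */
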
static inline nir_deref_instr *
nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
                     nir_variable_mode modes, const struct glsl_type *type,
                     unsigned ptr_stride)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_cast);

   deref->modes = modes;
   deref->type = type;
   deref->parent = nir_src_for_ssa(parent);
   deref->cast.ptr_stride = ptr_stride;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->num_components, parent->bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent,
                         uint32_t align_mul, uint32_t align_offset)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_cast);

   deref->modes = parent->modes;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->cast.ptr_stride = nir_deref_instr_array_stride(deref);
   deref->cast.align_mul = align_mul;
   deref->cast.align_offset = align_offset;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

/** Returns a deref that follows another but starting from the given parent
 *
 * The new deref will be the same type and take the same array or struct index
 * as the leader deref but it may have a different parent.  This is very
 * useful for walking deref paths.
 */
static inline nir_deref_instr *
nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                         nir_deref_instr *leader)
{
   /* If the derefs would have the same parent, don't make a new one */
   assert(leader->parent.is_ssa);
   if (leader->parent.ssa == &parent->dest.ssa)
      return leader;

   UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);

   switch (leader->deref_type) {
   case nir_deref_type_var:
      unreachable("A var dereference cannot have a parent");
      break;

   case nir_deref_type_array:
   case nir_deref_type_array_wildcard:
      assert(glsl_type_is_matrix(parent->type) ||
             glsl_type_is_array(parent->type) ||
             (leader->deref_type == nir_deref_type_array &&
              glsl_type_is_vector(parent->type)));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      if (leader->deref_type == nir_deref_type_array) {
         assert(leader->arr.index.is_ssa);
         nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
                                         parent->dest.ssa.bit_size);
         return nir_build_deref_array(b, parent, index);
      } else {
         return nir_build_deref_array_wildcard(b, parent);
      }

   case nir_deref_type_struct:
      assert(glsl_type_is_struct_or_ifc(parent->type));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      return nir_build_deref_struct(b, parent, leader->strct.index);

   default:
      unreachable("Invalid deref instruction type");
   }
}

static inline nir_ssa_def *
nir_load_reg(nir_builder *build, nir_register *reg)
{
   return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
}

static inline void
nir_store_reg(nir_builder *build, nir_register *reg,
              nir_ssa_def *def, nir_component_mask_t write_mask)
{
   assert(reg->num_components == def->num_components);
   assert(reg->bit_size == def->bit_size);

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
   nir_builder_instr_insert(build, &mov->instr);
}

static inline nir_ssa_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                           enum gl_access_qualifier access)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
   load->num_components = glsl_get_vector_elements(deref->type);
   load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
                     glsl_get_bit_size(deref->type), NULL);
   nir_intrinsic_set_access(load, access);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

static inline nir_ssa_def *
nir_load_deref(nir_builder *build, nir_deref_instr *deref)
{
   return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
}

static inline void
nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                            nir_ssa_def *value, unsigned writemask,
                            enum gl_access_qualifier access)
{
   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
   store->num_components = glsl_get_vector_elements(deref->type);
   store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   store->src[1] = nir_src_for_ssa(value);
   nir_intrinsic_set_write_mask(store,
                                writemask & ((1 << store->num_components) - 1));
   nir_intrinsic_set_access(store, access);
   nir_builder_instr_insert(build, &store->instr);
}

static inline void
nir_store_deref(nir_builder *build, nir_deref_instr *deref,
                nir_ssa_def *value, unsigned writemask)
{
   nir_store_deref_with_access(build, deref, value, writemask,
                               (enum gl_access_qualifier)0);
}

static inline void
nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
                           nir_deref_instr *src,
                           enum gl_access_qualifier dest_access,
                           enum gl_access_qualifier src_access)
{
   nir_intrinsic_instr *copy =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
   copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
   copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
   nir_intrinsic_set_dst_access(copy, dest_access);
   nir_intrinsic_set_src_access(copy, src_access);
   nir_builder_instr_insert(build, &copy->instr);
}

static inline void
nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
{
   nir_copy_deref_with_access(build, dest, src,
                              (enum gl_access_qualifier) 0,
                              (enum gl_access_qualifier) 0);
}

1398 static inline void
nir_memcpy_deref_with_access(nir_builder * build,nir_deref_instr * dest,nir_deref_instr * src,nir_ssa_def * size,enum gl_access_qualifier dest_access,enum gl_access_qualifier src_access)1399 nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
1400                              nir_deref_instr *src, nir_ssa_def *size,
1401                              enum gl_access_qualifier dest_access,
1402                              enum gl_access_qualifier src_access)
1403 {
1404    nir_intrinsic_instr *copy =
1405       nir_intrinsic_instr_create(build->shader, nir_intrinsic_memcpy_deref);
1406    copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
1407    copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
1408    copy->src[2] = nir_src_for_ssa(size);
1409    nir_intrinsic_set_dst_access(copy, dest_access);
1410    nir_intrinsic_set_src_access(copy, src_access);
1411    nir_builder_instr_insert(build, &copy->instr);
1412 }
1413 
1414 static inline void
nir_memcpy_deref(nir_builder * build,nir_deref_instr * dest,nir_deref_instr * src,nir_ssa_def * size)1415 nir_memcpy_deref(nir_builder *build, nir_deref_instr *dest,
1416                  nir_deref_instr *src, nir_ssa_def *size)
1417 {
1418    nir_memcpy_deref_with_access(build, dest, src, size,
1419                                 (enum gl_access_qualifier)0,
1420                                 (enum gl_access_qualifier)0);
1421 }

static inline nir_ssa_def *
nir_build_deref_mode_is(nir_builder *build, nir_deref_instr *deref,
                        nir_variable_mode mode)
{
   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_deref_mode_is);
   intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   nir_intrinsic_set_memory_modes(intrin, mode);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
   nir_builder_instr_insert(build, &intrin->instr);
   return &intrin->dest.ssa;
}
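
/* Usage sketch: deref_mode_is yields a 1-bit boolean telling whether a
 * deref that could point at several modes actually points at the given
 * mode at runtime, e.g. for a hypothetical generic-pointer deref "d":
 *
 *    nir_ssa_def *is_shared =
 *       nir_build_deref_mode_is(b, d, nir_var_mem_shared);
 */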

static inline nir_ssa_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
   return nir_load_deref(build, nir_build_deref_var(build, var));
}

static inline void
nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
              unsigned writemask)
{
   nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
}

static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{
   nir_copy_deref(build, nir_build_deref_var(build, dest),
                         nir_build_deref_var(build, src));
}
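
/* Usage sketch for the variable-level wrappers, which build the
 * nir_deref_instr internally.  With hypothetical nir_variables "in_var"
 * and "out_var" of vec4 type:
 *
 *    nir_ssa_def *v = nir_load_var(b, in_var);
 *    nir_store_var(b, out_var, v, 0xf);       // all four components
 *    nir_copy_var(b, out_var, in_var);        // equivalent shortcut
 */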

static inline nir_ssa_def *
nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
                unsigned num_components, unsigned bit_size)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global);
   load->num_components = num_components;
   load->src[0] = nir_src_for_ssa(addr);
   nir_intrinsic_set_align(load, align, 0);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

static inline void
nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
                 nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
   store->num_components = value->num_components;
   store->src[0] = nir_src_for_ssa(value);
   store->src[1] = nir_src_for_ssa(addr);
   nir_intrinsic_set_write_mask(store,
      write_mask & BITFIELD_MASK(value->num_components));
   nir_intrinsic_set_align(store, align, 0);
   nir_builder_instr_insert(build, &store->instr);
}
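
/* Usage sketch for raw global memory access.  Assumes hypothetical
 * addresses "addr" and "addr2" (typically 64-bit on targets that support
 * global pointers) computed earlier; loads a 4-byte-aligned vec2 of
 * 32-bit values, then stores it elsewhere:
 *
 *    nir_ssa_def *v = nir_load_global(b, addr, 4, 2, 32);
 *    nir_store_global(b, addr2, 4, v, 0x3);
 */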

static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
   assert(param_idx < build->impl->function->num_params);
   nir_parameter *param = &build->impl->function->params[param_idx];

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_param);
   nir_intrinsic_set_param_idx(load, param_idx);
   load->num_components = param->num_components;
   nir_ssa_dest_init(&load->instr, &load->dest,
                     param->num_components, param->bit_size, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}
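
/* Usage sketch: inside a non-entrypoint function impl, load_param reads
 * one of the function's formal parameters by index, with size and bit
 * width taken from the parameter's declaration:
 *
 *    nir_ssa_def *arg0 = nir_load_param(b, 0);
 */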

static inline nir_ssa_def *
nir_load_reloc_const_intel(nir_builder *b, uint32_t id)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_load_reloc_const_intel);
   nir_intrinsic_set_param_idx(load, id);
   nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &load->instr);
   return &load->dest.ssa;
}

static inline nir_ssa_def *
nir_convert_alu_types(nir_builder *b, nir_ssa_def *src,
                      nir_alu_type src_type, nir_alu_type dest_type,
                      nir_rounding_mode round, bool saturate)
{
   assert(nir_alu_type_get_type_size(dest_type) != 0);
   assert(nir_alu_type_get_type_size(src_type) == 0 ||
          nir_alu_type_get_type_size(src_type) == src->bit_size);
   src_type = (nir_alu_type)(src_type | src->bit_size);

   nir_intrinsic_instr *conv =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_convert_alu_types);
   conv->src[0] = nir_src_for_ssa(src);
   conv->num_components = src->num_components;
   nir_intrinsic_set_src_type(conv, src_type);
   nir_intrinsic_set_dest_type(conv, dest_type);
   nir_intrinsic_set_rounding_mode(conv, round);
   nir_intrinsic_set_saturate(conv, saturate);
   nir_ssa_dest_init(&conv->instr, &conv->dest, src->num_components,
                     nir_alu_type_get_type_size(dest_type), NULL);
   nir_builder_instr_insert(b, &conv->instr);
   return &conv->dest.ssa;
}
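
/* Usage sketch: convert_alu_types performs a conversion with an explicit
 * rounding mode and optional saturation, which the plain nir_op
 * conversions cannot always express.  Converting a hypothetical float
 * "src" to a saturated, round-toward-zero 16-bit int:
 *
 *    nir_ssa_def *i16 =
 *       nir_convert_alu_types(b, src, nir_type_float, nir_type_int16,
 *                             nir_rounding_mode_rtz, true);
 *
 * The source bit size is folded into src_type above, so a sizeless base
 * type like nir_type_float is accepted.
 */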

#include "nir_builder_opcodes.h"

static inline nir_ssa_def *
nir_f2b(nir_builder *build, nir_ssa_def *f)
{
   return nir_f2b1(build, f);
}

static inline nir_ssa_def *
nir_i2b(nir_builder *build, nir_ssa_def *i)
{
   return nir_i2b1(build, i);
}

static inline nir_ssa_def *
nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2f64(build, b);
   case 32: return nir_b2f32(build, b);
   case 16: return nir_b2f16(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}

static inline nir_ssa_def *
nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2i64(build, b);
   case 32: return nir_b2i32(build, b);
   case 16: return nir_b2i16(build, b);
   case 8:  return nir_b2i8(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}
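
/* Usage sketch: the sized b2f/b2i helpers pick the right opcode for a
 * requested bit size, e.g. turning a hypothetical 1-bit condition "cond"
 * into 0.0f/1.0f:
 *
 *    nir_ssa_def *f = nir_b2f(b, cond, 32);   // nir_b2f32 under the hood
 */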

static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
                     unsigned interp_mode)
{
   unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
   nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
   nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
   nir_intrinsic_set_interp_mode(bary, interp_mode);
   nir_builder_instr_insert(build, &bary->instr);
   return &bary->dest.ssa;
}
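
/* Usage sketch: load perspective-correct pixel barycentrics for
 * fragment-shader interpolation:
 *
 *    nir_ssa_def *bary =
 *       nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
 *                            INTERP_MODE_SMOOTH);
 *
 * load_barycentric_model is the one 3-component case; the other ops
 * yield a vec2.
 */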

static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
   assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
   nir_builder_instr_insert(build, &jump->instr);
}
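
/* Usage sketch: emit a structured jump, e.g. a loop break from inside a
 * nir_push_loop()/nir_pop_loop() region:
 *
 *    nir_jump(b, nir_jump_break);
 *
 * Unstructured gotos are rejected here; use nir_goto/nir_goto_if below.
 */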

static inline void
nir_goto(nir_builder *build, struct nir_block *target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
   jump->target = target;
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
            struct nir_block *else_target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
   jump->condition = cond;
   jump->target = target;
   jump->else_target = else_target;
   nir_builder_instr_insert(build, &jump->instr);
}

static inline nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (func) {
   case COMPARE_FUNC_NEVER:
      return nir_imm_int(b, 0);
   case COMPARE_FUNC_ALWAYS:
      return nir_imm_int(b, ~0);
   case COMPARE_FUNC_EQUAL:
      return nir_feq(b, src0, src1);
   case COMPARE_FUNC_NOTEQUAL:
      return nir_fneu(b, src0, src1);
   case COMPARE_FUNC_GREATER:
      return nir_flt(b, src1, src0);
   case COMPARE_FUNC_GEQUAL:
      return nir_fge(b, src0, src1);
   case COMPARE_FUNC_LESS:
      return nir_flt(b, src0, src1);
   case COMPARE_FUNC_LEQUAL:
      return nir_fge(b, src1, src0);
   }
   unreachable("bad compare func");
}
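
/* Usage sketch: map a GL/Vulkan depth-compare function onto the matching
 * NIR comparison, as a shadow-sampler lowering pass might do with
 * hypothetical "ref" and "depth" values:
 *
 *    nir_ssa_def *pass = nir_compare_func(b, COMPARE_FUNC_LEQUAL,
 *                                         ref, depth);
 *
 * Note that GREATER and LEQUAL are expressed by swapping the operands of
 * flt/fge rather than by separate opcodes.
 */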

static inline void
nir_scoped_barrier(nir_builder *b,
                   nir_scope exec_scope,
                   nir_scope mem_scope,
                   nir_memory_semantics mem_semantics,
                   nir_variable_mode mem_modes)
{
   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
   nir_intrinsic_set_execution_scope(intrin, exec_scope);
   nir_intrinsic_set_memory_scope(intrin, mem_scope);
   nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
   nir_intrinsic_set_memory_modes(intrin, mem_modes);
   nir_builder_instr_insert(b, &intrin->instr);
}

static inline void
nir_scoped_memory_barrier(nir_builder *b,
                          nir_scope scope,
                          nir_memory_semantics semantics,
                          nir_variable_mode modes)
{
   nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
}
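
/* Usage sketch: a full workgroup control+memory barrier over shared
 * memory, as a compute-shader barrier() lowering might emit:
 *
 *    nir_scoped_barrier(b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
 *                       NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
 *
 * nir_scoped_memory_barrier is the same thing with no execution scope.
 */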

static inline nir_ssa_def *
nir_convert_to_bit_size(nir_builder *b,
                        nir_ssa_def *src,
                        nir_alu_type type,
                        unsigned bit_size)
{
   nir_alu_type base_type = nir_alu_type_get_base_type(type);
   nir_alu_type dst_type = (nir_alu_type)(bit_size | base_type);

   nir_op opcode =
      nir_type_conversion_op(type, dst_type, nir_rounding_mode_undef);

   return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
}

static inline nir_ssa_def *
nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
}

static inline nir_ssa_def *
nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
}

static inline nir_ssa_def *
nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
}

static inline nir_ssa_def *
nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
}
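
/* Usage sketch: the *2*N helpers resize a value while keeping its base
 * type, picking the conversion opcode automatically.  Widening a
 * hypothetical 16-bit float "h" to 32 bits:
 *
 *    nir_ssa_def *f = nir_f2fN(b, h, 32);
 */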

#endif /* NIR_BUILDER_H */