/*
 * Copyright © 2014-2015 Broadcom
 * Copyright © 2021 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

nir_builder MUST_CHECK PRINTFLIKE(3, 4)
nir_builder_init_simple_shader(gl_shader_stage stage,
                               const nir_shader_compiler_options *options,
                               const char *name, ...)
{
   nir_builder b;

   memset(&b, 0, sizeof(b));
   b.shader = nir_shader_create(NULL, stage, options, NULL);

   if (name) {
      va_list args;
      va_start(args, name);
      b.shader->info.name = ralloc_vasprintf(b.shader, name, args);
      va_end(args);
   }

   nir_function *func = nir_function_create(b.shader, "main");
   func->is_entrypoint = true;
   b.exact = false;
   b.impl = nir_function_impl_create(func);
   b.cursor = nir_after_cf_list(&b.impl->body);

   /* Simple shaders are typically internal, e.g. blit shaders. */
   b.shader->info.internal = true;

   /* Compute shaders on Vulkan require an initialized workgroup size, so
    * pick a safe default value.  This relies on workgroup merging for
    * efficiency.
    */
   b.shader->info.workgroup_size[0] = 1;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   return b;
}
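
/* Usage sketch (illustrative): the returned builder is ready to emit code at
 * its cursor.  "options" would come from the driver, and the name is a
 * printf-style format string:
 *
 *    nir_builder b =
 *       nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, options,
 *                                      "blit-%s", "2d");
 *    nir_def *half = nir_imm_float(&b, 0.5f);
 *    ...emit more instructions through the builder helpers...
 */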

nir_def *
nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
{
   const nir_op_info *op_info = &nir_op_infos[instr->op];

   instr->exact = build->exact;
   instr->fp_fast_math = build->fp_fast_math;

   /* Guess the number of components the destination temporary should have
    * based on our input sizes, if it's not fixed for the op.
    */
   unsigned num_components = op_info->output_size;
   if (num_components == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         if (op_info->input_sizes[i] == 0)
            num_components = MAX2(num_components,
                                  instr->src[i].src.ssa->num_components);
      }
   }
   assert(num_components != 0);

   /* Figure out the bitwidth based on the source bitwidth if the instruction
    * is variable-width.
    */
   unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
   if (bit_size == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
         if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
            if (bit_size)
               assert(src_bit_size == bit_size);
            else
               bit_size = src_bit_size;
         } else {
            assert(src_bit_size ==
                   nir_alu_type_get_type_size(op_info->input_types[i]));
         }
      }
   }

   /* When in doubt, assume 32. */
   if (bit_size == 0)
      bit_size = 32;

   /* Make sure we don't swizzle from outside of our source vector (like if a
    * scalar value was passed into a multiply with a vector).
    */
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      for (unsigned j = instr->src[i].src.ssa->num_components;
           j < NIR_MAX_VEC_COMPONENTS; j++) {
         instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
      }
   }

   nir_def_init(&instr->instr, &instr->def, num_components,
                bit_size);

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->def;
}
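
/* For example (a sketch using the generated nir_fadd helper): fadd has
 * neither a fixed output size nor a fixed bit size, so both are inferred
 * from the sources:
 *
 *    nir_def *v = nir_imm_vec3(&b, 1.0, 2.0, 3.0);  // vec3, 32-bit
 *    nir_def *sum = nir_fadd(&b, v, v);             // inferred: vec3, 32-bit
 */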

nir_def *
nir_build_alu(nir_builder *build, nir_op op, nir_def *src0,
              nir_def *src1, nir_def *src2, nir_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   if (src1)
      instr->src[1].src = nir_src_for_ssa(src1);
   if (src2)
      instr->src[2].src = nir_src_for_ssa(src2);
   if (src3)
      instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}
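
/* Most callers use the generated per-opcode helpers instead, but direct use
 * is a sketch like this, passing NULL for unused source slots (a, b_src and
 * c are illustrative nir_def pointers):
 *
 *    nir_def *r = nir_build_alu(&b, nir_op_ffma, a, b_src, c, NULL);
 */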

nir_def *
nir_build_alu1(nir_builder *build, nir_op op, nir_def *src0)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu2(nir_builder *build, nir_op op, nir_def *src0,
               nir_def *src1)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   instr->src[1].src = nir_src_for_ssa(src1);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu3(nir_builder *build, nir_op op, nir_def *src0,
               nir_def *src1, nir_def *src2)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   instr->src[1].src = nir_src_for_ssa(src1);
   instr->src[2].src = nir_src_for_ssa(src2);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu4(nir_builder *build, nir_op op, nir_def *src0,
               nir_def *src1, nir_def *src2, nir_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   instr->src[1].src = nir_src_for_ssa(src1);
   instr->src[2].src = nir_src_for_ssa(src2);
   instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

/* For the couple of special cases with more than 4 source arguments: */
nir_def *
nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs)
{
   const nir_op_info *op_info = &nir_op_infos[op];
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < op_info->num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(srcs[i]);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}
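
/* Sketch: nir_op_vec8 takes eight sources, more than the fixed-arity
 * helpers above can express:
 *
 *    nir_def *srcs[8] = { s0, s1, s2, s3, s4, s5, s6, s7 };
 *    nir_def *v = nir_build_alu_src_arr(&b, nir_op_vec8, srcs);
 */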

nir_def *
nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
                          nir_deref_instr *texture,
                          nir_deref_instr *sampler,
                          unsigned num_extra_srcs,
                          const nir_tex_src *extra_srcs)
{
   assert(texture != NULL);
   assert(glsl_type_is_image(texture->type) ||
          glsl_type_is_texture(texture->type) ||
          glsl_type_is_sampler(texture->type));

   const unsigned num_srcs = 1 + (sampler != NULL) + num_extra_srcs;

   nir_tex_instr *tex = nir_tex_instr_create(build->shader, num_srcs);
   tex->op = op;
   tex->sampler_dim = glsl_get_sampler_dim(texture->type);
   tex->is_array = glsl_sampler_type_is_array(texture->type);
   tex->is_shadow = false;

   switch (op) {
   case nir_texop_txs:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs_intel:
   case nir_texop_fragment_mask_fetch_amd:
   case nir_texop_descriptor_amd:
      tex->dest_type = nir_type_int32;
      break;
   case nir_texop_lod:
      tex->dest_type = nir_type_float32;
      break;
   case nir_texop_samples_identical:
      tex->dest_type = nir_type_bool1;
      break;
   default:
      assert(!nir_tex_instr_is_query(tex));
      tex->dest_type = nir_get_nir_type_for_glsl_base_type(
         glsl_get_sampler_result_type(texture->type));
      break;
   }

   unsigned src_idx = 0;
   tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
                                             &texture->def);
   if (sampler != NULL) {
      assert(glsl_type_is_sampler(sampler->type));
      tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
                                                &sampler->def);
   }
   for (unsigned i = 0; i < num_extra_srcs; i++) {
      switch (extra_srcs[i].src_type) {
      case nir_tex_src_coord:
         tex->coord_components = nir_src_num_components(extra_srcs[i].src);
         assert(tex->coord_components == tex->is_array +
                   glsl_get_sampler_dim_coordinate_components(tex->sampler_dim));
         break;

      case nir_tex_src_lod:
         assert(tex->sampler_dim == GLSL_SAMPLER_DIM_1D ||
                tex->sampler_dim == GLSL_SAMPLER_DIM_2D ||
                tex->sampler_dim == GLSL_SAMPLER_DIM_3D ||
                tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
         break;

      case nir_tex_src_ms_index:
         assert(tex->sampler_dim == GLSL_SAMPLER_DIM_MS);
         break;

      case nir_tex_src_comparator:
         /* Assume 1-component shadow for the builder helper */
         tex->is_shadow = true;
         tex->is_new_style_shadow = true;
         break;

      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_texture_handle:
      case nir_tex_src_sampler_handle:
         unreachable("Texture and sampler must be provided directly as derefs");
         break;

      default:
         break;
      }

      tex->src[src_idx++] = extra_srcs[i];
   }
   assert(src_idx == num_srcs);

   nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex),
                nir_alu_type_get_type_size(tex->dest_type));
   nir_builder_instr_insert(build, &tex->instr);

   return &tex->def;
}
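
/* Usage sketch (illustrative): a txl with an explicit zero LOD, where
 * tex_deref, samp_deref and coord are derefs/values built earlier:
 *
 *    nir_tex_src extra[] = {
 *       nir_tex_src_for_ssa(nir_tex_src_coord, coord),
 *       nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_float(&b, 0.0f)),
 *    };
 *    nir_def *color = nir_build_tex_deref_instr(&b, nir_texop_txl,
 *                                               tex_deref, samp_deref,
 *                                               2, extra);
 */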

nir_def *
nir_build_string(nir_builder *build, const char *value)
{
   nir_debug_info_instr *instr =
      nir_debug_info_instr_create(build->shader, nir_debug_info_string, strlen(value));
   memcpy(instr->string, value, instr->string_length);
   nir_def_init(&instr->instr, &instr->def, 1, nir_get_ptr_bitsize(build->shader));
   nir_builder_instr_insert(build, &instr->instr);
   return &instr->def;
}

nir_def *
nir_vec_scalars(nir_builder *build, nir_scalar *comp, unsigned num_components)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < num_components; i++) {
      instr->src[i].src = nir_src_for_ssa(comp[i].def);
      instr->src[i].swizzle[0] = comp[i].comp;
   }
   instr->exact = build->exact;
   instr->fp_fast_math = build->fp_fast_math;

   /* Note: not reusing nir_builder_alu_instr_finish_and_insert() because it
    * can't re-guess the num_components when num_components == 1 (nir_op_mov).
    */
   nir_def_init(&instr->instr, &instr->def, num_components,
                comp[0].def->bit_size);

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->def;
}
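
/* Sketch: gathering the first components of two vectors into a vec2.
 * A nir_scalar is just a (def, component) pair:
 *
 *    nir_scalar comps[2] = {
 *       { .def = vec_a, .comp = 0 },
 *       { .def = vec_b, .comp = 0 },
 *    };
 *    nir_def *v2 = nir_vec_scalars(&b, comps, 2);
 */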

/**
 * Get nir_def for an alu src, respecting the nir_alu_src's swizzle.
 */
nir_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
   if (nir_alu_src_is_trivial_ssa(instr, srcn))
      return instr->src[srcn].src.ssa;

   nir_alu_src *src = &instr->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
   return nir_mov_alu(build, *src, num_components);
}

/* Generic builder for system values. */
nir_def *
nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
                      unsigned num_components, unsigned bit_size)
{
   nir_intrinsic_instr *load = nir_intrinsic_instr_create(build->shader, op);
   if (nir_intrinsic_infos[op].dest_components > 0)
      assert(num_components == nir_intrinsic_infos[op].dest_components);
   else
      load->num_components = num_components;
   load->const_index[0] = index;

   nir_def_init(&load->instr, &load->def, num_components, bit_size);
   nir_builder_instr_insert(build, &load->instr);
   return &load->def;
}
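
/* Sketch: loading a single 32-bit sample ID through the generic path
 * (the generated nir_load_sample_id() helper is the usual spelling):
 *
 *    nir_def *id =
 *       nir_load_system_value(&b, nir_intrinsic_load_sample_id, 0, 1, 32);
 */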

void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
   nir_instr_insert(build->cursor, instr);

   /* Move the cursor forward. */
   build->cursor = nir_after_instr(instr);
}

void
nir_builder_instr_insert_at_top(nir_builder *build, nir_instr *instr)
{
   nir_cursor top = nir_before_impl(build->impl);
   const bool at_top = build->cursor.block != NULL &&
                       nir_cursors_equal(build->cursor, top);

   nir_instr_insert(top, instr);

   if (at_top)
      build->cursor = nir_after_instr(instr);
}

void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}

bool
nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
      if (n == cf_node)
         return true;
   }
   return false;
}

nir_if *
nir_push_if(nir_builder *build, nir_def *condition)
{
   nir_if *nif = nir_if_create(build->shader);
   nif->condition = nir_src_for_ssa(condition);
   nir_builder_cf_insert(build, &nif->cf_node);
   build->cursor = nir_before_cf_list(&nif->then_list);
   return nif;
}

nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_before_cf_list(&nif->else_list);
   return nif;
}

void
nir_pop_if(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&nif->cf_node);
}

nir_def *
nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));

   nir_phi_instr *phi = nir_phi_instr_create(build->shader);
   nir_phi_instr_add_src(phi, nir_if_last_then_block(nif), then_def);
   nir_phi_instr_add_src(phi, nir_if_last_else_block(nif), else_def);

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
   nir_def_init(&phi->instr, &phi->def, then_def->num_components,
                then_def->bit_size);

   nir_builder_instr_insert(build, &phi->instr);

   return &phi->def;
}
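
/* Usage sketch: the push/pop helpers pair with nir_if_phi() to select
 * between the values produced on each side:
 *
 *    nir_push_if(&b, cond);
 *    nir_def *t = nir_imm_int(&b, 1);
 *    nir_push_else(&b, NULL);
 *    nir_def *e = nir_imm_int(&b, 2);
 *    nir_pop_if(&b, NULL);
 *    nir_def *r = nir_if_phi(&b, t, e);   // r == cond ? 1 : 2
 */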

nir_loop *
nir_push_loop(nir_builder *build)
{
   nir_loop *loop = nir_loop_create(build->shader);
   nir_builder_cf_insert(build, &loop->cf_node);
   build->cursor = nir_before_cf_list(&loop->body);
   return loop;
}

nir_loop *
nir_push_continue(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }

   nir_loop_add_continue_construct(loop);

   build->cursor = nir_before_cf_list(&loop->continue_list);
   return loop;
}

void
nir_pop_loop(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&loop->cf_node);
}
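
/* Usage sketch: a counted loop that exits with a break (ctr is an
 * illustrative local nir_variable created elsewhere):
 *
 *    nir_loop *loop = nir_push_loop(&b);
 *    {
 *       nir_def *i = nir_load_var(&b, ctr);
 *       nir_push_if(&b, nir_ige_imm(&b, i, 10));
 *       nir_jump(&b, nir_jump_break);
 *       nir_pop_if(&b, NULL);
 *       nir_store_var(&b, ctr, nir_iadd_imm(&b, i, 1), 0x1);
 *    }
 *    nir_pop_loop(&b, loop);
 */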

nir_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_def *src0, nir_def *src1)
{
   switch (func) {
   case COMPARE_FUNC_NEVER:
      return nir_imm_int(b, 0);
   case COMPARE_FUNC_ALWAYS:
      return nir_imm_int(b, ~0);
   case COMPARE_FUNC_EQUAL:
      return nir_feq(b, src0, src1);
   case COMPARE_FUNC_NOTEQUAL:
      return nir_fneu(b, src0, src1);
   case COMPARE_FUNC_GREATER:
      return nir_flt(b, src1, src0);
   case COMPARE_FUNC_GEQUAL:
      return nir_fge(b, src0, src1);
   case COMPARE_FUNC_LESS:
      return nir_flt(b, src0, src1);
   case COMPARE_FUNC_LEQUAL:
      return nir_fge(b, src1, src0);
   }
   unreachable("bad compare func");
}
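
/* Sketch: nir_compare_func(b, COMPARE_FUNC_LESS, a, b_src) is simply
 * nir_flt(b, a, b_src); the switch is useful when the comparison comes
 * from API state rather than being known at compile time.
 */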

nir_def *
nir_type_convert(nir_builder *b,
                 nir_def *src,
                 nir_alu_type src_type,
                 nir_alu_type dest_type,
                 nir_rounding_mode rnd)
{
   assert(nir_alu_type_get_type_size(src_type) == 0 ||
          nir_alu_type_get_type_size(src_type) == src->bit_size);

   const nir_alu_type dst_base =
      (nir_alu_type)nir_alu_type_get_base_type(dest_type);

   const nir_alu_type src_base =
      (nir_alu_type)nir_alu_type_get_base_type(src_type);

   /* b2b uses the regular type conversion path, but i2b and f2b are
    * implemented as src != 0.
    */
   if (dst_base == nir_type_bool && src_base != nir_type_bool) {
      nir_op opcode;

      const unsigned dst_bit_size = nir_alu_type_get_type_size(dest_type);

      if (src_base == nir_type_float) {
         switch (dst_bit_size) {
         case 1:
            opcode = nir_op_fneu;
            break;
         case 8:
            opcode = nir_op_fneu8;
            break;
         case 16:
            opcode = nir_op_fneu16;
            break;
         case 32:
            opcode = nir_op_fneu32;
            break;
         default:
            unreachable("Invalid Boolean size.");
         }
      } else {
         assert(src_base == nir_type_int || src_base == nir_type_uint);

         switch (dst_bit_size) {
         case 1:
            opcode = nir_op_ine;
            break;
         case 8:
            opcode = nir_op_ine8;
            break;
         case 16:
            opcode = nir_op_ine16;
            break;
         case 32:
            opcode = nir_op_ine32;
            break;
         default:
            unreachable("Invalid Boolean size.");
         }
      }

      return nir_build_alu(b, opcode, src,
                           nir_imm_zero(b, src->num_components, src->bit_size),
                           NULL, NULL);
   } else {
      src_type = (nir_alu_type)(src_type | src->bit_size);

      nir_op opcode =
         nir_type_conversion_op(src_type, dest_type, rnd);
      if (opcode == nir_op_mov)
         return src;

      return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
   }
}
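
/* Sketch: converting a 32-bit float to a 16-bit float with explicit
 * round-to-nearest-even:
 *
 *    nir_def *h = nir_type_convert(&b, f32_val, nir_type_float,
 *                                  nir_type_float16,
 *                                  nir_rounding_mode_rtne);
 */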

nir_def *
nir_gen_rect_vertices(nir_builder *b, nir_def *z, nir_def *w)
{
   if (!z)
      z = nir_imm_float(b, 0.0);
   if (!w)
      w = nir_imm_float(b, 1.0);

   nir_def *vertex_id;
   if (b->shader->options && b->shader->options->vertex_id_zero_based)
      vertex_id = nir_load_vertex_id_zero_base(b);
   else
      vertex_id = nir_load_vertex_id(b);

   /* vertex 0: -1.0, -1.0
    * vertex 1: -1.0,  1.0
    * vertex 2:  1.0, -1.0
    * vertex 3:  1.0,  1.0
    *
    * so:
    *
    * channel 0 is vertex_id < 2 ? -1.0 :  1.0
    * channel 1 is vertex_id & 1 ?  1.0 : -1.0
    */

   nir_def *c0cmp = nir_ilt_imm(b, vertex_id, 2);
   nir_def *c1cmp = nir_test_mask(b, vertex_id, 1);

   nir_def *comp[4];
   comp[0] = nir_bcsel(b, c0cmp, nir_imm_float(b, -1.0), nir_imm_float(b, 1.0));
   comp[1] = nir_bcsel(b, c1cmp, nir_imm_float(b, 1.0), nir_imm_float(b, -1.0));
   comp[2] = z;
   comp[3] = w;

   return nir_vec(b, comp, 4);
}
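
/* Usage sketch: in an internal blit vertex shader, the result is typically
 * stored straight to a position output variable (pos_var is illustrative):
 *
 *    nir_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
 *    nir_store_var(&b, pos_var, pos, 0xf);
 */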