/*
 * Copyright © 2014-2015 Broadcom
 * Copyright © 2021 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

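/* Create a standalone shader with a single "main" entrypoint and leave the
 * builder cursor at the end of its body. The name is a printf-style format
 * string; a usage sketch (illustrative, names are hypothetical):
 *
 *    nir_builder b =
 *       nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
 *                                      "my_blit_%d", bit_size);
 */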
nir_builder MUST_CHECK PRINTFLIKE(3, 4)
nir_builder_init_simple_shader(gl_shader_stage stage,
                               const nir_shader_compiler_options *options,
                               const char *name, ...)
{
   nir_builder b;

   memset(&b, 0, sizeof(b));
   b.shader = nir_shader_create(NULL, stage, options, NULL);

   if (name) {
      va_list args;
      va_start(args, name);
      b.shader->info.name = ralloc_vasprintf(b.shader, name, args);
      va_end(args);
   }

   nir_function *func = nir_function_create(b.shader, "main");
   func->is_entrypoint = true;
   b.exact = false;
   b.impl = nir_function_impl_create(func);
   b.cursor = nir_after_cf_list(&b.impl->body);

   /* Simple shaders are typically internal, e.g. blit shaders */
   b.shader->info.internal = true;

   /* Compute shaders on Vulkan require some workgroup size initialized, pick
    * a safe default value. This relies on merging workgroups for efficiency.
    */
   b.shader->info.workgroup_size[0] = 1;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   return b;
}

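/* Fill in the destination of an ALU instruction whose sources have already
 * been set: infer the component count and bit size from the sources when the
 * opcode doesn't fix them, pad out-of-range swizzle entries so they stay
 * within the source vector, and insert the instruction at the cursor.
 */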
nir_def *
nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
{
   const nir_op_info *op_info = &nir_op_infos[instr->op];

   instr->exact = build->exact;

   /* Guess the number of components the destination temporary should have
    * based on our input sizes, if it's not fixed for the op.
    */
   unsigned num_components = op_info->output_size;
   if (num_components == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         if (op_info->input_sizes[i] == 0)
            num_components = MAX2(num_components,
                                  instr->src[i].src.ssa->num_components);
      }
   }
   assert(num_components != 0);

   /* Figure out the bitwidth based on the source bitwidth if the instruction
    * is variable-width.
    */
   unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
   if (bit_size == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
         if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
            if (bit_size)
               assert(src_bit_size == bit_size);
            else
               bit_size = src_bit_size;
         } else {
            assert(src_bit_size ==
                   nir_alu_type_get_type_size(op_info->input_types[i]));
         }
      }
   }

   /* When in doubt, assume 32. */
   if (bit_size == 0)
      bit_size = 32;

   /* Make sure we don't swizzle from outside of our source vector (like if a
    * scalar value was passed into a multiply with a vector).
    */
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      for (unsigned j = instr->src[i].src.ssa->num_components;
           j < NIR_MAX_VEC_COMPONENTS; j++) {
         instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
      }
   }

   nir_def_init(&instr->instr, &instr->def, num_components,
                bit_size);

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->def;
}

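/* Generic ALU builder used by the nir_build_alu[1-4] wrappers below; unused
 * trailing sources may be NULL.
 */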
nir_def *
nir_build_alu(nir_builder *build, nir_op op, nir_def *src0,
              nir_def *src1, nir_def *src2, nir_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   if (src1)
      instr->src[1].src = nir_src_for_ssa(src1);
   if (src2)
      instr->src[2].src = nir_src_for_ssa(src2);
   if (src3)
      instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu1(nir_builder *build, nir_op op, nir_def *src0)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu2(nir_builder *build, nir_op op, nir_def *src0,
               nir_def *src1)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   instr->src[1].src = nir_src_for_ssa(src1);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu3(nir_builder *build, nir_op op, nir_def *src0,
               nir_def *src1, nir_def *src2)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   instr->src[1].src = nir_src_for_ssa(src1);
   instr->src[2].src = nir_src_for_ssa(src2);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

nir_def *
nir_build_alu4(nir_builder *build, nir_op op, nir_def *src0,
               nir_def *src1, nir_def *src2, nir_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   instr->src[1].src = nir_src_for_ssa(src1);
   instr->src[2].src = nir_src_for_ssa(src2);
   instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

/* For the couple of special cases with more than 4 src args: */
nir_def *
nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs)
{
   const nir_op_info *op_info = &nir_op_infos[op];
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < op_info->num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(srcs[i]);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

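/* Build a texture instruction from texture/sampler derefs plus any extra
 * sources (coordinate, LOD, comparator, ...). The destination type is derived
 * from the opcode for queries and from the sampler's GLSL result type
 * otherwise.
 */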
nir_def *
nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
                          nir_deref_instr *texture,
                          nir_deref_instr *sampler,
                          unsigned num_extra_srcs,
                          const nir_tex_src *extra_srcs)
{
   assert(texture != NULL);
   assert(glsl_type_is_image(texture->type) ||
          glsl_type_is_texture(texture->type) ||
          glsl_type_is_sampler(texture->type));

   const unsigned num_srcs = 1 + (sampler != NULL) + num_extra_srcs;

   nir_tex_instr *tex = nir_tex_instr_create(build->shader, num_srcs);
   tex->op = op;
   tex->sampler_dim = glsl_get_sampler_dim(texture->type);
   tex->is_array = glsl_sampler_type_is_array(texture->type);
   tex->is_shadow = false;

   switch (op) {
   case nir_texop_txs:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs_intel:
   case nir_texop_fragment_mask_fetch_amd:
   case nir_texop_descriptor_amd:
      tex->dest_type = nir_type_int32;
      break;
   case nir_texop_lod:
      tex->dest_type = nir_type_float32;
      break;
   case nir_texop_samples_identical:
      tex->dest_type = nir_type_bool1;
      break;
   default:
      assert(!nir_tex_instr_is_query(tex));
      tex->dest_type = nir_get_nir_type_for_glsl_base_type(
         glsl_get_sampler_result_type(texture->type));
      break;
   }

   unsigned src_idx = 0;
   tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
                                             &texture->def);
   if (sampler != NULL) {
      assert(glsl_type_is_sampler(sampler->type));
      tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
                                                &sampler->def);
   }
   for (unsigned i = 0; i < num_extra_srcs; i++) {
      switch (extra_srcs[i].src_type) {
      case nir_tex_src_coord:
         tex->coord_components = nir_src_num_components(extra_srcs[i].src);
         assert(tex->coord_components == tex->is_array +
                                            glsl_get_sampler_dim_coordinate_components(tex->sampler_dim));
         break;

      case nir_tex_src_lod:
         assert(tex->sampler_dim == GLSL_SAMPLER_DIM_1D ||
                tex->sampler_dim == GLSL_SAMPLER_DIM_2D ||
                tex->sampler_dim == GLSL_SAMPLER_DIM_3D ||
                tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
         break;

      case nir_tex_src_ms_index:
         assert(tex->sampler_dim == GLSL_SAMPLER_DIM_MS);
         break;

      case nir_tex_src_comparator:
         /* Assume 1-component shadow for the builder helper */
         tex->is_shadow = true;
         tex->is_new_style_shadow = true;
         break;

      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_texture_handle:
      case nir_tex_src_sampler_handle:
         unreachable("Texture and sampler must be provided directly as derefs");
         break;

      default:
         break;
      }

      tex->src[src_idx++] = extra_srcs[i];
   }
   assert(src_idx == num_srcs);

   nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex),
                nir_alu_type_get_type_size(tex->dest_type));
   nir_builder_instr_insert(build, &tex->instr);

   return &tex->def;
}

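/* Build a vecN from per-component (def, component) pairs, applying each
 * scalar's component as a swizzle on its source.
 */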
nir_def *
nir_vec_scalars(nir_builder *build, nir_scalar *comp, unsigned num_components)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < num_components; i++) {
      instr->src[i].src = nir_src_for_ssa(comp[i].def);
      instr->src[i].swizzle[0] = comp[i].comp;
   }
   instr->exact = build->exact;

   /* Note: not reusing nir_builder_alu_instr_finish_and_insert() because it
    * can't re-guess the num_components when num_components == 1 (nir_op_mov).
    */
   nir_def_init(&instr->instr, &instr->def, num_components,
                comp[0].def->bit_size);

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->def;
}

/**
 * Get nir_def for an alu src, respecting the nir_alu_src's swizzle.
 */
nir_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
   if (nir_alu_src_is_trivial_ssa(instr, srcn))
      return instr->src[srcn].src.ssa;

   nir_alu_src *src = &instr->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
   return nir_mov_alu(build, *src, num_components);
}

/* Generic builder for system values. */
nir_def *
nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
                      unsigned num_components, unsigned bit_size)
{
   nir_intrinsic_instr *load = nir_intrinsic_instr_create(build->shader, op);
   if (nir_intrinsic_infos[op].dest_components > 0)
      assert(num_components == nir_intrinsic_infos[op].dest_components);
   else
      load->num_components = num_components;
   load->const_index[0] = index;

   nir_def_init(&load->instr, &load->def, num_components, bit_size);
   nir_builder_instr_insert(build, &load->instr);
   return &load->def;
}

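/* Insert an instruction at the builder's cursor, update its divergence info
 * when update_divergence is set, and advance the cursor so subsequent
 * instructions land after this one.
 */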
void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
   nir_instr_insert(build->cursor, instr);

   if (build->update_divergence)
      nir_update_instr_divergence(build->shader, instr);

   /* Move the cursor forward. */
   build->cursor = nir_after_instr(instr);
}

void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}

bool
nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
      if (n == cf_node)
         return true;
   }
   return false;
}

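/* Structured control-flow builders. A typical if/else ladder looks like this
 * (illustrative sketch; nir_imm_int() is the usual immediate helper):
 *
 *    nir_push_if(b, cond);
 *    nir_def *t = nir_imm_int(b, 1);
 *    nir_push_else(b, NULL);
 *    nir_def *e = nir_imm_int(b, 0);
 *    nir_pop_if(b, NULL);
 *    nir_def *result = nir_if_phi(b, t, e);
 *
 * Passing NULL to nir_push_else()/nir_pop_if() makes them look up the
 * innermost nir_if from the cursor.
 */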
nir_if *
nir_push_if(nir_builder *build, nir_def *condition)
{
   nir_if *nif = nir_if_create(build->shader);
   nif->condition = nir_src_for_ssa(condition);
   nir_builder_cf_insert(build, &nif->cf_node);
   build->cursor = nir_before_cf_list(&nif->then_list);
   return nif;
}

nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_before_cf_list(&nif->else_list);
   return nif;
}

void
nir_pop_if(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&nif->cf_node);
}

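/* Create a phi merging one value from the then-branch and one from the
 * else-branch of an if; the cursor must sit immediately after the nir_if
 * (as it does right after nir_pop_if()).
 */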
nir_def *
nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));

   nir_phi_instr *phi = nir_phi_instr_create(build->shader);
   nir_phi_instr_add_src(phi, nir_if_last_then_block(nif), then_def);
   nir_phi_instr_add_src(phi, nir_if_last_else_block(nif), else_def);

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
   nir_def_init(&phi->instr, &phi->def, then_def->num_components,
                then_def->bit_size);

   nir_builder_instr_insert(build, &phi->instr);

   return &phi->def;
}

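/* Loop builders, used the same way as the if builders. A minimal sketch
 * (assuming the usual nir_jump() helper for the break):
 *
 *    nir_loop *loop = nir_push_loop(b);
 *    nir_push_if(b, exit_cond);
 *    nir_jump(b, nir_jump_break);
 *    nir_pop_if(b, NULL);
 *    ...body...
 *    nir_pop_loop(b, loop);
 */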
nir_loop *
nir_push_loop(nir_builder *build)
{
   nir_loop *loop = nir_loop_create(build->shader);
   nir_builder_cf_insert(build, &loop->cf_node);
   build->cursor = nir_before_cf_list(&loop->body);
   return loop;
}

nir_loop *
nir_push_continue(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }

   nir_loop_add_continue_construct(loop);

   build->cursor = nir_before_cf_list(&loop->continue_list);
   return loop;
}

void
nir_pop_loop(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&loop->cf_node);
}

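/* Map a GL/Vulkan compare func to the equivalent NIR float comparison of
 * src0 against src1; NEVER and ALWAYS become constant false/true masks.
 */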
nir_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_def *src0, nir_def *src1)
{
   switch (func) {
   case COMPARE_FUNC_NEVER:
      return nir_imm_int(b, 0);
   case COMPARE_FUNC_ALWAYS:
      return nir_imm_int(b, ~0);
   case COMPARE_FUNC_EQUAL:
      return nir_feq(b, src0, src1);
   case COMPARE_FUNC_NOTEQUAL:
      return nir_fneu(b, src0, src1);
   case COMPARE_FUNC_GREATER:
      return nir_flt(b, src1, src0);
   case COMPARE_FUNC_GEQUAL:
      return nir_fge(b, src0, src1);
   case COMPARE_FUNC_LESS:
      return nir_flt(b, src0, src1);
   case COMPARE_FUNC_LEQUAL:
      return nir_fge(b, src1, src0);
   }
   unreachable("bad compare func");
}

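/* Convert between ALU types, honoring the requested rounding mode. Boolean
 * destinations are special-cased: i2b/f2b are emitted as "src != 0" rather
 * than as a conversion opcode.
 */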
nir_def *
nir_type_convert(nir_builder *b,
                 nir_def *src,
                 nir_alu_type src_type,
                 nir_alu_type dest_type,
                 nir_rounding_mode rnd)
{
   assert(nir_alu_type_get_type_size(src_type) == 0 ||
          nir_alu_type_get_type_size(src_type) == src->bit_size);

   const nir_alu_type dst_base =
      (nir_alu_type)nir_alu_type_get_base_type(dest_type);

   const nir_alu_type src_base =
      (nir_alu_type)nir_alu_type_get_base_type(src_type);

   /* b2b uses the regular type conversion path, but i2b and f2b are
    * implemented as src != 0.
    */
   if (dst_base == nir_type_bool && src_base != nir_type_bool) {
      nir_op opcode;

      const unsigned dst_bit_size = nir_alu_type_get_type_size(dest_type);

      if (src_base == nir_type_float) {
         switch (dst_bit_size) {
         case 1:
            opcode = nir_op_fneu;
            break;
         case 8:
            opcode = nir_op_fneu8;
            break;
         case 16:
            opcode = nir_op_fneu16;
            break;
         case 32:
            opcode = nir_op_fneu32;
            break;
         default:
            unreachable("Invalid Boolean size.");
         }
      } else {
         assert(src_base == nir_type_int || src_base == nir_type_uint);

         switch (dst_bit_size) {
         case 1:
            opcode = nir_op_ine;
            break;
         case 8:
            opcode = nir_op_ine8;
            break;
         case 16:
            opcode = nir_op_ine16;
            break;
         case 32:
            opcode = nir_op_ine32;
            break;
         default:
            unreachable("Invalid Boolean size.");
         }
      }

      return nir_build_alu(b, opcode, src,
                           nir_imm_zero(b, src->num_components, src->bit_size),
                           NULL, NULL);
   } else {
      src_type = (nir_alu_type)(src_type | src->bit_size);

      nir_op opcode =
         nir_type_conversion_op(src_type, dest_type, rnd);
      if (opcode == nir_op_mov)
         return src;

      return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
   }
}

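/* Emit clip-space positions for a full-screen rectangle drawn as a 4-vertex
 * triangle strip, derived from the vertex ID; z and w default to 0.0 and 1.0
 * when not supplied.
 */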
nir_def *
nir_gen_rect_vertices(nir_builder *b, nir_def *z, nir_def *w)
{
   if (!z)
      z = nir_imm_float(b, 0.0);
   if (!w)
      w = nir_imm_float(b, 1.0);

   nir_def *vertex_id;
   if (b->shader->options && b->shader->options->vertex_id_zero_based)
      vertex_id = nir_load_vertex_id_zero_base(b);
   else
      vertex_id = nir_load_vertex_id(b);

   /* vertex 0: -1.0, -1.0
    * vertex 1: -1.0,  1.0
    * vertex 2:  1.0, -1.0
    * vertex 3:  1.0,  1.0
    *
    * so:
    *
    * channel 0 is vertex_id < 2 ? -1.0 :  1.0
    * channel 1 is vertex_id & 1 ?  1.0 : -1.0
    */

   nir_def *c0cmp = nir_ilt_imm(b, vertex_id, 2);
   nir_def *c1cmp = nir_test_mask(b, vertex_id, 1);

   nir_def *comp[4];
   comp[0] = nir_bcsel(b, c0cmp, nir_imm_float(b, -1.0), nir_imm_float(b, 1.0));
   comp[1] = nir_bcsel(b, c1cmp, nir_imm_float(b, 1.0), nir_imm_float(b, -1.0));
   comp[2] = z;
   comp[3] = w;

   return nir_vec(b, comp, 4);
}