/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/v3d_compiler.h"
#include "compiler/nir/nir_builder.h"

/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its I/O
 * intrinsics into something amenable to the V3D architecture.
 *
 * Most of the work is turning the VS's store_output intrinsics from working
 * on a base representing the gallium-level vec4 driver_location to an offset
 * within the VPM, and emitting the header that's read by the fixed-function
 * hardware between the VS and FS.
 *
 * We also adjust the offsets on uniform loads to be in bytes, since that's
 * what we need for indirect addressing with general TMU access.
 */
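
/* As a rough sketch (the NIR below is illustrative, not verbatim), a VS
 * store such as:
 *
 *    store_output(ssa_10, 0) (base=2, wrmask=xyzw, component=0)
 *
 * where base is the vec4 driver_location, is scalarized into one
 * store_output per FS-used component, with the base rewritten to that
 * component's VPM slot, plus extra stores for the fixed-function header
 * (viewport XY, Z, 1/Wc, point size) emitted at the end of the shader.
 */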

struct v3d_nir_lower_io_state {
        int pos_vpm_offset;
        int vp_vpm_offset;
        int zs_vpm_offset;
        int rcp_wc_vpm_offset;
        int psiz_vpm_offset;
        int varyings_vpm_offset;

        /* Geometry shader state */
        struct {
                /* VPM offset for the current vertex data output */
                nir_variable *output_offset_var;
                /* VPM offset for the current vertex header */
                nir_variable *header_offset_var;
                /* VPM header for the current vertex */
                nir_variable *header_var;

                /* Size of the complete VPM output header */
                uint32_t output_header_size;
                /* Size of the output data for a single vertex */
                uint32_t output_vertex_data_size;
        } gs;

        BITSET_WORD varyings_stored[BITSET_WORDS(V3D_MAX_ANY_STAGE_INPUTS)];

        nir_ssa_def *pos[4];
};

static void
v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
                            struct v3d_nir_lower_io_state *state);

static void
v3d_nir_store_output(nir_builder *b, int base, nir_ssa_def *offset,
                     nir_ssa_def *chan)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_store_output);
        nir_ssa_dest_init(&intr->instr, &intr->dest,
                          1, intr->dest.ssa.bit_size, NULL);
        intr->num_components = 1;

        intr->src[0] = nir_src_for_ssa(chan);
        if (offset) {
                /* When generating the VIR instruction, the base and the offset
                 * are just going to get added together with an ADD instruction
                 * so we might as well do the add here at the NIR level instead
                 * and let the constant folding do its magic.
                 */
                intr->src[1] = nir_src_for_ssa(nir_iadd_imm(b, offset, base));
                base = 0;
        } else {
                intr->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
        }

        nir_intrinsic_set_base(intr, base);
        nir_intrinsic_set_write_mask(intr, 0x1);
        nir_intrinsic_set_component(intr, 0);

        nir_builder_instr_insert(b, &intr->instr);
}

/* Convert the uniform offset to bytes. If it happens to be a constant,
 * constant-folding will clean up the shift for us.
 */
static void
v3d_nir_lower_uniform(struct v3d_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* On SPIR-V/Vulkan we are already getting our offsets in
         * bytes.
         */
        if (c->key->environment == V3D_ENVIRONMENT_VULKAN)
                return;

        b->cursor = nir_before_instr(&intr->instr);

        nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);

        nir_instr_rewrite_src(&intr->instr,
                              &intr->src[0],
                              nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                       nir_imm_int(b, 4))));
}
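
/* For example (illustrative, not verbatim NIR), with 16-byte vec4 uniform
 * slots a load such as:
 *
 *    load_uniform(ssa_5) (base=3)   ->   load_uniform(ssa_5 << 4) (base=48)
 *
 * so both the constant base and the dynamic offset end up in byte units.
 */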

static int
v3d_varying_slot_vpm_offset(struct v3d_compile *c, unsigned location, unsigned component)
{
        uint32_t num_used_outputs = 0;
        struct v3d_varying_slot *used_outputs = NULL;
        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                num_used_outputs = c->vs_key->num_used_outputs;
                used_outputs = c->vs_key->used_outputs;
                break;
        case MESA_SHADER_GEOMETRY:
                num_used_outputs = c->gs_key->num_used_outputs;
                used_outputs = c->gs_key->used_outputs;
                break;
        default:
                unreachable("Unsupported shader stage");
        }

        for (int i = 0; i < num_used_outputs; i++) {
                struct v3d_varying_slot slot = used_outputs[i];

                if (v3d_slot_get_slot(slot) == location &&
                    v3d_slot_get_component(slot) == component) {
                        return i;
                }
        }

        return -1;
}

/* Lowers a store_output(gallium driver location) to a series of store_outputs
 * with a driver_location equal to the offset in the VPM.
 *
 * For geometry shaders we need to emit multiple vertices so the VPM offsets
 * need to be computed in the shader code based on the current vertex index.
 */
static void
v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
                         nir_intrinsic_instr *intr,
                         struct v3d_nir_lower_io_state *state)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* If this is a geometry shader we need to emit our outputs
         * to the current vertex offset in the VPM.
         */
        nir_ssa_def *offset_reg =
                c->s->info.stage == MESA_SHADER_GEOMETRY ?
                        nir_load_var(b, state->gs.output_offset_var) : NULL;

        int start_comp = nir_intrinsic_component(intr);
        unsigned location = nir_intrinsic_io_semantics(intr).location;
        nir_ssa_def *src = nir_ssa_for_src(b, intr->src[0],
                                           intr->num_components);
        /* Save off the components of the position for the setup of VPM inputs
         * read by fixed function HW.
         */
        if (location == VARYING_SLOT_POS) {
                for (int i = 0; i < intr->num_components; i++) {
                        state->pos[start_comp + i] = nir_channel(b, src, i);
                }
        }

        /* For now, just write psiz to its slot in the FF header. */
        if (location == VARYING_SLOT_PSIZ &&
            state->psiz_vpm_offset != -1) {
                v3d_nir_store_output(b, state->psiz_vpm_offset, offset_reg, src);
        }

        if (location == VARYING_SLOT_LAYER) {
                assert(c->s->info.stage == MESA_SHADER_GEOMETRY);
                nir_ssa_def *header = nir_load_var(b, state->gs.header_var);
                header = nir_iand(b, header, nir_imm_int(b, 0xff00ffff));

                /* From the GLES 3.2 spec:
                 *
                 *   "When fragments are written to a layered framebuffer, the
                 *    fragment’s layer number selects an image from the array
                 *    of images at each attachment (...). If the fragment’s
                 *    layer number is negative, or greater than or equal to
                 *    the minimum number of layers of any attachment, the
                 *    effects of the fragment on the framebuffer contents are
                 *    undefined."
                 *
                 * This suggests we can just ignore that situation, however,
                 * for V3D an out-of-bounds layer index means that the binner
                 * might do out-of-bounds writes to the tile state. The
                 * simulator has an assert to catch this, so we play safe here
                 * and make sure that doesn't happen by setting gl_Layer
                 * to 0 in that case (we always allocate tile state for at
                 * least one layer).
                 */
                nir_intrinsic_instr *load =
                        nir_intrinsic_instr_create(b->shader,
                                                   nir_intrinsic_load_fb_layers_v3d);
                nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &load->instr);
                nir_ssa_def *fb_layers = &load->dest.ssa;

                nir_ssa_def *cond = nir_ige(b, src, fb_layers);
                nir_ssa_def *layer_id =
                        nir_bcsel(b, cond,
                                  nir_imm_int(b, 0),
                                  nir_ishl(b, src, nir_imm_int(b, 16)));
                header = nir_ior(b, header, layer_id);
                nir_store_var(b, state->gs.header_var, header, 0x1);
        }

        /* Scalarize outputs if it hasn't happened already, since we want to
         * schedule each VPM write individually. We can skip any output
         * components not read by the FS.
         */
        for (int i = 0; i < intr->num_components; i++) {
                int vpm_offset =
                        v3d_varying_slot_vpm_offset(c, location, start_comp + i);

                if (vpm_offset == -1)
                        continue;

                if (nir_src_is_const(intr->src[1]))
                        vpm_offset += nir_src_as_uint(intr->src[1]) * 4;

                BITSET_SET(state->varyings_stored, vpm_offset);

                v3d_nir_store_output(b, state->varyings_vpm_offset + vpm_offset,
                                     offset_reg, nir_channel(b, src, i));
        }

        nir_instr_remove(&intr->instr);
}

static inline void
reset_gs_header(nir_builder *b, struct v3d_nir_lower_io_state *state)
{
        const uint8_t NEW_PRIMITIVE_OFFSET = 0;
        const uint8_t VERTEX_DATA_LENGTH_OFFSET = 8;

        uint32_t vertex_data_size = state->gs.output_vertex_data_size;
        assert((vertex_data_size & 0xffffff00) == 0);

        uint32_t header;
        header = 1 << NEW_PRIMITIVE_OFFSET;
        header |= vertex_data_size << VERTEX_DATA_LENGTH_OFFSET;
        nir_store_var(b, state->gs.header_var, nir_imm_int(b, header), 0x1);
}
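
/* For reference, the per-vertex header word initialized above is laid out
 * by this pass as:
 *
 *    bit   0      : New Primitive flag
 *    bits  8..15  : vertex data length, in VPM slots
 *    bits 16..23  : gl_Layer (patched in v3d_nir_lower_vpm_output)
 */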

static void
v3d_nir_lower_emit_vertex(struct v3d_compile *c, nir_builder *b,
                          nir_intrinsic_instr *instr,
                          struct v3d_nir_lower_io_state *state)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *header = nir_load_var(b, state->gs.header_var);
        nir_ssa_def *header_offset = nir_load_var(b, state->gs.header_offset_var);
        nir_ssa_def *output_offset = nir_load_var(b, state->gs.output_offset_var);

        /* Emit fixed function outputs */
        v3d_nir_emit_ff_vpm_outputs(c, b, state);

        /* Emit vertex header */
        v3d_nir_store_output(b, 0, header_offset, header);

        /* Update VPM offset for next vertex output data and header */
        output_offset =
                nir_iadd(b, output_offset,
                         nir_imm_int(b, state->gs.output_vertex_data_size));

        header_offset = nir_iadd(b, header_offset, nir_imm_int(b, 1));

        /* Reset the New Primitive bit */
        header = nir_iand(b, header, nir_imm_int(b, 0xfffffffe));

        nir_store_var(b, state->gs.output_offset_var, output_offset, 0x1);
        nir_store_var(b, state->gs.header_offset_var, header_offset, 0x1);
        nir_store_var(b, state->gs.header_var, header, 0x1);

        nir_instr_remove(&instr->instr);
}

static void
v3d_nir_lower_end_primitive(struct v3d_compile *c, nir_builder *b,
                            nir_intrinsic_instr *instr,
                            struct v3d_nir_lower_io_state *state)
{
        assert(state->gs.header_var);
        b->cursor = nir_before_instr(&instr->instr);
        reset_gs_header(b, state);

        nir_instr_remove(&instr->instr);
}

/* Some vertex attribute formats may require applying a swizzle, but the
 * hardware doesn't provide a means to do that, so we need to apply the
 * swizzle in the vertex shader.
 *
 * This is required at least in Vulkan to support the mandatory vertex
 * attribute format VK_FORMAT_B8G8R8A8_UNORM.
 */
static void
v3d_nir_lower_vertex_input(struct v3d_compile *c, nir_builder *b,
                           nir_intrinsic_instr *instr)
{
        assert(c->s->info.stage == MESA_SHADER_VERTEX);

        if (!c->vs_key->va_swap_rb_mask)
                return;

        const uint32_t location = nir_intrinsic_io_semantics(instr).location;

        if (!(c->vs_key->va_swap_rb_mask & (1 << location)))
                return;

        assert(instr->num_components == 1);
        const uint32_t comp = nir_intrinsic_component(instr);
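        /* Swapping R and B: (comp + 2) % 4 maps component 0 to 2 and
         * component 2 to 0, leaving G (1) and A (3) untouched.
         */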
        if (comp == 0 || comp == 2)
                nir_intrinsic_set_component(instr, (comp + 2) % 4);
}

static void
v3d_nir_lower_io_instr(struct v3d_compile *c, nir_builder *b,
                       struct nir_instr *instr,
                       struct v3d_nir_lower_io_state *state)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->s->info.stage == MESA_SHADER_VERTEX)
                        v3d_nir_lower_vertex_input(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                v3d_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                if (c->s->info.stage == MESA_SHADER_VERTEX ||
                    c->s->info.stage == MESA_SHADER_GEOMETRY) {
                        v3d_nir_lower_vpm_output(c, b, intr, state);
                }
                break;

        case nir_intrinsic_emit_vertex:
                v3d_nir_lower_emit_vertex(c, b, intr, state);
                break;

        case nir_intrinsic_end_primitive:
                v3d_nir_lower_end_primitive(c, b, intr, state);
                break;

        default:
                break;
        }
}

/* Remap the output var's .driver_location. This is purely for
 * nir_print_shader() so that store_output can map back to a variable name.
 */
static void
v3d_nir_lower_io_update_output_var_base(struct v3d_compile *c,
                                        struct v3d_nir_lower_io_state *state)
{
        nir_foreach_shader_out_variable_safe(var, c->s) {
                if (var->data.location == VARYING_SLOT_POS &&
                    state->pos_vpm_offset != -1) {
                        var->data.driver_location = state->pos_vpm_offset;
                        continue;
                }

                if (var->data.location == VARYING_SLOT_PSIZ &&
                    state->psiz_vpm_offset != -1) {
                        var->data.driver_location = state->psiz_vpm_offset;
                        continue;
                }

                int vpm_offset =
                        v3d_varying_slot_vpm_offset(c,
                                                    var->data.location,
                                                    var->data.location_frac);
                if (vpm_offset != -1) {
                        var->data.driver_location =
                                state->varyings_vpm_offset + vpm_offset;
                } else {
                        /* If we couldn't find a mapping for the var, delete
                         * it so that its old .driver_location doesn't confuse
                         * nir_print_shader().
                         */
                        exec_node_remove(&var->node);
                }
        }
}

static void
v3d_nir_setup_vpm_layout_vs(struct v3d_compile *c,
                            struct v3d_nir_lower_io_state *state)
{
        uint32_t vpm_offset = 0;

        state->pos_vpm_offset = -1;
        state->vp_vpm_offset = -1;
        state->zs_vpm_offset = -1;
        state->rcp_wc_vpm_offset = -1;
        state->psiz_vpm_offset = -1;

        bool needs_ff_outputs = c->vs_key->base.is_last_geometry_stage;
        if (needs_ff_outputs) {
                if (c->vs_key->is_coord) {
                        state->pos_vpm_offset = vpm_offset;
                        vpm_offset += 4;
                }

                state->vp_vpm_offset = vpm_offset;
                vpm_offset += 2;

                if (!c->vs_key->is_coord) {
                        state->zs_vpm_offset = vpm_offset++;
                        state->rcp_wc_vpm_offset = vpm_offset++;
                }

                if (c->vs_key->per_vertex_point_size)
                        state->psiz_vpm_offset = vpm_offset++;
        }

        state->varyings_vpm_offset = vpm_offset;

        c->vpm_output_size = MAX2(1, vpm_offset + c->vs_key->num_used_outputs);
}
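
/* For example, the resulting render (non-coord) VS layout with per-vertex
 * point size enabled is:
 *
 *    slots 0..1 : viewport-transformed fixed-point XY
 *    slot  2    : viewport-transformed Z
 *    slot  3    : 1/Wc
 *    slot  4    : point size
 *    slots 5..  : FS-used varyings
 *
 * A coord (binning) VS instead starts with the raw XYZW clip-space
 * position in slots 0..3 and omits the Z and 1/Wc slots.
 */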

static void
v3d_nir_setup_vpm_layout_gs(struct v3d_compile *c,
                            struct v3d_nir_lower_io_state *state)
{
        /* 1 header slot for number of output vertices */
        uint32_t vpm_offset = 1;

        /* 1 header slot per output vertex */
        const uint32_t num_vertices = c->s->info.gs.vertices_out;
        vpm_offset += num_vertices;

        state->gs.output_header_size = vpm_offset;

        /* Vertex data: here we only compute offsets within a generic
         * vertex's data elements. When it is time to actually write a
         * particular vertex to the VPM, we will add that vertex's base VPM
         * offset to these offsets.
         *
         * If a geometry shader is present, it is always the last shader
         * stage before rasterization, so we always emit fixed function
         * outputs.
         */
        vpm_offset = 0;
        if (c->gs_key->is_coord) {
                state->pos_vpm_offset = vpm_offset;
                vpm_offset += 4;
        } else {
                state->pos_vpm_offset = -1;
        }

        state->vp_vpm_offset = vpm_offset;
        vpm_offset += 2;

        if (!c->gs_key->is_coord) {
                state->zs_vpm_offset = vpm_offset++;
                state->rcp_wc_vpm_offset = vpm_offset++;
        } else {
                state->zs_vpm_offset = -1;
                state->rcp_wc_vpm_offset = -1;
        }

        /* Mesa enables OES_geometry_shader_point_size automatically with
         * OES_geometry_shader so we always need to handle point size
         * writes if present.
         */
        if (c->gs_key->per_vertex_point_size)
                state->psiz_vpm_offset = vpm_offset++;

        state->varyings_vpm_offset = vpm_offset;

        state->gs.output_vertex_data_size =
                state->varyings_vpm_offset + c->gs_key->num_used_outputs;

        c->vpm_output_size =
                state->gs.output_header_size +
                state->gs.output_vertex_data_size * num_vertices;
}
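
/* Putting it together, for an illustrative vertices_out = 3 the GS VPM
 * output space looks like:
 *
 *    slot  0    : global header (header size | vertex count)
 *    slots 1..3 : one header word per output vertex
 *    slots 4..  : 3 blocks of output_vertex_data_size slots, one per
 *                 vertex, each laid out as computed above
 */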

static void
v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
                            struct v3d_nir_lower_io_state *state)
{
        /* If this is a geometry shader we need to emit our fixed function
         * outputs to the current vertex offset in the VPM.
         */
        nir_ssa_def *offset_reg =
                c->s->info.stage == MESA_SHADER_GEOMETRY ?
                        nir_load_var(b, state->gs.output_offset_var) : NULL;

        for (int i = 0; i < 4; i++) {
                if (!state->pos[i])
                        state->pos[i] = nir_ssa_undef(b, 1, 32);
        }

        nir_ssa_def *rcp_wc = nir_frcp(b, state->pos[3]);

        if (state->pos_vpm_offset != -1) {
                for (int i = 0; i < 4; i++) {
                        v3d_nir_store_output(b, state->pos_vpm_offset + i,
                                             offset_reg, state->pos[i]);
                }
        }

        if (state->vp_vpm_offset != -1) {
                for (int i = 0; i < 2; i++) {
                        nir_ssa_def *pos;
                        nir_ssa_def *scale;
                        pos = state->pos[i];
                        if (i == 0)
                                scale = nir_load_viewport_x_scale(b);
                        else
                                scale = nir_load_viewport_y_scale(b);
                        pos = nir_fmul(b, pos, scale);
                        pos = nir_fmul(b, pos, rcp_wc);
                        /* Pre-V3D 4.3 hardware has a quirk where it expects XY
                         * coordinates in .8 fixed-point format, but then it
                         * will internally round them to .6 fixed-point,
                         * introducing a double rounding. The double rounding
                         * can cause very slight differences in triangle
                         * rasterization coverage that can actually be noticed
                         * by some CTS tests.
                         *
                         * The correct fix for this as recommended by Broadcom
                         * is to convert to .8 fixed-point with ffloor().
                         */
                        pos = nir_f2i32(b, nir_ffloor(b, pos));
                        v3d_nir_store_output(b, state->vp_vpm_offset + i,
                                             offset_reg, pos);
                }
        }

        if (state->zs_vpm_offset != -1) {
                nir_ssa_def *z = state->pos[2];
                z = nir_fmul(b, z, nir_load_viewport_z_scale(b));
                z = nir_fmul(b, z, rcp_wc);
                z = nir_fadd(b, z, nir_load_viewport_z_offset(b));
                v3d_nir_store_output(b, state->zs_vpm_offset, offset_reg, z);
        }

        if (state->rcp_wc_vpm_offset != -1) {
                v3d_nir_store_output(b, state->rcp_wc_vpm_offset,
                                     offset_reg, rcp_wc);
        }

        /* Store 0 to varyings requested by the FS but not stored by the
         * previous stage. This should be undefined behavior, but
         * glsl-routing seems to rely on it.
         */
        uint32_t num_used_outputs;
        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                num_used_outputs = c->vs_key->num_used_outputs;
                break;
        case MESA_SHADER_GEOMETRY:
                num_used_outputs = c->gs_key->num_used_outputs;
                break;
        default:
                unreachable("Unsupported shader stage");
        }

        for (int i = 0; i < num_used_outputs; i++) {
                if (!BITSET_TEST(state->varyings_stored, i)) {
                        v3d_nir_store_output(b, state->varyings_vpm_offset + i,
                                             offset_reg, nir_imm_int(b, 0));
                }
        }
}

static void
emit_gs_prolog(struct v3d_compile *c, nir_builder *b,
               nir_function_impl *impl,
               struct v3d_nir_lower_io_state *state)
{
        nir_block *first = nir_start_block(impl);
        b->cursor = nir_before_block(first);

        const struct glsl_type *uint_type = glsl_uint_type();

        assert(!state->gs.output_offset_var);
        state->gs.output_offset_var =
                nir_local_variable_create(impl, uint_type, "output_offset");
        nir_store_var(b, state->gs.output_offset_var,
                      nir_imm_int(b, state->gs.output_header_size), 0x1);

        assert(!state->gs.header_offset_var);
        state->gs.header_offset_var =
                nir_local_variable_create(impl, uint_type, "header_offset");
        nir_store_var(b, state->gs.header_offset_var, nir_imm_int(b, 1), 0x1);

        assert(!state->gs.header_var);
        state->gs.header_var =
                nir_local_variable_create(impl, uint_type, "header");
        reset_gs_header(b, state);
}

static void
emit_gs_vpm_output_header_prolog(struct v3d_compile *c, nir_builder *b,
                                 struct v3d_nir_lower_io_state *state)
{
        const uint8_t VERTEX_COUNT_OFFSET = 16;

        /* Our GS header has 1 generic header slot (at VPM offset 0) and then
         * one slot per output vertex after it. This means we don't need to
         * have a variable just to keep track of the number of vertices we
         * emitted; instead we can just compute it here from the header
         * offset variable by removing the one generic header slot that
         * always goes at the beginning of our header.
         */
        nir_ssa_def *header_offset =
                nir_load_var(b, state->gs.header_offset_var);
        nir_ssa_def *vertex_count =
                nir_isub(b, header_offset, nir_imm_int(b, 1));
        nir_ssa_def *header =
                nir_ior(b, nir_imm_int(b, state->gs.output_header_size),
                        nir_ishl(b, vertex_count,
                                 nir_imm_int(b, VERTEX_COUNT_OFFSET)));

        v3d_nir_store_output(b, 0, NULL, header);
}
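
/* For reference, the global header word written above is laid out as:
 *
 *    bits  0..15 : size of the output header section, in VPM slots
 *    bits 16..   : number of vertices actually emitted
 */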

void
v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c)
{
        struct v3d_nir_lower_io_state state = { 0 };

        /* Set up the layout of the VPM outputs. */
        switch (s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_nir_setup_vpm_layout_vs(c, &state);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_nir_setup_vpm_layout_gs(c, &state);
                break;
        case MESA_SHADER_FRAGMENT:
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("Unsupported shader stage");
        }

        nir_foreach_function(function, s) {
                if (function->impl) {
                        nir_builder b;
                        nir_builder_init(&b, function->impl);

                        if (c->s->info.stage == MESA_SHADER_GEOMETRY)
                                emit_gs_prolog(c, &b, function->impl, &state);

                        nir_foreach_block(block, function->impl) {
                                nir_foreach_instr_safe(instr, block)
                                        v3d_nir_lower_io_instr(c, &b, instr,
                                                               &state);
                        }

                        nir_block *last = nir_impl_last_block(function->impl);
                        b.cursor = nir_after_block(last);
                        if (s->info.stage == MESA_SHADER_VERTEX) {
                                v3d_nir_emit_ff_vpm_outputs(c, &b, &state);
                        } else if (s->info.stage == MESA_SHADER_GEOMETRY) {
                                emit_gs_vpm_output_header_prolog(c, &b, &state);
                        }

                        nir_metadata_preserve(function->impl,
                                              nir_metadata_block_index |
                                              nir_metadata_dominance);
                }
        }

        if (s->info.stage == MESA_SHADER_VERTEX ||
            s->info.stage == MESA_SHADER_GEOMETRY) {
                v3d_nir_lower_io_update_output_var_base(c, &state);
        }
}