/*
 * Copyright 2024 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_nir.h"
#include "ac_nir_helpers.h"
#include "sid.h"

#include "nir_builder.h"
#include "nir_xfb_info.h"

void
ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_def *value,
                            unsigned component, unsigned writemask)
{
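   /* For example, a vec2 written at component 1 is padded to (undef, x, y, undef)
    * and the writemask is shifted to 0b0110 before the store.
    */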
   /* component store */
   if (value->num_components != 4) {
      nir_def *undef = nir_undef(b, 1, value->bit_size);

      /* add undef components before and after the value to form a vec4 */
      nir_def *comp[4];
      for (int i = 0; i < 4; i++) {
         comp[i] = (i >= component && i < component + value->num_components) ?
            nir_channel(b, value, i - component) : undef;
      }

      value = nir_vec(b, comp, 4);
      writemask <<= component;
   } else {
      /* if num_components == 4, there should be no component offset */
      assert(component == 0);
   }

   nir_store_var(b, var, value, writemask);
}

unsigned
ac_nir_map_io_location(unsigned location,
                       uint64_t mask,
                       ac_nir_map_io_driver_location map_io)
{
   /* Unlinked shaders:
    * We are unaware of the inputs of the next stage while lowering outputs.
    * The driver needs to pass a callback to map varyings to a fixed location.
    */
   if (map_io)
      return map_io(location);

   /* Linked shaders:
    * Take advantage of knowledge of the inputs of the next stage when lowering outputs.
    * Map varyings to a prefix sum of the IO mask to save space in LDS or VRAM.
    */
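   /* For example, with mask = 0b1011 and location = 3, two lower slots (bits 0
    * and 1) are present, so the mapped location is 2.
    */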
   assert(mask & BITFIELD64_BIT(location));
   return util_bitcount64(mask & BITFIELD64_MASK(location));
}

/**
 * This function takes an I/O intrinsic like load/store_input,
 * and emits a sequence that calculates the full offset of that instruction,
 * including a stride to the base and component offsets.
 */
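/* The computed byte offset is:
 *    base_stride * (mapped_driver_location + indirect_offset) + component * component_stride
 * e.g. base_stride = 16, mapped_driver_location = 2, a zero indirect offset and
 * component 3 with component_stride = 4 gives 16 * 2 + 3 * 4 = 44.
 */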
nir_def *
ac_nir_calc_io_off(nir_builder *b,
                   nir_intrinsic_instr *intrin,
                   nir_def *base_stride,
                   unsigned component_stride,
                   unsigned mapped_driver_location)
{
   /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
   nir_def *base_op = nir_imul_imm(b, base_stride, mapped_driver_location);

   /* offset should be interpreted in relation to the base,
    * so the instruction effectively reads/writes another input/output
    * when it has an offset
    */
   nir_def *offset_op = nir_imul(b, base_stride,
                                 nir_get_io_offset_src(intrin)->ssa);

   /* component is in bytes */
   unsigned const_op = nir_intrinsic_component(intrin) * component_stride;

   return nir_iadd_imm_nuw(b, nir_iadd_nuw(b, base_op, offset_op), const_op);
}

/* Process the given store_output intrinsic and gather its information.
 * Meant to be used for VS/TES/GS when they are the last pre-rasterization stage.
 *
 * Assumptions:
 * - We called nir_lower_io_to_temporaries on the shader
 * - 64-bit outputs are lowered
 * - no indirect indexing is present
 */
void ac_nir_gather_prerast_store_output_info(nir_builder *b, nir_intrinsic_instr *intrin, ac_nir_prerast_out *out)
{
   assert(intrin->intrinsic == nir_intrinsic_store_output);
   assert(nir_src_is_const(intrin->src[1]) && !nir_src_as_uint(intrin->src[1]));

   const nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
   const unsigned slot = io_sem.location;

   nir_def *store_val = intrin->src[0].ssa;
   assert(store_val->bit_size == 16 || store_val->bit_size == 32);

   nir_def **output;
   nir_alu_type *type;
   ac_nir_prerast_per_output_info *info;

   if (slot >= VARYING_SLOT_VAR0_16BIT) {
      const unsigned index = slot - VARYING_SLOT_VAR0_16BIT;

      if (io_sem.high_16bits) {
         output = out->outputs_16bit_hi[index];
         type = out->types_16bit_hi[index];
         info = &out->infos_16bit_hi[index];
      } else {
         output = out->outputs_16bit_lo[index];
         type = out->types_16bit_lo[index];
         info = &out->infos_16bit_lo[index];
      }
   } else {
      output = out->outputs[slot];
      type = out->types[slot];
      info = &out->infos[slot];
   }

   unsigned component_offset = nir_intrinsic_component(intrin);
   unsigned write_mask = nir_intrinsic_write_mask(intrin);
   nir_alu_type src_type = nir_intrinsic_src_type(intrin);
   assert(nir_alu_type_get_type_size(src_type) == store_val->bit_size);

   b->cursor = nir_before_instr(&intrin->instr);

   /* 16-bit output stored in a normal varying slot that isn't a dedicated 16-bit slot. */
   const bool non_dedicated_16bit = slot < VARYING_SLOT_VAR0_16BIT && store_val->bit_size == 16;

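   /* Gather each written component below. The stream bits of every component are
    * tracked separately because components of the same slot may go to different
    * GS vertex streams.
    */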
   u_foreach_bit (i, write_mask) {
      const unsigned stream = (io_sem.gs_streams >> (i * 2)) & 0x3;

      if (b->shader->info.stage == MESA_SHADER_GEOMETRY) {
         if (!(b->shader->info.gs.active_stream_mask & (1 << stream)))
            continue;
      }

      const unsigned c = component_offset + i;

      /* The same output component should always belong to the same stream. */
      assert(!(info->components_mask & (1 << c)) ||
             ((info->stream >> (c * 2)) & 3) == stream);

      /* Components of the same output slot may belong to different streams. */
      info->stream |= stream << (c * 2);
      info->components_mask |= BITFIELD_BIT(c);

      if (!io_sem.no_varying)
         info->as_varying_mask |= BITFIELD_BIT(c);
      if (!io_sem.no_sysval_output)
         info->as_sysval_mask |= BITFIELD_BIT(c);

      nir_def *store_component = nir_channel(b, intrin->src[0].ssa, i);

      if (non_dedicated_16bit) {
         if (io_sem.high_16bits) {
            nir_def *lo = output[c] ? nir_unpack_32_2x16_split_x(b, output[c]) : nir_imm_intN_t(b, 0, 16);
            output[c] = nir_pack_32_2x16_split(b, lo, store_component);
         } else {
            nir_def *hi = output[c] ? nir_unpack_32_2x16_split_y(b, output[c]) : nir_imm_intN_t(b, 0, 16);
            output[c] = nir_pack_32_2x16_split(b, store_component, hi);
         }
         type[c] = nir_type_uint32;
      } else {
         output[c] = store_component;
         type[c] = src_type;
      }
   }
}

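/* Emit an export intrinsic, using the row-indexed variant when a row index is given. */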
static nir_intrinsic_instr *
export(nir_builder *b, nir_def *val, nir_def *row, unsigned base, unsigned flags,
       unsigned write_mask)
{
   if (row) {
      return nir_export_row_amd(b, val, row, .base = base, .flags = flags,
                                .write_mask = write_mask);
   } else {
      return nir_export_amd(b, val, .base = base, .flags = flags,
                            .write_mask = write_mask);
   }
}

void
ac_nir_export_primitive(nir_builder *b, nir_def *prim, nir_def *row)
{
   unsigned write_mask = BITFIELD_MASK(prim->num_components);

   export(b, nir_pad_vec4(b, prim), row, V_008DFC_SQ_EXP_PRIM, AC_EXP_FLAG_DONE,
          write_mask);
}

static nir_def *
get_export_output(nir_builder *b, nir_def **output)
{
   nir_def *vec[4];
   for (int i = 0; i < 4; i++) {
      if (output[i])
         vec[i] = nir_u2uN(b, output[i], 32);
      else
         vec[i] = nir_undef(b, 1, 32);
   }

   return nir_vec(b, vec, 4);
}

static nir_def *
get_pos0_output(nir_builder *b, nir_def **output)
{
   /* Some applications don't write position but expect (0, 0, 0, 1)
    * so use that value instead of undef when it isn't written.
    */
   nir_def *vec[4] = {0};

   for (int i = 0; i < 4; i++) {
      if (output[i])
         vec[i] = nir_u2u32(b, output[i]);
      else
         vec[i] = nir_imm_float(b, i == 3 ? 1.0 : 0.0);
   }

   return nir_vec(b, vec, 4);
}

void
ac_nir_export_position(nir_builder *b,
                       enum amd_gfx_level gfx_level,
                       uint32_t clip_cull_mask,
                       bool no_param_export,
                       bool force_vrs,
                       bool done,
                       uint64_t outputs_written,
                       ac_nir_prerast_out *out,
                       nir_def *row)
{
   nir_intrinsic_instr *exp[4];
   unsigned exp_num = 0;
   unsigned exp_pos_offset = 0;

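   /* Up to 4 position exports may be emitted: POS0, one vector carrying
    * PSIZ/edge flags/VRS/layer/viewport, and up to two vectors of clip/cull
    * distances (either written directly or computed from CLIP_VERTEX).
    */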
   if (outputs_written & VARYING_BIT_POS) {
      /* GFX10 (Navi1x) skips POS0 exports if EXEC=0 and DONE=0, causing a hang.
       * Setting valid_mask=1 prevents this and has no other effect.
       */
      const unsigned pos_flags = gfx_level == GFX10 ? AC_EXP_FLAG_VALID_MASK : 0;
      nir_def *pos = get_pos0_output(b, out->outputs[VARYING_SLOT_POS]);

      exp[exp_num] = export(b, pos, row, V_008DFC_SQ_EXP_POS + exp_num, pos_flags, 0xf);
      exp_num++;
   } else {
      exp_pos_offset++;
   }

   uint64_t mask =
      VARYING_BIT_PSIZ |
      VARYING_BIT_EDGE |
      VARYING_BIT_LAYER |
      VARYING_BIT_VIEWPORT |
      VARYING_BIT_PRIMITIVE_SHADING_RATE;

   /* Clear the bits of outputs that were not written or are not needed as system values. */
   if (!out->outputs[VARYING_SLOT_PSIZ][0] || !out->infos[VARYING_SLOT_PSIZ].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_PSIZ;
   if (!out->outputs[VARYING_SLOT_EDGE][0] || !out->infos[VARYING_SLOT_EDGE].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_EDGE;
   if (!out->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0] || !out->infos[VARYING_SLOT_PRIMITIVE_SHADING_RATE].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_PRIMITIVE_SHADING_RATE;
   if (!out->outputs[VARYING_SLOT_LAYER][0] || !out->infos[VARYING_SLOT_LAYER].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_LAYER;
   if (!out->outputs[VARYING_SLOT_VIEWPORT][0] || !out->infos[VARYING_SLOT_VIEWPORT].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_VIEWPORT;

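   /* The auxiliary position export packs several system values into one vec4:
    * PSIZ in X, the edge flag and VRS rate bits in Y, the layer (plus the
    * viewport index on GFX9+) in Z, and the viewport index in W on older chips.
    */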
   if ((outputs_written & mask) || force_vrs) {
      nir_def *zero = nir_imm_float(b, 0);
      nir_def *vec[4] = { zero, zero, zero, zero };
      unsigned write_mask = 0;

      if (outputs_written & VARYING_BIT_PSIZ) {
         vec[0] = out->outputs[VARYING_SLOT_PSIZ][0];
         write_mask |= BITFIELD_BIT(0);
      }

      if (outputs_written & VARYING_BIT_EDGE) {
         vec[1] = nir_umin(b, out->outputs[VARYING_SLOT_EDGE][0], nir_imm_int(b, 1));
         write_mask |= BITFIELD_BIT(1);
      }

      nir_def *rates = NULL;
      if (outputs_written & VARYING_BIT_PRIMITIVE_SHADING_RATE) {
         rates = out->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0];
      } else if (force_vrs) {
         /* If Pos.W != 1 (typical for non-GUI elements), use coarse shading. */
         nir_def *pos_w = out->outputs[VARYING_SLOT_POS][3];
         pos_w = pos_w ? nir_u2u32(b, pos_w) : nir_imm_float(b, 1.0);
         nir_def *cond = nir_fneu_imm(b, pos_w, 1);
         rates = nir_bcsel(b, cond, nir_load_force_vrs_rates_amd(b), nir_imm_int(b, 0));
      }

      if (rates) {
         vec[1] = nir_ior(b, vec[1], rates);
         write_mask |= BITFIELD_BIT(1);
      }

      if (outputs_written & VARYING_BIT_LAYER) {
         vec[2] = out->outputs[VARYING_SLOT_LAYER][0];
         write_mask |= BITFIELD_BIT(2);
      }

      if (outputs_written & VARYING_BIT_VIEWPORT) {
         if (gfx_level >= GFX9) {
            /* GFX9 has the layer in [10:0] and the viewport index in [19:16]. */
            nir_def *v = nir_ishl_imm(b, out->outputs[VARYING_SLOT_VIEWPORT][0], 16);
            vec[2] = nir_ior(b, vec[2], v);
            write_mask |= BITFIELD_BIT(2);
         } else {
            vec[3] = out->outputs[VARYING_SLOT_VIEWPORT][0];
            write_mask |= BITFIELD_BIT(3);
         }
      }

      exp[exp_num] = export(b, nir_vec(b, vec, 4), row,
                            V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset,
                            0, write_mask);
      exp_num++;
   }

   for (int i = 0; i < 2; i++) {
      if ((outputs_written & (VARYING_BIT_CLIP_DIST0 << i)) &&
          (clip_cull_mask & BITFIELD_RANGE(i * 4, 4))) {
         exp[exp_num] = export(
            b, get_export_output(b, out->outputs[VARYING_SLOT_CLIP_DIST0 + i]), row,
            V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0,
            (clip_cull_mask >> (i * 4)) & 0xf);
         exp_num++;
      }
   }

   if (outputs_written & VARYING_BIT_CLIP_VERTEX) {
      nir_def *vtx = get_export_output(b, out->outputs[VARYING_SLOT_CLIP_VERTEX]);

      /* Clip distance for clip vertex to each user clip plane. */
      nir_def *clip_dist[8] = {0};
      u_foreach_bit (i, clip_cull_mask) {
         nir_def *ucp = nir_load_user_clip_plane(b, .ucp_id = i);
         clip_dist[i] = nir_fdot4(b, vtx, ucp);
      }

      for (int i = 0; i < 2; i++) {
         if (clip_cull_mask & BITFIELD_RANGE(i * 4, 4)) {
            exp[exp_num] = export(
               b, get_export_output(b, clip_dist + i * 4), row,
               V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0,
               (clip_cull_mask >> (i * 4)) & 0xf);
            exp_num++;
         }
      }
   }

   if (!exp_num)
      return;

   nir_intrinsic_instr *final_exp = exp[exp_num - 1];

   if (done) {
      /* Specify that this is the last export */
      const unsigned final_exp_flags = nir_intrinsic_flags(final_exp);
      nir_intrinsic_set_flags(final_exp, final_exp_flags | AC_EXP_FLAG_DONE);
   }

   /* If a shader has no param exports, rasterization can start before
    * the shader finishes and thus memory stores might not finish before
    * the pixel shader starts.
    */
   if (gfx_level >= GFX10 && no_param_export && b->shader->info.writes_memory) {
      nir_cursor cursor = b->cursor;
      b->cursor = nir_before_instr(&final_exp->instr);
      nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                                nir_var_mem_ssbo | nir_var_mem_global | nir_var_image);
      b->cursor = cursor;
   }
}

void
ac_nir_export_parameters(nir_builder *b,
                         const uint8_t *param_offsets,
                         uint64_t outputs_written,
                         uint16_t outputs_written_16bit,
                         ac_nir_prerast_out *out)
{
   uint32_t exported_params = 0;

   u_foreach_bit64 (slot, outputs_written) {
      unsigned offset = param_offsets[slot];
      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      uint32_t write_mask = 0;
      for (int i = 0; i < 4; i++) {
         if (out->outputs[slot][i])
            write_mask |= (out->infos[slot].as_varying_mask & BITFIELD_BIT(i));
      }

      /* Nothing wrote this output slot, so skip the param export. */
      if (!write_mask)
         continue;

      /* Since param_offsets[] can map multiple varying slots to the same
       * param export index (that's radeonsi-specific behavior), we need to
       * do this so as not to emit duplicated exports.
       */
      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_export_amd(
         b, get_export_output(b, out->outputs[slot]),
         .base = V_008DFC_SQ_EXP_PARAM + offset,
         .write_mask = write_mask);
      exported_params |= BITFIELD_BIT(offset);
   }

   u_foreach_bit (slot, outputs_written_16bit) {
      unsigned offset = param_offsets[VARYING_SLOT_VAR0_16BIT + slot];
      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      uint32_t write_mask = 0;
      for (int i = 0; i < 4; i++) {
         if (out->outputs_16bit_lo[slot][i] || out->outputs_16bit_hi[slot][i])
            write_mask |= BITFIELD_BIT(i);
      }

      /* Nothing wrote this output slot, so skip the param export. */
      if (!write_mask)
         continue;

      /* Since param_offsets[] can map multiple varying slots to the same
       * param export index (that's radeonsi-specific behavior), we need to
       * do this so as not to emit duplicated exports.
       */
      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_def *vec[4];
      nir_def *undef = nir_undef(b, 1, 16);
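      /* Pack the low and high 16-bit halves of each component into a 32-bit value. */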
      for (int i = 0; i < 4; i++) {
         nir_def *lo = out->outputs_16bit_lo[slot][i] ? out->outputs_16bit_lo[slot][i] : undef;
         nir_def *hi = out->outputs_16bit_hi[slot][i] ? out->outputs_16bit_hi[slot][i] : undef;
         vec[i] = nir_pack_32_2x16_split(b, lo, hi);
      }

      nir_export_amd(
         b, nir_vec(b, vec, 4),
         .base = V_008DFC_SQ_EXP_PARAM + offset,
         .write_mask = write_mask);
      exported_params |= BITFIELD_BIT(offset);
   }
}

void
ac_nir_store_parameters_to_attr_ring(nir_builder *b,
                                     const uint8_t *param_offsets,
                                     const uint64_t outputs_written,
                                     const uint16_t outputs_written_16bit,
                                     ac_nir_prerast_out *out,
                                     nir_def *export_tid, nir_def *num_export_threads)
{
   nir_def *attr_rsrc = nir_load_ring_attr_amd(b);

   /* We should always store full vec4s in groups of 8 lanes for the best performance even if
    * some of them are garbage or have unused components, so align the number of export threads
    * to 8.
    */
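   /* (x + 7) & ~7 rounds the thread count up to the next multiple of 8, e.g. 13 -> 16. */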
   num_export_threads = nir_iand_imm(b, nir_iadd_imm(b, num_export_threads, 7), ~7);

   if (!export_tid)
      nir_push_if(b, nir_is_subgroup_invocation_lt_amd(b, num_export_threads));
   else
      nir_push_if(b, nir_ult(b, export_tid, num_export_threads));

   nir_def *attr_offset = nir_load_ring_attr_offset_amd(b);
   nir_def *vindex = nir_load_local_invocation_index(b);
   nir_def *voffset = nir_imm_int(b, 0);
   nir_def *undef = nir_undef(b, 1, 32);

   uint32_t exported_params = 0;

   u_foreach_bit64 (slot, outputs_written) {
      const unsigned offset = param_offsets[slot];

      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      if (!out->infos[slot].as_varying_mask)
         continue;

      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_def *comp[4];
      for (unsigned j = 0; j < 4; j++) {
         comp[j] = out->outputs[slot][j] ? out->outputs[slot][j] : undef;
      }

      nir_store_buffer_amd(b, nir_vec(b, comp, 4), attr_rsrc, voffset, attr_offset, vindex,
                           .base = offset * 16,
                           .memory_modes = nir_var_shader_out,
                           .access = ACCESS_COHERENT | ACCESS_IS_SWIZZLED_AMD);

      exported_params |= BITFIELD_BIT(offset);
   }

   u_foreach_bit (i, outputs_written_16bit) {
      const unsigned offset = param_offsets[VARYING_SLOT_VAR0_16BIT + i];

      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      if (!out->infos_16bit_lo[i].as_varying_mask &&
          !out->infos_16bit_hi[i].as_varying_mask)
         continue;

      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_def *comp[4];
      for (unsigned j = 0; j < 4; j++) {
         nir_def *lo = out->outputs_16bit_lo[i][j] ? out->outputs_16bit_lo[i][j] : undef;
         nir_def *hi = out->outputs_16bit_hi[i][j] ? out->outputs_16bit_hi[i][j] : undef;
         comp[j] = nir_pack_32_2x16_split(b, lo, hi);
      }

      nir_store_buffer_amd(b, nir_vec(b, comp, 4), attr_rsrc, voffset, attr_offset, vindex,
                           .base = offset * 16,
                           .memory_modes = nir_var_shader_out,
                           .access = ACCESS_COHERENT | ACCESS_IS_SWIZZLED_AMD);

      exported_params |= BITFIELD_BIT(offset);
   }

   nir_pop_if(b, NULL);
}

static int
sort_xfb(const void *_a, const void *_b)
{
   const nir_xfb_output_info *a = (const nir_xfb_output_info *)_a;
   const nir_xfb_output_info *b = (const nir_xfb_output_info *)_b;

   if (a->buffer != b->buffer)
      return a->buffer > b->buffer ? 1 : -1;

   assert(a->offset != b->offset);
   return a->offset > b->offset ? 1 : -1;
}

/* Return XFB info sorted by buffer and offset, so that we can generate vec4
 * stores by iterating over outputs only once.
 */
nir_xfb_info *
ac_nir_get_sorted_xfb_info(const nir_shader *nir)
{
   if (!nir->xfb_info)
      return NULL;

   unsigned xfb_info_size = nir_xfb_info_size(nir->xfb_info->output_count);
   nir_xfb_info *info = rzalloc_size(nir, xfb_info_size);

   memcpy(info, nir->xfb_info, xfb_info_size);
   qsort(info->outputs, info->output_count, sizeof(info->outputs[0]), sort_xfb);
   return info;
}

static nir_def **
get_output_and_type(ac_nir_prerast_out *out, unsigned slot, bool high_16bits,
                    nir_alu_type **types)
{
   nir_def **data;
   nir_alu_type *type;

   /* Only VARYING_SLOT_VARn_16BIT slots need the output type in order to convert
    * 16-bit outputs to 32 bits. Vulkan doesn't allow streamout of values smaller
    * than 32 bits.
    */
   if (slot < VARYING_SLOT_VAR0_16BIT) {
      data = out->outputs[slot];
      type = NULL;
   } else {
      unsigned index = slot - VARYING_SLOT_VAR0_16BIT;

      if (high_16bits) {
         data = out->outputs_16bit_hi[index];
         type = out->types_16bit_hi[index];
      } else {
         data = out->outputs_16bit_lo[index];
         type = out->types_16bit_lo[index];
      }
   }

   *types = type;
   return data;
}

void
ac_nir_emit_legacy_streamout(nir_builder *b, unsigned stream, nir_xfb_info *info, ac_nir_prerast_out *out)
{
   nir_def *so_vtx_count = nir_ubfe_imm(b, nir_load_streamout_config_amd(b), 16, 7);
   nir_def *tid = nir_load_subgroup_invocation(b);

   nir_push_if(b, nir_ilt(b, tid, so_vtx_count));
   nir_def *so_write_index = nir_load_streamout_write_index_amd(b);

   nir_def *so_buffers[NIR_MAX_XFB_BUFFERS];
   nir_def *so_write_offset[NIR_MAX_XFB_BUFFERS];
   u_foreach_bit(i, info->buffers_written) {
      so_buffers[i] = nir_load_streamout_buffer_amd(b, i);

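      /* This lane's byte offset into the buffer:
       * (global write index + lane id) * stride + streamout offset (in dwords) * 4.
       */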
      unsigned stride = info->buffers[i].stride;
      nir_def *offset = nir_load_streamout_offset_amd(b, i);
      offset = nir_iadd(b, nir_imul_imm(b, nir_iadd(b, so_write_index, tid), stride),
                        nir_imul_imm(b, offset, 4));
      so_write_offset[i] = offset;
   }

   nir_def *zero = nir_imm_int(b, 0);
   unsigned num_values = 0, store_offset = 0, store_buffer_index = 0;
   nir_def *values[4];

   for (unsigned i = 0; i < info->output_count; i++) {
      const nir_xfb_output_info *output = info->outputs + i;
      if (stream != info->buffer_to_stream[output->buffer])
         continue;

      nir_alu_type *output_type;
      nir_def **output_data =
         get_output_and_type(out, output->location, output->high_16bits, &output_type);

      u_foreach_bit(out_comp, output->component_mask) {
         if (!output_data[out_comp])
            continue;

         nir_def *data = output_data[out_comp];

         if (data->bit_size < 32) {
            /* Convert the 16-bit output to 32 bits. */
            assert(output_type);

            nir_alu_type base_type = nir_alu_type_get_base_type(output_type[out_comp]);
            data = nir_convert_to_bit_size(b, data, base_type, 32);
         }

         assert(out_comp >= output->component_offset);
         const unsigned store_comp = out_comp - output->component_offset;
         const unsigned store_comp_offset = output->offset + store_comp * 4;
         const bool has_hole = store_offset + num_values * 4 != store_comp_offset;

         /* Flush the gathered components to memory as a vec4 store or less if there is a hole. */
         if (num_values && (num_values == 4 || store_buffer_index != output->buffer || has_hole)) {
            nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffers[store_buffer_index],
                                 so_write_offset[store_buffer_index], zero, zero,
                                 .base = store_offset,
                                 .access = ACCESS_NON_TEMPORAL);
            num_values = 0;
         }

         /* Initialize the buffer index and offset if we are beginning a new vec4 store. */
         if (num_values == 0) {
            store_buffer_index = output->buffer;
            store_offset = store_comp_offset;
         }

         values[num_values++] = data;
      }
   }

   if (num_values) {
      /* Flush the remaining components to memory (as an up to vec4 store) */
      nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffers[store_buffer_index],
                           so_write_offset[store_buffer_index], zero, zero,
                           .base = store_offset,
                           .access = ACCESS_NON_TEMPORAL);
   }

   nir_pop_if(b, NULL);
}

static nir_def *
ac_nir_accum_ior(nir_builder *b, nir_def *accum_result, nir_def *new_term)
{
   return accum_result ? nir_ior(b, accum_result, new_term) : new_term;
}

bool
ac_nir_gs_shader_query(nir_builder *b,
                       bool has_gen_prim_query,
                       bool has_gs_invocations_query,
                       bool has_gs_primitives_query,
                       unsigned num_vertices_per_primitive,
                       unsigned wave_size,
                       nir_def *vertex_count[4],
                       nir_def *primitive_count[4])
{
   nir_def *pipeline_query_enabled = NULL;
   nir_def *prim_gen_query_enabled = NULL;
   nir_def *any_query_enabled = NULL;

   if (has_gen_prim_query) {
      prim_gen_query_enabled = nir_load_prim_gen_query_enabled_amd(b);
      any_query_enabled = ac_nir_accum_ior(b, any_query_enabled, prim_gen_query_enabled);
   }

   if (has_gs_invocations_query || has_gs_primitives_query) {
      pipeline_query_enabled = nir_load_pipeline_stat_query_enabled_amd(b);
      any_query_enabled = ac_nir_accum_ior(b, any_query_enabled, pipeline_query_enabled);
   }

   if (!any_query_enabled) {
      /* No queries are used. */
      return false;
   }

   nir_if *if_shader_query = nir_push_if(b, any_query_enabled);

   nir_def *active_threads_mask = nir_ballot(b, 1, wave_size, nir_imm_true(b));
   nir_def *num_active_threads = nir_bit_count(b, active_threads_mask);

   /* Calculate the "real" number of emitted primitives from the emitted GS vertices and primitives.
    * GS emits points, line strips or triangle strips.
    * Real primitives are points, lines or triangles.
    */
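   /* For example, with triangle strips (num_vertices_per_primitive == 3), a strip
    * of 5 emitted vertices counted as 1 output primitive yields 5 - 1 * 2 = 3 triangles.
    */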
   nir_def *num_prims_in_wave[4] = {0};
   u_foreach_bit (i, b->shader->info.gs.active_stream_mask) {
      assert(vertex_count[i] && primitive_count[i]);

      nir_scalar vtx_cnt = nir_get_scalar(vertex_count[i], 0);
      nir_scalar prm_cnt = nir_get_scalar(primitive_count[i], 0);

      if (nir_scalar_is_const(vtx_cnt) && nir_scalar_is_const(prm_cnt)) {
         unsigned gs_vtx_cnt = nir_scalar_as_uint(vtx_cnt);
         unsigned gs_prm_cnt = nir_scalar_as_uint(prm_cnt);
         unsigned total_prm_cnt = gs_vtx_cnt - gs_prm_cnt * (num_vertices_per_primitive - 1u);
         if (total_prm_cnt == 0)
            continue;

         num_prims_in_wave[i] = nir_imul_imm(b, num_active_threads, total_prm_cnt);
      } else {
         nir_def *gs_vtx_cnt = vtx_cnt.def;
         nir_def *gs_prm_cnt = prm_cnt.def;
         if (num_vertices_per_primitive > 1)
            gs_prm_cnt = nir_iadd(b, nir_imul_imm(b, gs_prm_cnt, -1u * (num_vertices_per_primitive - 1)), gs_vtx_cnt);
         num_prims_in_wave[i] = nir_reduce(b, gs_prm_cnt, .reduction_op = nir_op_iadd);
      }
   }

   /* Accumulate the query results with atomic adds. */
   nir_if *if_first_lane = nir_push_if(b, nir_elect(b, 1));
   {
      if (has_gs_invocations_query || has_gs_primitives_query) {
         nir_if *if_pipeline_query = nir_push_if(b, pipeline_query_enabled);
         {
            nir_def *count = NULL;

            /* Add all streams' counts to the same counter. */
            for (int i = 0; i < 4; i++) {
               if (num_prims_in_wave[i]) {
                  if (count)
                     count = nir_iadd(b, count, num_prims_in_wave[i]);
                  else
                     count = num_prims_in_wave[i];
               }
            }

            if (has_gs_primitives_query && count)
               nir_atomic_add_gs_emit_prim_count_amd(b, count);

            if (has_gs_invocations_query)
               nir_atomic_add_shader_invocation_count_amd(b, num_active_threads);
         }
         nir_pop_if(b, if_pipeline_query);
      }

      if (has_gen_prim_query) {
         nir_if *if_prim_gen_query = nir_push_if(b, prim_gen_query_enabled);
         {
            /* Add to the counter for this stream. */
            for (int i = 0; i < 4; i++) {
               if (num_prims_in_wave[i])
                  nir_atomic_add_gen_prim_count_amd(b, num_prims_in_wave[i], .stream_id = i);
            }
         }
         nir_pop_if(b, if_prim_gen_query);
      }
   }
   nir_pop_if(b, if_first_lane);

   nir_pop_if(b, if_shader_query);
   return true;
}

nir_def *
ac_nir_pack_ngg_prim_exp_arg(nir_builder *b, unsigned num_vertices_per_primitives,
                             nir_def *vertex_indices[3], nir_def *is_null_prim,
                             enum amd_gfx_level gfx_level)
{
   nir_def *arg = nir_load_initial_edgeflags_amd(b);

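   /* Pack the vertex indices into the primitive export argument: each index
    * occupies a 10-bit field (9-bit on GFX12), and bit 31 marks a null primitive.
    * The initial edge flags are already part of the argument loaded above.
    */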
   for (unsigned i = 0; i < num_vertices_per_primitives; ++i) {
      assert(vertex_indices[i]);
      arg = nir_ior(b, arg, nir_ishl_imm(b, vertex_indices[i],
                                         (gfx_level >= GFX12 ? 9u : 10u) * i));
   }

   if (is_null_prim) {
      if (is_null_prim->bit_size == 1)
         is_null_prim = nir_b2i32(b, is_null_prim);
      assert(is_null_prim->bit_size == 32);
      arg = nir_ior(b, arg, nir_ishl_imm(b, is_null_prim, 31u));
   }

   return arg;
}