1 /*
2  * Copyright © 2016 Bas Nieuwenhuizen
3  *
4  * SPDX-License-Identifier: MIT
5  */
6 
7 #include "ac_nir.h"
8 #include "sid.h"
9 #include "nir_builder.h"
10 #include "nir_xfb_info.h"
11 
12 /* Load the argument at index arg.arg_index + relative_index. */
13 nir_def *
14 ac_nir_load_arg_at_offset(nir_builder *b, const struct ac_shader_args *ac_args,
15                           struct ac_arg arg, unsigned relative_index)
16 {
17    unsigned arg_index = arg.arg_index + relative_index;
18    unsigned num_components = ac_args->args[arg_index].size;
19 
20    if (ac_args->args[arg_index].file == AC_ARG_SGPR)
21       return nir_load_scalar_arg_amd(b, num_components, .base = arg_index);
22    else
23       return nir_load_vector_arg_amd(b, num_components, .base = arg_index);
24 }
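
/* Minimal usage sketch (illustrative only; "some_arg" is a hypothetical ac_arg):
 *
 *    nir_def *next = ac_nir_load_arg_at_offset(b, args, some_arg, 1);
 *
 * loads the argument declared right after some_arg, emitting either a scalar or
 * a vector argument load depending on whether it lives in SGPRs or VGPRs.
 */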
25 
26 void
27 ac_nir_store_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
28                  nir_def *val)
29 {
30    assert(nir_cursor_current_block(b->cursor)->cf_node.parent->type == nir_cf_node_function);
31 
32    if (ac_args->args[arg.arg_index].file == AC_ARG_SGPR)
33       nir_store_scalar_arg_amd(b, val, .base = arg.arg_index);
34    else
35       nir_store_vector_arg_amd(b, val, .base = arg.arg_index);
36 }
37 
38 nir_def *
39 ac_nir_unpack_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
40                   unsigned rshift, unsigned bitwidth)
41 {
42    nir_def *value = ac_nir_load_arg(b, ac_args, arg);
43    if (rshift == 0 && bitwidth == 32)
44       return value;
45    else if (rshift == 0)
46       return nir_iand_imm(b, value, BITFIELD_MASK(bitwidth));
47    else if ((32 - rshift) <= bitwidth)
48       return nir_ushr_imm(b, value, rshift);
49    else
50       return nir_ubfe_imm(b, value, rshift, bitwidth);
51 }
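
/* Illustrative fast paths of ac_nir_unpack_arg (example values, not taken from a
 * specific caller):
 *    rshift = 0,  bitwidth = 32 -> the loaded argument is returned unchanged
 *    rshift = 0,  bitwidth = 5  -> iand with 0x1f
 *    rshift = 24, bitwidth = 8  -> plain ushr, since the field reaches bit 31
 *    rshift = 6,  bitwidth = 6  -> ubfe, the general case
 */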
52 
53 static bool
54 is_sin_cos(const nir_instr *instr, UNUSED const void *_)
55 {
56    return instr->type == nir_instr_type_alu && (nir_instr_as_alu(instr)->op == nir_op_fsin ||
57                                                 nir_instr_as_alu(instr)->op == nir_op_fcos);
58 }
59 
60 static nir_def *
61 lower_sin_cos(struct nir_builder *b, nir_instr *instr, UNUSED void *_)
62 {
63    nir_alu_instr *sincos = nir_instr_as_alu(instr);
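   /* 0.15915493667125702 is 1/(2*pi) rounded to 32-bit float precision; the AMD
    * fsin/fcos instructions take their input as a fraction of a full turn rather
    * than in radians, hence the pre-multiplication below.
    */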
64    nir_def *src = nir_fmul_imm(b, nir_ssa_for_alu_src(b, sincos, 0), 0.15915493667125702);
65    return sincos->op == nir_op_fsin ? nir_fsin_amd(b, src) : nir_fcos_amd(b, src);
66 }
67 
68 bool
69 ac_nir_lower_sin_cos(nir_shader *shader)
70 {
71    return nir_shader_lower_instructions(shader, is_sin_cos, lower_sin_cos, NULL);
72 }
73 
74 typedef struct {
75    const struct ac_shader_args *const args;
76    const enum amd_gfx_level gfx_level;
77    const enum ac_hw_stage hw_stage;
78 } lower_intrinsics_to_args_state;
79 
80 static bool
81 lower_intrinsic_to_arg(nir_builder *b, nir_instr *instr, void *state)
82 {
83    if (instr->type != nir_instr_type_intrinsic)
84       return false;
85 
86    lower_intrinsics_to_args_state *s = (lower_intrinsics_to_args_state *)state;
87    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
88    nir_def *replacement = NULL;
89    b->cursor = nir_after_instr(&intrin->instr);
90 
91    switch (intrin->intrinsic) {
92    case nir_intrinsic_load_subgroup_id: {
93       if (s->hw_stage == AC_HW_COMPUTE_SHADER) {
94          assert(s->args->tg_size.used);
95 
96          if (s->gfx_level >= GFX10_3) {
97             replacement = ac_nir_unpack_arg(b, s->args, s->args->tg_size, 20, 5);
98          } else {
99             /* GFX6-10 don't actually support a wave id, but we can
100              * use the ordered id because ORDERED_APPEND_* is set to
101              * zero in the compute dispatch initiator.
102              */
103             replacement = ac_nir_unpack_arg(b, s->args, s->args->tg_size, 6, 6);
104          }
105       } else if (s->hw_stage == AC_HW_HULL_SHADER && s->gfx_level >= GFX11) {
106          assert(s->args->tcs_wave_id.used);
107          replacement = ac_nir_unpack_arg(b, s->args, s->args->tcs_wave_id, 0, 3);
108       } else if (s->hw_stage == AC_HW_LEGACY_GEOMETRY_SHADER ||
109                  s->hw_stage == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
110          assert(s->args->merged_wave_info.used);
111          replacement = ac_nir_unpack_arg(b, s->args, s->args->merged_wave_info, 24, 4);
112       } else {
113          replacement = nir_imm_int(b, 0);
114       }
115 
116       break;
117    }
118    case nir_intrinsic_load_num_subgroups: {
119       if (s->hw_stage == AC_HW_COMPUTE_SHADER) {
120          assert(s->args->tg_size.used);
121          replacement = ac_nir_unpack_arg(b, s->args, s->args->tg_size, 0, 6);
122       } else if (s->hw_stage == AC_HW_LEGACY_GEOMETRY_SHADER ||
123                  s->hw_stage == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
124          assert(s->args->merged_wave_info.used);
125          replacement = ac_nir_unpack_arg(b, s->args, s->args->merged_wave_info, 28, 4);
126       } else {
127          replacement = nir_imm_int(b, 1);
128       }
129 
130       break;
131    }
132    case nir_intrinsic_load_workgroup_id:
133       if (b->shader->info.stage == MESA_SHADER_MESH) {
134          /* This lowering is only valid with fast_launch = 2, otherwise we assume that
135           * lower_workgroup_id_to_index removed any uses of the workgroup id by this point.
136           */
137          assert(s->gfx_level >= GFX11);
138          nir_def *xy = ac_nir_load_arg(b, s->args, s->args->tess_offchip_offset);
139          nir_def *z = ac_nir_load_arg(b, s->args, s->args->gs_attr_offset);
140          replacement = nir_vec3(b, nir_extract_u16(b, xy, nir_imm_int(b, 0)),
141                                 nir_extract_u16(b, xy, nir_imm_int(b, 1)),
142                                 nir_extract_u16(b, z, nir_imm_int(b, 1)));
143       } else {
144          return false;
145       }
146 
147       break;
148    default:
149       return false;
150    }
151 
152    assert(replacement);
153    nir_def_rewrite_uses(&intrin->def, replacement);
154    nir_instr_remove(&intrin->instr);
155    return true;
156 }
157 
158 bool
159 ac_nir_lower_intrinsics_to_args(nir_shader *shader, const enum amd_gfx_level gfx_level,
160                                 const enum ac_hw_stage hw_stage,
161                                 const struct ac_shader_args *ac_args)
162 {
163    lower_intrinsics_to_args_state state = {
164       .gfx_level = gfx_level,
165       .hw_stage = hw_stage,
166       .args = ac_args,
167    };
168 
169    return nir_shader_instructions_pass(shader, lower_intrinsic_to_arg,
170                                        nir_metadata_block_index | nir_metadata_dominance, &state);
171 }
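
/* Typical call site (illustrative; the ac_shader_args layout is declared by the
 * driver before compiling):
 *
 *    ac_nir_lower_intrinsics_to_args(nir, gfx_level, AC_HW_COMPUTE_SHADER, &args);
 */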
172 
173 void
174 ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_def *value,
175                             unsigned component, unsigned writemask)
176 {
177    /* component store */
178    if (value->num_components != 4) {
179       nir_def *undef = nir_undef(b, 1, value->bit_size);
180 
181       /* Add undef components before and after the value to form a vec4. */
182       nir_def *comp[4];
183       for (int i = 0; i < 4; i++) {
184          comp[i] = (i >= component && i < component + value->num_components) ?
185             nir_channel(b, value, i - component) : undef;
186       }
187 
188       value = nir_vec(b, comp, 4);
189       writemask <<= component;
190    } else {
191       /* if num_components == 4, there must be no component offset */
192       assert(component == 0);
193    }
194 
195    nir_store_var(b, var, value, writemask);
196 }
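
/* Example of the padding above: a 2-component value stored at component 2 with
 * writemask 0x3 becomes vec4(undef, undef, x, y) stored with writemask 0xc.
 */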
197 
198 static nir_intrinsic_instr *
199 export(nir_builder *b, nir_def *val, nir_def *row, unsigned base, unsigned flags,
200        unsigned write_mask)
201 {
202    if (row) {
203       return nir_export_row_amd(b, val, row, .base = base, .flags = flags,
204                                 .write_mask = write_mask);
205    } else {
206       return nir_export_amd(b, val, .base = base, .flags = flags,
207                             .write_mask = write_mask);
208    }
209 }
210 
211 void
212 ac_nir_export_primitive(nir_builder *b, nir_def *prim, nir_def *row)
213 {
214    unsigned write_mask = BITFIELD_MASK(prim->num_components);
215 
216    export(b, nir_pad_vec4(b, prim), row, V_008DFC_SQ_EXP_PRIM, AC_EXP_FLAG_DONE,
217           write_mask);
218 }
219 
220 static nir_def *
221 get_export_output(nir_builder *b, nir_def **output)
222 {
223    nir_def *vec[4];
224    for (int i = 0; i < 4; i++) {
225       if (output[i])
226          vec[i] = nir_u2uN(b, output[i], 32);
227       else
228          vec[i] = nir_undef(b, 1, 32);
229    }
230 
231    return nir_vec(b, vec, 4);
232 }
233 
234 static nir_def *
235 get_pos0_output(nir_builder *b, nir_def **output)
236 {
237    /* Some applications don't write position but expect (0, 0, 0, 1)
238     * so use that value instead of undef when it isn't written.
239     */
240 
241    nir_def *vec[4];
242 
243    for (int i = 0; i < 4; i++) {
244       if (output[i])
245          vec[i] = nir_u2u32(b, output[i]);
246       else
247          vec[i] = nir_imm_float(b, i == 3 ? 1.0 : 0.0);
248    }
249 
250    return nir_vec(b, vec, 4);
251 }
252 
253 void
254 ac_nir_export_position(nir_builder *b,
255                        enum amd_gfx_level gfx_level,
256                        uint32_t clip_cull_mask,
257                        bool no_param_export,
258                        bool force_vrs,
259                        bool done,
260                        uint64_t outputs_written,
261                        nir_def *(*outputs)[4],
262                        nir_def *row)
263 {
264    nir_intrinsic_instr *exp[4];
265    unsigned exp_num = 0;
266    unsigned exp_pos_offset = 0;
267 
268    if (outputs_written & VARYING_BIT_POS) {
269       /* GFX10 (Navi1x) skips POS0 exports if EXEC=0 and DONE=0, causing a hang.
270        * Setting valid_mask=1 prevents it and has no other effect.
271        */
272       const unsigned pos_flags = gfx_level == GFX10 ? AC_EXP_FLAG_VALID_MASK : 0;
273       nir_def *pos = get_pos0_output(b, outputs[VARYING_SLOT_POS]);
274 
275       exp[exp_num] = export(b, pos, row, V_008DFC_SQ_EXP_POS + exp_num, pos_flags, 0xf);
276       exp_num++;
277    } else {
278       exp_pos_offset++;
279    }
280 
281    uint64_t mask =
282       VARYING_BIT_PSIZ |
283       VARYING_BIT_EDGE |
284       VARYING_BIT_LAYER |
285       VARYING_BIT_VIEWPORT |
286       VARYING_BIT_PRIMITIVE_SHADING_RATE;
287 
288    /* Clear the bits of outputs that were not written. */
289    if (!outputs[VARYING_SLOT_PSIZ][0])
290       outputs_written &= ~VARYING_BIT_PSIZ;
291    if (!outputs[VARYING_SLOT_EDGE][0])
292       outputs_written &= ~VARYING_BIT_EDGE;
293    if (!outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0])
294       outputs_written &= ~VARYING_BIT_PRIMITIVE_SHADING_RATE;
295    if (!outputs[VARYING_SLOT_LAYER][0])
296       outputs_written &= ~VARYING_BIT_LAYER;
297    if (!outputs[VARYING_SLOT_VIEWPORT][0])
298       outputs_written &= ~VARYING_BIT_VIEWPORT;
299 
300    if ((outputs_written & mask) || force_vrs) {
301       nir_def *zero = nir_imm_float(b, 0);
302       nir_def *vec[4] = { zero, zero, zero, zero };
303       unsigned write_mask = 0;
304 
305       if (outputs_written & VARYING_BIT_PSIZ) {
306          vec[0] = outputs[VARYING_SLOT_PSIZ][0];
307          write_mask |= BITFIELD_BIT(0);
308       }
309 
310       if (outputs_written & VARYING_BIT_EDGE) {
311          vec[1] = nir_umin(b, outputs[VARYING_SLOT_EDGE][0], nir_imm_int(b, 1));
312          write_mask |= BITFIELD_BIT(1);
313       }
314 
315       nir_def *rates = NULL;
316       if (outputs_written & VARYING_BIT_PRIMITIVE_SHADING_RATE) {
317          rates = outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0];
318       } else if (force_vrs) {
319          /* If Pos.W != 1 (typical for non-GUI elements), use coarse shading. */
320          nir_def *pos_w = outputs[VARYING_SLOT_POS][3];
321          pos_w = pos_w ? nir_u2u32(b, pos_w) : nir_imm_float(b, 1.0);
322          nir_def *cond = nir_fneu_imm(b, pos_w, 1);
323          rates = nir_bcsel(b, cond, nir_load_force_vrs_rates_amd(b), nir_imm_int(b, 0));
324       }
325 
326       if (rates) {
327          vec[1] = nir_ior(b, vec[1], rates);
328          write_mask |= BITFIELD_BIT(1);
329       }
330 
331       if (outputs_written & VARYING_BIT_LAYER) {
332          vec[2] = outputs[VARYING_SLOT_LAYER][0];
333          write_mask |= BITFIELD_BIT(2);
334       }
335 
336       if (outputs_written & VARYING_BIT_VIEWPORT) {
337          if (gfx_level >= GFX9) {
338             /* GFX9 has the layer in [10:0] and the viewport index in [19:16]. */
339             nir_def *v = nir_ishl_imm(b, outputs[VARYING_SLOT_VIEWPORT][0], 16);
340             vec[2] = nir_ior(b, vec[2], v);
341             write_mask |= BITFIELD_BIT(2);
342          } else {
343             vec[3] = outputs[VARYING_SLOT_VIEWPORT][0];
344             write_mask |= BITFIELD_BIT(3);
345          }
346       }
347 
348       exp[exp_num] = export(b, nir_vec(b, vec, 4), row,
349                             V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset,
350                             0, write_mask);
351       exp_num++;
352    }
353 
354    for (int i = 0; i < 2; i++) {
355       if ((outputs_written & (VARYING_BIT_CLIP_DIST0 << i)) &&
356           (clip_cull_mask & BITFIELD_RANGE(i * 4, 4))) {
357          exp[exp_num] = export(
358             b, get_export_output(b, outputs[VARYING_SLOT_CLIP_DIST0 + i]), row,
359             V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0,
360             (clip_cull_mask >> (i * 4)) & 0xf);
361          exp_num++;
362       }
363    }
364 
365    if (outputs_written & VARYING_BIT_CLIP_VERTEX) {
366       nir_def *vtx = get_export_output(b, outputs[VARYING_SLOT_CLIP_VERTEX]);
367 
368       /* Clip distance for clip vertex to each user clip plane. */
369       nir_def *clip_dist[8] = {0};
370       u_foreach_bit (i, clip_cull_mask) {
371          nir_def *ucp = nir_load_user_clip_plane(b, .ucp_id = i);
372          clip_dist[i] = nir_fdot4(b, vtx, ucp);
373       }
374 
375       for (int i = 0; i < 2; i++) {
376          if (clip_cull_mask & BITFIELD_RANGE(i * 4, 4)) {
377             exp[exp_num] = export(
378                b, get_export_output(b, clip_dist + i * 4), row,
379                V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0,
380                (clip_cull_mask >> (i * 4)) & 0xf);
381             exp_num++;
382          }
383       }
384    }
385 
386    if (!exp_num)
387       return;
388 
389    nir_intrinsic_instr *final_exp = exp[exp_num - 1];
390 
391    if (done) {
392       /* Specify that this is the last export */
393       const unsigned final_exp_flags = nir_intrinsic_flags(final_exp);
394       nir_intrinsic_set_flags(final_exp, final_exp_flags | AC_EXP_FLAG_DONE);
395    }
396 
397    /* If a shader has no param exports, rasterization can start before
398     * the shader finishes and thus memory stores might not finish before
399     * the pixel shader starts.
400     */
401    if (gfx_level >= GFX10 && no_param_export && b->shader->info.writes_memory) {
402       nir_cursor cursor = b->cursor;
403       b->cursor = nir_before_instr(&final_exp->instr);
404       nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
405                                 nir_var_mem_ssbo | nir_var_mem_global | nir_var_image);
406       b->cursor = cursor;
407    }
408 }
409 
410 void
411 ac_nir_export_parameters(nir_builder *b,
412                          const uint8_t *param_offsets,
413                          uint64_t outputs_written,
414                          uint16_t outputs_written_16bit,
415                          nir_def *(*outputs)[4],
416                          nir_def *(*outputs_16bit_lo)[4],
417                          nir_def *(*outputs_16bit_hi)[4])
418 {
419    uint32_t exported_params = 0;
420 
421    u_foreach_bit64 (slot, outputs_written) {
422       unsigned offset = param_offsets[slot];
423       if (offset > AC_EXP_PARAM_OFFSET_31)
424          continue;
425 
426       uint32_t write_mask = 0;
427       for (int i = 0; i < 4; i++) {
428          if (outputs[slot][i])
429             write_mask |= BITFIELD_BIT(i);
430       }
431 
432       /* Nothing was written to this output slot, so skip the param export. */
433       if (!write_mask)
434          continue;
435 
436       /* Since param_offsets[] can map multiple varying slots to the same
437        * param export index (that's radeonsi-specific behavior), we need to
438        * do this so as not to emit duplicated exports.
439        */
440       if (exported_params & BITFIELD_BIT(offset))
441          continue;
442 
443       nir_export_amd(
444          b, get_export_output(b, outputs[slot]),
445          .base = V_008DFC_SQ_EXP_PARAM + offset,
446          .write_mask = write_mask);
447       exported_params |= BITFIELD_BIT(offset);
448    }
449 
450    u_foreach_bit (slot, outputs_written_16bit) {
451       unsigned offset = param_offsets[VARYING_SLOT_VAR0_16BIT + slot];
452       if (offset > AC_EXP_PARAM_OFFSET_31)
453          continue;
454 
455       uint32_t write_mask = 0;
456       for (int i = 0; i < 4; i++) {
457          if (outputs_16bit_lo[slot][i] || outputs_16bit_hi[slot][i])
458             write_mask |= BITFIELD_BIT(i);
459       }
460 
461       /* Nothing was written to this output slot, so skip the param export. */
462       if (!write_mask)
463          continue;
464 
465       /* Since param_offsets[] can map multiple varying slots to the same
466        * param export index (that's radeonsi-specific behavior), we need to
467        * do this so as not to emit duplicated exports.
468        */
469       if (exported_params & BITFIELD_BIT(offset))
470          continue;
471 
472       nir_def *vec[4];
473       nir_def *undef = nir_undef(b, 1, 16);
474       for (int i = 0; i < 4; i++) {
475          nir_def *lo = outputs_16bit_lo[slot][i] ? outputs_16bit_lo[slot][i] : undef;
476          nir_def *hi = outputs_16bit_hi[slot][i] ? outputs_16bit_hi[slot][i] : undef;
477          vec[i] = nir_pack_32_2x16_split(b, lo, hi);
478       }
479 
480       nir_export_amd(
481          b, nir_vec(b, vec, 4),
482          .base = V_008DFC_SQ_EXP_PARAM + offset,
483          .write_mask = write_mask);
484       exported_params |= BITFIELD_BIT(offset);
485    }
486 }
487 
488 /**
489  * This function takes an I/O intrinsic like load/store_input and emits a
490  * sequence that calculates the full offset of that instruction, taking the
491  * per-slot stride, base slot and component offset into account.
492  */
493 nir_def *
494 ac_nir_calc_io_offset(nir_builder *b,
495                       nir_intrinsic_instr *intrin,
496                       nir_def *base_stride,
497                       unsigned component_stride,
498                       ac_nir_map_io_driver_location map_io)
499 {
500    unsigned base = nir_intrinsic_base(intrin);
501    unsigned semantic = nir_intrinsic_io_semantics(intrin).location;
502    unsigned mapped_driver_location = map_io ? map_io(semantic) : base;
503 
504    /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
505    nir_def *base_op = nir_imul_imm(b, base_stride, mapped_driver_location);
506 
507    /* offset should be interpreted in relation to the base,
508     * so the instruction effectively reads/writes another input/output
509     * when it has an offset
510     */
511    nir_def *offset_op = nir_imul(b, base_stride,
512                                  nir_get_io_offset_src(intrin)->ssa);
513 
514    /* The component offset converted to bytes. */
515    unsigned const_op = nir_intrinsic_component(intrin) * component_stride;
516 
517    return nir_iadd_imm_nuw(b, nir_iadd_nuw(b, base_op, offset_op), const_op);
518 }
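
/* Worked example (hypothetical values): with a base_stride of 16 bytes per slot,
 * a mapped driver location of 3, an indirect offset source of "i" slots and
 * component 2 with component_stride = 4, the returned offset is
 * 16 * 3 + 16 * i + 2 * 4 bytes.
 */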
519 
520 bool
521 ac_nir_lower_indirect_derefs(nir_shader *shader,
522                              enum amd_gfx_level gfx_level)
523 {
524    bool progress = false;
525 
526    /* Lower large variables to scratch first so that we won't bloat the
527     * shader by generating large if ladders for them. We later lower
528     * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
529     */
530    NIR_PASS(progress, shader, nir_lower_vars_to_scratch, nir_var_function_temp, 256,
531             glsl_get_natural_size_align_bytes);
532 
533    /* LLVM doesn't support VGPR indexing on GFX9. */
534    bool llvm_has_working_vgpr_indexing = gfx_level != GFX9;
535 
536    /* TODO: Indirect indexing of GS inputs is unimplemented.
537     *
538     * TCS and TES load inputs directly from LDS or offchip memory, so
539     * indirect indexing is trivial.
540     */
541    nir_variable_mode indirect_mask = 0;
542    if (shader->info.stage == MESA_SHADER_GEOMETRY ||
543        (shader->info.stage != MESA_SHADER_TESS_CTRL && shader->info.stage != MESA_SHADER_TESS_EVAL &&
544         !llvm_has_working_vgpr_indexing)) {
545       indirect_mask |= nir_var_shader_in;
546    }
547    if (!llvm_has_working_vgpr_indexing && shader->info.stage != MESA_SHADER_TESS_CTRL)
548       indirect_mask |= nir_var_shader_out;
549 
550    /* TODO: We shouldn't need to do this, however LLVM isn't currently
551     * smart enough to handle indirects without causing excess spilling
552     * causing the gpu to hang.
553     *
554     * See the following thread for more details of the problem:
555     * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
556     */
557    indirect_mask |= nir_var_function_temp;
558 
559    NIR_PASS(progress, shader, nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
560    return progress;
561 }
562 
563 struct shader_outputs {
564    nir_def *data[VARYING_SLOT_MAX][4];
565    nir_def *data_16bit_lo[16][4];
566    nir_def *data_16bit_hi[16][4];
567 
568    nir_alu_type (*type_16bit_lo)[4];
569    nir_alu_type (*type_16bit_hi)[4];
570 };
571 
572 static nir_def **
573 get_output_and_type(struct shader_outputs *outputs, unsigned slot, bool high_16bits,
574                     nir_alu_type **types)
575 {
576    nir_def **data;
577    nir_alu_type *type;
578 
579    /* Only VARYING_SLOT_VARn_16BIT slots need the output type to convert 16-bit
580     * outputs to 32-bit. Vulkan does not allow streamout of outputs smaller than 32 bits.
581     */
582    if (slot < VARYING_SLOT_VAR0_16BIT) {
583       data = outputs->data[slot];
584       type = NULL;
585    } else {
586       unsigned index = slot - VARYING_SLOT_VAR0_16BIT;
587 
588       if (high_16bits) {
589          data = outputs->data_16bit_hi[index];
590          type = outputs->type_16bit_hi[index];
591       } else {
592          data = outputs->data_16bit_lo[index];
593          type = outputs->type_16bit_lo[index];
594       }
595    }
596 
597    *types = type;
598    return data;
599 }
600 
601 static void
602 emit_streamout(nir_builder *b, unsigned stream, nir_xfb_info *info,
603                struct shader_outputs *outputs)
604 {
605    nir_def *so_vtx_count = nir_ubfe_imm(b, nir_load_streamout_config_amd(b), 16, 7);
606    nir_def *tid = nir_load_subgroup_invocation(b);
607 
608    nir_push_if(b, nir_ilt(b, tid, so_vtx_count));
609    nir_def *so_write_index = nir_load_streamout_write_index_amd(b);
610 
611    nir_def *so_buffers[NIR_MAX_XFB_BUFFERS];
612    nir_def *so_write_offset[NIR_MAX_XFB_BUFFERS];
613    u_foreach_bit(i, info->buffers_written) {
614       so_buffers[i] = nir_load_streamout_buffer_amd(b, i);
615 
616       unsigned stride = info->buffers[i].stride;
617       nir_def *offset = nir_load_streamout_offset_amd(b, i);
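      /* Per-thread byte offset into buffer i: the number of vertices already
       * written (so_write_index plus this thread's index) times the buffer
       * stride, plus the buffer's running offset counter (assumed to be kept
       * in dwords, hence the multiply by 4 below).
       */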
618       offset = nir_iadd(b, nir_imul_imm(b, nir_iadd(b, so_write_index, tid), stride),
619                         nir_imul_imm(b, offset, 4));
620       so_write_offset[i] = offset;
621    }
622 
623    nir_def *undef = nir_undef(b, 1, 32);
624    for (unsigned i = 0; i < info->output_count; i++) {
625       const nir_xfb_output_info *output = info->outputs + i;
626       if (stream != info->buffer_to_stream[output->buffer])
627          continue;
628 
629       nir_alu_type *output_type;
630       nir_def **output_data =
631          get_output_and_type(outputs, output->location, output->high_16bits, &output_type);
632 
633       nir_def *vec[4] = {undef, undef, undef, undef};
634       uint8_t mask = 0;
635       u_foreach_bit(j, output->component_mask) {
636          nir_def *data = output_data[j];
637 
638          if (data) {
639             if (data->bit_size < 32) {
640                /* We need the output type to convert non-32-bit outputs to 32-bit. */
641                assert(output_type);
642 
643                nir_alu_type base_type = nir_alu_type_get_base_type(output_type[j]);
644                data = nir_convert_to_bit_size(b, data, base_type, 32);
645             }
646 
647             unsigned comp = j - output->component_offset;
648             vec[comp] = data;
649             mask |= 1 << comp;
650          }
651       }
652 
653       if (!mask)
654          continue;
655 
656       unsigned buffer = output->buffer;
657       nir_def *data = nir_vec(b, vec, util_last_bit(mask));
658       nir_def *zero = nir_imm_int(b, 0);
659       nir_store_buffer_amd(b, data, so_buffers[buffer], so_write_offset[buffer], zero, zero,
660                            .base = output->offset, .write_mask = mask,
661                            .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL);
662    }
663 
664    nir_pop_if(b, NULL);
665 }
666 
667 nir_shader *
668 ac_nir_create_gs_copy_shader(const nir_shader *gs_nir,
669                              enum amd_gfx_level gfx_level,
670                              uint32_t clip_cull_mask,
671                              const uint8_t *param_offsets,
672                              bool has_param_exports,
673                              bool disable_streamout,
674                              bool kill_pointsize,
675                              bool kill_layer,
676                              bool force_vrs,
677                              ac_nir_gs_output_info *output_info)
678 {
679    nir_builder b = nir_builder_init_simple_shader(
680       MESA_SHADER_VERTEX, gs_nir->options, "gs_copy");
681 
682    nir_foreach_shader_out_variable(var, gs_nir)
683       nir_shader_add_variable(b.shader, nir_variable_clone(var, b.shader));
684 
685    b.shader->info.outputs_written = gs_nir->info.outputs_written;
686    b.shader->info.outputs_written_16bit = gs_nir->info.outputs_written_16bit;
687 
688    nir_def *gsvs_ring = nir_load_ring_gsvs_amd(&b);
689 
690    nir_xfb_info *info = gs_nir->xfb_info;
691    nir_def *stream_id = NULL;
692    if (!disable_streamout && info)
693       stream_id = nir_ubfe_imm(&b, nir_load_streamout_config_amd(&b), 24, 2);
694 
695    nir_def *vtx_offset = nir_imul_imm(&b, nir_load_vertex_id_zero_base(&b), 4);
696    nir_def *zero = nir_imm_zero(&b, 1, 32);
697 
698    for (unsigned stream = 0; stream < 4; stream++) {
699       if (stream > 0 && (!stream_id || !(info->streams_written & BITFIELD_BIT(stream))))
700          continue;
701 
702       if (stream_id)
703          nir_push_if(&b, nir_ieq_imm(&b, stream_id, stream));
704 
705       uint32_t offset = 0;
706       struct shader_outputs outputs = {
707          .type_16bit_lo = output_info->types_16bit_lo,
708          .type_16bit_hi = output_info->types_16bit_hi,
709       };
710 
711       u_foreach_bit64 (i, gs_nir->info.outputs_written) {
712          u_foreach_bit (j, output_info->usage_mask[i]) {
713             if (((output_info->streams[i] >> (j * 2)) & 0x3) != stream)
714                continue;
715 
716             outputs.data[i][j] =
717                nir_load_buffer_amd(&b, 1, 32, gsvs_ring, vtx_offset, zero, zero,
718                                    .base = offset,
719                                    .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL);
720 
721             /* clamp legacy color output */
722             if (i == VARYING_SLOT_COL0 || i == VARYING_SLOT_COL1 ||
723                 i == VARYING_SLOT_BFC0 || i == VARYING_SLOT_BFC1) {
724                nir_def *color = outputs.data[i][j];
725                nir_def *clamp = nir_load_clamp_vertex_color_amd(&b);
726                outputs.data[i][j] = nir_bcsel(&b, clamp, nir_fsat(&b, color), color);
727             }
728 
729             offset += gs_nir->info.gs.vertices_out * 16 * 4;
730          }
731       }
732 
733       u_foreach_bit (i, gs_nir->info.outputs_written_16bit) {
734          for (unsigned j = 0; j < 4; j++) {
735             bool has_lo_16bit = (output_info->usage_mask_16bit_lo[i] & (1 << j)) &&
736                ((output_info->streams_16bit_lo[i] >> (j * 2)) & 0x3) == stream;
737             bool has_hi_16bit = (output_info->usage_mask_16bit_hi[i] & (1 << j)) &&
738                ((output_info->streams_16bit_hi[i] >> (j * 2)) & 0x3) == stream;
739             if (!has_lo_16bit && !has_hi_16bit)
740                continue;
741 
742             nir_def *data =
743                nir_load_buffer_amd(&b, 1, 32, gsvs_ring, vtx_offset, zero, zero,
744                                    .base = offset,
745                                    .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL);
746 
747             if (has_lo_16bit)
748                outputs.data_16bit_lo[i][j] = nir_unpack_32_2x16_split_x(&b, data);
749 
750             if (has_hi_16bit)
751                outputs.data_16bit_hi[i][j] = nir_unpack_32_2x16_split_y(&b, data);
752 
753             offset += gs_nir->info.gs.vertices_out * 16 * 4;
754          }
755       }
756 
757       if (stream_id)
758          emit_streamout(&b, stream, info, &outputs);
759 
760       if (stream == 0) {
761          uint64_t export_outputs = b.shader->info.outputs_written | VARYING_BIT_POS;
762          if (kill_pointsize)
763             export_outputs &= ~VARYING_BIT_PSIZ;
764          if (kill_layer)
765             export_outputs &= ~VARYING_BIT_LAYER;
766 
767          ac_nir_export_position(&b, gfx_level, clip_cull_mask, !has_param_exports,
768                                 force_vrs, true, export_outputs, outputs.data, NULL);
769 
770          if (has_param_exports) {
771             ac_nir_export_parameters(&b, param_offsets,
772                                      b.shader->info.outputs_written,
773                                      b.shader->info.outputs_written_16bit,
774                                      outputs.data,
775                                      outputs.data_16bit_lo,
776                                      outputs.data_16bit_hi);
777          }
778       }
779 
780       if (stream_id)
781          nir_push_else(&b, NULL);
782    }
783 
784    b.shader->info.clip_distance_array_size = gs_nir->info.clip_distance_array_size;
785    b.shader->info.cull_distance_array_size = gs_nir->info.cull_distance_array_size;
786 
787    return b.shader;
788 }
789 
790 static void
791 gather_outputs(nir_builder *b, nir_function_impl *impl, struct shader_outputs *outputs)
792 {
793    /* Assume:
794     * - the shader used nir_lower_io_to_temporaries
795     * - 64-bit outputs are lowered
796     * - no indirect indexing is present
797     */
798    nir_foreach_block (block, impl) {
799       nir_foreach_instr_safe (instr, block) {
800          if (instr->type != nir_instr_type_intrinsic)
801             continue;
802 
803          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
804          if (intrin->intrinsic != nir_intrinsic_store_output)
805             continue;
806 
807          assert(nir_src_is_const(intrin->src[1]) && !nir_src_as_uint(intrin->src[1]));
808 
809          nir_alu_type type = nir_intrinsic_src_type(intrin);
810          nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
811 
812          nir_alu_type *output_type;
813          nir_def **output_data =
814             get_output_and_type(outputs, sem.location, sem.high_16bits, &output_type);
815 
816          u_foreach_bit (i, nir_intrinsic_write_mask(intrin)) {
817             unsigned comp = nir_intrinsic_component(intrin) + i;
818             output_data[comp] = nir_channel(b, intrin->src[0].ssa, i);
819 
820             if (output_type)
821                output_type[comp] = type;
822          }
823 
824          /* Remove all store_output instructions. */
825          nir_instr_remove(instr);
826       }
827    }
828 }
829 
830 void
831 ac_nir_lower_legacy_vs(nir_shader *nir,
832                        enum amd_gfx_level gfx_level,
833                        uint32_t clip_cull_mask,
834                        const uint8_t *param_offsets,
835                        bool has_param_exports,
836                        bool export_primitive_id,
837                        bool disable_streamout,
838                        bool kill_pointsize,
839                        bool kill_layer,
840                        bool force_vrs)
841 {
842    nir_function_impl *impl = nir_shader_get_entrypoint(nir);
843    nir_metadata preserved = nir_metadata_block_index | nir_metadata_dominance;
844 
845    nir_builder b = nir_builder_at(nir_after_impl(impl));
846 
847    nir_alu_type output_types_16bit_lo[16][4];
848    nir_alu_type output_types_16bit_hi[16][4];
849    struct shader_outputs outputs = {
850       .type_16bit_lo = output_types_16bit_lo,
851       .type_16bit_hi = output_types_16bit_hi,
852    };
853    gather_outputs(&b, impl, &outputs);
854 
855    if (export_primitive_id) {
856       /* When the primitive ID is read by FS, we must ensure that it's exported by the previous
857        * vertex stage because it's implicit for VS or TES (but required by the Vulkan spec for GS
858        * or MS).
859        */
860       outputs.data[VARYING_SLOT_PRIMITIVE_ID][0] = nir_load_primitive_id(&b);
861 
862       /* Update outputs_written to reflect that the pass added a new output. */
863       nir->info.outputs_written |= BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_ID);
864    }
865 
866    if (!disable_streamout && nir->xfb_info) {
867       emit_streamout(&b, 0, nir->xfb_info, &outputs);
868       preserved = nir_metadata_none;
869    }
870 
871    uint64_t export_outputs = nir->info.outputs_written | VARYING_BIT_POS;
872    if (kill_pointsize)
873       export_outputs &= ~VARYING_BIT_PSIZ;
874    if (kill_layer)
875       export_outputs &= ~VARYING_BIT_LAYER;
876 
877    ac_nir_export_position(&b, gfx_level, clip_cull_mask, !has_param_exports,
878                           force_vrs, true, export_outputs, outputs.data, NULL);
879 
880    if (has_param_exports) {
881       ac_nir_export_parameters(&b, param_offsets,
882                                nir->info.outputs_written,
883                                nir->info.outputs_written_16bit,
884                                outputs.data,
885                                outputs.data_16bit_lo,
886                                outputs.data_16bit_hi);
887    }
888 
889    nir_metadata_preserve(impl, preserved);
890 }
891 
892 static nir_def *
893 ac_nir_accum_ior(nir_builder *b, nir_def *accum_result, nir_def *new_term)
894 {
895    return accum_result ? nir_ior(b, accum_result, new_term) : new_term;
896 }
897 
898 bool
899 ac_nir_gs_shader_query(nir_builder *b,
900                        bool has_gen_prim_query,
901                        bool has_gs_invocations_query,
902                        bool has_gs_primitives_query,
903                        unsigned num_vertices_per_primitive,
904                        unsigned wave_size,
905                        nir_def *vertex_count[4],
906                        nir_def *primitive_count[4])
907 {
908    nir_def *pipeline_query_enabled = NULL;
909    nir_def *prim_gen_query_enabled = NULL;
910    nir_def *any_query_enabled = NULL;
911 
912    if (has_gen_prim_query) {
913       prim_gen_query_enabled = nir_load_prim_gen_query_enabled_amd(b);
914       any_query_enabled = ac_nir_accum_ior(b, any_query_enabled, prim_gen_query_enabled);
915    }
916 
917    if (has_gs_invocations_query || has_gs_primitives_query) {
918       pipeline_query_enabled = nir_load_pipeline_stat_query_enabled_amd(b);
919       any_query_enabled = ac_nir_accum_ior(b, any_query_enabled, pipeline_query_enabled);
920    }
921 
922    if (!any_query_enabled) {
923       /* No queries are active, nothing to emit. */
924       return false;
925    }
926 
927    nir_if *if_shader_query = nir_push_if(b, any_query_enabled);
928 
929    nir_def *active_threads_mask = nir_ballot(b, 1, wave_size, nir_imm_true(b));
930    nir_def *num_active_threads = nir_bit_count(b, active_threads_mask);
931 
932    /* Calculate the "real" number of emitted primitives from the emitted GS vertices and primitives.
933     * GS emits points, line strips or triangle strips.
934     * Real primitives are points, lines or triangles.
935     */
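   /* For example, a single triangle strip with 5 emitted vertices and 1 emitted
    * strip primitive contains 5 - 1 * (3 - 1) = 3 triangles.
    */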
936    nir_def *num_prims_in_wave[4] = {0};
937    u_foreach_bit (i, b->shader->info.gs.active_stream_mask) {
938       assert(vertex_count[i] && primitive_count[i]);
939 
940       nir_scalar vtx_cnt = nir_get_scalar(vertex_count[i], 0);
941       nir_scalar prm_cnt = nir_get_scalar(primitive_count[i], 0);
942 
943       if (nir_scalar_is_const(vtx_cnt) && nir_scalar_is_const(prm_cnt)) {
944          unsigned gs_vtx_cnt = nir_scalar_as_uint(vtx_cnt);
945          unsigned gs_prm_cnt = nir_scalar_as_uint(prm_cnt);
946          unsigned total_prm_cnt = gs_vtx_cnt - gs_prm_cnt * (num_vertices_per_primitive - 1u);
947          if (total_prm_cnt == 0)
948             continue;
949 
950          num_prims_in_wave[i] = nir_imul_imm(b, num_active_threads, total_prm_cnt);
951       } else {
952          nir_def *gs_vtx_cnt = vtx_cnt.def;
953          nir_def *gs_prm_cnt = prm_cnt.def;
954          if (num_vertices_per_primitive > 1)
955             gs_prm_cnt = nir_iadd(b, nir_imul_imm(b, gs_prm_cnt, -1u * (num_vertices_per_primitive - 1)), gs_vtx_cnt);
956          num_prims_in_wave[i] = nir_reduce(b, gs_prm_cnt, .reduction_op = nir_op_iadd);
957       }
958    }
959 
960    /* Accumulate the query results using atomic adds. */
961    nir_if *if_first_lane = nir_push_if(b, nir_elect(b, 1));
962    {
963       if (has_gs_invocations_query || has_gs_primitives_query) {
964          nir_if *if_pipeline_query = nir_push_if(b, pipeline_query_enabled);
965          {
966             nir_def *count = NULL;
967 
968             /* Add the primitive counts of all streams to the same counter. */
969             for (int i = 0; i < 4; i++) {
970                if (num_prims_in_wave[i]) {
971                   if (count)
972                      count = nir_iadd(b, count, num_prims_in_wave[i]);
973                   else
974                      count = num_prims_in_wave[i];
975                }
976             }
977 
978             if (has_gs_primitives_query && count)
979                nir_atomic_add_gs_emit_prim_count_amd(b, count);
980 
981             if (has_gs_invocations_query)
982                nir_atomic_add_shader_invocation_count_amd(b, num_active_threads);
983          }
984          nir_pop_if(b, if_pipeline_query);
985       }
986 
987       if (has_gen_prim_query) {
988          nir_if *if_prim_gen_query = nir_push_if(b, prim_gen_query_enabled);
989          {
990             /* Add to the counter for this stream. */
991             for (int i = 0; i < 4; i++) {
992                if (num_prims_in_wave[i])
993                   nir_atomic_add_gen_prim_count_amd(b, num_prims_in_wave[i], .stream_id = i);
994             }
995          }
996          nir_pop_if(b, if_prim_gen_query);
997       }
998    }
999    nir_pop_if(b, if_first_lane);
1000 
1001    nir_pop_if(b, if_shader_query);
1002    return true;
1003 }
1004 
1005 typedef struct {
1006    nir_def *outputs[64][4];
1007    nir_def *outputs_16bit_lo[16][4];
1008    nir_def *outputs_16bit_hi[16][4];
1009 
1010    ac_nir_gs_output_info *info;
1011 
1012    nir_def *vertex_count[4];
1013    nir_def *primitive_count[4];
1014 } lower_legacy_gs_state;
1015 
1016 static bool
1017 lower_legacy_gs_store_output(nir_builder *b, nir_intrinsic_instr *intrin,
1018                              lower_legacy_gs_state *s)
1019 {
1020    /* Assume:
1021     * - the shader used nir_lower_io_to_temporaries
1022     * - 64-bit outputs are lowered
1023     * - no indirect indexing is present
1024     */
1025    assert(nir_src_is_const(intrin->src[1]) && !nir_src_as_uint(intrin->src[1]));
1026 
1027    b->cursor = nir_before_instr(&intrin->instr);
1028 
1029    unsigned component = nir_intrinsic_component(intrin);
1030    unsigned write_mask = nir_intrinsic_write_mask(intrin);
1031    nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
1032 
1033    nir_def **outputs;
1034    if (sem.location < VARYING_SLOT_VAR0_16BIT) {
1035       outputs = s->outputs[sem.location];
1036    } else {
1037       unsigned index = sem.location - VARYING_SLOT_VAR0_16BIT;
1038       if (sem.high_16bits)
1039          outputs = s->outputs_16bit_hi[index];
1040       else
1041          outputs = s->outputs_16bit_lo[index];
1042    }
1043 
1044    nir_def *store_val = intrin->src[0].ssa;
1045    /* 64-bit outputs have been lowered to 32-bit. */
1046    assert(store_val->bit_size <= 32);
1047 
1048    u_foreach_bit (i, write_mask) {
1049       unsigned comp = component + i;
1050       outputs[comp] = nir_channel(b, store_val, i);
1051    }
1052 
1053    nir_instr_remove(&intrin->instr);
1054    return true;
1055 }
1056 
1057 static bool
1058 lower_legacy_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *intrin,
1059                                          lower_legacy_gs_state *s)
1060 {
1061    b->cursor = nir_before_instr(&intrin->instr);
1062 
1063    unsigned stream = nir_intrinsic_stream_id(intrin);
1064    nir_def *vtxidx = intrin->src[0].ssa;
1065 
1066    nir_def *gsvs_ring = nir_load_ring_gsvs_amd(b, .stream_id = stream);
1067    nir_def *soffset = nir_load_ring_gs2vs_offset_amd(b);
1068 
1069    unsigned offset = 0;
1070    u_foreach_bit64 (i, b->shader->info.outputs_written) {
1071       for (unsigned j = 0; j < 4; j++) {
1072          nir_def *output = s->outputs[i][j];
1073          /* The next emitted vertex needs new values, so reset all outputs. */
1074          s->outputs[i][j] = NULL;
1075 
1076          if (!(s->info->usage_mask[i] & (1 << j)) ||
1077              ((s->info->streams[i] >> (j * 2)) & 0x3) != stream)
1078             continue;
1079 
1080          unsigned base = offset * b->shader->info.gs.vertices_out * 4;
1081          offset++;
1082 
1083          /* This output was not written, skip the buffer store. */
1084          if (!output)
1085             continue;
1086 
1087          nir_def *voffset = nir_ishl_imm(b, vtxidx, 2);
1088 
1089          /* Extend 8/16-bit to 32-bit; 64-bit has already been lowered. */
1090          nir_def *data = nir_u2uN(b, output, 32);
1091 
1092          nir_store_buffer_amd(b, data, gsvs_ring, voffset, soffset, nir_imm_int(b, 0),
1093                               .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL |
1094                                         ACCESS_IS_SWIZZLED_AMD,
1095                               .base = base,
1096                               /* For ACO to not reorder this store around EmitVertex/EndPrimitive */
1097                               .memory_modes = nir_var_shader_out);
1098       }
1099    }
1100 
1101    u_foreach_bit (i, b->shader->info.outputs_written_16bit) {
1102       for (unsigned j = 0; j < 4; j++) {
1103          nir_def *output_lo = s->outputs_16bit_lo[i][j];
1104          nir_def *output_hi = s->outputs_16bit_hi[i][j];
1105          /* The next emitted vertex needs new values, so reset all outputs. */
1106          s->outputs_16bit_lo[i][j] = NULL;
1107          s->outputs_16bit_hi[i][j] = NULL;
1108 
1109          bool has_lo_16bit = (s->info->usage_mask_16bit_lo[i] & (1 << j)) &&
1110             ((s->info->streams_16bit_lo[i] >> (j * 2)) & 0x3) == stream;
1111          bool has_hi_16bit = (s->info->usage_mask_16bit_hi[i] & (1 << j)) &&
1112             ((s->info->streams_16bit_hi[i] >> (j * 2)) & 0x3) == stream;
1113          if (!has_lo_16bit && !has_hi_16bit)
1114             continue;
1115 
1116          unsigned base = offset * b->shader->info.gs.vertices_out;
1117          offset++;
1118 
1119          bool has_lo_16bit_out = has_lo_16bit && output_lo;
1120          bool has_hi_16bit_out = has_hi_16bit && output_hi;
1121 
1122          /* None of the needed outputs were written, skip the buffer store. */
1123          if (!has_lo_16bit_out && !has_hi_16bit_out)
1124             continue;
1125 
1126          if (!has_lo_16bit_out)
1127             output_lo = nir_undef(b, 1, 16);
1128 
1129          if (!has_hi_16bit_out)
1130             output_hi = nir_undef(b, 1, 16);
1131 
1132          nir_def *voffset = nir_iadd_imm(b, vtxidx, base);
1133          voffset = nir_ishl_imm(b, voffset, 2);
1134 
1135          nir_store_buffer_amd(b, nir_pack_32_2x16_split(b, output_lo, output_hi),
1136                               gsvs_ring, voffset, soffset, nir_imm_int(b, 0),
1137                               .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL |
1138                                         ACCESS_IS_SWIZZLED_AMD,
1139                               /* For ACO to not reorder this store around EmitVertex/EndPrimitive */
1140                               .memory_modes = nir_var_shader_out);
1141       }
1142    }
1143 
1144    /* Signal vertex emission. */
1145    nir_sendmsg_amd(b, nir_load_gs_wave_id_amd(b),
1146                    .base = AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8));
1147 
1148    nir_instr_remove(&intrin->instr);
1149    return true;
1150 }
1151 
1152 static bool
1153 lower_legacy_gs_set_vertex_and_primitive_count(nir_builder *b, nir_intrinsic_instr *intrin,
1154                                                lower_legacy_gs_state *s)
1155 {
1156    b->cursor = nir_before_instr(&intrin->instr);
1157 
1158    unsigned stream = nir_intrinsic_stream_id(intrin);
1159 
1160    s->vertex_count[stream] = intrin->src[0].ssa;
1161    s->primitive_count[stream] = intrin->src[1].ssa;
1162 
1163    nir_instr_remove(&intrin->instr);
1164    return true;
1165 }
1166 
1167 static bool
1168 lower_legacy_gs_end_primitive_with_counter(nir_builder *b, nir_intrinsic_instr *intrin,
1169                                            lower_legacy_gs_state *s)
1170 {
1171    b->cursor = nir_before_instr(&intrin->instr);
1172    const unsigned stream = nir_intrinsic_stream_id(intrin);
1173 
1174    /* Signal primitive emission. */
1175    nir_sendmsg_amd(b, nir_load_gs_wave_id_amd(b),
1176                    .base = AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8));
1177 
1178    nir_instr_remove(&intrin->instr);
1179    return true;
1180 }
1181 
1182 static bool
1183 lower_legacy_gs_intrinsic(nir_builder *b, nir_instr *instr, void *state)
1184 {
1185    lower_legacy_gs_state *s = (lower_legacy_gs_state *) state;
1186 
1187    if (instr->type != nir_instr_type_intrinsic)
1188       return false;
1189 
1190    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1191 
1192    if (intrin->intrinsic == nir_intrinsic_store_output)
1193       return lower_legacy_gs_store_output(b, intrin, s);
1194    else if (intrin->intrinsic == nir_intrinsic_emit_vertex_with_counter)
1195       return lower_legacy_gs_emit_vertex_with_counter(b, intrin, s);
1196    else if (intrin->intrinsic == nir_intrinsic_end_primitive_with_counter)
1197       return lower_legacy_gs_end_primitive_with_counter(b, intrin, s);
1198    else if (intrin->intrinsic == nir_intrinsic_set_vertex_and_primitive_count)
1199       return lower_legacy_gs_set_vertex_and_primitive_count(b, intrin, s);
1200 
1201    return false;
1202 }
1203 
1204 void
1205 ac_nir_lower_legacy_gs(nir_shader *nir,
1206                        bool has_gen_prim_query,
1207                        bool has_pipeline_stats_query,
1208                        ac_nir_gs_output_info *output_info)
1209 {
1210    lower_legacy_gs_state s = {
1211       .info = output_info,
1212    };
1213 
1214    unsigned num_vertices_per_primitive = 0;
1215    switch (nir->info.gs.output_primitive) {
1216    case MESA_PRIM_POINTS:
1217       num_vertices_per_primitive = 1;
1218       break;
1219    case MESA_PRIM_LINE_STRIP:
1220       num_vertices_per_primitive = 2;
1221       break;
1222    case MESA_PRIM_TRIANGLE_STRIP:
1223       num_vertices_per_primitive = 3;
1224       break;
1225    default:
1226       unreachable("Invalid GS output primitive.");
1227       break;
1228    }
1229 
1230    nir_shader_instructions_pass(nir, lower_legacy_gs_intrinsic,
1231                                 nir_metadata_block_index | nir_metadata_dominance, &s);
1232 
1233    nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1234 
1235    nir_builder builder = nir_builder_at(nir_after_impl(impl));
1236    nir_builder *b = &builder;
1237 
1238    /* Emit the shader query code, which is shared between legacy and NGG GS. */
1239    bool progress = ac_nir_gs_shader_query(b,
1240                                           has_gen_prim_query,
1241                                           has_pipeline_stats_query,
1242                                           has_pipeline_stats_query,
1243                                           num_vertices_per_primitive,
1244                                           64,
1245                                           s.vertex_count,
1246                                           s.primitive_count);
1247 
1248    /* Wait for all stores to finish. */
1249    nir_barrier(b, .execution_scope = SCOPE_INVOCATION,
1250                       .memory_scope = SCOPE_DEVICE,
1251                       .memory_semantics = NIR_MEMORY_RELEASE,
1252                       .memory_modes = nir_var_shader_out | nir_var_mem_ssbo |
1253                                       nir_var_mem_global | nir_var_image);
1254 
1255    /* Signal that the GS is done. */
1256    nir_sendmsg_amd(b, nir_load_gs_wave_id_amd(b),
1257                    .base = AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE);
1258 
1259    if (progress)
1260       nir_metadata_preserve(impl, nir_metadata_none);
1261 }
1262