/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir/nir.h"
#include "nir/nir_xfb_info.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "ac_nir.h"

static void
mark_sampler_desc(const nir_variable *var, struct radv_shader_info *info)
{
   info->desc_set_used_mask |= (1u << var->data.descriptor_set);
}

static void
gather_intrinsic_load_input_info(const nir_shader *nir, const nir_intrinsic_instr *instr, struct radv_shader_info *info)
{
   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX: {
      unsigned idx = nir_intrinsic_io_semantics(instr).location;
      unsigned component = nir_intrinsic_component(instr);
      unsigned mask = nir_def_components_read(&instr->def);
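      /* 64-bit inputs consume two 32-bit channels per component, so the read
       * mask is widened (e.g. a two-component 64-bit load with mask 0x3
       * becomes 0xf); after the shift by "component", bits above 0xf spill
       * into the next attribute slot, which is handled below.
       */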
      mask = (instr->def.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;

      info->vs.input_usage_mask[idx] |= mask & 0xf;
      if (mask >> 4)
         info->vs.input_usage_mask[idx + 1] |= mask >> 4;
      break;
   }
   default:
      break;
   }
}

static void
gather_intrinsic_store_output_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                                   struct radv_shader_info *info, bool consider_force_vrs)
{
   unsigned idx = nir_intrinsic_base(instr);
   unsigned num_slots = nir_intrinsic_io_semantics(instr).num_slots;
   unsigned component = nir_intrinsic_component(instr);
   unsigned write_mask = nir_intrinsic_write_mask(instr);
   uint8_t *output_usage_mask = NULL;

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      output_usage_mask = info->vs.output_usage_mask;
      break;
   case MESA_SHADER_TESS_EVAL:
      output_usage_mask = info->tes.output_usage_mask;
      break;
   case MESA_SHADER_GEOMETRY:
      output_usage_mask = info->gs.output_usage_mask;
      break;
   case MESA_SHADER_FRAGMENT:
      if (idx >= FRAG_RESULT_DATA0) {
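         /* Track 4 writemask bits per color attachment (MRT). */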
         info->ps.colors_written |= 0xfu << (4 * (idx - FRAG_RESULT_DATA0));

         if (idx == FRAG_RESULT_DATA0)
            info->ps.color0_written = write_mask;
      }
      break;
   default:
      break;
   }

   if (output_usage_mask) {
      for (unsigned i = 0; i < num_slots; i++) {
         output_usage_mask[idx + i] |= ((write_mask >> (i * 4)) & 0xf) << component;
      }
   }

   if (consider_force_vrs && idx == VARYING_SLOT_POS) {
      unsigned pos_w_chan = 3 - component;

      if (write_mask & BITFIELD_BIT(pos_w_chan)) {
         nir_scalar pos_w = nir_scalar_resolved(instr->src[0].ssa, pos_w_chan);
         /* Use coarse shading if the value of Pos.W can't be determined or if its value is != 1
          * (typical for non-GUI elements).
          */
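         /* 0x3f800000 is the IEEE-754 bit pattern of 1.0f. */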
         if (!nir_scalar_is_const(pos_w) || nir_scalar_as_uint(pos_w) != 0x3f800000u)
            info->force_vrs_per_vertex = true;
      }
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
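      /* Two stream selector bits are stored per output component. */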
      uint8_t gs_streams = nir_intrinsic_io_semantics(instr).gs_streams;
      info->gs.output_streams[idx] |= gs_streams << (component * 2);
   }
}

static void
gather_push_constant_info(const nir_shader *nir, const nir_intrinsic_instr *instr, struct radv_shader_info *info)
{
   info->loads_push_constants = true;

   if (nir_src_is_const(instr->src[0]) && instr->def.bit_size >= 32) {
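      /* Worked example: base=16 with a constant offset of 0 and a 32-bit vec4
       * load gives start=4 and size=4, i.e. dwords 4..7 are marked inlinable.
       */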
      uint32_t start = (nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0])) / 4u;
      uint32_t size = instr->num_components * (instr->def.bit_size / 32u);

      if (start + size <= (MAX_PUSH_CONSTANTS_SIZE / 4u)) {
         info->inline_push_constant_mask |= u_bit_consecutive64(start, size);
         return;
      }
   }

   info->can_inline_all_push_constants = false;
}

static void
gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, struct radv_shader_info *info,
                      bool consider_force_vrs)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_barycentric_sample:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_at_offset: {
      enum glsl_interp_mode mode = nir_intrinsic_interp_mode(instr);
      switch (mode) {
      case INTERP_MODE_SMOOTH:
      case INTERP_MODE_NONE:
         if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_sample ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_offset)
            info->ps.reads_persp_center = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
            info->ps.reads_persp_centroid = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
            info->ps.reads_persp_sample = true;
         break;
      case INTERP_MODE_NOPERSPECTIVE:
         if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_sample ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_offset)
            info->ps.reads_linear_center = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
            info->ps.reads_linear_centroid = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
            info->ps.reads_linear_sample = true;
         break;
      default:
         break;
      }
      if (instr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
         info->ps.needs_sample_positions = true;
      break;
   }
   case nir_intrinsic_load_provoking_vtx_amd:
      info->ps.load_provoking_vtx = true;
      break;
   case nir_intrinsic_load_sample_positions_amd:
      info->ps.needs_sample_positions = true;
      break;
   case nir_intrinsic_load_rasterization_primitive_amd:
      info->ps.load_rasterization_prim = true;
      break;
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_workgroup_id: {
      unsigned mask = nir_def_components_read(&instr->def);
      while (mask) {
         unsigned i = u_bit_scan(&mask);

         if (instr->intrinsic == nir_intrinsic_load_workgroup_id)
            info->cs.uses_block_id[i] = true;
         else
            info->cs.uses_thread_id[i] = true;
      }
      break;
   }
   case nir_intrinsic_load_frag_coord:
      info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->def);
      break;
   case nir_intrinsic_load_sample_pos:
      info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->def);
      break;
   case nir_intrinsic_load_push_constant:
      gather_push_constant_info(nir, instr, info);
      break;
   case nir_intrinsic_vulkan_resource_index:
      info->desc_set_used_mask |= (1u << nir_intrinsic_desc_set(instr));
      break;
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_sparse_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples: {
      nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
      mark_sampler_desc(var, info);
      break;
   }
   case nir_intrinsic_load_input:
      gather_intrinsic_load_input_info(nir, instr, info);
      break;
   case nir_intrinsic_store_output:
      gather_intrinsic_store_output_info(nir, instr, info, consider_force_vrs);
      break;
   case nir_intrinsic_load_sbt_base_amd:
      info->cs.is_rt_shader = true;
      break;
   case nir_intrinsic_load_rt_dynamic_callable_stack_base_amd:
      info->cs.uses_dynamic_rt_callable_stack = true;
      break;
   case nir_intrinsic_bvh64_intersect_ray_amd:
      info->cs.uses_rt = true;
      break;
   case nir_intrinsic_load_poly_line_smooth_enabled:
      info->ps.needs_poly_line_smooth = true;
      break;
   case nir_intrinsic_begin_invocation_interlock:
      info->ps.pops = true;
      break;
   default:
      break;
   }
}

static void
gather_tex_info(const nir_shader *nir, const nir_tex_instr *instr, struct radv_shader_info *info)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
         break;
      case nir_tex_src_sampler_deref:
         mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
         break;
      default:
         break;
      }
   }
}

static void
gather_info_block(const nir_shader *nir, const nir_block *block, struct radv_shader_info *info, bool consider_force_vrs)
{
   nir_foreach_instr (instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir, nir_instr_as_intrinsic(instr), info, consider_force_vrs);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir, nir_instr_as_tex(instr), info);
         break;
      default:
         break;
      }
   }
}

static void
mark_16bit_ps_input(struct radv_shader_info *info, const struct glsl_type *type, int location)
{
   if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
      unsigned attrib_count = glsl_count_attribute_slots(type, false);
      if (glsl_type_is_16bit(type)) {
         info->ps.float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
      }
   } else if (glsl_type_is_array(type)) {
      unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
      for (unsigned i = 0; i < glsl_get_length(type); ++i) {
         mark_16bit_ps_input(info, glsl_get_array_element(type), location + i * stride);
      }
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         mark_16bit_ps_input(info, glsl_get_struct_field(type, i), location);
         location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
      }
   }
}

static void
gather_xfb_info(const nir_shader *nir, struct radv_shader_info *info)
{
   struct radv_streamout_info *so = &info->so;

   if (!nir->xfb_info)
      return;

   const nir_xfb_info *xfb = nir->xfb_info;
   assert(xfb->output_count <= MAX_SO_OUTPUTS);
   so->num_outputs = xfb->output_count;

   for (unsigned i = 0; i < xfb->output_count; i++) {
      unsigned output_buffer = xfb->outputs[i].buffer;
      unsigned stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
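      /* 4 buffer-enable bits are grouped per stream in this mask. */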
      so->enabled_stream_buffers_mask |= (1 << output_buffer) << (stream * 4);
   }

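   /* nir_xfb_info strides are in bytes; RADV stores them in dwords. */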
   for (unsigned i = 0; i < NIR_MAX_XFB_BUFFERS; i++) {
      so->strides[i] = xfb->buffers[i].stride / 4;
   }
}

static void
assign_outinfo_param(struct radv_vs_output_info *outinfo, gl_varying_slot idx, unsigned *total_param_exports,
                     unsigned extra_offset)
{
   if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
      outinfo->vs_output_param_offset[idx] = extra_offset + (*total_param_exports)++;
}

static void
assign_outinfo_params(struct radv_vs_output_info *outinfo, uint64_t mask, unsigned *total_param_exports,
                      unsigned extra_offset)
{
   u_foreach_bit64 (idx, mask) {
      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          idx == VARYING_SLOT_VIEWPORT)
         assign_outinfo_param(outinfo, idx, total_param_exports, extra_offset);
   }
}

static uint8_t
radv_get_wave_size(struct radv_device *device, gl_shader_stage stage, const struct radv_shader_info *info,
                   const struct radv_shader_stage_key *stage_key)
{
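   /* subgroup_required_size appears to encode the required subgroup size in
    * units of 32 lanes (1 -> wave32, 2 -> wave64).
    */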
   if (stage_key->subgroup_required_size)
      return stage_key->subgroup_required_size * 32;

   if (stage == MESA_SHADER_GEOMETRY && !info->is_ngg)
      return 64;
   else if (stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_TASK)
      return info->wave_size;
   else if (stage == MESA_SHADER_FRAGMENT)
      return device->physical_device->ps_wave_size;
   else if (gl_shader_stage_is_rt(stage))
      return device->physical_device->rt_wave_size;
   else
      return device->physical_device->ge_wave_size;
}

static uint8_t
radv_get_ballot_bit_size(struct radv_device *device, gl_shader_stage stage, const struct radv_shader_info *info,
                         const struct radv_shader_stage_key *stage_key)
{
   if (stage_key->subgroup_required_size)
      return stage_key->subgroup_required_size * 32;

   return 64;
}

static uint32_t
radv_compute_esgs_itemsize(const struct radv_device *device, uint32_t num_varyings)
{
   uint32_t esgs_itemsize;

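   /* One vec4 (16 bytes) per varying, e.g. 3 varyings -> 48 bytes, or 52 with
    * the GFX9+ padding below.
    */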
   esgs_itemsize = num_varyings * 16;

   /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
    * conflicts, i.e. each vertex will start on a different bank.
    */
   if (device->physical_device->rad_info.gfx_level >= GFX9 && esgs_itemsize)
      esgs_itemsize += 4;

   return esgs_itemsize;
}

static void
gather_info_input_decl_vs(const nir_shader *nir, unsigned location, const struct glsl_type *type,
                          const struct radv_graphics_state_key *gfx_state, struct radv_shader_info *info)
{
   if (glsl_type_is_scalar(type) || glsl_type_is_vector(type)) {
      if (gfx_state->vi.instance_rate_inputs & BITFIELD_BIT(location)) {
         info->vs.needs_instance_id = true;
         info->vs.needs_base_instance = true;
      }

      if (info->vs.use_per_attribute_vb_descs)
         info->vs.vb_desc_usage_mask |= BITFIELD_BIT(location);
      else
         info->vs.vb_desc_usage_mask |= BITFIELD_BIT(gfx_state->vi.vertex_attribute_bindings[location]);

      info->vs.input_slot_usage_mask |= BITFIELD_RANGE(location, glsl_count_attribute_slots(type, false));
   } else if (glsl_type_is_matrix(type) || glsl_type_is_array(type)) {
      const struct glsl_type *elem = glsl_get_array_element(type);
      unsigned stride = glsl_count_attribute_slots(elem, false);

      for (unsigned i = 0; i < glsl_get_length(type); ++i)
         gather_info_input_decl_vs(nir, location + i * stride, elem, gfx_state, info);
   } else {
      assert(glsl_type_is_struct_or_ifc(type));

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field = glsl_get_struct_field(type, i);
         gather_info_input_decl_vs(nir, location, field, gfx_state, info);
         location += glsl_count_attribute_slots(field, false);
      }
   }
}

static void
gather_shader_info_ngg_query(struct radv_device *device, struct radv_shader_info *info)
{
   info->has_xfb_query = info->so.num_outputs > 0;
   info->has_prim_query = device->cache_key.primitives_generated_query || info->has_xfb_query;
}

static void
gather_shader_info_vs(struct radv_device *device, const nir_shader *nir,
                      const struct radv_graphics_state_key *gfx_state, const struct radv_shader_stage_key *stage_key,
                      struct radv_shader_info *info)
{
   if (gfx_state->vs.has_prolog && nir->info.inputs_read) {
      info->vs.has_prolog = true;
      info->vs.dynamic_inputs = true;
   }

   /* Use per-attribute vertex descriptors to prevent faults and for correct bounds checking. */
   info->vs.use_per_attribute_vb_descs = stage_key->vertex_robustness1 || info->vs.dynamic_inputs;

   /* We have to ensure consistent input register assignments between the main shader and the
    * prolog.
    */
   info->vs.needs_instance_id |= info->vs.has_prolog;
   info->vs.needs_base_instance |= info->vs.has_prolog;
   info->vs.needs_draw_id |= info->vs.has_prolog;

   nir_foreach_shader_in_variable (var, nir)
      gather_info_input_decl_vs(nir, var->data.location - VERT_ATTRIB_GENERIC0, var->type, gfx_state, info);

   if (info->vs.dynamic_inputs)
      info->vs.vb_desc_usage_mask = BITFIELD_MASK(util_last_bit(info->vs.vb_desc_usage_mask));

   /* When the topology is unknown (with GPL), the number of vertices per primitive needs to be
    * passed through a user SGPR for NGG streamout with VS. Otherwise, the XFB offset is
    * incorrectly computed because using the maximum number of vertices can't work.
    */
   info->vs.dynamic_num_verts_per_prim = gfx_state->ia.topology == V_008958_DI_PT_NONE && info->is_ngg && nir->xfb_info;

   if (!info->outputs_linked)
      info->vs.num_linked_outputs = util_last_bit64(nir->info.outputs_written);

   if (info->next_stage == MESA_SHADER_TESS_CTRL) {
      info->vs.as_ls = true;
   } else if (info->next_stage == MESA_SHADER_GEOMETRY) {
      info->vs.as_es = true;
      info->esgs_itemsize = radv_compute_esgs_itemsize(device, info->vs.num_linked_outputs);
   }

   if (info->is_ngg) {
      info->vs.num_outputs = nir->num_outputs;

      if (info->next_stage == MESA_SHADER_FRAGMENT || info->next_stage == MESA_SHADER_NONE) {
         gather_shader_info_ngg_query(device, info);
      }
   }
}

static void
gather_shader_info_tcs(struct radv_device *device, const nir_shader *nir,
                       const struct radv_graphics_state_key *gfx_state, struct radv_shader_info *info)
{
   info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
   info->tcs.tes_inputs_read = ~0ULL;
   info->tcs.tes_patch_inputs_read = ~0ULL;

   if (!info->inputs_linked)
      info->tcs.num_linked_inputs = util_last_bit64(nir->info.inputs_read);
   if (!info->outputs_linked) {
      info->tcs.num_linked_outputs = util_last_bit64(nir->info.outputs_written);
      info->tcs.num_linked_patch_outputs = util_last_bit64(nir->info.patch_outputs_written);
   }

   if (gfx_state->ts.patch_control_points) {
      /* Number of tessellation patches per workgroup processed by the current pipeline. */
      info->num_tess_patches =
         get_tcs_num_patches(gfx_state->ts.patch_control_points, nir->info.tess.tcs_vertices_out,
                             info->tcs.num_linked_inputs, info->tcs.num_linked_outputs,
                             info->tcs.num_linked_patch_outputs, device->physical_device->hs.tess_offchip_block_dw_size,
                             device->physical_device->rad_info.gfx_level, device->physical_device->rad_info.family);

      /* LDS size used by VS+TCS for storing TCS inputs and outputs. */
      info->tcs.num_lds_blocks =
         calculate_tess_lds_size(device->physical_device->rad_info.gfx_level, gfx_state->ts.patch_control_points,
                                 nir->info.tess.tcs_vertices_out, info->tcs.num_linked_inputs, info->num_tess_patches,
                                 info->tcs.num_linked_outputs, info->tcs.num_linked_patch_outputs);
   }

   /* By default, assume a TCS needs an epilog unless it's linked with a TES. */
   info->has_epilog = true;
}

static void
gather_shader_info_tes(struct radv_device *device, const nir_shader *nir, struct radv_shader_info *info)
{
   info->tes._primitive_mode = nir->info.tess._primitive_mode;
   info->tes.spacing = nir->info.tess.spacing;
   info->tes.ccw = nir->info.tess.ccw;
   info->tes.point_mode = nir->info.tess.point_mode;
   info->tes.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
   info->tes.reads_tess_factors =
      !!(nir->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));

   if (!info->inputs_linked)
      info->tes.num_linked_inputs = util_last_bit64(nir->info.inputs_read);
   if (!info->outputs_linked)
      info->tes.num_linked_outputs = util_last_bit64(nir->info.outputs_written);

   if (info->next_stage == MESA_SHADER_GEOMETRY) {
      info->tes.as_es = true;
      info->esgs_itemsize = radv_compute_esgs_itemsize(device, info->tes.num_linked_outputs);
   }

   if (info->is_ngg) {
      info->tes.num_outputs = nir->num_outputs;

      if (info->next_stage == MESA_SHADER_FRAGMENT || info->next_stage == MESA_SHADER_NONE) {
         gather_shader_info_ngg_query(device, info);
      }
   }
}

static void
radv_init_legacy_gs_ring_info(const struct radv_device *device, struct radv_shader_info *gs_info)
{
   const struct radv_physical_device *pdevice = device->physical_device;
   struct radv_legacy_gs_info *gs_ring_info = &gs_info->gs_ring_info;
   unsigned num_se = pdevice->rad_info.max_se;
   unsigned wave_size = 64;
   unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
   /* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
    * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
    */
   unsigned gs_vertex_reuse = (pdevice->rad_info.gfx_level >= GFX8 ? 32 : 16) * num_se;
   unsigned alignment = 256 * num_se;
   /* The maximum size is 63.999 MB per SE. */
   unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

   /* Calculate the minimum size. */
   unsigned min_esgs_ring_size =
      align(gs_ring_info->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse * wave_size, alignment);
   /* These are recommended sizes, not minimum sizes. */
   unsigned esgs_ring_size =
      max_gs_waves * 2 * wave_size * gs_ring_info->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
   unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size * gs_info->gs.max_gsvs_emit_size;

   min_esgs_ring_size = align(min_esgs_ring_size, alignment);
   esgs_ring_size = align(esgs_ring_size, alignment);
   gsvs_ring_size = align(gsvs_ring_size, alignment);

   if (pdevice->rad_info.gfx_level <= GFX8)
      gs_ring_info->esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

   gs_ring_info->gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}

static void
radv_get_legacy_gs_info(const struct radv_device *device, struct radv_shader_info *gs_info)
{
   struct radv_legacy_gs_info *out = &gs_info->gs_ring_info;
   const unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
   const bool uses_adjacency =
      gs_info->gs.input_prim == MESA_PRIM_LINES_ADJACENCY || gs_info->gs.input_prim == MESA_PRIM_TRIANGLES_ADJACENCY;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space. */
   const unsigned max_lds_size = 8 * 1024;
   const unsigned esgs_itemsize = radv_compute_esgs_itemsize(device, gs_info->gs.num_linked_inputs) / 4;
   unsigned esgs_lds_size;

   /* All these are per subgroup: */
   const unsigned max_out_prims = 32 * 1024;
   const unsigned max_es_verts = 255;
   const unsigned ideal_gs_prims = 64;
   unsigned max_gs_prims, gs_prims;
   unsigned min_es_verts, es_verts, worst_case_es_verts;

   if (uses_adjacency || gs_num_invocations > 1)
      max_gs_prims = 127 / gs_num_invocations;
   else
      max_gs_prims = 255;

   /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
    * Make sure we don't go over the maximum value.
    */
   if (gs_info->gs.vertices_out > 0) {
      max_gs_prims = MIN2(max_gs_prims, max_out_prims / (gs_info->gs.vertices_out * gs_num_invocations));
   }
   assert(max_gs_prims > 0);

   /* If the primitive has adjacency, halve the number of vertices
    * that will be reused in multiple primitives.
    */
   min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

   gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
   worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

   /* Compute ESGS LDS size based on the worst case number of ES vertices
    * needed to create the target number of GS prims per subgroup.
    */
   esgs_lds_size = esgs_itemsize * worst_case_es_verts;

   /* If total LDS usage is too big, refactor partitions based on ratio
    * of ESGS item sizes.
    */
   if (esgs_lds_size > max_lds_size) {
      /* Our target GS Prims Per Subgroup was too large. Calculate
       * the maximum number of GS Prims Per Subgroup that will fit
       * into LDS, capped by the maximum that the hardware can support.
       */
      gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)), max_gs_prims);
      assert(gs_prims > 0);
      worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

      esgs_lds_size = esgs_itemsize * worst_case_es_verts;
      assert(esgs_lds_size <= max_lds_size);
   }

   /* Now calculate remaining ESGS information. */
   if (esgs_lds_size)
      es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
   else
      es_verts = max_es_verts;

   /* Vertices for adjacency primitives are not always reused, so restore
    * it for ES_VERTS_PER_SUBGRP.
    */
   min_es_verts = gs_info->gs.vertices_in;

   /* For normal primitives, the VGT only checks if they are past the ES
    * verts per subgroup after allocating a full GS primitive and if they
    * are, kick off a new subgroup.  But if those additional ES verts are
    * unique (e.g. not reused) we need to make sure there is enough LDS
    * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
    */
   es_verts -= min_es_verts - 1;

   const uint32_t es_verts_per_subgroup = es_verts;
   const uint32_t gs_prims_per_subgroup = gs_prims;
   const uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
   const uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
   const uint32_t lds_granularity = device->physical_device->rad_info.lds_encode_granularity;
   const uint32_t total_lds_bytes = align(esgs_lds_size * 4, lds_granularity);
   out->lds_size = total_lds_bytes / lds_granularity;
   out->vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
                             S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
                             S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
   out->vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
   out->vgt_esgs_ring_itemsize = esgs_itemsize;
   assert(max_prims_per_subgroup <= max_out_prims);

   radv_init_legacy_gs_ring_info(device, gs_info);
}

static void
gather_shader_info_gs(struct radv_device *device, const nir_shader *nir, struct radv_shader_info *info)
{
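   /* A second clip/cull distance vec4 slot is needed when more than 4
    * distances are written.
    */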
   unsigned add_clip = nir->info.clip_distance_array_size + nir->info.cull_distance_array_size > 4;
   info->gs.gsvs_vertex_size = (util_bitcount64(nir->info.outputs_written) + add_clip) * 16;
   info->gs.max_gsvs_emit_size = info->gs.gsvs_vertex_size * nir->info.gs.vertices_out;

   info->gs.vertices_in = nir->info.gs.vertices_in;
   info->gs.vertices_out = nir->info.gs.vertices_out;
   info->gs.input_prim = nir->info.gs.input_primitive;
   info->gs.output_prim = nir->info.gs.output_primitive;
   info->gs.invocations = nir->info.gs.invocations;
   info->gs.max_stream = nir->info.gs.active_stream_mask ? util_last_bit(nir->info.gs.active_stream_mask) - 1 : 0;

   nir_foreach_shader_out_variable (var, nir) {
      unsigned num_components = glsl_get_component_slots(var->type);
      unsigned stream = var->data.stream;

      assert(stream < 4);

      info->gs.num_stream_output_components[stream] += num_components;
   }

   info->gs.has_pipeline_stat_query = device->physical_device->emulate_ngg_gs_query_pipeline_stat;

   if (!info->inputs_linked)
      info->gs.num_linked_inputs = util_last_bit64(nir->info.inputs_read);

   if (info->is_ngg) {
      gather_shader_info_ngg_query(device, info);
   } else {
      radv_get_legacy_gs_info(device, info);
   }
}

static void
gather_shader_info_mesh(struct radv_device *device, const nir_shader *nir,
                        const struct radv_shader_stage_key *stage_key, struct radv_shader_info *info)
{
   struct gfx10_ngg_info *ngg_info = &info->ngg_info;

   info->ms.output_prim = nir->info.mesh.primitive_type;

   /* Special case for mesh shader workgroups.
    *
    * Mesh shaders don't have any real vertex input, but they can produce
    * an arbitrary number of vertices and primitives (up to 256).
    * We need to precisely control the number of mesh shader workgroups
    * that are launched from draw calls.
    *
    * To achieve that, we set:
    * - input primitive topology to point list
    * - input vertex and primitive count to 1
    * - max output vertex count and primitive amplification factor
    *   to the boundaries of the shader
    *
    * With that, in the draw call:
    * - drawing 1 input vertex ~ launching 1 mesh shader workgroup
    *
    * In the shader:
    * - input vertex id ~ workgroup id (in 1D - shader needs to calculate in 3D)
    *
    * Notes:
    * - without GS_EN=1 PRIM_AMP_FACTOR and MAX_VERTS_PER_SUBGROUP don't seem to work
    * - with GS_EN=1 we must also set VGT_GS_MAX_VERT_OUT (otherwise the GPU hangs)
    * - with GS_FAST_LAUNCH=1 every lane's VGPRs are initialized to the same input vertex index
    */
   ngg_info->esgs_ring_size = 1;
   ngg_info->hw_max_esverts = 1;
   ngg_info->max_gsprims = 1;
   ngg_info->max_out_verts = nir->info.mesh.max_vertices_out;
   ngg_info->max_vert_out_per_gs_instance = false;
   ngg_info->ngg_emit_size = 0;
   ngg_info->prim_amp_factor = nir->info.mesh.max_primitives_out;
   ngg_info->vgt_esgs_ring_itemsize = 1;

   info->ms.has_query = device->cache_key.mesh_shader_queries;
   info->ms.has_task = stage_key->has_task_shader;
}

static void
calc_mesh_workgroup_size(const struct radv_device *device, const nir_shader *nir, struct radv_shader_info *info)
{
   unsigned api_workgroup_size = ac_compute_cs_workgroup_size(nir->info.workgroup_size, false, UINT32_MAX);

   if (device->physical_device->mesh_fast_launch_2) {
      /* Use multi-row export. It is also necessary to use the API workgroup size for non-emulated queries. */
      info->workgroup_size = api_workgroup_size;
   } else {
      struct gfx10_ngg_info *ngg_info = &info->ngg_info;
      unsigned min_ngg_workgroup_size = ac_compute_ngg_workgroup_size(
         ngg_info->hw_max_esverts, ngg_info->max_gsprims, ngg_info->max_out_verts, ngg_info->prim_amp_factor);

      info->workgroup_size = MAX2(min_ngg_workgroup_size, api_workgroup_size);
   }
}

static void
gather_shader_info_fs(const struct radv_device *device, const nir_shader *nir,
                      const struct radv_graphics_state_key *gfx_state, struct radv_shader_info *info)
{
   uint64_t per_primitive_input_mask = nir->info.inputs_read & nir->info.per_primitive_inputs;
   unsigned num_per_primitive_inputs = util_bitcount64(per_primitive_input_mask);
   assert(num_per_primitive_inputs <= nir->num_inputs);

   info->ps.num_interp = nir->num_inputs;
   info->ps.num_prim_interp = 0;

   if (device->physical_device->rad_info.gfx_level == GFX10_3) {
      /* GFX10.3 distinguishes NUM_INTERP and NUM_PRIM_INTERP, but
       * these are counted together in NUM_INTERP on GFX11.
       */
      info->ps.num_interp = nir->num_inputs - num_per_primitive_inputs;
      info->ps.num_prim_interp = num_per_primitive_inputs;
   }

   info->ps.can_discard = nir->info.fs.uses_discard;
   info->ps.early_fragment_test =
      nir->info.fs.early_fragment_tests ||
      (nir->info.fs.early_and_late_fragment_tests && nir->info.fs.depth_layout == FRAG_DEPTH_LAYOUT_NONE &&
       nir->info.fs.stencil_front_layout == FRAG_STENCIL_LAYOUT_NONE &&
       nir->info.fs.stencil_back_layout == FRAG_STENCIL_LAYOUT_NONE);
   info->ps.post_depth_coverage = nir->info.fs.post_depth_coverage;
   info->ps.depth_layout = nir->info.fs.depth_layout;
   info->ps.uses_sample_shading = nir->info.fs.uses_sample_shading;
   info->ps.writes_memory = nir->info.writes_memory;
   info->ps.has_pcoord = nir->info.inputs_read & VARYING_BIT_PNTC;
   info->ps.prim_id_input = nir->info.inputs_read & VARYING_BIT_PRIMITIVE_ID;
   info->ps.layer_input = nir->info.inputs_read & VARYING_BIT_LAYER;
   info->ps.viewport_index_input = nir->info.inputs_read & VARYING_BIT_VIEWPORT;
   info->ps.writes_z = nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH);
   info->ps.writes_stencil = nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
   info->ps.writes_sample_mask = nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
   info->ps.reads_sample_mask_in = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
   info->ps.reads_sample_id = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID);
   info->ps.reads_frag_shading_rate = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRAG_SHADING_RATE);
   info->ps.reads_front_face = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRONT_FACE);
   info->ps.reads_barycentric_model = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL);
   info->ps.reads_fully_covered = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FULLY_COVERED);

   bool uses_persp_or_linear_interp = info->ps.reads_persp_center || info->ps.reads_persp_centroid ||
                                      info->ps.reads_persp_sample || info->ps.reads_linear_center ||
                                      info->ps.reads_linear_centroid || info->ps.reads_linear_sample;

   info->ps.allow_flat_shading =
      !(uses_persp_or_linear_interp || info->ps.needs_sample_positions || info->ps.reads_frag_shading_rate ||
        info->ps.writes_memory || nir->info.fs.needs_quad_helper_invocations ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRAG_COORD) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_POINT_COORD) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_HELPER_INVOCATION));

   info->ps.pops_is_per_sample =
      info->ps.pops && (nir->info.fs.sample_interlock_ordered || nir->info.fs.sample_interlock_unordered);

   info->ps.spi_ps_input = radv_compute_spi_ps_input(gfx_state, info);

   info->has_epilog = gfx_state->ps.has_epilog && info->ps.colors_written;

   if (!info->has_epilog) {
      info->ps.mrt0_is_dual_src = gfx_state->ps.epilog.mrt0_is_dual_src;
      info->ps.spi_shader_col_format = gfx_state->ps.epilog.spi_shader_col_format;
   }

   const bool export_alpha_and_mrtz =
      (info->ps.color0_written & 0x8) && (info->ps.writes_z || info->ps.writes_stencil || info->ps.writes_sample_mask);

   info->ps.exports_mrtz_via_epilog =
      info->has_epilog && gfx_state->ps.exports_mrtz_via_epilog && export_alpha_and_mrtz;

   if (!info->ps.exports_mrtz_via_epilog) {
      info->ps.writes_mrt0_alpha = gfx_state->ms.alpha_to_coverage_via_mrtz && export_alpha_and_mrtz;
   }

   nir_foreach_shader_in_variable (var, nir) {
      const struct glsl_type *type = var->data.per_vertex ? glsl_get_array_element(var->type) : var->type;
      unsigned attrib_count = glsl_count_attribute_slots(type, false);
      int idx = var->data.location;

      switch (idx) {
      case VARYING_SLOT_CLIP_DIST0:
      case VARYING_SLOT_CLIP_DIST1:
         info->ps.num_input_clips_culls += attrib_count;
         break;
      default:
         break;
      }

      if (var->data.compact) {
         unsigned component_count = var->data.location_frac + glsl_get_length(var->type);
         attrib_count = (component_count + 3) / 4;
      } else {
         mark_16bit_ps_input(info, type, var->data.driver_location);
      }

      uint64_t mask = ((1ull << attrib_count) - 1);

      if (!var->data.per_primitive) {
         if (var->data.interpolation == INTERP_MODE_FLAT)
            info->ps.flat_shaded_mask |= mask << var->data.driver_location;
         else if (var->data.interpolation == INTERP_MODE_EXPLICIT)
            info->ps.explicit_shaded_mask |= mask << var->data.driver_location;
         else if (var->data.per_vertex)
            info->ps.per_vertex_shaded_mask |= mask << var->data.driver_location;
      }

      if (var->data.location >= VARYING_SLOT_VAR0) {
         if (var->data.per_primitive)
            info->ps.input_per_primitive_mask |= mask << (var->data.location - VARYING_SLOT_VAR0);
         else
            info->ps.input_mask |= mask << (var->data.location - VARYING_SLOT_VAR0);
      }
   }

   /* Disable VRS and use the rates from PS_ITER_SAMPLES if:
    *
    * - The fragment shader reads gl_SampleMaskIn because the 16-bit sample coverage mask isn't enough for MSAA8x and
    *   2x2 coarse shading.
    * - On GFX10.3, if the fragment shader requests a fragment interlock execution mode even if the ordered section was
    *   optimized out, to consistently implement fragmentShadingRateWithFragmentShaderInterlock = VK_FALSE.
    */
   info->ps.force_sample_iter_shading_rate =
      (info->ps.reads_sample_mask_in && !info->ps.needs_poly_line_smooth) ||
      (device->physical_device->rad_info.gfx_level == GFX10_3 &&
       (nir->info.fs.sample_interlock_ordered || nir->info.fs.sample_interlock_unordered ||
        nir->info.fs.pixel_interlock_ordered || nir->info.fs.pixel_interlock_unordered));

   /* DB_SHADER_CONTROL based on other fragment shader info fields. */

   unsigned conservative_z_export = V_02880C_EXPORT_ANY_Z;
   if (info->ps.depth_layout == FRAG_DEPTH_LAYOUT_GREATER)
      conservative_z_export = V_02880C_EXPORT_GREATER_THAN_Z;
   else if (info->ps.depth_layout == FRAG_DEPTH_LAYOUT_LESS)
      conservative_z_export = V_02880C_EXPORT_LESS_THAN_Z;

   unsigned z_order =
      info->ps.early_fragment_test || !info->ps.writes_memory ? V_02880C_EARLY_Z_THEN_LATE_Z : V_02880C_LATE_Z;

   /* It shouldn't be needed to export gl_SampleMask when MSAA is disabled, but this appears to break Project Cars
    * (DXVK). See https://bugs.freedesktop.org/show_bug.cgi?id=109401
    */
   const bool mask_export_enable = info->ps.writes_sample_mask;

   const bool disable_rbplus =
      device->physical_device->rad_info.has_rbplus && !device->physical_device->rad_info.rbplus_allowed;

   info->ps.db_shader_control =
      S_02880C_Z_EXPORT_ENABLE(info->ps.writes_z) | S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(info->ps.writes_stencil) |
      S_02880C_KILL_ENABLE(info->ps.can_discard) | S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
      S_02880C_CONSERVATIVE_Z_EXPORT(conservative_z_export) | S_02880C_Z_ORDER(z_order) |
      S_02880C_DEPTH_BEFORE_SHADER(info->ps.early_fragment_test) |
      S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(info->ps.post_depth_coverage) |
      S_02880C_EXEC_ON_HIER_FAIL(info->ps.writes_memory) | S_02880C_EXEC_ON_NOOP(info->ps.writes_memory) |
      S_02880C_DUAL_QUAD_DISABLE(disable_rbplus) | S_02880C_PRIMITIVE_ORDERED_PIXEL_SHADER(info->ps.pops);
}

static void
gather_shader_info_rt(const nir_shader *nir, struct radv_shader_info *info)
{
   // TODO: inline push_constants again
   info->loads_dynamic_offsets = true;
   info->loads_push_constants = true;
   info->can_inline_all_push_constants = false;
   info->inline_push_constant_mask = 0;
   info->desc_set_used_mask = -1u;
}

static void
gather_shader_info_cs(struct radv_device *device, const nir_shader *nir, const struct radv_shader_stage_key *stage_key,
                      struct radv_shader_info *info)
{
   unsigned default_wave_size = device->physical_device->cs_wave_size;
   if (info->cs.uses_rt)
      default_wave_size = device->physical_device->rt_wave_size;

   unsigned local_size = nir->info.workgroup_size[0] * nir->info.workgroup_size[1] * nir->info.workgroup_size[2];

   /* Games don't always request full subgroups when they should, which can cause bugs if cswave32
    * is enabled. Furthermore, if cooperative matrices or subgroup info are used, we can't transparently change
    * the subgroup size.
    */
   const bool require_full_subgroups =
      stage_key->subgroup_require_full || nir->info.cs.has_cooperative_matrix ||
      (default_wave_size == 32 && nir->info.uses_wide_subgroup_intrinsics && local_size % RADV_SUBGROUP_SIZE == 0);

   const unsigned required_subgroup_size = stage_key->subgroup_required_size * 32;

   if (required_subgroup_size) {
      info->wave_size = required_subgroup_size;
   } else if (require_full_subgroups) {
      info->wave_size = RADV_SUBGROUP_SIZE;
   } else if (device->physical_device->rad_info.gfx_level >= GFX10 && local_size <= 32) {
      /* Use wave32 for small workgroups. */
      info->wave_size = 32;
   } else {
      info->wave_size = default_wave_size;
   }

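   /* On chips with this bug, compute dispatches may hang during register
    * allocation when a workgroup has more than 256 threads.
    */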
   if (device->physical_device->rad_info.has_cs_regalloc_hang_bug) {
      info->cs.regalloc_hang_bug = info->cs.block_size[0] * info->cs.block_size[1] * info->cs.block_size[2] > 256;
   }
}

static void
gather_shader_info_task(struct radv_device *device, const nir_shader *nir,
                        const struct radv_shader_stage_key *stage_key, struct radv_shader_info *info)
{
   gather_shader_info_cs(device, nir, stage_key, info);

   /* Task shaders always need these for the I/O lowering even if the API shader doesn't actually
    * use them.
    */

   /* Needed to address the task draw/payload rings. */
   info->cs.uses_block_id[0] = true;
   info->cs.uses_block_id[1] = true;
   info->cs.uses_block_id[2] = true;
   info->cs.uses_grid_size = true;

   /* Needed for storing draw ready only on the 1st thread. */
   info->cs.uses_local_invocation_idx = true;

   /* Task->Mesh dispatch is linear when Y = Z = 1.
    * GFX11 CP can optimize this case with a field in its draw packets.
    */
   info->cs.linear_taskmesh_dispatch =
      nir->info.mesh.ts_mesh_dispatch_dimensions[1] == 1 && nir->info.mesh.ts_mesh_dispatch_dimensions[2] == 1;

   info->cs.has_query = device->cache_key.mesh_shader_queries;
}

static uint32_t
radv_get_user_data_0(const struct radv_device *device, struct radv_shader_info *info)
{
   const enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;

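   /* On GFX9+ some stages are merged by HW (VS+TCS into HS, VS/TES+GS into GS),
    * so the user data registers of the merged HW stage are returned.
    */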
   switch (info->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_MESH:
      if (info->next_stage == MESA_SHADER_TESS_CTRL) {
         assert(info->stage == MESA_SHADER_VERTEX);

         if (gfx_level >= GFX10) {
            return R_00B430_SPI_SHADER_USER_DATA_HS_0;
         } else if (gfx_level == GFX9) {
            return R_00B430_SPI_SHADER_USER_DATA_LS_0;
         } else {
            return R_00B530_SPI_SHADER_USER_DATA_LS_0;
         }
      }

      if (info->next_stage == MESA_SHADER_GEOMETRY) {
         assert(info->stage == MESA_SHADER_VERTEX || info->stage == MESA_SHADER_TESS_EVAL);

         if (gfx_level >= GFX10) {
            return R_00B230_SPI_SHADER_USER_DATA_GS_0;
         } else {
            return R_00B330_SPI_SHADER_USER_DATA_ES_0;
         }
      }

      if (info->is_ngg)
         return R_00B230_SPI_SHADER_USER_DATA_GS_0;

      assert(info->stage != MESA_SHADER_MESH);
      return R_00B130_SPI_SHADER_USER_DATA_VS_0;
   case MESA_SHADER_TESS_CTRL:
      return gfx_level == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 : R_00B430_SPI_SHADER_USER_DATA_HS_0;
   case MESA_SHADER_GEOMETRY:
      return gfx_level == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B230_SPI_SHADER_USER_DATA_GS_0;
   case MESA_SHADER_FRAGMENT:
      return R_00B030_SPI_SHADER_USER_DATA_PS_0;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_TASK:
   case MESA_SHADER_RAYGEN:
   case MESA_SHADER_CALLABLE:
   case MESA_SHADER_CLOSEST_HIT:
   case MESA_SHADER_MISS:
   case MESA_SHADER_INTERSECTION:
   case MESA_SHADER_ANY_HIT:
      return R_00B900_COMPUTE_USER_DATA_0;
   default:
      unreachable("invalid shader stage");
   }
}

static bool
radv_is_merged_shader_compiled_separately(const struct radv_device *device, const struct radv_shader_info *info)
{
   const enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;

   if (gfx_level >= GFX9) {
      switch (info->stage) {
      case MESA_SHADER_VERTEX:
         if (info->next_stage == MESA_SHADER_TESS_CTRL || info->next_stage == MESA_SHADER_GEOMETRY)
            return !info->outputs_linked;
         break;
      case MESA_SHADER_TESS_EVAL:
         if (info->next_stage == MESA_SHADER_GEOMETRY)
            return !info->outputs_linked;
         break;
      case MESA_SHADER_TESS_CTRL:
      case MESA_SHADER_GEOMETRY:
         return !info->inputs_linked;
      default:
         break;
      }
   }

   return false;
}

void
radv_nir_shader_info_init(gl_shader_stage stage, gl_shader_stage next_stage, struct radv_shader_info *info)
{
   memset(info, 0, sizeof(*info));

   /* Assume that shaders can inline all push constants by default. */
   info->can_inline_all_push_constants = true;

   info->stage = stage;
   info->next_stage = next_stage;
}

void
radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
                          const struct radv_shader_layout *layout, const struct radv_shader_stage_key *stage_key,
                          const struct radv_graphics_state_key *gfx_state, const enum radv_pipeline_type pipeline_type,
                          bool consider_force_vrs, struct radv_shader_info *info)
{
   struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&nir->functions);

   if (layout->use_dynamic_descriptors) {
      info->loads_push_constants = true;
      info->loads_dynamic_offsets = true;
   }

   nir_foreach_block (block, func->impl) {
      gather_info_block(nir, block, info, consider_force_vrs);
   }

   if (nir->info.stage == MESA_SHADER_VERTEX || nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      gather_xfb_info(nir, info);

   if (nir->info.stage == MESA_SHADER_VERTEX || nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY || nir->info.stage == MESA_SHADER_MESH) {
      struct radv_vs_output_info *outinfo = &info->outinfo;

      /* These are compiled into neither the output param nor the position exports. */
      uint64_t special_mask = BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_COUNT) |
                              BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_INDICES) |
                              BITFIELD64_BIT(VARYING_SLOT_CULL_PRIMITIVE);
      uint64_t per_prim_mask = nir->info.outputs_written & nir->info.per_primitive_outputs & ~special_mask;
      uint64_t per_vtx_mask = nir->info.outputs_written & ~nir->info.per_primitive_outputs & ~special_mask;

      /* Mesh multiview is only lowered in ac_nir_lower_ngg, so we have to fake it here. */
      if (nir->info.stage == MESA_SHADER_MESH && gfx_state->has_multiview_view_index) {
         per_prim_mask |= VARYING_BIT_LAYER;
         info->uses_view_index = true;
      }

      /* Per-vertex outputs. */
      outinfo->writes_pointsize = per_vtx_mask & VARYING_BIT_PSIZ;
      outinfo->writes_viewport_index = per_vtx_mask & VARYING_BIT_VIEWPORT;
      outinfo->writes_layer = per_vtx_mask & VARYING_BIT_LAYER;
      outinfo->writes_primitive_shading_rate =
         (per_vtx_mask & VARYING_BIT_PRIMITIVE_SHADING_RATE) || info->force_vrs_per_vertex;

      /* Per-primitive outputs. */
      outinfo->writes_viewport_index_per_primitive = per_prim_mask & VARYING_BIT_VIEWPORT;
      outinfo->writes_layer_per_primitive = per_prim_mask & VARYING_BIT_LAYER;
      outinfo->writes_primitive_shading_rate_per_primitive = per_prim_mask & VARYING_BIT_PRIMITIVE_SHADING_RATE;

      /* Clip/cull distances. */
      outinfo->clip_dist_mask = (1 << nir->info.clip_distance_array_size) - 1;
      outinfo->cull_dist_mask = (1 << nir->info.cull_distance_array_size) - 1;
      outinfo->cull_dist_mask <<= nir->info.clip_distance_array_size;
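
      /* Worked example (illustrative numbers, not from the source): with
       * clip_distance_array_size = 4 and cull_distance_array_size = 2, this
       * yields clip_dist_mask = 0b001111 and cull_dist_mask = 0b110000, i.e.
       * the cull distances occupy the slots right after the clip distances.
       */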

      int pos_written = 0x1;

      if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer ||
          outinfo->writes_primitive_shading_rate)
         pos_written |= 1 << 1;

      unsigned num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
      unsigned num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

      if (num_clip_distances + num_cull_distances > 0)
         pos_written |= 1 << 2;
      if (num_clip_distances + num_cull_distances > 4)
         pos_written |= 1 << 3;

      outinfo->pos_exports = util_bitcount(pos_written);
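
      /* Worked example (illustrative): a shader writing PSIZ plus six
       * clip/cull distances sets bit 0 (the position itself), bit 1 (misc
       * outputs), bit 2 (distances 0-3) and bit 3 (distances 4-7), so
       * pos_written = 0b1111 and pos_exports = 4.
       */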

      memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED, sizeof(outinfo->vs_output_param_offset));

      unsigned total_param_exports = 0;

      /* Per-vertex outputs */
      assign_outinfo_params(outinfo, per_vtx_mask, &total_param_exports, 0);

      outinfo->param_exports = total_param_exports;

      /* The HW always assumes that there is at least 1 per-vertex param,
       * so if there aren't any, we have to offset per-primitive params by 1.
       */
      const unsigned extra_offset =
         !!(total_param_exports == 0 && device->physical_device->rad_info.gfx_level >= GFX11);

      /* Per-primitive outputs: the HW needs these to be last. */
      assign_outinfo_params(outinfo, per_prim_mask, &total_param_exports, extra_offset);

      outinfo->prim_param_exports = total_param_exports - outinfo->param_exports;
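
      /* Example of the resulting layout (illustrative): two per-vertex params
       * and one per-primitive param give param_exports = 2 and
       * prim_param_exports = 1, with the per-primitive param last. On GFX11+,
       * if there were no per-vertex params at all, extra_offset would shift
       * the per-primitive params by one slot to satisfy the HW assumption
       * described above.
       */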
   }

   info->vs.needs_draw_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID);
   info->vs.needs_base_instance |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE);
   info->vs.needs_instance_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
   info->uses_view_index |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VIEW_INDEX);
   info->uses_invocation_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INVOCATION_ID);
   info->uses_prim_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_PRIMITIVE_ID);

   /* Used by compute and mesh shaders. Mesh shaders must always declare this before GFX11. */
   info->cs.uses_grid_size =
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_NUM_WORKGROUPS) ||
      (nir->info.stage == MESA_SHADER_MESH && device->physical_device->rad_info.gfx_level < GFX11);
   info->cs.uses_local_invocation_idx = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) |
                                        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SUBGROUP_ID) |
                                        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_NUM_SUBGROUPS) |
                                        radv_shader_should_clear_lds(device, nir);

   if (nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_TASK ||
       nir->info.stage == MESA_SHADER_MESH) {
      for (int i = 0; i < 3; ++i)
         info->cs.block_size[i] = nir->info.workgroup_size[i];
   }

   info->user_data_0 = radv_get_user_data_0(device, info);
   info->merged_shader_compiled_separately = radv_is_merged_shader_compiled_separately(device, info);

   switch (nir->info.stage) {
   case MESA_SHADER_COMPUTE:
      gather_shader_info_cs(device, nir, stage_key, info);
      break;
   case MESA_SHADER_TASK:
      gather_shader_info_task(device, nir, stage_key, info);
      break;
   case MESA_SHADER_FRAGMENT:
      gather_shader_info_fs(device, nir, gfx_state, info);
      break;
   case MESA_SHADER_GEOMETRY:
      gather_shader_info_gs(device, nir, info);
      break;
   case MESA_SHADER_TESS_EVAL:
      gather_shader_info_tes(device, nir, info);
      break;
   case MESA_SHADER_TESS_CTRL:
      gather_shader_info_tcs(device, nir, gfx_state, info);
      break;
   case MESA_SHADER_VERTEX:
      gather_shader_info_vs(device, nir, gfx_state, stage_key, info);
      break;
   case MESA_SHADER_MESH:
      gather_shader_info_mesh(device, nir, stage_key, info);
      break;
   default:
      if (gl_shader_stage_is_rt(nir->info.stage))
         gather_shader_info_rt(nir, info);
      break;
   }

   info->wave_size = radv_get_wave_size(device, nir->info.stage, info, stage_key);
   info->ballot_bit_size = radv_get_ballot_bit_size(device, nir->info.stage, info, stage_key);

   switch (nir->info.stage) {
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_TASK:
      info->workgroup_size = ac_compute_cs_workgroup_size(nir->info.workgroup_size, false, UINT32_MAX);

      /* Allow the compiler to assume that the shader always has full subgroups,
       * meaning that the initial EXEC mask is -1 in all waves (all lanes enabled).
       * This assumption is incorrect for ray tracing and internal (meta) shaders
       * because they can use unaligned dispatch.
       */
      info->cs.uses_full_subgroups = pipeline_type != RADV_PIPELINE_RAY_TRACING && !nir->info.internal &&
                                     (info->workgroup_size % info->wave_size) == 0;
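
      /* Example (illustrative): a non-internal 64x1x1 compute shader in wave64
       * has workgroup_size = 64 and 64 % 64 == 0, so full subgroups can be
       * assumed; a 48x1x1 shader in wave64 leaves a partial wave, so they
       * cannot.
       */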
      break;
   case MESA_SHADER_VERTEX:
      if (info->vs.as_ls || info->vs.as_es) {
         /* Set the maximum possible value by default; this will be optimized
          * during linking if possible.
          */
         info->workgroup_size = 256;
      } else {
         info->workgroup_size = info->wave_size;
      }
      break;
   case MESA_SHADER_TESS_CTRL:
      if (gfx_state->ts.patch_control_points) {
         info->workgroup_size = ac_compute_lshs_workgroup_size(
            device->physical_device->rad_info.gfx_level, MESA_SHADER_TESS_CTRL, info->num_tess_patches,
            gfx_state->ts.patch_control_points, info->tcs.tcs_vertices_out);
      } else {
         /* Set the maximum possible value when the workgroup size can't be determined. */
         info->workgroup_size = 256;
      }
      break;
   case MESA_SHADER_TESS_EVAL:
      if (info->tes.as_es) {
         /* Set the maximum possible value by default; this will be optimized
          * during linking if possible.
          */
         info->workgroup_size = 256;
      } else {
         info->workgroup_size = info->wave_size;
      }
      break;
   case MESA_SHADER_GEOMETRY:
      if (!info->is_ngg) {
         unsigned es_verts_per_subgroup = G_028A44_ES_VERTS_PER_SUBGRP(info->gs_ring_info.vgt_gs_onchip_cntl);
         unsigned gs_inst_prims_in_subgroup = G_028A44_GS_INST_PRIMS_IN_SUBGRP(info->gs_ring_info.vgt_gs_onchip_cntl);

         info->workgroup_size =
            ac_compute_esgs_workgroup_size(device->physical_device->rad_info.gfx_level, info->wave_size,
                                           es_verts_per_subgroup, gs_inst_prims_in_subgroup);
      } else {
         /* Set the maximum possible value by default; this will be optimized
          * during linking if possible.
          */
         info->workgroup_size = 256;
      }
      break;
   case MESA_SHADER_MESH:
      calc_mesh_workgroup_size(device, nir, info);
      break;
   default:
      /* FS always operates without workgroups. Workgroup sizes for the other stages are computed
       * during linking, but assume no workgroups by default.
       */
      info->workgroup_size = info->wave_size;
      break;
   }
}

static void
clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts, unsigned min_verts_per_prim, bool use_adjacency)
{
   unsigned max_reuse = max_esverts - min_verts_per_prim;
   if (use_adjacency)
      max_reuse /= 2;
   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
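
/* Rough worked example (illustrative): for triangle lists (min_verts_per_prim
 * is 3, no adjacency) with max_esverts = 128, max_reuse = 125 and *max_gsprims
 * is clamped to at most 126, because every primitive beyond the first full one
 * needs at least one vertex that is not reused.
 */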

static unsigned
radv_get_num_input_vertices(const struct radv_shader_info *es_info, const struct radv_shader_info *gs_info)
{
   if (gs_info) {
      return gs_info->gs.vertices_in;
   }

   if (es_info->stage == MESA_SHADER_TESS_EVAL) {
      if (es_info->tes.point_mode)
         return 1;
      if (es_info->tes._primitive_mode == TESS_PRIMITIVE_ISOLINES)
         return 2;
      return 3;
   }

   return 3;
}

static unsigned
radv_get_pre_rast_input_topology(const struct radv_shader_info *es_info, const struct radv_shader_info *gs_info)
{
   if (gs_info) {
      return gs_info->gs.input_prim;
   }

   if (es_info->stage == MESA_SHADER_TESS_EVAL) {
      if (es_info->tes.point_mode)
         return MESA_PRIM_POINTS;
      if (es_info->tes._primitive_mode == TESS_PRIMITIVE_ISOLINES)
         return MESA_PRIM_LINES;
      return MESA_PRIM_TRIANGLES;
   }

   return MESA_PRIM_TRIANGLES;
}

static unsigned
gfx10_get_ngg_scratch_lds_base(const struct radv_device *device, const struct radv_shader_info *es_info,
                               const struct radv_shader_info *gs_info, const struct gfx10_ngg_info *ngg_info)
{
   uint32_t scratch_lds_base;

   if (gs_info) {
      const unsigned esgs_ring_lds_bytes = ngg_info->esgs_ring_size;
      const unsigned gs_total_out_vtx_bytes = ngg_info->ngg_emit_size * 4u;

      scratch_lds_base = ALIGN(esgs_ring_lds_bytes + gs_total_out_vtx_bytes, 8u /* for the repacking code */);
   } else {
      const bool uses_instanceid = es_info->vs.needs_instance_id;
      const bool uses_primitive_id = es_info->uses_prim_id;
      const bool streamout_enabled = es_info->so.num_outputs && device->physical_device->use_ngg_streamout;
      const uint32_t num_outputs =
         es_info->stage == MESA_SHADER_VERTEX ? es_info->vs.num_outputs : es_info->tes.num_outputs;
      unsigned pervertex_lds_bytes = ac_ngg_nogs_get_pervertex_lds_size(
         es_info->stage, num_outputs, streamout_enabled, es_info->outinfo.export_prim_id, false, /* user edge flag */
         es_info->has_ngg_culling, uses_instanceid, uses_primitive_id);

      assert(ngg_info->hw_max_esverts <= 256);
      unsigned total_es_lds_bytes = pervertex_lds_bytes * ngg_info->hw_max_esverts;

      scratch_lds_base = ALIGN(total_es_lds_bytes, 8u);
   }

   return scratch_lds_base;
}
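
/* Example (illustrative numbers): with an ESGS ring of 1000 bytes and
 * ngg_emit_size = 128 dwords (512 bytes), the scratch base would land at
 * ALIGN(1000 + 512, 8) = 1512 bytes into LDS.
 */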

static void
gfx10_get_ngg_info(const struct radv_device *device, struct radv_shader_info *es_info, struct radv_shader_info *gs_info,
                   struct gfx10_ngg_info *out)
{
   const enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;
   const unsigned max_verts_per_prim = radv_get_num_input_vertices(es_info, gs_info);
   const unsigned min_verts_per_prim = gs_info ? max_verts_per_prim : 1;

   const unsigned gs_num_invocations = gs_info ? MAX2(gs_info->gs.invocations, 1) : 1;

   const unsigned input_prim = radv_get_pre_rast_input_topology(es_info, gs_info);
   const bool uses_adjacency = input_prim == MESA_PRIM_LINES_ADJACENCY || input_prim == MESA_PRIM_TRIANGLES_ADJACENCY;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space.
    *
    * TODO: We should really take the shader's internal LDS use into
    *       account. The linker will fail if the size is greater than
    *       8K dwords.
    */
   const unsigned max_lds_size = 8 * 1024 - 768;
   const unsigned target_lds_size = max_lds_size;
   unsigned esvert_lds_size = 0;
   unsigned gsprim_lds_size = 0;

   /* All these are per subgroup: */
   const unsigned min_esverts = gfx_level >= GFX11 ? 3 : /* gfx11 requires at least 1 primitive per TG */
                                   gfx_level >= GFX10_3 ? 29
                                                        : 24;
   bool max_vert_out_per_gs_instance = false;
   unsigned max_esverts_base = 128;
   unsigned max_gsprims_base = 128; /* default prim group size clamp */

   /* Hardware has the following non-natural restrictions on the value
    * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
    * the draw:
    *  - at most 252 for any line input primitive type
    *  - at most 251 for any quad input primitive type
    *  - at most 251 for triangle strips with adjacency (this happens to
    *    be the natural limit for triangle *lists* with adjacency)
    */
   max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

   if (gs_info) {
      unsigned max_out_verts_per_gsprim = gs_info->gs.vertices_out * gs_num_invocations;

      if (max_out_verts_per_gsprim <= 256) {
         if (max_out_verts_per_gsprim) {
            max_gsprims_base = MIN2(max_gsprims_base, 256 / max_out_verts_per_gsprim);
         }
      } else {
         /* Use special multi-cycling mode in which each GS
          * instance gets its own subgroup. Does not work with
          * tessellation. */
         max_vert_out_per_gs_instance = true;
         max_gsprims_base = 1;
         max_out_verts_per_gsprim = gs_info->gs.vertices_out;
      }

      esvert_lds_size = es_info->esgs_itemsize / 4;
      gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
   } else {
      /* VS and TES. */
      /* LDS size for passing data from GS to ES. */
      struct radv_streamout_info *so_info = &es_info->so;

      if (so_info->num_outputs) {
         /* Compute the same pervertex LDS size as the NGG streamout lowering pass which allocates
          * space for all outputs.
          * TODO: only alloc space for outputs that really need streamout.
          */
         const uint32_t num_outputs =
            es_info->stage == MESA_SHADER_VERTEX ? es_info->vs.num_outputs : es_info->tes.num_outputs;
         esvert_lds_size = 4 * num_outputs + 1;
      }

      /* GS stores Primitive IDs (one DWORD) into LDS at the address
       * corresponding to the ES thread of the provoking vertex. All
       * ES threads load and export PrimitiveID for their thread.
       */
      if (es_info->stage == MESA_SHADER_VERTEX && es_info->outinfo.export_prim_id)
         esvert_lds_size = MAX2(esvert_lds_size, 1);
   }
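
   /* Example (illustrative): a GS with vertices_out = 4, invocations = 2 and
    * gsvs_vertex_size = 16 bytes gives max_out_verts_per_gsprim = 8, clamps
    * max_gsprims_base to 256 / 8 = 32, and sets gsprim_lds_size to
    * (16 / 4 + 1) * 8 = 40 dwords per GS input primitive.
    */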

   unsigned max_gsprims = max_gsprims_base;
   unsigned max_esverts = max_esverts_base;

   if (esvert_lds_size)
      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
   if (gsprim_lds_size)
      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

   if (esvert_lds_size || gsprim_lds_size) {
      /* Now that we have a rough proportionality between esverts
       * and gsprims based on the primitive type, scale both of them
       * down simultaneously based on required LDS space.
       *
       * We could be smarter about this if we knew how much vertex
       * reuse to expect.
       */
      unsigned lds_total = max_esverts * esvert_lds_size + max_gsprims * gsprim_lds_size;
      if (lds_total > target_lds_size) {
         max_esverts = max_esverts * target_lds_size / lds_total;
         max_gsprims = max_gsprims * target_lds_size / lds_total;

         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      }
   }
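
   /* Worked example (illustrative): if max_esverts * esvert_lds_size +
    * max_gsprims * gsprim_lds_size came to 10240 dwords against the
    * target_lds_size of 7424 (8K minus 768), both limits would be scaled by
    * roughly 7424 / 10240 and then re-clamped so a full primitive's worth of
    * vertices still fits.
    */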

   /* Round up towards full wave sizes for better ALU utilization. */
   if (!max_vert_out_per_gs_instance) {
      unsigned orig_max_esverts;
      unsigned orig_max_gsprims;
      unsigned wavesize;

      if (gs_info) {
         wavesize = gs_info->wave_size;
      } else {
         wavesize = es_info->wave_size;
      }

      do {
         orig_max_esverts = max_esverts;
         orig_max_gsprims = max_gsprims;

         max_esverts = align(max_esverts, wavesize);
         max_esverts = MIN2(max_esverts, max_esverts_base);
         if (esvert_lds_size)
            max_esverts = MIN2(max_esverts, (max_lds_size - max_gsprims * gsprim_lds_size) / esvert_lds_size);
         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

         /* Hardware restriction: minimum value of max_esverts */
         if (gfx_level == GFX10)
            max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
         else
            max_esverts = MAX2(max_esverts, min_esverts);

         max_gsprims = align(max_gsprims, wavesize);
         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
         if (gsprim_lds_size) {
            /* Don't count unusable vertices to the LDS
             * size. Those are vertices above the maximum
             * number of vertices that can occur in the
             * workgroup, which is e.g. max_gsprims * 3
             * for triangles.
             */
            unsigned usable_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
            max_gsprims = MIN2(max_gsprims, (max_lds_size - usable_esverts * esvert_lds_size) / gsprim_lds_size);
         }
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);

      /* Verify the restriction. */
      if (gfx_level == GFX10)
         assert(max_esverts >= min_esverts - 1 + max_verts_per_prim);
      else
         assert(max_esverts >= min_esverts);
   } else {
      /* Hardware restriction: minimum value of max_esverts */
      if (gfx_level == GFX10)
         max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
      else
         max_esverts = MAX2(max_esverts, min_esverts);
   }
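
   /* Example of the GFX10 rule above (illustrative): with triangles
    * (max_verts_per_prim = 3) and min_esverts = 24, max_esverts may not drop
    * below 24 - 1 + 3 = 26, so the GE can still fit one full primitive after
    * the ES-vert check passes.
    */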

   unsigned max_out_vertices = max_vert_out_per_gs_instance ? gs_info->gs.vertices_out
                               : gs_info ? max_gsprims * gs_num_invocations * gs_info->gs.vertices_out
                                         : max_esverts;
   assert(max_out_vertices <= 256);

   unsigned prim_amp_factor = 1;
   if (gs_info) {
      /* Number of output primitives per GS input primitive after
       * GS instancing. */
      prim_amp_factor = gs_info->gs.vertices_out;
   }

   /* On Gfx10, the GE only checks against the maximum number of ES verts
    * after allocating a full GS primitive. So we need to ensure that
    * whenever this check passes, there is enough space for a full
    * primitive without vertex reuse.
    */
   if (gfx_level == GFX10)
      out->hw_max_esverts = max_esverts - max_verts_per_prim + 1;
   else
      out->hw_max_esverts = max_esverts;

   out->max_gsprims = max_gsprims;
   out->max_out_verts = max_out_vertices;
   out->prim_amp_factor = prim_amp_factor;
   out->max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
   out->ngg_emit_size = max_gsprims * gsprim_lds_size;

   /* Don't count unusable vertices. */
   out->esgs_ring_size = MIN2(max_esverts, max_gsprims * max_verts_per_prim) * esvert_lds_size * 4;

   if (gs_info) {
      out->vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
   } else {
      out->vgt_esgs_ring_itemsize = 1;
   }

   assert(out->hw_max_esverts >= min_esverts); /* HW limitation */

   out->scratch_lds_base = gfx10_get_ngg_scratch_lds_base(device, es_info, gs_info, out);

   /* Get scratch LDS usage. */
   const struct radv_shader_info *info = gs_info ? gs_info : es_info;
   const unsigned scratch_lds_size =
      ac_ngg_get_scratch_lds_size(info->stage, info->workgroup_size, info->wave_size,
                                  device->physical_device->use_ngg_streamout, info->has_ngg_culling);
   out->lds_size = out->scratch_lds_base + scratch_lds_size;

   unsigned workgroup_size =
      ac_compute_ngg_workgroup_size(max_esverts, max_gsprims * gs_num_invocations, max_out_vertices, prim_amp_factor);
   if (gs_info) {
      gs_info->workgroup_size = workgroup_size;
   }
   es_info->workgroup_size = workgroup_size;
}
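
/* Rough end-to-end sketch (illustrative, assuming ac_compute_ngg_workgroup_size
 * picks the largest thread demand among its four arguments): a wave64 VS-only
 * NGG subgroup that settled on max_esverts = 128 and max_gsprims = 128 would
 * end up with workgroup_size = 128 for the ES stage here.
 */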

static void
radv_determine_ngg_settings(struct radv_device *device, struct radv_shader_stage *es_stage,
                            struct radv_shader_stage *fs_stage, const struct radv_graphics_state_key *gfx_state)
{
   assert(es_stage->stage == MESA_SHADER_VERTEX || es_stage->stage == MESA_SHADER_TESS_EVAL);
   assert(!fs_stage || fs_stage->stage == MESA_SHADER_FRAGMENT);

   uint64_t ps_inputs_read = fs_stage ? fs_stage->nir->info.inputs_read : 0;

   unsigned num_vertices_per_prim = 0;
   if (es_stage->stage == MESA_SHADER_VERTEX) {
      num_vertices_per_prim = radv_get_num_vertices_per_prim(gfx_state);
   } else if (es_stage->stage == MESA_SHADER_TESS_EVAL) {
      num_vertices_per_prim = es_stage->nir->info.tess.point_mode                                   ? 1
                              : es_stage->nir->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES ? 2
                                                                                                    : 3;
   }

   /* TODO: Enable culling for LLVM. */
   es_stage->info.has_ngg_culling = radv_consider_culling(device->physical_device, es_stage->nir, ps_inputs_read,
                                                          num_vertices_per_prim, &es_stage->info) &&
                                    !radv_use_llvm_for_stage(device, es_stage->stage);

   nir_function_impl *impl = nir_shader_get_entrypoint(es_stage->nir);
   es_stage->info.has_ngg_early_prim_export = exec_list_is_singular(&impl->body);

   /* NGG passthrough mode should be disabled when culling is enabled and when
    * the vertex shader exports the primitive ID.
    */
   es_stage->info.is_ngg_passthrough = !es_stage->info.has_ngg_culling && !(es_stage->stage == MESA_SHADER_VERTEX &&
                                                                            es_stage->info.outinfo.export_prim_id);
}
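
/* Illustrative outcomes of the rules above: a VS that neither culls nor
 * exports the primitive ID stays on the NGG passthrough path; enabling NGG
 * culling, or exporting the primitive ID from a VS, forces the full
 * (non-passthrough) NGG path.
 */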

static void
radv_link_shaders_info(struct radv_device *device, struct radv_shader_stage *producer,
                       struct radv_shader_stage *consumer, const struct radv_graphics_state_key *gfx_state)
{
   /* Export primitive ID and clip/cull distances if read by the FS, or export unconditionally when
    * the next stage is unknown (with graphics pipeline library).
    */
   if (producer->info.next_stage == MESA_SHADER_FRAGMENT ||
       !(gfx_state->lib_flags & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)) {
      struct radv_vs_output_info *outinfo = &producer->info.outinfo;
      const bool ps_prim_id_in = !consumer || consumer->info.ps.prim_id_input;
      const bool ps_clip_dists_in = !consumer || !!consumer->info.ps.num_input_clips_culls;

      if (ps_prim_id_in && (producer->stage == MESA_SHADER_VERTEX || producer->stage == MESA_SHADER_TESS_EVAL)) {
         /* Mark the primitive ID as output when it's implicitly exported by VS or TES. */
         if (outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED)
            outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;

         outinfo->export_prim_id = true;
      }

      if (ps_clip_dists_in) {
         if (producer->nir->info.outputs_written & VARYING_BIT_CLIP_DIST0)
            outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0] = outinfo->param_exports++;
         if (producer->nir->info.outputs_written & VARYING_BIT_CLIP_DIST1)
            outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1] = outinfo->param_exports++;
      }
   }

   if (producer->stage == MESA_SHADER_VERTEX || producer->stage == MESA_SHADER_TESS_EVAL) {
      /* Compute NGG info (GFX10+) or GS info. */
      if (producer->info.is_ngg) {
         struct radv_shader_stage *gs_stage = consumer && consumer->stage == MESA_SHADER_GEOMETRY ? consumer : NULL;
         struct gfx10_ngg_info *out = gs_stage ? &gs_stage->info.ngg_info : &producer->info.ngg_info;

         /* Determine other NGG settings like culling for VS or TES without GS. */
         if (!gs_stage) {
            radv_determine_ngg_settings(device, producer, consumer, gfx_state);
         }

         gfx10_get_ngg_info(device, &producer->info, gs_stage ? &gs_stage->info : NULL, out);
      } else if (consumer && consumer->stage == MESA_SHADER_GEOMETRY) {
         struct radv_shader_info *gs_info = &consumer->info;
         struct radv_shader_info *es_info = &producer->info;

         es_info->workgroup_size = gs_info->workgroup_size;
      }
   }

   if (producer->stage == MESA_SHADER_VERTEX && consumer && consumer->stage == MESA_SHADER_TESS_CTRL) {
      struct radv_shader_stage *vs_stage = producer;
      struct radv_shader_stage *tcs_stage = consumer;

      if (gfx_state->ts.patch_control_points) {
         vs_stage->info.workgroup_size = ac_compute_lshs_workgroup_size(
            device->physical_device->rad_info.gfx_level, MESA_SHADER_VERTEX, tcs_stage->info.num_tess_patches,
            gfx_state->ts.patch_control_points, tcs_stage->info.tcs.tcs_vertices_out);

         if (!radv_use_llvm_for_stage(device, MESA_SHADER_VERTEX)) {
            /* When the number of TCS input and output vertices is the same (typically 3):
             * - There is an equal number of LS and HS invocations
             * - In case of merged LSHS shaders, the LS and HS halves of the shader always process
             *   the exact same vertex. We can use this knowledge to optimize them.
             *
             * We don't set tcs_in_out_eq if the float controls differ because that might involve
             * different float modes for the same block and our optimizer doesn't handle an
             * instruction dominating another with a different mode.
             */
            vs_stage->info.vs.tcs_in_out_eq =
               device->physical_device->rad_info.gfx_level >= GFX9 &&
               gfx_state->ts.patch_control_points == tcs_stage->info.tcs.tcs_vertices_out &&
               vs_stage->nir->info.float_controls_execution_mode == tcs_stage->nir->info.float_controls_execution_mode;

            if (vs_stage->info.vs.tcs_in_out_eq)
               vs_stage->info.vs.tcs_temp_only_input_mask =
                  tcs_stage->nir->info.inputs_read & vs_stage->nir->info.outputs_written &
                  ~tcs_stage->nir->info.tess.tcs_cross_invocation_inputs_read &
                  ~tcs_stage->nir->info.inputs_read_indirectly & ~vs_stage->nir->info.outputs_accessed_indirectly;
         }
      }
   }
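
   /* Example of the mask above (illustrative, and assuming the usual reading
    * of tcs_temp_only_input_mask): if the VS writes outputs 0-3 and the TCS
    * reads inputs 0-2 only from its own invocation and never indirectly, the
    * mask covers outputs 0-2, which can then stay in temporaries instead of
    * being routed through LDS.
    */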

   /* Copy shader info between TCS<->TES. */
   if (producer->stage == MESA_SHADER_TESS_CTRL && consumer && consumer->stage == MESA_SHADER_TESS_EVAL) {
      struct radv_shader_stage *tcs_stage = producer;
      struct radv_shader_stage *tes_stage = consumer;

      tcs_stage->info.has_epilog = false;
      tcs_stage->info.tcs.tes_reads_tess_factors = tes_stage->info.tes.reads_tess_factors;
      tcs_stage->info.tcs.tes_inputs_read = tes_stage->nir->info.inputs_read;
      tcs_stage->info.tcs.tes_patch_inputs_read = tes_stage->nir->info.patch_inputs_read;

      if (gfx_state->ts.patch_control_points)
         tes_stage->info.num_tess_patches = tcs_stage->info.num_tess_patches;
   }
}

static void
radv_nir_shader_info_merge(const struct radv_shader_stage *src, struct radv_shader_stage *dst)
{
   const struct radv_shader_info *src_info = &src->info;
   struct radv_shader_info *dst_info = &dst->info;

   assert((src->stage == MESA_SHADER_VERTEX && dst->stage == MESA_SHADER_TESS_CTRL) ||
          (src->stage == MESA_SHADER_VERTEX && dst->stage == MESA_SHADER_GEOMETRY) ||
          (src->stage == MESA_SHADER_TESS_EVAL && dst->stage == MESA_SHADER_GEOMETRY));

   dst_info->loads_push_constants |= src_info->loads_push_constants;
   dst_info->loads_dynamic_offsets |= src_info->loads_dynamic_offsets;
   dst_info->desc_set_used_mask |= src_info->desc_set_used_mask;
   dst_info->uses_view_index |= src_info->uses_view_index;
   dst_info->uses_prim_id |= src_info->uses_prim_id;
   dst_info->inline_push_constant_mask |= src_info->inline_push_constant_mask;

   /* Only inline all push constants if both stages allow it. */
   dst_info->can_inline_all_push_constants &= src_info->can_inline_all_push_constants;

   if (src->stage == MESA_SHADER_VERTEX) {
      dst_info->vs = src_info->vs;
   } else {
      dst_info->tes = src_info->tes;
   }

   if (dst->stage == MESA_SHADER_GEOMETRY)
      dst_info->gs.es_type = src->stage;
}

static const gl_shader_stage graphics_shader_order[] = {
   MESA_SHADER_VERTEX, MESA_SHADER_TESS_CTRL, MESA_SHADER_TESS_EVAL, MESA_SHADER_GEOMETRY,

   MESA_SHADER_TASK,   MESA_SHADER_MESH,
};

void
radv_nir_shader_info_link(struct radv_device *device, const struct radv_graphics_state_key *gfx_state,
                          struct radv_shader_stage *stages)
{
   /* Walk backwards to link */
   struct radv_shader_stage *next_stage = stages[MESA_SHADER_FRAGMENT].nir ? &stages[MESA_SHADER_FRAGMENT] : NULL;

   for (int i = ARRAY_SIZE(graphics_shader_order) - 1; i >= 0; i--) {
      gl_shader_stage s = graphics_shader_order[i];
      if (!stages[s].nir)
         continue;

      radv_link_shaders_info(device, &stages[s], next_stage, gfx_state);
      next_stage = &stages[s];
   }

   if (device->physical_device->rad_info.gfx_level >= GFX9) {
      /* Merge shader info for VS+TCS. */
      if (stages[MESA_SHADER_VERTEX].nir && stages[MESA_SHADER_TESS_CTRL].nir) {
         radv_nir_shader_info_merge(&stages[MESA_SHADER_VERTEX], &stages[MESA_SHADER_TESS_CTRL]);
      }

      /* Merge shader info for VS+GS or TES+GS. */
      if ((stages[MESA_SHADER_VERTEX].nir || stages[MESA_SHADER_TESS_EVAL].nir) && stages[MESA_SHADER_GEOMETRY].nir) {
         gl_shader_stage pre_stage = stages[MESA_SHADER_TESS_EVAL].nir ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;

         radv_nir_shader_info_merge(&stages[pre_stage], &stages[MESA_SHADER_GEOMETRY]);
      }
   }
}

enum ac_hw_stage
radv_select_hw_stage(const struct radv_shader_info *const info, const enum amd_gfx_level gfx_level)
{
   switch (info->stage) {
   case MESA_SHADER_VERTEX:
      if (info->is_ngg)
         return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
      else if (info->vs.as_es)
         return gfx_level >= GFX9 ? AC_HW_LEGACY_GEOMETRY_SHADER : AC_HW_EXPORT_SHADER;
      else if (info->vs.as_ls)
         return gfx_level >= GFX9 ? AC_HW_HULL_SHADER : AC_HW_LOCAL_SHADER;
      else
         return AC_HW_VERTEX_SHADER;
   case MESA_SHADER_TESS_EVAL:
      if (info->is_ngg)
         return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
      else if (info->tes.as_es)
         return gfx_level >= GFX9 ? AC_HW_LEGACY_GEOMETRY_SHADER : AC_HW_EXPORT_SHADER;
      else
         return AC_HW_VERTEX_SHADER;
   case MESA_SHADER_TESS_CTRL:
      return AC_HW_HULL_SHADER;
   case MESA_SHADER_GEOMETRY:
      if (info->is_ngg)
         return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
      else
         return AC_HW_LEGACY_GEOMETRY_SHADER;
   case MESA_SHADER_MESH:
      return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
   case MESA_SHADER_FRAGMENT:
      return AC_HW_PIXEL_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
   case MESA_SHADER_TASK:
   case MESA_SHADER_RAYGEN:
   case MESA_SHADER_ANY_HIT:
   case MESA_SHADER_CLOSEST_HIT:
   case MESA_SHADER_MISS:
   case MESA_SHADER_INTERSECTION:
   case MESA_SHADER_CALLABLE:
      return AC_HW_COMPUTE_SHADER;
   default:
      unreachable("Unsupported HW stage");
   }
}
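
/* Examples of the mapping above: on GFX9+ a VS used as the first half of a
 * merged VS+TCS pipeline (vs.as_ls) selects AC_HW_HULL_SHADER, while the same
 * shader on GFX8 would run as the separate AC_HW_LOCAL_SHADER; any NGG-enabled
 * stage maps to AC_HW_NEXT_GEN_GEOMETRY_SHADER regardless of its API stage.
 */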