/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir/nir.h"
#include "nir/nir_xfb_info.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "ac_exp_param.h"

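/* Record that the descriptor set containing this sampler/image variable is
 * referenced by the shader. */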
static void
mark_sampler_desc(const nir_variable *var, struct radv_shader_info *info)
{
   info->desc_set_used_mask |= (1u << var->data.descriptor_set);
}

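/* Record which components of each vertex shader input location are actually
 * read (info->vs.input_usage_mask). */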
static void
gather_intrinsic_load_input_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                                 struct radv_shader_info *info)
{
   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX: {
      unsigned idx = nir_intrinsic_io_semantics(instr).location;
      unsigned component = nir_intrinsic_component(instr);
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

      info->vs.input_usage_mask[idx] |= mask << component;
      break;
   }
   default:
      break;
   }
}

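/* Expand a 4-bit write mask of 64-bit components into the equivalent mask of
 * 32-bit components: each 64-bit component occupies two 32-bit slots, so each
 * set bit becomes a pair of set bits. For example, a mask of 0b0101 (x and z
 * written) widens to 0b00110011. */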
static uint32_t
widen_writemask(uint32_t wrmask)
{
   uint32_t new_wrmask = 0;
   for (unsigned i = 0; i < 4; i++)
      new_wrmask |= (wrmask & (1 << i) ? 0x3 : 0x0) << (i * 2);
   return new_wrmask;
}

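/* Flag fragment shaders that have memory side effects (image/SSBO/global
 * stores or atomics); other stages are unaffected. */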
static void
set_writes_memory(const nir_shader *nir, struct radv_shader_info *info)
{
   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      info->ps.writes_memory = true;
}

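/* Record which components of each output slot are written. 64-bit stores are
 * widened first since each channel consumes two 32-bit components, and stores
 * that cover multiple slots spread their write mask across all of them. */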
static void
gather_intrinsic_store_output_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                                   struct radv_shader_info *info)
{
   unsigned idx = nir_intrinsic_base(instr);
   unsigned num_slots = nir_intrinsic_io_semantics(instr).num_slots;
   unsigned component = nir_intrinsic_component(instr);
   unsigned write_mask = nir_intrinsic_write_mask(instr);
   uint8_t *output_usage_mask = NULL;

   if (instr->src[0].ssa->bit_size == 64)
      write_mask = widen_writemask(write_mask);

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      output_usage_mask = info->vs.output_usage_mask;
      break;
   case MESA_SHADER_TESS_EVAL:
      output_usage_mask = info->tes.output_usage_mask;
      break;
   case MESA_SHADER_GEOMETRY:
      output_usage_mask = info->gs.output_usage_mask;
      break;
   default:
      break;
   }

   if (output_usage_mask) {
      for (unsigned i = 0; i < num_slots; i++) {
         output_usage_mask[idx + i] |= ((write_mask >> (i * 4)) & 0xf) << component;
      }
   }
}

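/* Track push constant usage: the byte range accessed with constant offsets,
 * whether any access uses a non-constant (indirect) offset, and whether all
 * loads are 32-bit. */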
static void
gather_push_constant_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                          struct radv_shader_info *info)
{
   int base = nir_intrinsic_base(instr);

   if (!nir_src_is_const(instr->src[0])) {
      info->has_indirect_push_constants = true;
   } else {
      uint32_t min = base + nir_src_as_uint(instr->src[0]);
      uint32_t max = min + instr->num_components * 4;

      info->max_push_constant_used = MAX2(max, info->max_push_constant_used);
      info->min_push_constant_used = MIN2(min, info->min_push_constant_used);
   }

   if (instr->dest.ssa.bit_size != 32)
      info->has_only_32bit_push_constants = false;

   info->loads_push_constants = true;
}

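/* Main per-intrinsic walk: record which system values, descriptors, inputs
 * and outputs the shader uses, and whether it has memory side effects. */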
static void
gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                      struct radv_shader_info *info)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_barycentric_sample:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_at_offset: {
      enum glsl_interp_mode mode = nir_intrinsic_interp_mode(instr);
      switch (mode) {
      case INTERP_MODE_SMOOTH:
      case INTERP_MODE_NONE:
         if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_sample ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_offset)
            info->ps.reads_persp_center = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
            info->ps.reads_persp_centroid = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
            info->ps.reads_persp_sample = true;
         break;
      case INTERP_MODE_NOPERSPECTIVE:
         if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_sample ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_offset)
            info->ps.reads_linear_center = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
            info->ps.reads_linear_centroid = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
            info->ps.reads_linear_sample = true;
         break;
      default:
         break;
      }
      if (instr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
         info->ps.needs_sample_positions = true;
      break;
   }
   case nir_intrinsic_load_barycentric_model:
      info->ps.reads_barycentric_model = true;
      break;
   case nir_intrinsic_load_draw_id:
      info->vs.needs_draw_id = true;
      break;
   case nir_intrinsic_load_base_instance:
      info->vs.needs_base_instance = true;
      break;
   case nir_intrinsic_load_instance_id:
      info->vs.needs_instance_id = true;
      break;
   case nir_intrinsic_load_num_workgroups:
      info->cs.uses_grid_size = true;
      break;
   case nir_intrinsic_load_ray_launch_size:
      info->cs.uses_ray_launch_size = true;
      break;
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_workgroup_id: {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
      while (mask) {
         unsigned i = u_bit_scan(&mask);

         if (instr->intrinsic == nir_intrinsic_load_workgroup_id)
            info->cs.uses_block_id[i] = true;
         else
            info->cs.uses_thread_id[i] = true;
      }
      break;
   }
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_subgroup_id:
   case nir_intrinsic_load_num_subgroups:
      info->cs.uses_local_invocation_idx = true;
      break;
   case nir_intrinsic_load_sample_mask_in:
      info->ps.reads_sample_mask_in = true;
      break;
   case nir_intrinsic_load_sample_id:
      info->ps.reads_sample_id = true;
      break;
   case nir_intrinsic_load_frag_shading_rate:
      info->ps.reads_frag_shading_rate = true;
      break;
   case nir_intrinsic_load_front_face:
      info->ps.reads_front_face = true;
      break;
   case nir_intrinsic_load_frag_coord:
      info->ps.reads_frag_coord_mask = nir_ssa_def_components_read(&instr->dest.ssa);
      break;
   case nir_intrinsic_load_sample_pos:
      info->ps.reads_sample_pos_mask = nir_ssa_def_components_read(&instr->dest.ssa);
      break;
   case nir_intrinsic_load_view_index:
      info->uses_view_index = true;
      break;
   case nir_intrinsic_load_invocation_id:
      info->uses_invocation_id = true;
      break;
   case nir_intrinsic_load_primitive_id:
      info->uses_prim_id = true;
      break;
   case nir_intrinsic_load_push_constant:
      gather_push_constant_info(nir, instr, info);
      break;
   case nir_intrinsic_vulkan_resource_index:
      info->desc_set_used_mask |= (1u << nir_intrinsic_desc_set(instr));
      break;
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_sparse_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_fmin:
   case nir_intrinsic_image_deref_atomic_fmax:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples: {
      nir_variable *var =
         nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
      mark_sampler_desc(var, info);

      if (instr->intrinsic == nir_intrinsic_image_deref_store ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_add ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_imin ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_umin ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_imax ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_umax ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_and ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_or ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_xor ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_exchange ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_fmin ||
          instr->intrinsic == nir_intrinsic_image_deref_atomic_fmax) {
         set_writes_memory(nir, info);
      }
      break;
   }
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_store_global:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_fmin:
   case nir_intrinsic_global_atomic_fmax:
      set_writes_memory(nir, info);
      break;
   case nir_intrinsic_load_input:
      gather_intrinsic_load_input_info(nir, instr, info);
      break;
   case nir_intrinsic_store_output:
      gather_intrinsic_store_output_info(nir, instr, info);
      break;
   case nir_intrinsic_load_sbt_amd:
      info->cs.uses_sbt = true;
      break;
   default:
      break;
   }
}

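/* Mark the descriptor sets referenced by texture and sampler derefs. */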
static void
gather_tex_info(const nir_shader *nir, const nir_tex_instr *instr, struct radv_shader_info *info)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
         break;
      case nir_tex_src_sampler_deref:
         mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
         break;
      default:
         break;
      }
   }
}

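/* Dispatch on instruction type for a single basic block. */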
static void
gather_info_block(const nir_shader *nir, const nir_block *block, struct radv_shader_info *info)
{
   nir_foreach_instr (instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir, nir_instr_as_intrinsic(instr), info);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir, nir_instr_as_tex(instr), info);
         break;
      default:
         break;
      }
   }
}

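/* Per vertex input variable: request instance/base-instance IDs for
 * instance-rate attributes and record which vertex buffer descriptors
 * (per attribute or per binding) the shader needs. */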
static void
gather_info_input_decl_vs(const nir_shader *nir, const nir_variable *var,
                          const struct radv_pipeline_key *key, struct radv_shader_info *info)
{
   unsigned attrib_count = glsl_count_attribute_slots(var->type, true);

   for (unsigned i = 0; i < attrib_count; ++i) {
      unsigned attrib_index = var->data.location + i - VERT_ATTRIB_GENERIC0;

      if (key->vs.instance_rate_inputs & (1u << attrib_index)) {
         info->vs.needs_instance_id = true;
         info->vs.needs_base_instance = true;
      }

      if (info->vs.use_per_attribute_vb_descs)
         info->vs.vb_desc_usage_mask |= 1u << attrib_index;
      else
         info->vs.vb_desc_usage_mask |= 1u << key->vs.vertex_attribute_bindings[attrib_index];
   }
}

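/* Recursively walk a fragment input's type and set ps.float16_shaded_mask for
 * every attribute slot that holds 16-bit data. */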
static void
mark_16bit_ps_input(struct radv_shader_info *info, const struct glsl_type *type, int location)
{
   if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
      unsigned attrib_count = glsl_count_attribute_slots(type, false);
      if (glsl_type_is_16bit(type)) {
         info->ps.float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
      }
   } else if (glsl_type_is_array(type)) {
      unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
      for (unsigned i = 0; i < glsl_get_length(type); ++i) {
         mark_16bit_ps_input(info, glsl_get_array_element(type), location + i * stride);
      }
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         mark_16bit_ps_input(info, glsl_get_struct_field(type, i), location);
         location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
      }
   }
}

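/* Per fragment input variable: note special varyings (point coord, primitive
 * ID, layer, clip/cull distances, viewport index), build the flat/explicit
 * interpolation masks, and record which generic varyings are read. */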
static void
gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
                          struct radv_shader_info *info)
{
   unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
   int idx = var->data.location;

   switch (idx) {
   case VARYING_SLOT_PNTC:
      info->ps.has_pcoord = true;
      break;
   case VARYING_SLOT_PRIMITIVE_ID:
      info->ps.prim_id_input = true;
      break;
   case VARYING_SLOT_LAYER:
      info->ps.layer_input = true;
      break;
   case VARYING_SLOT_CLIP_DIST0:
   case VARYING_SLOT_CLIP_DIST1:
      info->ps.num_input_clips_culls += attrib_count;
      break;
   case VARYING_SLOT_VIEWPORT:
      info->ps.viewport_index_input = true;
      break;
   default:
      break;
   }

   if (var->data.compact) {
      unsigned component_count = var->data.location_frac + glsl_get_length(var->type);
      attrib_count = (component_count + 3) / 4;
   } else {
      mark_16bit_ps_input(info, var->type, var->data.driver_location);
   }

   uint64_t mask = ((1ull << attrib_count) - 1);

   if (var->data.interpolation == INTERP_MODE_FLAT)
      info->ps.flat_shaded_mask |= mask << var->data.driver_location;
   if (var->data.interpolation == INTERP_MODE_EXPLICIT)
      info->ps.explicit_shaded_mask |= mask << var->data.driver_location;

   if (var->data.location >= VARYING_SLOT_VAR0)
      info->ps.input_mask |= mask << (var->data.location - VARYING_SLOT_VAR0);
}

static void
gather_info_input_decl(const nir_shader *nir, const nir_variable *var,
                       const struct radv_pipeline_key *key, struct radv_shader_info *info)
{
   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      gather_info_input_decl_vs(nir, var, key, info);
      break;
   case MESA_SHADER_FRAGMENT:
      gather_info_input_decl_ps(nir, var, info);
      break;
   default:
      break;
   }
}

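/* Per fragment output variable: record depth/stencil/sample-mask writes. */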
static void
gather_info_output_decl_ps(const nir_shader *nir, const nir_variable *var,
                           struct radv_shader_info *info)
{
   int idx = var->data.location;

   switch (idx) {
   case FRAG_RESULT_DEPTH:
      info->ps.writes_z = true;
      break;
   case FRAG_RESULT_STENCIL:
      info->ps.writes_stencil = true;
      break;
   case FRAG_RESULT_SAMPLE_MASK:
      info->ps.writes_sample_mask = true;
      break;
   default:
      break;
   }
}

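/* Per geometry output variable: accumulate per-stream component counts and
 * remember which vertex stream each output location belongs to. */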
static void
gather_info_output_decl_gs(const nir_shader *nir, const nir_variable *var,
                           struct radv_shader_info *info)
{
   unsigned num_components = glsl_get_component_slots(var->type);
   unsigned stream = var->data.stream;
   unsigned idx = var->data.location;

   assert(stream < 4);

   info->gs.max_stream = MAX2(info->gs.max_stream, stream);
   info->gs.num_stream_output_components[stream] += num_components;
   info->gs.output_streams[idx] = stream;
}

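/* Return the outinfo used for position/parameter exports for this stage, or
 * NULL when the stage does not export positions (VS/TES compiled as LS/ES).
 * The geometry stage shares info->vs.outinfo. */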
static struct radv_vs_output_info *
get_vs_output_info(const nir_shader *nir, struct radv_shader_info *info)
{

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      if (!info->vs.as_ls && !info->vs.as_es)
         return &info->vs.outinfo;
      break;
   case MESA_SHADER_GEOMETRY:
      return &info->vs.outinfo;
      break;
   case MESA_SHADER_TESS_EVAL:
      if (!info->tes.as_es)
         return &info->tes.outinfo;
      break;
   default:
      break;
   }

   return NULL;
}

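/* Per output variable dispatch; for stages that export positions, also record
 * clip/cull distance masks and point size / viewport index / layer / shading
 * rate writes. */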
static void
gather_info_output_decl(const nir_shader *nir, const nir_variable *var,
                        struct radv_shader_info *info)
{
   struct radv_vs_output_info *vs_info = get_vs_output_info(nir, info);

   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT:
      gather_info_output_decl_ps(nir, var, info);
      break;
   case MESA_SHADER_VERTEX:
      break;
   case MESA_SHADER_GEOMETRY:
      gather_info_output_decl_gs(nir, var, info);
      break;
   case MESA_SHADER_TESS_EVAL:
      break;
   default:
      break;
   }

   if (vs_info) {
      switch (var->data.location) {
      case VARYING_SLOT_CLIP_DIST0:
         vs_info->clip_dist_mask = (1 << nir->info.clip_distance_array_size) - 1;
         vs_info->cull_dist_mask = (1 << nir->info.cull_distance_array_size) - 1;
         vs_info->cull_dist_mask <<= nir->info.clip_distance_array_size;
         break;
      case VARYING_SLOT_PSIZ:
         vs_info->writes_pointsize = true;
         break;
      case VARYING_SLOT_VIEWPORT:
         vs_info->writes_viewport_index = true;
         break;
      case VARYING_SLOT_LAYER:
         vs_info->writes_layer = true;
         break;
      case VARYING_SLOT_PRIMITIVE_SHADING_RATE:
         vs_info->writes_primitive_shading_rate = true;
         break;
      default:
         break;
      }
   }
}

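/* Convert the NIR transform feedback info into the radv streamout layout:
 * one entry per output with its buffer, stream, offset and component mask,
 * plus per-buffer strides (in DWORDs) and the enabled buffer mask. */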
static void
gather_xfb_info(const nir_shader *nir, struct radv_shader_info *info)
{
   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
   struct radv_streamout_info *so = &info->so;

   if (!xfb)
      return;

   assert(xfb->output_count < MAX_SO_OUTPUTS);
   so->num_outputs = xfb->output_count;

   for (unsigned i = 0; i < xfb->output_count; i++) {
      struct radv_stream_output *output = &so->outputs[i];

      output->buffer = xfb->outputs[i].buffer;
      output->stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
      output->offset = xfb->outputs[i].offset;
      output->location = xfb->outputs[i].location;
      output->component_mask = xfb->outputs[i].component_mask;

      so->enabled_stream_buffers_mask |= (1 << output->buffer) << (output->stream * 4);
   }

   for (unsigned i = 0; i < NIR_MAX_XFB_BUFFERS; i++) {
      so->strides[i] = xfb->buffers[i].stride / 4;
   }

   ralloc_free(xfb);
}

void
radv_nir_shader_info_init(struct radv_shader_info *info)
{
   /* Assume that shaders only have 32-bit push constants by default. */
   info->min_push_constant_used = UINT8_MAX;
   info->has_only_32bit_push_constants = true;
}

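/* Gather everything the backend needs to know about this shader: walk the
 * input/output variables and every instruction of the main function and fill
 * the given radv_shader_info. */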
void
radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
                          const struct radv_pipeline_layout *layout,
                          const struct radv_pipeline_key *pipeline_key,
                          struct radv_shader_info *info)
{
   struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&nir->functions);

   if (layout && layout->dynamic_offset_count &&
       (layout->dynamic_shader_stages & mesa_to_vk_shader_stage(nir->info.stage))) {
      info->loads_push_constants = true;
      info->loads_dynamic_offsets = true;
   }

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      if (pipeline_key->vs.dynamic_input_state && nir->info.inputs_read) {
         info->vs.has_prolog = true;
         info->vs.dynamic_inputs = true;
      }

      /* Use per-attribute vertex descriptors to prevent faults and
       * for correct bounds checking.
       */
      info->vs.use_per_attribute_vb_descs = device->robust_buffer_access || info->vs.dynamic_inputs;
   }

   /* We have to ensure consistent input register assignments between the main shader and the
    * prolog.
    */
   info->vs.needs_instance_id |= info->vs.has_prolog;
   info->vs.needs_base_instance |= info->vs.has_prolog;
   info->vs.needs_draw_id |= info->vs.has_prolog;

   nir_foreach_shader_in_variable (variable, nir)
      gather_info_input_decl(nir, variable, pipeline_key, info);

   nir_foreach_block (block, func->impl) {
      gather_info_block(nir, block, info);
   }

   nir_foreach_shader_out_variable (variable, nir)
      gather_info_output_decl(nir, variable, info);

   if (nir->info.stage == MESA_SHADER_VERTEX || nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      gather_xfb_info(nir, info);

   /* Make sure to export the LayerID if the subpass uses multiview. */
   if (pipeline_key->has_multiview_view_index) {
      switch (nir->info.stage) {
      case MESA_SHADER_VERTEX:
         info->vs.outinfo.writes_layer = true;
         break;
      case MESA_SHADER_TESS_EVAL:
         info->tes.outinfo.writes_layer = true;
         break;
      case MESA_SHADER_GEOMETRY:
         info->vs.outinfo.writes_layer = true;
         break;
      default:
         break;
      }
   }

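   /* Count the position exports (position, the misc vector holding point size /
    * layer / viewport index / shading rate, and up to two clip/cull distance
    * vectors) and assign a parameter export index to each exported varying. */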
   struct radv_vs_output_info *outinfo = get_vs_output_info(nir, info);
   if (outinfo) {
      bool writes_primitive_shading_rate =
         outinfo->writes_primitive_shading_rate || device->force_vrs != RADV_FORCE_VRS_NONE;
      int pos_written = 0x1;

      if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer ||
          writes_primitive_shading_rate)
         pos_written |= 1 << 1;

      unsigned num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
      unsigned num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

      if (num_clip_distances + num_cull_distances > 0)
         pos_written |= 1 << 2;
      if (num_clip_distances + num_cull_distances > 4)
         pos_written |= 1 << 3;

      outinfo->pos_exports = util_bitcount(pos_written);

      memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
             sizeof(outinfo->vs_output_param_offset));
      outinfo->param_exports = 0;

      uint64_t mask = nir->info.outputs_written;
      while (mask) {
         int idx = u_bit_scan64(&mask);
         if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER ||
             idx == VARYING_SLOT_PRIMITIVE_ID || idx == VARYING_SLOT_VIEWPORT ||
             ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) &&
              outinfo->export_clip_dists)) {
            if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
               outinfo->vs_output_param_offset[idx] = outinfo->param_exports++;
         }
      }
      if (outinfo->writes_layer &&
          outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
         /* when ctx->options->key.has_multiview_view_index = true, the layer
          * variable isn't declared in NIR and it's isel's job to get the layer */
         outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
      }

      if (outinfo->export_prim_id) {
         assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
         outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
      }
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      info->ps.num_interp = nir->num_inputs;

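   /* Copy per-stage execution parameters (compute block size, FS discard and
    * depth layout, GS primitive counts, tessellation modes) out of the NIR
    * shader info. */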
   switch (nir->info.stage) {
   case MESA_SHADER_COMPUTE:
      for (int i = 0; i < 3; ++i)
         info->cs.block_size[i] = nir->info.workgroup_size[i];
      break;
   case MESA_SHADER_FRAGMENT:
      info->ps.can_discard = nir->info.fs.uses_discard;
      info->ps.early_fragment_test = nir->info.fs.early_fragment_tests;
      info->ps.post_depth_coverage = nir->info.fs.post_depth_coverage;
      info->ps.depth_layout = nir->info.fs.depth_layout;
      info->ps.uses_sample_shading = nir->info.fs.uses_sample_shading;
      break;
   case MESA_SHADER_GEOMETRY:
      info->gs.vertices_in = nir->info.gs.vertices_in;
      info->gs.vertices_out = nir->info.gs.vertices_out;
      info->gs.output_prim = nir->info.gs.output_primitive;
      info->gs.invocations = nir->info.gs.invocations;
      break;
   case MESA_SHADER_TESS_EVAL:
      info->tes.primitive_mode = nir->info.tess.primitive_mode;
      info->tes.spacing = nir->info.tess.spacing;
      info->tes.ccw = nir->info.tess.ccw;
      info->tes.point_mode = nir->info.tess.point_mode;
      break;
   case MESA_SHADER_TESS_CTRL:
      info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
      break;
   case MESA_SHADER_VERTEX:
      break;
   default:
      break;
   }

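   /* GS->VS ring layout: each emitted vertex needs 16 bytes per written output
    * slot (plus one extra slot when more than four clip/cull distances are
    * used); the maximum emit size covers all vertices_out vertices. */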
   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      unsigned add_clip =
         nir->info.clip_distance_array_size + nir->info.cull_distance_array_size > 4;
      info->gs.gsvs_vertex_size = (util_bitcount64(nir->info.outputs_written) + add_clip) * 16;
      info->gs.max_gsvs_emit_size = info->gs.gsvs_vertex_size * nir->info.gs.vertices_out;
   }

   /* Compute the ESGS item size for VS or TES as ES. */
   if ((nir->info.stage == MESA_SHADER_VERTEX && info->vs.as_es) ||
       (nir->info.stage == MESA_SHADER_TESS_EVAL && info->tes.as_es)) {
      struct radv_es_output_info *es_info =
         nir->info.stage == MESA_SHADER_VERTEX ? &info->vs.es_info : &info->tes.es_info;
      uint32_t num_outputs_written = nir->info.stage == MESA_SHADER_VERTEX
                                        ? info->vs.num_linked_outputs
                                        : info->tes.num_linked_outputs;
      es_info->esgs_itemsize = num_outputs_written * 16;
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      bool uses_persp_or_linear_interp = info->ps.reads_persp_center ||
                                         info->ps.reads_persp_centroid ||
                                         info->ps.reads_persp_sample ||
                                         info->ps.reads_linear_center ||
                                         info->ps.reads_linear_centroid ||
                                         info->ps.reads_linear_sample;

      info->ps.allow_flat_shading =
         !(uses_persp_or_linear_interp || info->ps.needs_sample_positions ||
           info->ps.writes_memory || nir->info.fs.needs_quad_helper_invocations ||
           BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRAG_COORD) ||
           BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_POINT_COORD) ||
           BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
           BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS) ||
           BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN) ||
           BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_HELPER_INVOCATION));

      info->ps.spi_ps_input = radv_compute_spi_ps_input(device, info);
   }
}