/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef SI_STATE_H
#define SI_STATE_H

#include "si_pm4.h"
#include "util/format/u_format.h"
#include "util/bitset.h"

#ifdef __cplusplus
extern "C" {
#endif

#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_FRAGMENT + 1)
#define SI_NUM_SHADERS (PIPE_SHADER_COMPUTE + 1)

#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS 32 /* OpenGL texture units per shader */
#define SI_NUM_CONST_BUFFERS 16
#define SI_NUM_IMAGES 16
#define SI_NUM_IMAGE_SLOTS (SI_NUM_IMAGES * 2) /* the second half are FMASK slots */
#define SI_NUM_SHADER_BUFFERS 32

struct si_screen;
struct si_shader;
struct si_shader_ctx_state;
struct si_shader_selector;
struct si_texture;
struct si_qbo_state;
struct legacy_surf_level;
struct pb_slab_entry;

struct si_state_blend {
   struct si_pm4_state pm4;
   uint32_t cb_target_mask;
   /* Each render target uses 4 bits: set to 0xf if the target is enabled,
    * 0x0 otherwise. ANDed with spi_shader_col_format. See the example
    * after this struct.
    */
   unsigned cb_target_enabled_4bit;
   unsigned blend_enable_4bit;
   unsigned need_src_alpha_4bit;
   unsigned commutative_4bit;
   unsigned dcc_msaa_corruption_4bit;
   bool alpha_to_coverage : 1;
   bool alpha_to_one : 1;
   bool dual_src_blend : 1;
   bool logicop_enable : 1;
   bool allows_noop_optimization : 1;
};
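
/* Illustrative sketch (not part of the driver) of how a per-render-target
 * "4bit" mask such as cb_target_enabled_4bit is built and used. Each render
 * target owns one 4-bit nibble:
 *
 *    unsigned mask_4bit = 0;
 *    for (unsigned i = 0; i < num_render_targets; i++) {
 *       if (rt_is_enabled(i))
 *          mask_4bit |= 0xfu << (i * 4);
 *    }
 *    // e.g. RT0 and RT1 enabled -> mask_4bit == 0xff
 *
 * Nibble masks like this can be combined with a single AND (e.g. against
 * spi_shader_col_format) instead of looping over render targets.
 * rt_is_enabled() and num_render_targets are placeholders, not driver API.
 */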

struct si_state_rasterizer {
   struct si_pm4_state pm4;

   /* Register values. */
   unsigned spi_interp_control_0;
   unsigned pa_su_point_size;
   unsigned pa_su_point_minmax;
   unsigned pa_su_line_cntl;
   unsigned pa_sc_mode_cntl_0;
   unsigned pa_su_sc_mode_cntl;
   unsigned pa_cl_ngg_cntl;
   unsigned pa_sc_edgerule;
   unsigned pa_su_poly_offset_db_fmt_cntl[3];
   unsigned pa_su_poly_offset_clamp;
   unsigned pa_su_poly_offset_frontback_scale;
   unsigned pa_su_poly_offset_frontback_offset[3];

   unsigned pa_sc_line_stipple;
   unsigned pa_cl_clip_cntl;
   float line_width;
   float max_point_size;
   unsigned ngg_cull_flags_tris : 16;
   unsigned ngg_cull_flags_tris_y_inverted : 16;
   unsigned ngg_cull_flags_lines : 16;
   unsigned sprite_coord_enable : 8;
   unsigned clip_plane_enable : 8;
   unsigned half_pixel_center : 1;
   unsigned flatshade : 1;
   unsigned flatshade_first : 1;
   unsigned two_side : 1;
   unsigned multisample_enable : 1;
   unsigned force_persample_interp : 1;
   unsigned line_stipple_enable : 1;
   unsigned poly_stipple_enable : 1;
   unsigned line_smooth : 1;
   unsigned poly_smooth : 1;
   unsigned point_smooth : 1;
   unsigned uses_poly_offset : 1;
   unsigned clamp_fragment_color : 1;
   unsigned clamp_vertex_color : 1;
   unsigned rasterizer_discard : 1;
   unsigned scissor_enable : 1;
   unsigned clip_halfz : 1;
   unsigned polygon_mode_is_lines : 1;
   unsigned polygon_mode_is_points : 1;
   unsigned perpendicular_end_caps : 1;
   unsigned bottom_edge_rule : 1;
   int force_front_face_input : 2;
};

struct si_dsa_stencil_ref_part {
   uint8_t valuemask[2];
   uint8_t writemask[2];
};

struct si_dsa_order_invariance {
   /** Whether the final result in Z/S buffers is guaranteed to be
    * invariant under changes to the order in which fragments arrive. */
   bool zs : 1;

   /** Whether the set of fragments that pass the combined Z/S test is
    * guaranteed to be invariant under changes to the order in which
    * fragments arrive. */
   bool pass_set : 1;
};

struct si_state_dsa {
   struct si_pm4_state pm4;
   struct si_dsa_stencil_ref_part stencil_ref;

   /* Register values. */
   unsigned db_depth_control;
   unsigned db_stencil_control;
   unsigned db_depth_bounds_min;
   unsigned db_depth_bounds_max;
   unsigned spi_shader_user_data_ps_alpha_ref;

   /* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
   struct si_dsa_order_invariance order_invariance[2];

   uint8_t alpha_func : 3;
   bool depth_enabled : 1;
   bool depth_write_enabled : 1;
   bool stencil_enabled : 1;
   bool stencil_write_enabled : 1;
   bool db_can_write : 1;
   bool depth_bounds_enabled : 1;
};

struct si_stencil_ref {
   struct pipe_stencil_ref state;
   struct si_dsa_stencil_ref_part dsa_part;
};

struct si_vertex_elements {
   struct si_resource *instance_divisor_factor_buffer;

   /* Bitmask of elements that always need a fixup to be applied. */
   uint16_t fix_fetch_always;

   /* Bitmask of elements whose fetch should always be opencoded. */
   uint16_t fix_fetch_opencode;

   /* Bitmask of elements which need to be opencoded if the vertex buffer
    * is unaligned. */
   uint16_t fix_fetch_unaligned;

   /* For elements in fix_fetch_unaligned: whether the effective
    * element load size as seen by the hardware is a dword (as opposed
    * to a short).
    */
   uint16_t hw_load_is_dword;

   /* Bitmask of vertex buffers requiring alignment check */
   uint16_t vb_alignment_check_mask;

   uint8_t count;

   /* Vertex buffer descriptor list size aligned for optimal prefetch. */
   uint16_t vb_desc_list_alloc_size;
   uint16_t instance_divisor_is_one;     /* bitmask of inputs */
   uint16_t instance_divisor_is_fetched; /* bitmask of inputs */

   uint8_t fix_fetch[SI_MAX_ATTRIBS];
   uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];

   struct {
      uint32_t rsrc_word3;
      uint16_t src_offset;
      uint16_t stride;
      uint8_t format_size;
   } elem[SI_MAX_ATTRIBS];
};

union si_state {
   struct si_state_named {
      struct si_state_blend *blend;
      struct si_state_rasterizer *rasterizer;
      struct si_state_dsa *dsa;
      struct si_shader *ls;
      struct si_shader *hs;
      struct si_shader *es;
      struct si_shader *gs;
      struct si_shader *vs;
      struct si_shader *ps;
      struct si_sqtt_fake_pipeline *sqtt_pipeline;
   } named;
   struct si_pm4_state *array[sizeof(struct si_state_named) / sizeof(struct si_pm4_state *)];
};

#define SI_STATE_IDX(name) (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
#define SI_STATE_BIT(name) (1ull << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
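
/* Worked example (illustrative): si_state_named is a struct of si_pm4_state
 * pointers that the union also exposes as a flat array, so
 *
 *    SI_STATE_IDX(rasterizer)  -> index of "rasterizer" in named (1, since blend is first)
 *    SI_STATE_BIT(rasterizer)  -> 1ull << SI_STATE_IDX(rasterizer)
 *
 * which lets dirty-state code iterate generically:
 *
 *    if (dirty_mask & SI_STATE_BIT(rasterizer))
 *       emit_pm4(state->array[SI_STATE_IDX(rasterizer)]);
 *
 * emit_pm4() and dirty_mask above are placeholders for illustration only.
 */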

union si_state_atoms {
   struct si_atoms_s {
      /* This must be first. */
      struct si_atom pm4_states[SI_NUM_STATES];
      struct si_atom gfx_add_all_to_bo_list;
      struct si_atom streamout_enable;
      struct si_atom framebuffer;
      struct si_atom sample_locations;
      struct si_atom db_render_state;
      struct si_atom dpbb_state;
      struct si_atom msaa_config;
      struct si_atom sample_mask;
      struct si_atom cb_render_state;
      struct si_atom blend_color;
      struct si_atom clip_regs;
      struct si_atom clip_state;
      struct si_atom gfx_shader_pointers;
      struct si_atom guardband;
      struct si_atom scissors;
      struct si_atom viewports;
      struct si_atom stencil_ref;
      struct si_atom spi_map;
      struct si_atom scratch_state;
      struct si_atom window_rectangles;
      struct si_atom shader_query;
      struct si_atom ngg_cull_state;
      struct si_atom vgt_pipeline_state;
      struct si_atom tess_io_layout;
      struct si_atom cache_flush;
      struct si_atom streamout_begin; /* this must be done after cache_flush */
      struct si_atom render_cond;     /* this must be after cache_flush */
   } s;
   struct si_atom array[sizeof(struct si_atoms_s) / sizeof(struct si_atom)];
};

#define SI_ATOM_BIT(name) (1ull << (offsetof(union si_state_atoms, s.name) / sizeof(struct si_atom)))
#define SI_NUM_ATOMS (sizeof(union si_state_atoms) / sizeof(struct si_atom))

static inline uint64_t si_atoms_that_always_roll_context(void)
{
   return SI_STATE_BIT(blend) |
          SI_ATOM_BIT(streamout_begin) | SI_ATOM_BIT(streamout_enable) | SI_ATOM_BIT(framebuffer) |
          SI_ATOM_BIT(sample_locations) | SI_ATOM_BIT(sample_mask) | SI_ATOM_BIT(blend_color) |
          SI_ATOM_BIT(clip_state) | SI_ATOM_BIT(scissors) | SI_ATOM_BIT(viewports) |
          SI_ATOM_BIT(stencil_ref) | SI_ATOM_BIT(scratch_state) | SI_ATOM_BIT(window_rectangles);
}

struct si_shader_data {
   uint32_t sh_base[SI_NUM_SHADERS];
};

/* Registers whose values are tracked by si_context. */
enum si_tracked_reg
{
   /* CONTEXT registers. */
   /* 2 consecutive registers */
   SI_TRACKED_DB_RENDER_CONTROL,
   SI_TRACKED_DB_COUNT_CONTROL,

   SI_TRACKED_DB_DEPTH_CONTROL,
   SI_TRACKED_DB_STENCIL_CONTROL,
   /* 2 consecutive registers */
   SI_TRACKED_DB_DEPTH_BOUNDS_MIN,
   SI_TRACKED_DB_DEPTH_BOUNDS_MAX,

   SI_TRACKED_SPI_INTERP_CONTROL_0,
   SI_TRACKED_PA_SU_POINT_SIZE,
   SI_TRACKED_PA_SU_POINT_MINMAX,
   SI_TRACKED_PA_SU_LINE_CNTL,
   SI_TRACKED_PA_SC_MODE_CNTL_0,
   SI_TRACKED_PA_SU_SC_MODE_CNTL,
   SI_TRACKED_PA_SC_EDGERULE,

   /* 6 consecutive registers */
   SI_TRACKED_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
   SI_TRACKED_PA_SU_POLY_OFFSET_CLAMP,
   SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_SCALE,
   SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_OFFSET,
   SI_TRACKED_PA_SU_POLY_OFFSET_BACK_SCALE,
   SI_TRACKED_PA_SU_POLY_OFFSET_BACK_OFFSET,

   /* 2 consecutive registers */
   SI_TRACKED_PA_SC_LINE_CNTL,
   SI_TRACKED_PA_SC_AA_CONFIG,

   /* 5 consecutive registers */
   SI_TRACKED_PA_SU_VTX_CNTL,
   SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,

   /* Non-consecutive register */
   SI_TRACKED_SPI_SHADER_POS_FORMAT,

   /* 2 consecutive registers */
   SI_TRACKED_SPI_SHADER_Z_FORMAT,
   SI_TRACKED_SPI_SHADER_COL_FORMAT,

   SI_TRACKED_SPI_BARYC_CNTL,

   /* 2 consecutive registers */
   SI_TRACKED_SPI_PS_INPUT_ENA,
   SI_TRACKED_SPI_PS_INPUT_ADDR,

   SI_TRACKED_DB_EQAA,
   SI_TRACKED_DB_SHADER_CONTROL,
   SI_TRACKED_CB_SHADER_MASK,
   SI_TRACKED_CB_TARGET_MASK,
   SI_TRACKED_PA_CL_CLIP_CNTL,
   SI_TRACKED_PA_CL_VS_OUT_CNTL,
   SI_TRACKED_PA_CL_VTE_CNTL,
   SI_TRACKED_PA_SC_CLIPRECT_RULE,
   SI_TRACKED_PA_SC_LINE_STIPPLE,
   SI_TRACKED_PA_SC_MODE_CNTL_1,
   SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
   SI_TRACKED_SPI_PS_IN_CONTROL,
   SI_TRACKED_VGT_GS_INSTANCE_CNT,
   SI_TRACKED_VGT_GS_MAX_VERT_OUT,
   SI_TRACKED_VGT_SHADER_STAGES_EN,
   SI_TRACKED_VGT_LS_HS_CONFIG,
   SI_TRACKED_VGT_TF_PARAM,
   SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL, /* GFX8-9 (only with has_small_prim_filter_sample_loc_bug) */
   SI_TRACKED_PA_SC_BINNER_CNTL_0,          /* GFX9+ */
   SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,   /* GFX10+ - the SMALL_PRIM_FILTER slot above can be reused */
   SI_TRACKED_GE_NGG_SUBGRP_CNTL,           /* GFX10+ */
   SI_TRACKED_PA_CL_NGG_CNTL,               /* GFX10+ */
   SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL,   /* GFX10.3+ */

   /* 3 consecutive registers */
   SI_TRACKED_SX_PS_DOWNCONVERT,    /* GFX8+ */
   SI_TRACKED_SX_BLEND_OPT_EPSILON, /* GFX8+ */
   SI_TRACKED_SX_BLEND_OPT_CONTROL, /* GFX8+ */

   /* The slots below can be reused by other generations. */
   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, /* GFX6-8 (GFX9+ can reuse this slot) */
   SI_TRACKED_VGT_REUSE_OFF,          /* GFX6-8 (GFX9+ can reuse this slot) */
   SI_TRACKED_IA_MULTI_VGT_PARAM,     /* GFX6-8 (GFX9+ can reuse this slot) */

   SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP, /* GFX9 - the slots above can be reused */
   SI_TRACKED_VGT_GS_ONCHIP_CNTL,            /* GFX9-10 - the slots above can be reused */

   SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,      /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_MODE,                 /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_OUT_PRIM_TYPE,        /* GFX6-10 (GFX11+ can reuse this slot) */

   /* 3 consecutive registers */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_2, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_3, /* GFX6-10 (GFX11+ can reuse this slot) */

   /* 4 consecutive registers */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE,   /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3, /* GFX6-10 (GFX11+ can reuse this slot) */

   SI_TRACKED_DB_RENDER_OVERRIDE2, /* GFX6-xx (TBD) */
   SI_TRACKED_SPI_VS_OUT_CONFIG,   /* GFX6-xx (TBD) */
   SI_TRACKED_VGT_PRIMITIVEID_EN,  /* GFX6-xx (TBD) */
   SI_TRACKED_CB_DCC_CONTROL,      /* GFX8-xx (TBD) */

   SI_NUM_TRACKED_CONTEXT_REGS,
   SI_FIRST_TRACKED_OTHER_REG = SI_NUM_TRACKED_CONTEXT_REGS,

   /* SH and UCONFIG registers. */
   SI_TRACKED_GE_PC_ALLOC = SI_FIRST_TRACKED_OTHER_REG, /* GFX10+ */
   SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS,                  /* GFX7+ */
   SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS,                  /* GFX10+ */
   SI_TRACKED_VGT_GS_OUT_PRIM_TYPE_UCONFIG,             /* GFX11+ */

   SI_TRACKED_IA_MULTI_VGT_PARAM_UCONFIG,                      /* GFX9 only */
   SI_TRACKED_GE_CNTL = SI_TRACKED_IA_MULTI_VGT_PARAM_UCONFIG, /* GFX10+ */

   SI_TRACKED_SPI_SHADER_PGM_RSRC2_HS, /* GFX9+ (not tracked on previous chips) */

   /* 3 consecutive registers. */
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_LAYOUT,
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_ADDR,
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__VS_STATE_BITS, /* GFX6-8 */

   SI_TRACKED_SPI_SHADER_USER_DATA_LS__BASE_VERTEX,
   SI_TRACKED_SPI_SHADER_USER_DATA_LS__DRAWID,
   SI_TRACKED_SPI_SHADER_USER_DATA_LS__START_INSTANCE,

   SI_TRACKED_SPI_SHADER_USER_DATA_ES__BASE_VERTEX,
   SI_TRACKED_SPI_SHADER_USER_DATA_ES__DRAWID,
   SI_TRACKED_SPI_SHADER_USER_DATA_ES__START_INSTANCE,

   SI_TRACKED_SPI_SHADER_USER_DATA_VS__BASE_VERTEX,    /* GFX6-10 */
   SI_TRACKED_SPI_SHADER_USER_DATA_VS__DRAWID,         /* GFX6-10 */
   SI_TRACKED_SPI_SHADER_USER_DATA_VS__START_INSTANCE, /* GFX6-10 */

   SI_TRACKED_SPI_SHADER_USER_DATA_PS__ALPHA_REF,

   SI_TRACKED_COMPUTE_RESOURCE_LIMITS,
   SI_TRACKED_COMPUTE_NUM_THREAD_X,
   SI_TRACKED_COMPUTE_NUM_THREAD_Y,
   SI_TRACKED_COMPUTE_NUM_THREAD_Z,
   SI_TRACKED_COMPUTE_TMPRING_SIZE,
   SI_TRACKED_COMPUTE_PGM_RSRC3, /* GFX11+ */

   /* 2 consecutive registers. */
   SI_TRACKED_COMPUTE_PGM_RSRC1,
   SI_TRACKED_COMPUTE_PGM_RSRC2,

   /* 2 consecutive registers. */
   SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_LO, /* GFX11+ */
   SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_HI, /* GFX11+ */

   SI_NUM_ALL_TRACKED_REGS,
};

/* For 3 draw constants: BaseVertex, DrawID, StartInstance */
#define BASEVERTEX_MASK 0x1
#define DRAWID_MASK 0x2
#define STARTINSTANCE_MASK 0x4
#define BASEVERTEX_DRAWID_MASK (BASEVERTEX_MASK | DRAWID_MASK)
#define BASEVERTEX_DRAWID_STARTINSTANCE_MASK (BASEVERTEX_MASK | DRAWID_MASK | STARTINSTANCE_MASK)

struct si_tracked_regs {
   BITSET_DECLARE(reg_saved_mask, SI_NUM_ALL_TRACKED_REGS);
   uint32_t reg_value[SI_NUM_ALL_TRACKED_REGS];
   uint32_t spi_ps_input_cntl[32];
};

/* Private read-write buffer slots. */
enum
{
   SI_VS_STREAMOUT_BUF0,
   SI_VS_STREAMOUT_BUF1,
   SI_VS_STREAMOUT_BUF2,
   SI_VS_STREAMOUT_BUF3,

   /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
   SI_PS_IMAGE_COLORBUF0,
   SI_PS_IMAGE_COLORBUF0_HI,
   SI_PS_IMAGE_COLORBUF0_FMASK,    /* gfx6-10 */
   SI_PS_IMAGE_COLORBUF0_FMASK_HI, /* gfx6-10 */

   /* Internal constant buffers. */
   SI_HS_CONST_DEFAULT_TESS_LEVELS,
   SI_VS_CONST_INSTANCE_DIVISORS,
   SI_VS_CONST_CLIP_PLANES,
   SI_PS_CONST_POLY_STIPPLE,
   SI_PS_CONST_SAMPLE_POSITIONS,

   SI_RING_ESGS,                      /* gfx6-8 */
   SI_RING_GSVS,                      /* gfx6-10 */
   SI_GS_QUERY_EMULATED_COUNTERS_BUF, /* gfx10+ */

   SI_NUM_INTERNAL_BINDINGS,

   /* Aliases to reuse slots that are unused on other generations. */
   SI_GS_QUERY_BUF = SI_RING_ESGS, /* gfx10+ */
};

/* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
 * are contiguous:
 *
 *  0 - rw buffers
 *  1 - vertex const and shader buffers
 *  2 - vertex samplers and images
 *  3 - fragment const and shader buffer
 *  ...
 *  11 - compute const and shader buffers
 *  12 - compute samplers and images
 */
enum
{
   SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
   SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
   SI_NUM_SHADER_DESCS,
};

#define SI_DESCS_INTERNAL 0
#define SI_DESCS_FIRST_SHADER 1
#define SI_DESCS_FIRST_COMPUTE (SI_DESCS_FIRST_SHADER + PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS (SI_DESCS_FIRST_SHADER + SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)

#define SI_DESCS_SHADER_MASK(name) \
   u_bit_consecutive(SI_DESCS_FIRST_SHADER + PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
                     SI_NUM_SHADER_DESCS)

static inline unsigned si_const_and_shader_buffer_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}

static inline unsigned si_sampler_and_image_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
}
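
/* Example (illustrative): the descriptor set holding the fragment shader's
 * constant and shader buffers is
 *
 *    struct si_descriptors *descs =
 *       &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_FRAGMENT)];
 *
 * i.e. SI_DESCS_FIRST_SHADER + PIPE_SHADER_FRAGMENT * SI_NUM_SHADER_DESCS +
 * SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS, matching the layout in the comment
 * above the enum.
 */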

/* This represents descriptors in memory, such as buffer resources,
 * image resources, and sampler states.
 */
struct si_descriptors {
   /* The list of descriptors in malloc'd memory. */
   uint32_t *list;
   /* The list in mapped GPU memory. */
   uint32_t *gpu_list;

   /* The buffer where the descriptors have been uploaded. */
   struct si_resource *buffer;
   uint64_t gpu_address;

   /* The maximum number of descriptors. */
   uint32_t num_elements;

   /* Slots that are used by currently-bound shaders.
    * It determines which slots are uploaded.
    */
   uint32_t first_active_slot;
   uint32_t num_active_slots;

   /* The SH register offset relative to USER_DATA*_0 where the pointer
    * to the descriptor array will be stored. */
   short shader_userdata_offset;
   /* The size of one descriptor. */
   uint8_t element_dw_size;
   /* If there is only one slot enabled, bind it directly instead of
    * uploading descriptors. -1 if disabled. */
   signed char slot_index_to_bind_directly;
};

struct si_buffer_resources {
   struct pipe_resource **buffers; /* this has num_buffers elements */
   unsigned *offsets;              /* this has num_buffers elements */

   unsigned priority;
   unsigned priority_constbuf;

   /* The i-th bit is set if that element is enabled (non-NULL resource). */
   uint64_t enabled_mask;
   uint64_t writable_mask;
};

#define si_pm4_state_changed(sctx, member) \
   ((sctx)->queued.named.member != (sctx)->emitted.named.member)

#define si_pm4_state_enabled_and_changed(sctx, member) \
   ((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))

#define si_pm4_bind_state(sctx, member, value) \
   do { \
      (sctx)->queued.named.member = (value); \
      if (value && value != (sctx)->emitted.named.member) \
         (sctx)->dirty_atoms |= SI_STATE_BIT(member); \
      else \
         (sctx)->dirty_atoms &= ~SI_STATE_BIT(member); \
   } while (0)
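
/* Usage sketch (illustrative, simplified): a CSO bind callback queues the
 * state and marks its atom dirty only if the state actually changed. The real
 * bind functions live in si_state.c; the body below is only a sketch:
 *
 *    static void si_bind_blend_state(struct pipe_context *ctx, void *state)
 *    {
 *       struct si_context *sctx = (struct si_context *)ctx;
 *       si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
 *       // ... additional blend-dependent state updates omitted
 *    }
 */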

/* si_descriptors.c */
void si_get_inline_uniform_state(union si_shader_key *key, enum pipe_shader_type shader,
                                 bool *inline_uniforms, uint32_t **inlined_values);
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture *tex,
                                    const struct legacy_surf_level *base_level_info,
                                    unsigned base_level, unsigned first_level, unsigned block_width,
                                    /* restrict decreases overhead of si_set_sampler_view_desc ~8x. */
                                    bool is_stencil, uint16_t access, uint32_t * restrict state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_force_disable_ps_colorbuf0_slot(struct si_context *sctx);
void si_invalidate_inlinable_uniforms(struct si_context *sctx, enum pipe_shader_type shader);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader, uint slot,
                                 struct pipe_constant_buffer *cbuf);
void si_set_shader_buffers(struct pipe_context *ctx, enum pipe_shader_type shader,
                           unsigned start_slot, unsigned count,
                           const struct pipe_shader_buffer *sbuffers,
                           unsigned writable_bitmask, bool internal_blit);
void si_get_shader_buffers(struct si_context *sctx, enum pipe_shader_type shader, uint start_slot,
                           uint count, struct pipe_shader_buffer *sbuf);
void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records, bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset);
void si_init_all_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_compute_resources_add_all_to_bo_list(struct si_context *sctx);
bool si_gfx_resources_check_encrypted(struct si_context *sctx);
bool si_compute_resources_check_encrypted(struct si_context *sctx);
void si_shader_pointers_mark_dirty(struct si_context *sctx);
void si_add_all_descriptors_to_bo_list(struct si_context *sctx);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx, unsigned index);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_internal_const_buffer(struct si_context *sctx, uint slot,
                                  const struct pipe_constant_buffer *input);
void si_set_internal_shader_buffer(struct si_context *sctx, uint slot,
                                   const struct pipe_shader_buffer *sbuffer);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
                               uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx, struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                                                  unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf);

/* si_state.c */
void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void si_init_gfx_preamble_state(struct si_context *sctx);
void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
                               enum pipe_format format, unsigned offset, unsigned num_elements,
                               uint32_t *state);
void si_set_sampler_depth_decompress_mask(struct si_context *sctx, struct si_texture *tex);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_mark_display_dcc_dirty(struct si_context *sctx, struct si_texture *tex);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
unsigned gfx103_get_cu_mask_ps(struct si_screen *sscreen);

struct si_fast_udiv_info32 {
   unsigned multiplier; /* the "magic number" multiplier */
   unsigned pre_shift;  /* shift for the dividend before multiplying */
   unsigned post_shift; /* shift for the dividend after multiplying */
   int increment;       /* 0 or 1; if set then increment the numerator, using one of
                           the two strategies */
};

struct si_fast_udiv_info32 si_compute_fast_udiv_info32(uint32_t D, unsigned num_bits);
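
/* Illustrative sketch of how such a division is usually evaluated (this follows
 * the util_fast_udiv32-style magic-number scheme; the exact helper the driver
 * uses may differ):
 *
 *    static uint32_t fast_udiv32(uint32_t n, struct si_fast_udiv_info32 info)
 *    {
 *       n >>= info.pre_shift;
 *       n = (((uint64_t)n + info.increment) * info.multiplier) >> 32;
 *       return n >> info.post_shift;
 *    }
 *
 * A division by a constant becomes a shift, a 32x32->64-bit multiply and
 * another shift, which is much cheaper than an integer divide in a shader.
 */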

/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx, unsigned index);

/* si_state_shaders.cpp */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned wave_size, unsigned char ir_sha1_cache_key[20]);
bool si_shader_cache_load_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader);
void si_shader_cache_insert_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader, bool insert_into_disk_cache);
bool si_shader_mem_ordered(struct si_shader *shader);
void si_init_screen_live_shader_cache(struct si_screen *sscreen);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_schedule_initial_compile(struct si_context *sctx, gl_shader_stage stage,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state, void *job,
                                 util_queue_execute_func execute);
void si_get_active_slot_masks(struct si_screen *sscreen, const struct si_shader_info *info,
                              uint64_t *const_and_shader_buffers, uint64_t *samplers_and_images);
int si_shader_select(struct pipe_context *ctx, struct si_shader_ctx_state *state);
void si_vs_key_update_inputs(struct si_context *sctx);
void si_update_ps_inputs_read_or_disabled(struct si_context *sctx);
void si_update_vrs_flat_shading(struct si_context *sctx);
unsigned si_get_input_prim(const struct si_shader_selector *gs, const union si_shader_key *key);
bool si_update_ngg(struct si_context *sctx);
void si_vs_ps_key_update_rast_prim_smooth_stipple(struct si_context *sctx);
void si_ps_key_update_framebuffer(struct si_context *sctx);
void si_ps_key_update_framebuffer_blend_rasterizer(struct si_context *sctx);
void si_ps_key_update_rasterizer(struct si_context *sctx);
void si_ps_key_update_dsa(struct si_context *sctx);
void si_ps_key_update_sample_shading(struct si_context *sctx);
void si_ps_key_update_framebuffer_rasterizer_sample_shading(struct si_context *sctx);
void si_init_tess_factor_ring(struct si_context *sctx);
bool si_update_gs_ring_buffers(struct si_context *sctx);
bool si_update_spi_tmpring_size(struct si_context *sctx, unsigned bytes);
unsigned si_get_shader_prefetch_size(struct si_shader *shader);
bool si_set_tcs_to_fixed_func_shader(struct si_context *sctx);
void si_update_tess_io_layout_state(struct si_context *sctx);

/* si_state_draw.cpp */
void si_cp_dma_prefetch(struct si_context *sctx, struct pipe_resource *buf,
                        unsigned offset, unsigned size);
void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex_elements *velems,
                                     struct pipe_vertex_buffer *vb, unsigned element_index,
                                     uint32_t *out);
void si_emit_buffered_compute_sh_regs(struct si_context *sctx);
void si_init_draw_functions_GFX6(struct si_context *sctx);
void si_init_draw_functions_GFX7(struct si_context *sctx);
void si_init_draw_functions_GFX8(struct si_context *sctx);
void si_init_draw_functions_GFX9(struct si_context *sctx);
void si_init_draw_functions_GFX10(struct si_context *sctx);
void si_init_draw_functions_GFX10_3(struct si_context *sctx);
void si_init_draw_functions_GFX11(struct si_context *sctx);
void si_init_draw_functions_GFX11_5(struct si_context *sctx);

/* si_state_msaa.c */
extern unsigned si_msaa_max_distance[5];
void si_init_msaa_functions(struct si_context *sctx);

/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx, unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);

static inline unsigned si_get_constbuf_slot(unsigned slot)
{
   /* Constant buffers are in slots [32..47], ascending */
   return SI_NUM_SHADER_BUFFERS + slot;
}

static inline unsigned si_get_shaderbuf_slot(unsigned slot)
{
   /* shader buffers are in slots [31..0], descending */
   return SI_NUM_SHADER_BUFFERS - 1 - slot;
}

static inline unsigned si_get_sampler_slot(unsigned slot)
{
   /* 32 samplers are in sampler slots [16..47], 16 dw per slot, ascending */
   /* those are equivalent to image slots [32..95], 8 dw per slot, ascending */
   return SI_NUM_IMAGE_SLOTS / 2 + slot;
}

static inline unsigned si_get_image_slot(unsigned slot)
{
   /* image slots are in [31..0] (sampler slots [15..0]), descending */
   /* images are in slots [31..16], while FMASKs are in slots [15..0] */
   return SI_NUM_IMAGE_SLOTS - 1 - slot;
}
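
/* Worked examples (direct consequences of the helpers above):
 *
 *    si_get_shaderbuf_slot(0) == 31   (shader buffers fill slots [31..0], descending)
 *    si_get_constbuf_slot(0)  == 32   (constant buffers follow in [32..47], ascending)
 *    si_get_image_slot(0)     == 31   (images fill image slots [31..16]; FMASKs use [15..0])
 *    si_get_sampler_slot(0)   == 16   (samplers start after the image slots, in
 *                                      sampler-sized slots [16..47])
 */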

static inline unsigned si_clamp_texture_texel_count(unsigned max_texel_buffer_elements,
                                                    enum pipe_format format,
                                                    uint32_t size)
{
   /* The spec says:
    *    The number of texels in the texel array is then clamped to the value of
    *    the implementation-dependent limit GL_MAX_TEXTURE_BUFFER_SIZE.
    *
    * So compute the number of texels, compare to GL_MAX_TEXTURE_BUFFER_SIZE and update it.
    */
   unsigned stride = util_format_get_blocksize(format);
   return MIN2(max_texel_buffer_elements, size / stride);
}
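
/* Example (illustrative): for PIPE_FORMAT_R32G32B32A32_FLOAT the block size is
 * 16 bytes, so a 1 MiB buffer holds 1048576 / 16 = 65536 texels, which is then
 * clamped to max_texel_buffer_elements if the implementation limit is smaller.
 */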

#ifdef __cplusplus
}
#endif

#endif