/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef SI_STATE_H
#define SI_STATE_H

#include "si_pm4.h"
#include "util/format/u_format.h"
#include "util/bitset.h"

#ifdef __cplusplus
extern "C" {
#endif

#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_FRAGMENT + 1)
#define SI_NUM_SHADERS (PIPE_SHADER_COMPUTE + 1)

#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS 32 /* OpenGL texture units per shader */
#define SI_NUM_CONST_BUFFERS 16
#define SI_NUM_IMAGES 16
#define SI_NUM_IMAGE_SLOTS (SI_NUM_IMAGES * 2) /* the second half are FMASK slots */
#define SI_NUM_SHADER_BUFFERS 32

struct si_screen;
struct si_shader;
struct si_shader_ctx_state;
struct si_shader_selector;
struct si_texture;
struct si_qbo_state;
struct legacy_surf_level;
struct pb_slab_entry;

struct si_state_blend {
   struct si_pm4_state pm4;
   uint32_t cb_target_mask;
   /* Each of the following masks stores 0xf or 0x0 (4 bits) per render target
    * when the corresponding condition is true. ANDed with spi_shader_col_format.
    */
   unsigned cb_target_enabled_4bit;
   unsigned blend_enable_4bit;
   unsigned need_src_alpha_4bit;
   unsigned commutative_4bit;
   unsigned dcc_msaa_corruption_4bit;
   bool alpha_to_coverage : 1;
   bool alpha_to_one : 1;
   bool dual_src_blend : 1;
   bool logicop_enable : 1;
   bool allows_noop_optimization : 1;
};

struct si_state_rasterizer {
   struct si_pm4_state pm4;

   /* Register values. */
   unsigned spi_interp_control_0;
   unsigned pa_su_point_size;
   unsigned pa_su_point_minmax;
   unsigned pa_su_line_cntl;
   unsigned pa_sc_mode_cntl_0;
   unsigned pa_su_sc_mode_cntl;
   unsigned pa_cl_ngg_cntl;
   unsigned pa_sc_edgerule;
   unsigned pa_su_poly_offset_db_fmt_cntl[3];
   unsigned pa_su_poly_offset_clamp;
   unsigned pa_su_poly_offset_frontback_scale;
   unsigned pa_su_poly_offset_frontback_offset[3];

   unsigned pa_sc_line_stipple;
   unsigned pa_cl_clip_cntl;
   float line_width;
   float max_point_size;
   unsigned ngg_cull_flags_tris : 16;
   unsigned ngg_cull_flags_lines : 16;
   unsigned sprite_coord_enable : 8;
   unsigned clip_plane_enable : 8;
   bool ngg_cull_front : 1;
   bool ngg_cull_back : 1;
   unsigned half_pixel_center : 1;
   unsigned flatshade : 1;
   unsigned flatshade_first : 1;
   unsigned two_side : 1;
   unsigned multisample_enable : 1;
   unsigned force_persample_interp : 1;
   unsigned line_stipple_enable : 1;
   unsigned poly_stipple_enable : 1;
   unsigned line_smooth : 1;
   unsigned poly_smooth : 1;
   unsigned point_smooth : 1;
   unsigned uses_poly_offset : 1;
   unsigned clamp_fragment_color : 1;
   unsigned clamp_vertex_color : 1;
   unsigned rasterizer_discard : 1;
   unsigned scissor_enable : 1;
   unsigned clip_halfz : 1;
   unsigned polygon_mode_is_lines : 1;
   unsigned polygon_mode_is_points : 1;
   unsigned perpendicular_end_caps : 1;
   unsigned bottom_edge_rule : 1;
   int force_front_face_input : 2;
};

struct si_dsa_stencil_ref_part {
   uint8_t valuemask[2];
   uint8_t writemask[2];
};

struct si_dsa_order_invariance {
   /** Whether the final result in Z/S buffers is guaranteed to be
    * invariant under changes to the order in which fragments arrive. */
   bool zs : 1;

   /** Whether the set of fragments that pass the combined Z/S test is
    * guaranteed to be invariant under changes to the order in which
    * fragments arrive. */
   bool pass_set : 1;
};

struct si_state_dsa {
   struct si_pm4_state pm4;
   struct si_dsa_stencil_ref_part stencil_ref;

   /* Register values. */
   unsigned db_depth_control;
   unsigned db_stencil_control;
   unsigned db_depth_bounds_min;
   unsigned db_depth_bounds_max;
   unsigned spi_shader_user_data_ps_alpha_ref;
   unsigned db_stencil_read_mask;
   unsigned db_stencil_write_mask;
   unsigned db_render_override; /* only gfx12 */

   /* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
   struct si_dsa_order_invariance order_invariance[2];

   uint8_t alpha_func : 3;
   bool depth_enabled : 1;
   bool depth_write_enabled : 1;
   bool stencil_enabled : 1;
   bool stencil_write_enabled : 1;
   bool db_can_write : 1;
   bool depth_bounds_enabled : 1;
};

struct si_stencil_ref {
   struct pipe_stencil_ref state;
   struct si_dsa_stencil_ref_part dsa_part;
};

struct si_vertex_elements {
   struct si_resource *instance_divisor_factor_buffer;

   /* Bitmask of elements that always need a fixup to be applied. */
   uint16_t fix_fetch_always;

   /* Bitmask of elements whose fetch should always be opencoded. */
   uint16_t fix_fetch_opencode;

   /* Bitmask of elements which need to be opencoded if the vertex buffer
    * is unaligned. */
   uint16_t fix_fetch_unaligned;

   /* For elements in fix_fetch_unaligned: whether the effective
    * element load size as seen by the hardware is a dword (as opposed
    * to a short).
    */
   uint16_t hw_load_is_dword;

   /* Bitmask of vertex buffers requiring alignment check */
   uint16_t vb_alignment_check_mask;

   uint8_t count;

   /* Vertex buffer descriptor list size aligned for optimal prefetch. */
   uint16_t vb_desc_list_alloc_size;
   uint16_t instance_divisor_is_one; /* bitmask of inputs */
   uint16_t instance_divisor_is_fetched; /* bitmask of inputs */

   uint8_t fix_fetch[SI_MAX_ATTRIBS];
   uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];

   struct {
      uint32_t rsrc_word3;
      uint16_t src_offset;
      uint16_t stride;
      uint8_t format_size;
   } elem[SI_MAX_ATTRIBS];
};

union si_state {
   struct si_state_named {
      struct si_state_blend *blend;
      struct si_state_rasterizer *rasterizer;
      struct si_state_dsa *dsa;
      struct si_shader *ls;
      struct si_shader *hs;
      struct si_shader *es;
      struct si_shader *gs;
      struct si_shader *vs;
      struct si_shader *ps;
      struct si_sqtt_fake_pipeline *sqtt_pipeline;
   } named;
   struct si_pm4_state *array[sizeof(struct si_state_named) / sizeof(struct si_pm4_state *)];
};

#define SI_STATE_IDX(name) (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
#define SI_STATE_BIT(name) (1ull << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
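
/* Illustrative note (not part of the interface): SI_STATE_IDX() maps a member
 * of struct si_state_named to its index in si_state::array, and SI_STATE_BIT()
 * maps it to a dirty bit. Binding a new blend state does roughly:
 *
 *    sctx->queued.named.blend = blend;
 *    sctx->dirty_atoms |= SI_STATE_BIT(blend);
 *
 * which is the same bit the si_pm4_bind_state() macro below sets, so pm4
 * states and the other atoms can share one dirty mask.
 */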

union si_state_atoms {
   struct si_atoms_s {
      /* This must be first. */
      struct si_atom pm4_states[SI_NUM_STATES];
      struct si_atom gfx_add_all_to_bo_list;
      struct si_atom streamout_enable;
      struct si_atom framebuffer;
      struct si_atom sample_locations;
      struct si_atom db_render_state;
      struct si_atom dpbb_state;
      struct si_atom msaa_config;
      struct si_atom sample_mask;
      struct si_atom cb_render_state;
      struct si_atom blend_color;
      struct si_atom clip_regs;
      struct si_atom clip_state;
      struct si_atom gfx_shader_pointers;
      struct si_atom guardband;
      struct si_atom scissors;
      struct si_atom viewports;
      struct si_atom stencil_ref;
      struct si_atom spi_map;
      struct si_atom scratch_state;
      struct si_atom window_rectangles;
      struct si_atom shader_query;
      struct si_atom ngg_cull_state;
      struct si_atom vgt_pipeline_state;
      struct si_atom tess_io_layout;
      struct si_atom barrier;
      struct si_atom streamout_begin; /* this must be done after barrier */
      struct si_atom render_cond; /* this must be after barrier */
      struct si_atom spi_ge_ring_state; /* this must be last because it waits for idle. */
   } s;
   struct si_atom array[sizeof(struct si_atoms_s) / sizeof(struct si_atom)];
};

#define SI_ATOM_BIT(name) (1ull << (offsetof(union si_state_atoms, s.name) / sizeof(struct si_atom)))
#define SI_NUM_ATOMS (sizeof(union si_state_atoms) / sizeof(struct si_atom))

static inline uint64_t si_atoms_that_always_roll_context(void)
{
   return SI_STATE_BIT(blend) |
          SI_ATOM_BIT(streamout_begin) | SI_ATOM_BIT(streamout_enable) | SI_ATOM_BIT(framebuffer) |
          SI_ATOM_BIT(sample_locations) | SI_ATOM_BIT(sample_mask) | SI_ATOM_BIT(blend_color) |
          SI_ATOM_BIT(clip_state) | SI_ATOM_BIT(scissors) | SI_ATOM_BIT(viewports) |
          SI_ATOM_BIT(stencil_ref) | SI_ATOM_BIT(scratch_state) | SI_ATOM_BIT(window_rectangles);
}

struct si_shader_data {
   uint32_t sh_base[SI_NUM_SHADERS];
};

/* Registers whose values are tracked by si_context. */
enum si_tracked_reg
{
   /* CONTEXT registers. */
   /* 2 consecutive registers (GFX6-11), or separate registers (GFX12) */
   SI_TRACKED_DB_RENDER_CONTROL,
   SI_TRACKED_DB_COUNT_CONTROL,

   SI_TRACKED_DB_DEPTH_CONTROL,
   SI_TRACKED_DB_STENCIL_CONTROL,
   /* 2 consecutive registers */
   SI_TRACKED_DB_DEPTH_BOUNDS_MIN,
   SI_TRACKED_DB_DEPTH_BOUNDS_MAX,

   SI_TRACKED_SPI_INTERP_CONTROL_0,
   SI_TRACKED_PA_SU_POINT_SIZE,
   SI_TRACKED_PA_SU_POINT_MINMAX,
   SI_TRACKED_PA_SU_LINE_CNTL,
   SI_TRACKED_PA_SC_MODE_CNTL_0,
   SI_TRACKED_PA_SU_SC_MODE_CNTL,
   SI_TRACKED_PA_SC_EDGERULE,

   /* 6 consecutive registers */
   SI_TRACKED_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
   SI_TRACKED_PA_SU_POLY_OFFSET_CLAMP,
   SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_SCALE,
   SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_OFFSET,
   SI_TRACKED_PA_SU_POLY_OFFSET_BACK_SCALE,
   SI_TRACKED_PA_SU_POLY_OFFSET_BACK_OFFSET,

   /* 2 consecutive registers */
   SI_TRACKED_PA_SC_LINE_CNTL,
   SI_TRACKED_PA_SC_AA_CONFIG,

   /* 5 consecutive registers (GFX6-11) */
   SI_TRACKED_PA_SU_VTX_CNTL,
   /* 4 consecutive registers (GFX12) */
   SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,

   /* Non-consecutive register */
   SI_TRACKED_SPI_SHADER_POS_FORMAT,

   /* 5 consecutive registers (GFX12), or 2 consecutive registers (GFX6-11) */
   SI_TRACKED_SPI_SHADER_Z_FORMAT,
   SI_TRACKED_SPI_SHADER_COL_FORMAT,
   /* Continuing consecutive registers (GFX12), or separate register (GFX6-11) */
   SI_TRACKED_SPI_BARYC_CNTL,
   /* Continuing consecutive registers (GFX12), or 2 consecutive registers (GFX6-11) */
   SI_TRACKED_SPI_PS_INPUT_ENA,
   SI_TRACKED_SPI_PS_INPUT_ADDR,

   SI_TRACKED_DB_EQAA,
   SI_TRACKED_DB_RENDER_OVERRIDE2,
   SI_TRACKED_DB_SHADER_CONTROL,
   SI_TRACKED_CB_SHADER_MASK,
   SI_TRACKED_CB_TARGET_MASK,
   SI_TRACKED_PA_CL_CLIP_CNTL,
   SI_TRACKED_PA_CL_VS_OUT_CNTL,
   SI_TRACKED_PA_CL_VTE_CNTL,
   SI_TRACKED_PA_SC_CLIPRECT_RULE,
   SI_TRACKED_PA_SC_LINE_STIPPLE,
   SI_TRACKED_PA_SC_MODE_CNTL_1,
   SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
   SI_TRACKED_SPI_PS_IN_CONTROL,
   SI_TRACKED_VGT_GS_INSTANCE_CNT,
   SI_TRACKED_VGT_GS_MAX_VERT_OUT,
   SI_TRACKED_VGT_SHADER_STAGES_EN,
   SI_TRACKED_VGT_LS_HS_CONFIG,
   SI_TRACKED_VGT_TF_PARAM,
   SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL, /* GFX8-9 (only with has_small_prim_filter_sample_loc_bug) */
   SI_TRACKED_PA_SC_BINNER_CNTL_0, /* GFX9+ */
   SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP, /* GFX10+ - the SMALL_PRIM_FILTER slot above can be reused */
   SI_TRACKED_GE_NGG_SUBGRP_CNTL, /* GFX10+ */
   SI_TRACKED_PA_CL_NGG_CNTL, /* GFX10+ */
   SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL, /* GFX10.3+ */

   /* 3 consecutive registers */
   SI_TRACKED_SX_PS_DOWNCONVERT, /* GFX8+ */
   SI_TRACKED_SX_BLEND_OPT_EPSILON, /* GFX8+ */
   SI_TRACKED_SX_BLEND_OPT_CONTROL, /* GFX8+ */

   /* The slots below can be reused by other generations. */
   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, /* GFX6-8 (GFX9+ can reuse this slot) */
   SI_TRACKED_VGT_REUSE_OFF, /* GFX6-8,10.3 */
   SI_TRACKED_IA_MULTI_VGT_PARAM, /* GFX6-8 (GFX9+ can reuse this slot) */

   SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP, /* GFX9 - the slots above can be reused */
   SI_TRACKED_VGT_GS_ONCHIP_CNTL, /* GFX9-10 - the slots above can be reused */

   SI_TRACKED_VGT_GSVS_RING_ITEMSIZE, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_MODE, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_OUT_PRIM_TYPE, /* GFX6-10 (GFX11+ can reuse this slot) */

   /* 3 consecutive registers */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_2, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_3, /* GFX6-10 (GFX11+ can reuse this slot) */

   /* 4 consecutive registers */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3, /* GFX6-10 (GFX11+ can reuse this slot) */

   SI_TRACKED_SPI_VS_OUT_CONFIG, /* GFX6-11 */
   SI_TRACKED_DB_RENDER_OVERRIDE = SI_TRACKED_SPI_VS_OUT_CONFIG, /* GFX12+ (slot reused) */
   SI_TRACKED_VGT_PRIMITIVEID_EN, /* GFX6-11 */
   SI_TRACKED_CB_DCC_CONTROL, /* GFX8-11 */
   SI_TRACKED_DB_STENCIL_READ_MASK, /* GFX12+ */
   SI_TRACKED_DB_STENCIL_WRITE_MASK, /* GFX12+ */
   SI_TRACKED_PA_SC_HISZ_CONTROL, /* GFX12+ */
   SI_TRACKED_PA_SC_LINE_STIPPLE_RESET, /* GFX12+ */

   SI_NUM_TRACKED_CONTEXT_REGS,
   SI_FIRST_TRACKED_OTHER_REG = SI_NUM_TRACKED_CONTEXT_REGS,

   /* SH and UCONFIG registers. */
   SI_TRACKED_GE_PC_ALLOC = SI_FIRST_TRACKED_OTHER_REG, /* GFX10-11 */
   SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS, /* GFX7-11 */
   SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS, /* GFX10+ */
   SI_TRACKED_VGT_GS_OUT_PRIM_TYPE_UCONFIG, /* GFX11+ */
   SI_TRACKED_SPI_SHADER_GS_OUT_CONFIG_PS, /* GFX12+ */
   SI_TRACKED_VGT_PRIMITIVEID_EN_UCONFIG, /* GFX12+ */

   SI_TRACKED_IA_MULTI_VGT_PARAM_UCONFIG, /* GFX9 only */
   SI_TRACKED_GE_CNTL = SI_TRACKED_IA_MULTI_VGT_PARAM_UCONFIG, /* GFX10+ */

   SI_TRACKED_SPI_SHADER_PGM_RSRC2_HS, /* GFX9+ (not tracked on previous chips) */

   /* 3 consecutive registers. */
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_LAYOUT,
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_ADDR,
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__VS_STATE_BITS, /* GFX6-8 */

   SI_TRACKED_SPI_SHADER_USER_DATA_LS__BASE_VERTEX,
   SI_TRACKED_SPI_SHADER_USER_DATA_LS__DRAWID,
   SI_TRACKED_SPI_SHADER_USER_DATA_LS__START_INSTANCE,

   SI_TRACKED_SPI_SHADER_USER_DATA_ES__BASE_VERTEX,
   SI_TRACKED_SPI_SHADER_USER_DATA_ES__DRAWID,
   SI_TRACKED_SPI_SHADER_USER_DATA_ES__START_INSTANCE,

   SI_TRACKED_SPI_SHADER_USER_DATA_VS__BASE_VERTEX, /* GFX6-10 */
   SI_TRACKED_SPI_SHADER_USER_DATA_VS__DRAWID, /* GFX6-10 */
   SI_TRACKED_SPI_SHADER_USER_DATA_VS__START_INSTANCE, /* GFX6-10 */

   SI_TRACKED_SPI_SHADER_USER_DATA_PS__ALPHA_REF,

   SI_TRACKED_COMPUTE_RESOURCE_LIMITS,
   SI_TRACKED_COMPUTE_DISPATCH_INTERLEAVE, /* GFX12+ (not tracked on previous chips) */
   SI_TRACKED_COMPUTE_NUM_THREAD_X,
   SI_TRACKED_COMPUTE_NUM_THREAD_Y,
   SI_TRACKED_COMPUTE_NUM_THREAD_Z,
   SI_TRACKED_COMPUTE_TMPRING_SIZE,
   SI_TRACKED_COMPUTE_PGM_RSRC3, /* GFX11+ */

   /* 2 consecutive registers. */
   SI_TRACKED_COMPUTE_PGM_RSRC1,
   SI_TRACKED_COMPUTE_PGM_RSRC2,

   /* 2 consecutive registers. */
   SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_LO, /* GFX11+ */
   SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_HI, /* GFX11+ */

   SI_NUM_ALL_TRACKED_REGS,
};

/* For 3 draw constants: BaseVertex, DrawID, StartInstance */
#define BASEVERTEX_MASK 0x1
#define DRAWID_MASK 0x2
#define STARTINSTANCE_MASK 0x4
#define BASEVERTEX_DRAWID_MASK (BASEVERTEX_MASK | DRAWID_MASK)
#define BASEVERTEX_DRAWID_STARTINSTANCE_MASK (BASEVERTEX_MASK | DRAWID_MASK | STARTINSTANCE_MASK)

struct si_tracked_regs {
   BITSET_DECLARE(reg_saved_mask, SI_NUM_ALL_TRACKED_REGS);
   uint32_t reg_value[SI_NUM_ALL_TRACKED_REGS];
   uint32_t spi_ps_input_cntl[32];
};
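
/* Illustrative sketch (not an API declared here) of how the tracking state is
 * typically consulted: a register write is skipped when the register is already
 * known to hold the desired value, e.g.
 *
 *    if (!BITSET_TEST(regs->reg_saved_mask, SI_TRACKED_DB_SHADER_CONTROL) ||
 *        regs->reg_value[SI_TRACKED_DB_SHADER_CONTROL] != value) {
 *       ... emit the register ...
 *       BITSET_SET(regs->reg_saved_mask, SI_TRACKED_DB_SHADER_CONTROL);
 *       regs->reg_value[SI_TRACKED_DB_SHADER_CONTROL] = value;
 *    }
 *
 * where "regs" is a hypothetical pointer to a struct si_tracked_regs; the real
 * emit helpers live in the .c files.
 */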

/* Private read-write buffer slots. */
enum
{
   SI_VS_STREAMOUT_BUF0,
   SI_VS_STREAMOUT_BUF1,
   SI_VS_STREAMOUT_BUF2,
   SI_VS_STREAMOUT_BUF3,

   /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
   SI_PS_IMAGE_COLORBUF0,
   SI_PS_IMAGE_COLORBUF0_HI,
   SI_PS_IMAGE_COLORBUF0_FMASK, /* gfx6-10 */
   SI_PS_IMAGE_COLORBUF0_FMASK_HI, /* gfx6-10 */

   /* Internal constant buffers. */
   SI_HS_CONST_DEFAULT_TESS_LEVELS,
   SI_VS_CONST_INSTANCE_DIVISORS,
   SI_VS_CONST_CLIP_PLANES,
   SI_PS_CONST_POLY_STIPPLE,
   SI_PS_CONST_SAMPLE_POSITIONS,

   SI_RING_ESGS, /* gfx6-8 */
   SI_RING_GSVS, /* gfx6-10 */
   SI_GS_QUERY_EMULATED_COUNTERS_BUF, /* gfx10+ */
   SI_RING_SHADER_LOG,

   SI_NUM_INTERNAL_BINDINGS,

   /* Aliases to reuse slots that are unused on other generations. */
   SI_GS_QUERY_BUF = SI_RING_ESGS, /* gfx10+ */
   SI_STREAMOUT_STATE_BUF = SI_RING_GSVS, /* gfx12+ */
};

/* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
 * are contiguous:
 *
 *  0 - rw buffers
 *  1 - vertex const and shader buffers
 *  2 - vertex samplers and images
 *  3 - fragment const and shader buffers
 *  ...
 * 11 - compute const and shader buffers
 * 12 - compute samplers and images
 */
enum
{
   SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
   SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
   SI_NUM_SHADER_DESCS,
};

#define SI_DESCS_INTERNAL 0
#define SI_DESCS_FIRST_SHADER 1
#define SI_DESCS_FIRST_COMPUTE (SI_DESCS_FIRST_SHADER + PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS (SI_DESCS_FIRST_SHADER + SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)

#define SI_DESCS_SHADER_MASK(name) \
   u_bit_consecutive(SI_DESCS_FIRST_SHADER + PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
                     SI_NUM_SHADER_DESCS)

static inline unsigned si_const_and_shader_buffer_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}

static inline unsigned si_sampler_and_image_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
}
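
/* Worked example, following the layout comment above: for the fragment shader
 * (PIPE_SHADER_FRAGMENT == 1),
 *
 *    si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_FRAGMENT)
 *       == SI_DESCS_FIRST_SHADER + 1 * SI_NUM_SHADER_DESCS + 0 == 3
 *
 * which is the "3 - fragment const and shader buffers" entry in that table.
 */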

/* This represents descriptors in memory, such as buffer resources,
 * image resources, and sampler states.
 */
struct si_descriptors {
   /* The list of descriptors in malloc'd memory. */
   uint32_t *list;
   /* The list in mapped GPU memory. */
   uint32_t *gpu_list;

   /* The buffer where the descriptors have been uploaded. */
   struct si_resource *buffer;
   uint64_t gpu_address;

   /* The maximum number of descriptors. */
   uint32_t num_elements;

   /* Slots that are used by currently-bound shaders.
    * It determines which slots are uploaded.
    */
   uint32_t first_active_slot;
   uint32_t num_active_slots;

   /* The SH register offset relative to USER_DATA*_0 where the pointer
    * to the descriptor array will be stored. */
   short shader_userdata_offset;
   /* The size of one descriptor. */
   uint8_t element_dw_size;
   /* If there is only one slot enabled, bind it directly instead of
    * uploading descriptors. -1 if disabled. */
   signed char slot_index_to_bind_directly;
};

struct si_buffer_resources {
   struct pipe_resource **buffers; /* this has num_buffers elements */
   unsigned *offsets; /* this has num_buffers elements */

   unsigned priority;
   unsigned priority_constbuf;

   /* The i-th bit is set if that element is enabled (non-NULL resource). */
   uint64_t enabled_mask;
   uint64_t writable_mask;
};

#define si_pm4_state_changed(sctx, member) \
   ((sctx)->queued.named.member != (sctx)->emitted.named.member)

#define si_pm4_state_enabled_and_changed(sctx, member) \
   ((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))

#define si_pm4_bind_state(sctx, member, value) \
   do { \
      (sctx)->queued.named.member = (value); \
      if (value && value != (sctx)->emitted.named.member) \
         (sctx)->dirty_atoms |= SI_STATE_BIT(member); \
      else \
         (sctx)->dirty_atoms &= ~SI_STATE_BIT(member); \
   } while (0)
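
/* Usage sketch (illustrative): binding a state object only marks its pm4 atom
 * dirty if it differs from what was last emitted, e.g.
 *
 *    si_pm4_bind_state(sctx, blend, new_blend);   // "new_blend" is hypothetical
 *    if (si_pm4_state_enabled_and_changed(sctx, blend))
 *       ... the blend state will be re-emitted before the next draw ...
 */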

/* si_descriptors.c */
void si_get_inline_uniform_state(union si_shader_key *key, enum pipe_shader_type shader,
                                 bool *inline_uniforms, uint32_t **inlined_values);
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture *tex,
                                    const struct legacy_surf_level *base_level_info,
                                    unsigned base_level, unsigned first_level, unsigned block_width,
                                    /* restrict decreases overhead of si_set_sampler_view_desc ~8x. */
                                    bool is_stencil, uint16_t access, uint32_t * restrict state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_force_disable_ps_colorbuf0_slot(struct si_context *sctx);
void si_invalidate_inlinable_uniforms(struct si_context *sctx, enum pipe_shader_type shader);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader, uint slot,
                                 struct pipe_constant_buffer *cbuf);
void si_set_shader_buffers(struct pipe_context *ctx, enum pipe_shader_type shader,
                           unsigned start_slot, unsigned count,
                           const struct pipe_shader_buffer *sbuffers,
                           unsigned writable_bitmask, bool internal_blit);
void si_get_shader_buffers(struct si_context *sctx, enum pipe_shader_type shader, uint start_slot,
                           uint count, struct pipe_shader_buffer *sbuf);
void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records, bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset);
void si_init_all_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_compute_resources_add_all_to_bo_list(struct si_context *sctx);
bool si_gfx_resources_check_encrypted(struct si_context *sctx);
bool si_compute_resources_check_encrypted(struct si_context *sctx);
void si_shader_pointers_mark_dirty(struct si_context *sctx);
void si_add_all_descriptors_to_bo_list(struct si_context *sctx);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx, unsigned index);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_internal_const_buffer(struct si_context *sctx, uint slot,
                                  const struct pipe_constant_buffer *input);
void si_set_internal_shader_buffer(struct si_context *sctx, uint slot,
                                   const struct pipe_shader_buffer *sbuffer);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
                               uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx, struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                                                  unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf);
/* si_state.c */
void si_make_texture_descriptor(struct si_screen *screen, struct si_texture *tex,
                                bool sampler, enum pipe_texture_target target,
                                enum pipe_format pipe_format,
                                const unsigned char state_swizzle[4], unsigned first_level,
                                unsigned last_level, unsigned first_layer,
                                unsigned last_layer, unsigned width, unsigned height,
                                unsigned depth, bool get_bo_metadata,
                                uint32_t *state, uint32_t *fmask_state);
void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void si_init_gfx_preamble_state(struct si_context *sctx);
void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
                               enum pipe_format format, unsigned offset, unsigned num_elements,
                               uint32_t *state);
void si_mark_display_dcc_dirty(struct si_context *sctx, struct si_texture *tex);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st);

struct si_fast_udiv_info32 {
   unsigned multiplier; /* the "magic number" multiplier */
   unsigned pre_shift;  /* shift for the dividend before multiplying */
   unsigned post_shift; /* shift for the dividend after multiplying */
   int increment;       /* 0 or 1; if set then increment the numerator, using one of
                           the two strategies */
};

struct si_fast_udiv_info32 si_compute_fast_udiv_info32(uint32_t D, unsigned num_bits);
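
/* Sketch of how this kind of "magic number" division info is typically applied
 * (an assumption for illustration, not an API declared in this header): the
 * quotient n / D is computed with one 32x32->64 multiply plus shifts, roughly
 *
 *    n' = (n >> pre_shift) + increment;   // only one of the two adjustments is used
 *    q  = ((uint64_t)n' * multiplier >> 32) >> post_shift;
 *
 * si_compute_fast_udiv_info32(D, num_bits) derives the constants for a given D.
 */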

/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx, unsigned index);

/* si_state_shaders.cpp */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned wave_size, unsigned char ir_sha1_cache_key[20]);
bool si_shader_cache_load_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader);
void si_shader_cache_insert_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader, bool insert_into_disk_cache);
bool si_shader_mem_ordered(struct si_shader *shader);
void si_init_screen_live_shader_cache(struct si_screen *sscreen);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_schedule_initial_compile(struct si_context *sctx, gl_shader_stage stage,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state, void *job,
                                 util_queue_execute_func execute);
void si_get_active_slot_masks(struct si_screen *sscreen, const struct si_shader_info *info,
                              uint64_t *const_and_shader_buffers, uint64_t *samplers_and_images);
int si_shader_select(struct pipe_context *ctx, struct si_shader_ctx_state *state);
void si_vs_key_update_inputs(struct si_context *sctx);
void si_update_ps_inputs_read_or_disabled(struct si_context *sctx);
void si_update_vrs_flat_shading(struct si_context *sctx);
unsigned si_get_input_prim(const struct si_shader_selector *gs, const union si_shader_key *key,
                           bool return_unknown);
unsigned si_get_num_vertices_per_output_prim(struct si_shader *shader);
bool si_update_ngg(struct si_context *sctx);
void si_vs_ps_key_update_rast_prim_smooth_stipple(struct si_context *sctx);
void si_ps_key_update_framebuffer(struct si_context *sctx);
void si_ps_key_update_framebuffer_blend_dsa_rasterizer(struct si_context *sctx);
void si_ps_key_update_rasterizer(struct si_context *sctx);
void si_ps_key_update_dsa(struct si_context *sctx);
void si_ps_key_update_sample_shading(struct si_context *sctx);
void si_ps_key_update_framebuffer_rasterizer_sample_shading(struct si_context *sctx);
void si_init_tess_factor_ring(struct si_context *sctx);
bool si_update_gs_ring_buffers(struct si_context *sctx);
bool si_update_spi_tmpring_size(struct si_context *sctx, unsigned bytes);
bool si_set_tcs_to_fixed_func_shader(struct si_context *sctx);
void si_update_tess_io_layout_state(struct si_context *sctx);

/* si_state_draw.cpp */
void si_cp_dma_prefetch(struct si_context *sctx, struct pipe_resource *buf,
                        unsigned offset, unsigned size);
void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex_elements *velems,
                                     const struct pipe_vertex_buffer *vb, unsigned element_index,
                                     uint32_t *out);
void si_emit_buffered_compute_sh_regs(struct si_context *sctx);
void si_init_draw_functions_GFX6(struct si_context *sctx);
void si_init_draw_functions_GFX7(struct si_context *sctx);
void si_init_draw_functions_GFX8(struct si_context *sctx);
void si_init_draw_functions_GFX9(struct si_context *sctx);
void si_init_draw_functions_GFX10(struct si_context *sctx);
void si_init_draw_functions_GFX10_3(struct si_context *sctx);
void si_init_draw_functions_GFX11(struct si_context *sctx);
void si_init_draw_functions_GFX11_5(struct si_context *sctx);
void si_init_draw_functions_GFX12(struct si_context *sctx);

/* si_state_msaa.c */
extern unsigned si_msaa_max_distance[5];
void si_init_msaa_functions(struct si_context *sctx);

/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx, unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);

static inline unsigned si_get_constbuf_slot(unsigned slot)
{
   /* Constant buffers are in slots [32..47], ascending */
   return SI_NUM_SHADER_BUFFERS + slot;
}

static inline unsigned si_get_shaderbuf_slot(unsigned slot)
{
   /* shader buffers are in slots [31..0], descending */
   return SI_NUM_SHADER_BUFFERS - 1 - slot;
}
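
/* Example of the combined layout (illustrative): with SI_NUM_SHADER_BUFFERS == 32,
 * si_get_shaderbuf_slot(0) == 31 and si_get_constbuf_slot(0) == 32, so shader
 * buffers grow downward from slot 31 while constant buffers grow upward from
 * slot 32 within the same combined descriptor array.
 */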

static inline unsigned si_get_sampler_slot(unsigned slot)
{
   /* 32 samplers are in sampler slots [16..47], 16 dw per slot, ascending */
   /* those are equivalent to image slots [32..95], 8 dw per slot, ascending */
   return SI_NUM_IMAGE_SLOTS / 2 + slot;
}

static inline unsigned si_get_image_slot(unsigned slot)
{
   /* image slots are in [31..0] (sampler slots [15..0]), descending */
   /* images are in slots [31..16], while FMASKs are in slots [15..0] */
   return SI_NUM_IMAGE_SLOTS - 1 - slot;
}

static inline unsigned si_clamp_texture_texel_count(unsigned max_texel_buffer_elements,
                                                    enum pipe_format format,
                                                    uint32_t size)
{
   /* The spec says:
    *    The number of texels in the texel array is then clamped to the value of
    *    the implementation-dependent limit GL_MAX_TEXTURE_BUFFER_SIZE.
    *
    * So compute the number of texels and clamp it to GL_MAX_TEXTURE_BUFFER_SIZE
    * (i.e. max_texel_buffer_elements).
    */
   unsigned stride = util_format_get_blocksize(format);
   return MIN2(max_texel_buffer_elements, size / stride);
}
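
/* Worked example (illustrative): PIPE_FORMAT_R32G32B32A32_FLOAT has a 16-byte
 * block size, so a 1000-byte buffer holds 1000 / 16 = 62 texels, which is then
 * clamped to max_texel_buffer_elements.
 */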

#ifdef __cplusplus
}
#endif

#endif