/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef RADV_CMD_BUFFER_H
#define RADV_CMD_BUFFER_H

#include "ac_vcn.h"

#include "vk_command_buffer.h"

#include "radv_device.h"
#include "radv_physical_device.h"
#include "radv_pipeline_graphics.h"
#include "radv_video.h"

extern const struct vk_command_buffer_ops radv_cmd_buffer_ops;

enum radv_dynamic_state_bits {
   RADV_DYNAMIC_VIEWPORT = 1ull << 0,
   RADV_DYNAMIC_SCISSOR = 1ull << 1,
   RADV_DYNAMIC_LINE_WIDTH = 1ull << 2,
   RADV_DYNAMIC_DEPTH_BIAS = 1ull << 3,
   RADV_DYNAMIC_BLEND_CONSTANTS = 1ull << 4,
   RADV_DYNAMIC_DEPTH_BOUNDS = 1ull << 5,
   RADV_DYNAMIC_STENCIL_COMPARE_MASK = 1ull << 6,
   RADV_DYNAMIC_STENCIL_WRITE_MASK = 1ull << 7,
   RADV_DYNAMIC_STENCIL_REFERENCE = 1ull << 8,
   RADV_DYNAMIC_DISCARD_RECTANGLE = 1ull << 9,
   RADV_DYNAMIC_SAMPLE_LOCATIONS = 1ull << 10,
   RADV_DYNAMIC_LINE_STIPPLE = 1ull << 11,
   RADV_DYNAMIC_CULL_MODE = 1ull << 12,
   RADV_DYNAMIC_FRONT_FACE = 1ull << 13,
   RADV_DYNAMIC_PRIMITIVE_TOPOLOGY = 1ull << 14,
   RADV_DYNAMIC_DEPTH_TEST_ENABLE = 1ull << 15,
   RADV_DYNAMIC_DEPTH_WRITE_ENABLE = 1ull << 16,
   RADV_DYNAMIC_DEPTH_COMPARE_OP = 1ull << 17,
   RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE = 1ull << 18,
   RADV_DYNAMIC_STENCIL_TEST_ENABLE = 1ull << 19,
   RADV_DYNAMIC_STENCIL_OP = 1ull << 20,
   RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE = 1ull << 21,
   RADV_DYNAMIC_FRAGMENT_SHADING_RATE = 1ull << 22,
   RADV_DYNAMIC_PATCH_CONTROL_POINTS = 1ull << 23,
   RADV_DYNAMIC_RASTERIZER_DISCARD_ENABLE = 1ull << 24,
   RADV_DYNAMIC_DEPTH_BIAS_ENABLE = 1ull << 25,
   RADV_DYNAMIC_LOGIC_OP = 1ull << 26,
   RADV_DYNAMIC_PRIMITIVE_RESTART_ENABLE = 1ull << 27,
   RADV_DYNAMIC_COLOR_WRITE_ENABLE = 1ull << 28,
   RADV_DYNAMIC_VERTEX_INPUT = 1ull << 29,
   RADV_DYNAMIC_POLYGON_MODE = 1ull << 30,
   RADV_DYNAMIC_TESS_DOMAIN_ORIGIN = 1ull << 31,
   RADV_DYNAMIC_LOGIC_OP_ENABLE = 1ull << 32,
   RADV_DYNAMIC_LINE_STIPPLE_ENABLE = 1ull << 33,
   RADV_DYNAMIC_ALPHA_TO_COVERAGE_ENABLE = 1ull << 34,
   RADV_DYNAMIC_SAMPLE_MASK = 1ull << 35,
   RADV_DYNAMIC_DEPTH_CLIP_ENABLE = 1ull << 36,
   RADV_DYNAMIC_CONSERVATIVE_RAST_MODE = 1ull << 37,
   RADV_DYNAMIC_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE = 1ull << 38,
   RADV_DYNAMIC_PROVOKING_VERTEX_MODE = 1ull << 39,
   RADV_DYNAMIC_DEPTH_CLAMP_ENABLE = 1ull << 40,
   RADV_DYNAMIC_COLOR_WRITE_MASK = 1ull << 41,
   RADV_DYNAMIC_COLOR_BLEND_ENABLE = 1ull << 42,
   RADV_DYNAMIC_RASTERIZATION_SAMPLES = 1ull << 43,
   RADV_DYNAMIC_LINE_RASTERIZATION_MODE = 1ull << 44,
   RADV_DYNAMIC_COLOR_BLEND_EQUATION = 1ull << 45,
   RADV_DYNAMIC_DISCARD_RECTANGLE_ENABLE = 1ull << 46,
   RADV_DYNAMIC_DISCARD_RECTANGLE_MODE = 1ull << 47,
   RADV_DYNAMIC_ATTACHMENT_FEEDBACK_LOOP_ENABLE = 1ull << 48,
   RADV_DYNAMIC_SAMPLE_LOCATIONS_ENABLE = 1ull << 49,
   RADV_DYNAMIC_ALPHA_TO_ONE_ENABLE = 1ull << 50,
   RADV_DYNAMIC_COLOR_ATTACHMENT_MAP = 1ull << 51,
   RADV_DYNAMIC_INPUT_ATTACHMENT_MAP = 1ull << 52,
   RADV_DYNAMIC_DEPTH_CLAMP_RANGE = 1ull << 53,
   RADV_DYNAMIC_ALL = (1ull << 54) - 1,
};

enum radv_cmd_dirty_bits {
   RADV_CMD_DIRTY_PIPELINE = 1ull << 0,
   RADV_CMD_DIRTY_INDEX_BUFFER = 1ull << 1,
   RADV_CMD_DIRTY_FRAMEBUFFER = 1ull << 2,
   RADV_CMD_DIRTY_VERTEX_BUFFER = 1ull << 3,
   RADV_CMD_DIRTY_STREAMOUT_BUFFER = 1ull << 4,
   RADV_CMD_DIRTY_GUARDBAND = 1ull << 5,
   RADV_CMD_DIRTY_RBPLUS = 1ull << 6,
   RADV_CMD_DIRTY_OCCLUSION_QUERY = 1ull << 7,
   RADV_CMD_DIRTY_DB_SHADER_CONTROL = 1ull << 8,
   RADV_CMD_DIRTY_STREAMOUT_ENABLE = 1ull << 9,
   RADV_CMD_DIRTY_GRAPHICS_SHADERS = 1ull << 10,
   RADV_CMD_DIRTY_FRAGMENT_OUTPUT = 1ull << 11,
   RADV_CMD_DIRTY_FBFETCH_OUTPUT = 1ull << 12,
   RADV_CMD_DIRTY_FS_STATE = 1ull << 13,
   RADV_CMD_DIRTY_NGG_STATE = 1ull << 14,
   RADV_CMD_DIRTY_TASK_STATE = 1ull << 15,
   RADV_CMD_DIRTY_DEPTH_STENCIL_STATE = 1ull << 16,
   RADV_CMD_DIRTY_RASTER_STATE = 1ull << 17,
   RADV_CMD_DIRTY_ALL = (1ull << 18) - 1,

   RADV_CMD_DIRTY_SHADER_QUERY = RADV_CMD_DIRTY_NGG_STATE | RADV_CMD_DIRTY_TASK_STATE,
};

enum radv_cmd_flush_bits {
   /* Instruction cache. */
   RADV_CMD_FLAG_INV_ICACHE = 1 << 0,
   /* Scalar L1 cache. */
   RADV_CMD_FLAG_INV_SCACHE = 1 << 1,
   /* Vector L1 cache. */
   RADV_CMD_FLAG_INV_VCACHE = 1 << 2,
   /* L2 cache + L2 metadata cache writeback & invalidate.
    * GFX6-8: Used by shaders only. GFX9-10: Used by everything. */
   RADV_CMD_FLAG_INV_L2 = 1 << 3,
   /* L2 writeback (write dirty L2 lines to memory for non-L2 clients).
    * Only used for coherency with non-L2 clients like CB, DB, CP on GFX6-8.
    * GFX6-7 will do complete invalidation, because the writeback is unsupported. */
   RADV_CMD_FLAG_WB_L2 = 1 << 4,
   /* Invalidate the metadata cache. To be used when the DCC/HTILE metadata
    * changed and we want to read an image from shaders. */
   RADV_CMD_FLAG_INV_L2_METADATA = 1 << 5,
   /* Framebuffer caches */
   RADV_CMD_FLAG_FLUSH_AND_INV_CB_META = 1 << 6,
   RADV_CMD_FLAG_FLUSH_AND_INV_DB_META = 1 << 7,
   RADV_CMD_FLAG_FLUSH_AND_INV_DB = 1 << 8,
   RADV_CMD_FLAG_FLUSH_AND_INV_CB = 1 << 9,
   /* Engine synchronization. */
   RADV_CMD_FLAG_VS_PARTIAL_FLUSH = 1 << 10,
   RADV_CMD_FLAG_PS_PARTIAL_FLUSH = 1 << 11,
   RADV_CMD_FLAG_CS_PARTIAL_FLUSH = 1 << 12,
   RADV_CMD_FLAG_VGT_FLUSH = 1 << 13,
   /* Pipeline query controls. */
   RADV_CMD_FLAG_START_PIPELINE_STATS = 1 << 14,
   RADV_CMD_FLAG_STOP_PIPELINE_STATS = 1 << 15,
   RADV_CMD_FLAG_VGT_STREAMOUT_SYNC = 1 << 16,

   RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER = (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
                                         RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META),

   RADV_CMD_FLUSH_ALL_COMPUTE = (RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE | RADV_CMD_FLAG_INV_VCACHE |
                                 RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_WB_L2 | RADV_CMD_FLAG_CS_PARTIAL_FLUSH),
};
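
/*
 * Example (illustrative only): a barrier that makes CB/DB output visible to
 * shader reads typically accumulates flags along the lines of
 *
 *    flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
 *                  RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2_METADATA;
 *
 * which are consumed later by radv_emit_cache_flush(). The exact combination
 * depends on the access masks and GPU generation; see radv_src_access_flush()
 * and radv_dst_access_flush() below.
 */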

struct radv_vertex_binding {
   VkDeviceSize offset;
   VkDeviceSize size;
   VkDeviceSize stride;
};

struct radv_streamout_binding {
   struct radv_buffer *buffer;
   VkDeviceSize offset;
   VkDeviceSize size;
};

struct radv_streamout_state {
   /* Mask of bound streamout buffers. */
   uint8_t enabled_mask;

   /* State of VGT_STRMOUT_BUFFER_(CONFIG|END) */
   uint32_t hw_enabled_mask;

   /* State of VGT_STRMOUT_(CONFIG|EN) */
   bool streamout_enabled;

   /* VA of the streamout state (GFX12+). */
   uint64_t state_va;
};

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct radv_attachment {
   VkFormat format;
   struct radv_image_view *iview;
   VkImageLayout layout;
   VkImageLayout stencil_layout;

   union {
      struct radv_color_buffer_info cb;
      struct radv_ds_buffer_info ds;
   };

   struct radv_image_view *resolve_iview;
   VkResolveModeFlagBits resolve_mode;
   VkResolveModeFlagBits stencil_resolve_mode;
   VkImageLayout resolve_layout;
   VkImageLayout stencil_resolve_layout;
};

struct radv_rendering_state {
   bool active;
   bool has_image_views;
   bool has_input_attachment_no_concurrent_writes;
   VkRect2D area;
   uint32_t layer_count;
   uint32_t view_mask;
   uint32_t color_samples;
   uint32_t ds_samples;
   uint32_t max_samples;
   struct radv_sample_locations_state sample_locations;
   uint32_t color_att_count;
   struct radv_attachment color_att[MAX_RTS];
   struct radv_attachment ds_att;
   VkImageAspectFlags ds_att_aspects;
   struct radv_attachment vrs_att;
   VkExtent2D vrs_texel_size;
};

struct radv_descriptor_state {
   struct radv_descriptor_set *sets[MAX_SETS];
   uint32_t dirty;
   uint32_t valid;
   struct radv_push_descriptor_set push_set;
   uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
   uint64_t descriptor_buffers[MAX_SETS];
   bool need_indirect_descriptor_sets;
   uint64_t indirect_descriptor_sets_va;
};

struct radv_push_constant_state {
   uint32_t size;
   uint32_t dynamic_offset_count;
};

enum rgp_flush_bits {
   RGP_FLUSH_WAIT_ON_EOP_TS = 0x1,
   RGP_FLUSH_VS_PARTIAL_FLUSH = 0x2,
   RGP_FLUSH_PS_PARTIAL_FLUSH = 0x4,
   RGP_FLUSH_CS_PARTIAL_FLUSH = 0x8,
   RGP_FLUSH_PFP_SYNC_ME = 0x10,
   RGP_FLUSH_SYNC_CP_DMA = 0x20,
   RGP_FLUSH_INVAL_VMEM_L0 = 0x40,
   RGP_FLUSH_INVAL_ICACHE = 0x80,
   RGP_FLUSH_INVAL_SMEM_L0 = 0x100,
   RGP_FLUSH_FLUSH_L2 = 0x200,
   RGP_FLUSH_INVAL_L2 = 0x400,
   RGP_FLUSH_FLUSH_CB = 0x800,
   RGP_FLUSH_INVAL_CB = 0x1000,
   RGP_FLUSH_FLUSH_DB = 0x2000,
   RGP_FLUSH_INVAL_DB = 0x4000,
   RGP_FLUSH_INVAL_L1 = 0x8000,
};

enum radv_tracked_reg {
   RADV_TRACKED_DB_COUNT_CONTROL,
   RADV_TRACKED_DB_SHADER_CONTROL,
   RADV_TRACKED_DB_VRS_OVERRIDE_CNTL,

   /* 2 consecutive registers */
   RADV_TRACKED_DB_DEPTH_BOUNDS_MIN,
   RADV_TRACKED_DB_DEPTH_BOUNDS_MAX,

   /* 2 consecutive registers */
   RADV_TRACKED_DB_STENCILREFMASK,    /* GFX6-11.5 */
   RADV_TRACKED_DB_STENCILREFMASK_BF, /* GFX6-11.5 */

   /* 2 consecutive registers */
   RADV_TRACKED_DB_STENCIL_READ_MASK,  /* GFX12 */
   RADV_TRACKED_DB_STENCIL_WRITE_MASK, /* GFX12 */

   RADV_TRACKED_DB_DEPTH_CONTROL,
   RADV_TRACKED_DB_STENCIL_CONTROL,
   RADV_TRACKED_DB_STENCIL_REF, /* GFX12 */

   RADV_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
   RADV_TRACKED_GE_NGG_SUBGRP_CNTL,

   RADV_TRACKED_PA_CL_CLIP_CNTL,
   RADV_TRACKED_PA_CL_VRS_CNTL,
   RADV_TRACKED_PA_CL_VS_OUT_CNTL,

   RADV_TRACKED_PA_SC_BINNER_CNTL_0,
   RADV_TRACKED_PA_SC_SHADER_CONTROL,
   RADV_TRACKED_PA_SC_LINE_CNTL,
   RADV_TRACKED_PA_SC_LINE_STIPPLE,
   RADV_TRACKED_PA_SC_LINE_STIPPLE_RESET, /* GFX12 */

   /* 2 consecutive registers */
   RADV_TRACKED_SPI_PS_INPUT_ENA,
   RADV_TRACKED_SPI_PS_INPUT_ADDR,

   RADV_TRACKED_SPI_PS_IN_CONTROL,

   /* 2 consecutive registers */
   RADV_TRACKED_SPI_SHADER_IDX_FORMAT,
   RADV_TRACKED_SPI_SHADER_POS_FORMAT,

   RADV_TRACKED_SPI_SHADER_Z_FORMAT,
   RADV_TRACKED_SPI_VS_OUT_CONFIG,

   /* 3 consecutive registers */
   RADV_TRACKED_SX_PS_DOWNCONVERT,
   RADV_TRACKED_SX_BLEND_OPT_EPSILON,
   RADV_TRACKED_SX_BLEND_OPT_CONTROL,

   RADV_TRACKED_VGT_DRAW_PAYLOAD_CNTL,
   RADV_TRACKED_VGT_ESGS_RING_ITEMSIZE, /* GFX6-8 */
   RADV_TRACKED_VGT_GS_MODE,
   RADV_TRACKED_VGT_GS_INSTANCE_CNT,
   RADV_TRACKED_VGT_GS_ONCHIP_CNTL,
   RADV_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
   RADV_TRACKED_VGT_GS_MAX_VERT_OUT,
   RADV_TRACKED_VGT_GS_OUT_PRIM_TYPE,

   /* 4 consecutive registers */
   RADV_TRACKED_VGT_GS_VERT_ITEMSIZE,
   RADV_TRACKED_VGT_GS_VERT_ITEMSIZE_1,
   RADV_TRACKED_VGT_GS_VERT_ITEMSIZE_2,
   RADV_TRACKED_VGT_GS_VERT_ITEMSIZE_3,

   RADV_TRACKED_VGT_GSVS_RING_ITEMSIZE,

   /* 3 consecutive registers */
   RADV_TRACKED_VGT_GSVS_RING_OFFSET_1,
   RADV_TRACKED_VGT_GSVS_RING_OFFSET_2,
   RADV_TRACKED_VGT_GSVS_RING_OFFSET_3,

   RADV_TRACKED_VGT_MULTI_PRIM_IB_RESET_INDX, /* GFX6-7 */
   RADV_TRACKED_VGT_PRIMITIVEID_EN,
   RADV_TRACKED_VGT_REUSE_OFF,
   RADV_TRACKED_VGT_SHADER_STAGES_EN,
   RADV_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,

   RADV_TRACKED_PA_SU_LINE_CNTL,
   RADV_TRACKED_PA_SU_SC_MODE_CNTL,

   RADV_NUM_ALL_TRACKED_REGS,
};

struct radv_tracked_regs {
   BITSET_DECLARE(reg_saved_mask, RADV_NUM_ALL_TRACKED_REGS);
   uint32_t reg_value[RADV_NUM_ALL_TRACKED_REGS];
   uint32_t spi_ps_input_cntl[32];
};
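
/*
 * Illustrative sketch only (not part of the RADV API): the tracked-register
 * arrays above exist so that redundant context-register writes can be
 * skipped. A hypothetical check could look like the helper below, which
 * reports whether a register actually needs to be (re)emitted and records
 * the new value when it does.
 */
static inline bool
radv_tracked_reg_needs_update_sketch(struct radv_tracked_regs *regs, enum radv_tracked_reg reg, uint32_t value)
{
   /* Hypothetical helper: if the register is already saved with the same
    * value, the emission can be skipped. */
   if (BITSET_TEST(regs->reg_saved_mask, reg) && regs->reg_value[reg] == value)
      return false;

   BITSET_SET(regs->reg_saved_mask, reg);
   regs->reg_value[reg] = value;
   return true;
}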

struct radv_cmd_state {
   /* Vertex descriptors */
   uint64_t vb_va;
   unsigned vb_size;

   bool predicating;
   uint64_t dirty_dynamic;
   uint32_t dirty;

   VkShaderStageFlags active_stages;
   struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES];
   struct radv_shader *gs_copy_shader;
   struct radv_shader *last_vgt_shader;
   struct radv_shader *rt_prolog;

   struct radv_shader_object *shader_objs[MESA_VULKAN_SHADER_STAGES];

   uint32_t prefetch_L2_mask;

   struct radv_graphics_pipeline *graphics_pipeline;
   struct radv_graphics_pipeline *emitted_graphics_pipeline;
   struct radv_compute_pipeline *compute_pipeline;
   struct radv_compute_pipeline *emitted_compute_pipeline;
   struct radv_ray_tracing_pipeline *rt_pipeline; /* emitted = emitted_compute_pipeline */
   struct radv_dynamic_state dynamic;
   struct radv_vertex_input_state vertex_input;
   struct radv_streamout_state streamout;

   struct radv_rendering_state render;

   /* Index buffer */
   uint32_t index_type;
   uint32_t max_index_count;
   uint64_t index_va;
   int32_t last_index_type;

   enum radv_cmd_flush_bits flush_bits;
   unsigned active_occlusion_queries;
   bool perfect_occlusion_queries_enabled;
   unsigned active_pipeline_queries;
   unsigned active_emulated_pipeline_queries;
   unsigned active_pipeline_ace_queries; /* Task shader invocations query */
   unsigned active_prims_gen_queries;
   unsigned active_prims_xfb_queries;
   unsigned active_emulated_prims_gen_queries;
   unsigned active_emulated_prims_xfb_queries;
   uint32_t trace_id;
   uint32_t last_ia_multi_vgt_param;
   uint32_t last_ge_cntl;

   uint32_t last_num_instances;
   uint32_t last_first_instance;
   bool last_vertex_offset_valid;
   uint32_t last_vertex_offset;
   uint32_t last_drawid;
   uint32_t last_subpass_color_count;

   /* Whether CP DMA is busy/idle. */
   bool dma_is_busy;

   /* Whether any images that are not L2 coherent are dirty from the CB. */
   bool rb_noncoherent_dirty;

   /* Conditional rendering info. */
   uint8_t predication_op; /* 32-bit or 64-bit predicate value */
   int predication_type;   /* -1: disabled, 0: normal, 1: inverted */
   uint64_t predication_va;
   uint64_t mec_inv_pred_va;  /* For inverted predication when using MEC. */
   bool mec_inv_pred_emitted; /* To ensure we don't have to repeat inverting the VA. */

   /* Inheritance info. */
   VkQueryPipelineStatisticFlags inherited_pipeline_statistics;
   bool inherited_occlusion_queries;
   VkQueryControlFlags inherited_query_control_flags;

   bool context_roll_without_scissor_emitted;

   /* SQTT related state. */
   uint32_t current_event_type;
   uint32_t num_events;
   uint32_t num_layout_transitions;
   bool in_barrier;
   bool pending_sqtt_barrier_end;
   enum rgp_flush_bits sqtt_flush_bits;

   /* NGG culling state. */
   bool has_nggc;

   /* Mesh shading state. */
   bool mesh_shading;

   uint8_t cb_mip[MAX_RTS];
   uint8_t ds_mip;

   /* Whether DRAW_{INDEX}_INDIRECT_{MULTI} is emitted. */
   bool uses_draw_indirect;

   uint32_t rt_stack_size;

   struct radv_shader_part *emitted_vs_prolog;
   uint32_t vbo_misaligned_mask;
   uint32_t vbo_unaligned_mask;
   uint32_t vbo_misaligned_mask_invalid;
   uint32_t vbo_bound_mask;

   struct radv_shader_part *emitted_ps_epilog;

   /* Per-vertex VRS state. */
   uint32_t last_vrs_rates;
   int32_t last_force_vrs_rates_offset;

   /* Whether to suspend streamout for internal driver operations. */
   bool suspend_streamout;

   /* Whether this command buffer uses performance counters. */
   bool uses_perf_counters;

   struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param;

   /* Tessellation info when patch control points is dynamic. */
   unsigned tess_num_patches;
   unsigned tess_lds_size;

   unsigned spi_shader_col_format;
   unsigned spi_shader_z_format;
   unsigned cb_shader_mask;

   struct radv_multisample_state ms;

   /* Custom blend mode for internal operations. */
   unsigned custom_blend_mode;
   unsigned db_render_control;

   unsigned last_cb_target_mask;

   unsigned rast_prim;

   uint32_t vtx_base_sgpr;
   uint8_t vtx_emit_num;
   bool uses_drawid;
   bool uses_baseinstance;

   bool uses_out_of_order_rast;
   bool uses_vrs;
   bool uses_vrs_attachment;
   bool uses_vrs_coarse_shading;
   bool uses_dynamic_patch_control_points;
   bool uses_fbfetch_output;

   uint64_t shader_query_buf_va; /* GFX12+ */
};

struct radv_enc_state {
   uint32_t task_size_offset;
   uint32_t total_task_size;
   unsigned shifter;
   unsigned bits_in_shifter;
   uint32_t num_zeros;
   uint32_t byte_index;
   unsigned bits_output;
   unsigned bits_size;
   bool emulation_prevention;
   bool is_even_frame;
   unsigned task_id;
   uint32_t copy_start_offset;
};

struct radv_cmd_buffer_upload {
   uint8_t *map;
   unsigned offset;
   uint64_t size;
   struct radeon_winsys_bo *upload_bo;
   struct list_head list;
};

struct radv_cmd_buffer {
   struct vk_command_buffer vk;

   struct radv_tracked_regs tracked_regs;

   VkCommandBufferUsageFlags usage_flags;
   struct radeon_cmdbuf *cs;
   struct radv_cmd_state state;
   struct radv_buffer *vertex_binding_buffers[MAX_VBS];
   struct radv_vertex_binding vertex_bindings[MAX_VBS];
   uint32_t used_vertex_bindings;
   struct radv_streamout_binding streamout_bindings[MAX_SO_BUFFERS];
   enum radv_queue_family qf;

   uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
   VkShaderStageFlags push_constant_stages;
   struct radv_descriptor_set_header meta_push_descriptors;

   struct radv_descriptor_state descriptors[MAX_BIND_POINTS];

   struct radv_push_constant_state push_constant_state[MAX_BIND_POINTS];

   uint64_t descriptor_buffers[MAX_SETS];

   struct radv_cmd_buffer_upload upload;

   uint32_t scratch_size_per_wave_needed;
   uint32_t scratch_waves_wanted;
   uint32_t compute_scratch_size_per_wave_needed;
   uint32_t compute_scratch_waves_wanted;
   uint32_t esgs_ring_size_needed;
   uint32_t gsvs_ring_size_needed;
   bool tess_rings_needed;
   bool task_rings_needed;
   bool mesh_scratch_ring_needed;
   bool gds_needed;    /* for GFX10 streamout and NGG GS queries */
   bool gds_oa_needed; /* for GFX10 streamout */
   bool sample_positions_needed;

   uint64_t gfx9_fence_va;
   uint32_t gfx9_fence_idx;
   uint64_t gfx9_eop_bug_va;

   struct set vs_prologs;
   struct set ps_epilogs;

   /**
    * Gang state.
    * Used when the command buffer needs work done on a different queue
    * (e.g. when a graphics command buffer needs compute work).
    * Currently only one follower is possible per command buffer.
    */
   struct {
      /** Follower command stream. */
      struct radeon_cmdbuf *cs;

      /** Flush bits for the follower cmdbuf. */
      enum radv_cmd_flush_bits flush_bits;

      /**
       * For synchronization between the follower and leader.
       * The value of these semaphores is incremented whenever we
       * encounter a barrier that affects the follower.
       *
       * DWORD 0: Leader to follower semaphore.
       *          The leader writes the value and the follower waits.
       * DWORD 1: Follower to leader semaphore.
       *          The follower writes the value and the leader waits.
       */
      struct {
         uint64_t va;                     /* Virtual address of the semaphore. */
         uint32_t leader_value;           /* Current value of the leader. */
         uint32_t emitted_leader_value;   /* Last value emitted by the leader. */
         uint32_t follower_value;         /* Current value of the follower. */
         uint32_t emitted_follower_value; /* Last value emitted by the follower. */
      } sem;
   } gang;
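
   /*
    * Illustrative sketch of how the gang semaphores above are intended to be
    * used (not literal driver code): when a barrier affects the follower, the
    * leader increments gang.sem.leader_value; before dependent follower work,
    * the new value is written to DWORD 0 at gang.sem.va (only if it differs
    * from gang.sem.emitted_leader_value) and the follower waits for that
    * memory location to reach it. The reverse direction uses DWORD 1 together
    * with gang.sem.follower_value / gang.sem.emitted_follower_value so the
    * leader can wait for follower completion.
    */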

   /**
    * Whether a query pool has been reset and we have to flush caches.
    */
   bool pending_reset_query;

   /**
    * Bitmask of pending active query flushes.
    */
   enum radv_cmd_flush_bits active_query_flush_bits;

   struct {
      struct radv_video_session *vid;
      struct radv_video_session_params *params;
      struct rvcn_sq_var sq;
      struct rvcn_decode_buffer_s *decode_buffer;
      struct radv_enc_state enc;
      uint64_t feedback_query_va;
   } video;

   struct {
      /* Temporary space for some transfer queue copy command workarounds. */
      struct radeon_winsys_bo *copy_temp;
   } transfer;

   uint64_t shader_upload_seq;

   uint32_t sqtt_cb_id;

   struct set *accel_struct_buffers;
   struct util_dynarray ray_history;
};

VK_DEFINE_HANDLE_CASTS(radv_cmd_buffer, vk.base, VkCommandBuffer, VK_OBJECT_TYPE_COMMAND_BUFFER)

static inline struct radv_device *
radv_cmd_buffer_device(const struct radv_cmd_buffer *cmd_buffer)
{
   return (struct radv_device *)cmd_buffer->vk.base.device;
}

ALWAYS_INLINE static bool
radv_is_streamout_enabled(struct radv_cmd_buffer *cmd_buffer)
{
   struct radv_streamout_state *so = &cmd_buffer->state.streamout;

   /* Streamout must be enabled for the PRIMITIVES_GENERATED query to work. */
   return (so->streamout_enabled || cmd_buffer->state.active_prims_gen_queries) && !cmd_buffer->state.suspend_streamout;
}

static inline unsigned
vk_to_bind_point(VkPipelineBindPoint bind_point)
{
   /* Graphics and compute map directly to their enum values (0 and 1); the
    * ray tracing bind point has a large extension-assigned value, so remap
    * it to slot 2. */
   return bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR ? 2 : bind_point;
}

static inline struct radv_descriptor_state *
radv_get_descriptors_state(struct radv_cmd_buffer *cmd_buffer, VkPipelineBindPoint bind_point)
{
   return &cmd_buffer->descriptors[vk_to_bind_point(bind_point)];
}

static inline const struct radv_push_constant_state *
radv_get_push_constants_state(const struct radv_cmd_buffer *cmd_buffer, VkPipelineBindPoint bind_point)
{
   return &cmd_buffer->push_constant_state[vk_to_bind_point(bind_point)];
}

static inline bool
radv_cmdbuf_has_stage(const struct radv_cmd_buffer *cmd_buffer, gl_shader_stage stage)
{
   return !!(cmd_buffer->state.active_stages & mesa_to_vk_shader_stage(stage));
}

static inline uint32_t
radv_get_num_pipeline_stat_queries(struct radv_cmd_buffer *cmd_buffer)
{
   /* SAMPLE_STREAMOUTSTATS also requires PIPELINESTAT_START to be enabled. */
   return cmd_buffer->state.active_pipeline_queries + cmd_buffer->state.active_prims_gen_queries +
          cmd_buffer->state.active_prims_xfb_queries;
}

static inline void
radv_emit_shader_pointer_head(struct radeon_cmdbuf *cs, unsigned sh_offset, unsigned pointer_count,
                              bool use_32bit_pointers)
{
   radeon_emit(cs, PKT3(PKT3_SET_SH_REG, pointer_count * (use_32bit_pointers ? 1 : 2), 0));
   radeon_emit(cs, (sh_offset - SI_SH_REG_OFFSET) >> 2);
}

static inline void
radv_emit_shader_pointer_body(const struct radv_device *device, struct radeon_cmdbuf *cs, uint64_t va,
                              bool use_32bit_pointers)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   radeon_emit(cs, va);

   if (use_32bit_pointers) {
      assert(va == 0 || (va >> 32) == pdev->info.address32_hi);
   } else {
      radeon_emit(cs, va >> 32);
   }
}

static inline void
radv_emit_shader_pointer(const struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t sh_offset, uint64_t va,
                         bool global)
{
   bool use_32bit_pointers = !global;

   radv_emit_shader_pointer_head(cs, sh_offset, 1, use_32bit_pointers);
   radv_emit_shader_pointer_body(device, cs, va, use_32bit_pointers);
}
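
/*
 * Usage sketch (illustrative, with hypothetical sh_offset/set_idx values; real
 * callers derive sh_offset from the shader's user-data SGPR layout): emitting
 * a 32-bit descriptor-set pointer into a user SGPR would look like
 *
 *    uint64_t va = radv_descriptor_get_va(descriptors_state, set_idx);
 *    radv_emit_shader_pointer(device, cs, sh_offset, va, false);
 *
 * Passing global == true emits both VA dwords instead of relying on the
 * assumption that the high 32 bits equal pdev->info.address32_hi.
 */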

bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer);

void radv_cmd_buffer_reset_rendering(struct radv_cmd_buffer *cmd_buffer);

bool radv_cmd_buffer_upload_alloc_aligned(struct radv_cmd_buffer *cmd_buffer, unsigned size, unsigned alignment,
                                          unsigned *out_offset, void **ptr);

bool radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer, unsigned size, unsigned *out_offset, void **ptr);

bool radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer, unsigned size, const void *data,
                                 unsigned *out_offset);

void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer);

void radv_cmd_buffer_annotate(struct radv_cmd_buffer *cmd_buffer, const char *annotation);

void radv_gang_cache_flush(struct radv_cmd_buffer *cmd_buffer);

bool radv_gang_init(struct radv_cmd_buffer *cmd_buffer);

void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer, VkPipelineBindPoint bind_point,
                             struct radv_descriptor_set *set, unsigned idx);

void radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
                                   VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects);

void radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
                              const VkImageSubresourceRange *range, bool value);

void radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
                              const VkImageSubresourceRange *range, bool value);

void radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
                                      int cb_idx, uint32_t color_values[2]);

unsigned radv_instance_rate_prolog_index(unsigned num_attributes, uint32_t instance_rate_inputs);

enum radv_cmd_flush_bits radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, VkPipelineStageFlags2 src_stages,
                                               VkAccessFlags2 src_flags, VkAccessFlags3KHR src3_flags,
                                               const struct radv_image *image, const VkImageSubresourceRange *range);

enum radv_cmd_flush_bits radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, VkPipelineStageFlags2 dst_stages,
                                               VkAccessFlags2 dst_flags, VkAccessFlags3KHR dst3_flags,
                                               const struct radv_image *image, const VkImageSubresourceRange *range);

struct radv_resolve_barrier {
   VkPipelineStageFlags2 src_stage_mask;
   VkPipelineStageFlags2 dst_stage_mask;
   VkAccessFlags2 src_access_mask;
   VkAccessFlags2 dst_access_mask;
};

void radv_emit_resolve_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_resolve_barrier *barrier);

void radv_meta_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer, VkPipelineBindPoint pipelineBindPoint,
                                   VkPipelineLayout _layout, uint32_t set, uint32_t descriptorWriteCount,
                                   const VkWriteDescriptorSet *pDescriptorWrites);

struct radv_dispatch_info {
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Whether waves must be launched in order.
    */
   bool ordered;

   /**
    * Indirect compute parameters resource.
    */
   struct radeon_winsys_bo *indirect;
   uint64_t va;
};

void radv_compute_dispatch(struct radv_cmd_buffer *cmd_buffer, const struct radv_dispatch_info *info);
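
/*
 * Example (illustrative only): a direct dispatch of a 64x1x1 grid of
 * workgroups could be issued by filling radv_dispatch_info and calling
 * radv_compute_dispatch():
 *
 *    struct radv_dispatch_info info = {
 *       .blocks = {64, 1, 1},
 *    };
 *    radv_compute_dispatch(cmd_buffer, &info);
 *
 * Indirect dispatches instead set info.indirect/info.va so the block counts
 * are read from GPU memory; see radv_indirect_dispatch() below.
 */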

/*
 * Takes x,y,z as exact numbers of invocations, instead of blocks.
 *
 * Limitations: Can't call normal dispatch functions without binding or rebinding
 * the compute pipeline.
 */
void radv_unaligned_dispatch(struct radv_cmd_buffer *cmd_buffer, uint32_t x, uint32_t y, uint32_t z);

void radv_indirect_dispatch(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *bo, uint64_t va);

uint32_t radv_init_fmask(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
                         const VkImageSubresourceRange *range);

uint32_t radv_init_dcc(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
                       const VkImageSubresourceRange *range, uint32_t value);

void radv_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer);

void radv_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, bool draw_visible, unsigned pred_op,
                                     uint64_t va);

void radv_begin_conditional_rendering(struct radv_cmd_buffer *cmd_buffer, uint64_t va, bool draw_visible);

void radv_end_conditional_rendering(struct radv_cmd_buffer *cmd_buffer);

uint64_t radv_descriptor_get_va(const struct radv_descriptor_state *descriptors_state, unsigned set_idx);

struct radv_vbo_info {
   uint64_t va;

   uint32_t binding;
   uint32_t stride;
   uint32_t size;

   uint32_t attrib_offset;
   uint32_t attrib_index_offset;
   uint32_t attrib_format_size;

   uint32_t non_trivial_format;
};

void radv_get_vbo_info(const struct radv_cmd_buffer *cmd_buffer, uint32_t vbo_idx, struct radv_vbo_info *vbo_info);

void radv_emit_compute_shader(const struct radv_physical_device *pdev, struct radeon_cmdbuf *cs,
                              const struct radv_shader *shader);

void radv_upload_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
                                          struct radv_descriptor_state *descriptors_state);

#endif /* RADV_CMD_BUFFER_H */