1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * SPDX-License-Identifier: MIT
9 */
10
11 #include "radv_queue.h"
12 #include "radv_buffer.h"
13 #include "radv_cp_reg_shadowing.h"
14 #include "radv_cs.h"
15 #include "radv_debug.h"
16 #include "radv_device_memory.h"
17 #include "radv_image.h"
18 #include "radv_printf.h"
19 #include "radv_rmv.h"
20 #include "vk_semaphore.h"
21 #include "vk_sync.h"
22
23 #include "ac_cmdbuf.h"
24 #include "ac_debug.h"
25 #include "ac_descriptors.h"
26
27 enum radeon_ctx_priority
28 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfo *pObj)
29 {
30 /* Default to MEDIUM when a specific global priority isn't requested */
31 if (!pObj)
32 return RADEON_CTX_PRIORITY_MEDIUM;
33
34 switch (pObj->globalPriority) {
35 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME:
36 return RADEON_CTX_PRIORITY_REALTIME;
37 case VK_QUEUE_GLOBAL_PRIORITY_HIGH:
38 return RADEON_CTX_PRIORITY_HIGH;
39 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM:
40 return RADEON_CTX_PRIORITY_MEDIUM;
41 case VK_QUEUE_GLOBAL_PRIORITY_LOW:
42 return RADEON_CTX_PRIORITY_LOW;
43 default:
44 unreachable("Illegal global priority value");
45 return RADEON_CTX_PRIORITY_INVALID;
46 }
47 }
48
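/* Apply the binds of a VkSparseBufferMemoryBindInfo to the buffer's virtual BO.
 * Consecutive binds that target the same memory object and are contiguous in both the
 * resource range and the memory range are merged into a single radv_bo_virtual_bind() call.
 */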
49 static VkResult
50 radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind)
51 {
52 VK_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
53 VkResult result = VK_SUCCESS;
54
55 struct radv_device_memory *mem = NULL;
56 VkDeviceSize resourceOffset = 0;
57 VkDeviceSize size = 0;
58 VkDeviceSize memoryOffset = 0;
59 for (uint32_t i = 0; i < bind->bindCount; ++i) {
60 struct radv_device_memory *cur_mem = NULL;
61
62 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
63 cur_mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
64 if (i && mem == cur_mem) {
65 if (mem) {
66 if (bind->pBinds[i].resourceOffset == resourceOffset + size &&
67 bind->pBinds[i].memoryOffset == memoryOffset + size) {
68 size += bind->pBinds[i].size;
69 continue;
70 }
71 } else {
72 if (bind->pBinds[i].resourceOffset == resourceOffset + size) {
73 size += bind->pBinds[i].size;
74 continue;
75 }
76 }
77 }
78 if (size) {
79 result = radv_bo_virtual_bind(device, &buffer->vk.base, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
80 memoryOffset);
81 if (result != VK_SUCCESS)
82 return result;
83 }
84 mem = cur_mem;
85 resourceOffset = bind->pBinds[i].resourceOffset;
86 size = bind->pBinds[i].size;
87 memoryOffset = bind->pBinds[i].memoryOffset;
88 }
89 if (size) {
90 result = radv_bo_virtual_bind(device, &buffer->vk.base, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
91 memoryOffset);
92 }
93
94 return result;
95 }
96
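/* Apply opaque sparse image binds (e.g. the mip tail): each bind maps a raw byte range of
 * the image's virtual BO, so the ranges are bound one by one without any address translation.
 */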
97 static VkResult
98 radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind)
99 {
100 VK_FROM_HANDLE(radv_image, image, bind->image);
101 VkResult result;
102
103 for (uint32_t i = 0; i < bind->bindCount; ++i) {
104 struct radv_device_memory *mem = NULL;
105
106 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
107 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
108
109 result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, bind->pBinds[i].resourceOffset,
110 bind->pBinds[i].size, mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
111 if (result != VK_SUCCESS)
112 return result;
113 }
114
115 return VK_SUCCESS;
116 }
117
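/* Apply per-region sparse image binds. The bind region is converted from texels to blocks
 * and then to a byte offset within the surface using its PRT tile layout. Regions that do
 * not cover whole rows/slices of the subresource are split into one bind per row of tiles.
 */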
118 static VkResult
119 radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMemoryBindInfo *bind)
120 {
121 VK_FROM_HANDLE(radv_image, image, bind->image);
122 const struct radv_physical_device *pdev = radv_device_physical(device);
123 struct radeon_surf *surface = &image->planes[0].surface;
124 uint32_t bs = vk_format_get_blocksize(image->vk.format);
125 VkResult result;
126
127 for (uint32_t i = 0; i < bind->bindCount; ++i) {
128 struct radv_device_memory *mem = NULL;
129 uint64_t offset, depth_pitch;
130 uint32_t pitch;
131 uint64_t mem_offset = bind->pBinds[i].memoryOffset;
132 const uint32_t layer = bind->pBinds[i].subresource.arrayLayer;
133 const uint32_t level = bind->pBinds[i].subresource.mipLevel;
134
135 VkExtent3D bind_extent = bind->pBinds[i].extent;
136 bind_extent.width = DIV_ROUND_UP(bind_extent.width, vk_format_get_blockwidth(image->vk.format));
137 bind_extent.height = DIV_ROUND_UP(bind_extent.height, vk_format_get_blockheight(image->vk.format));
138
139 VkOffset3D bind_offset = bind->pBinds[i].offset;
140 bind_offset.x /= vk_format_get_blockwidth(image->vk.format);
141 bind_offset.y /= vk_format_get_blockheight(image->vk.format);
142
143 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
144 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
145
146 if (pdev->info.gfx_level >= GFX9) {
147 offset = surface->u.gfx9.surf_slice_size * layer + surface->u.gfx9.prt_level_offset[level];
148 pitch = surface->u.gfx9.prt_level_pitch[level];
149 depth_pitch = surface->u.gfx9.surf_slice_size;
150 } else {
151 depth_pitch = surface->u.legacy.level[level].slice_size_dw * 4;
152 offset = (uint64_t)surface->u.legacy.level[level].offset_256B * 256 + depth_pitch * layer;
153 pitch = surface->u.legacy.level[level].nblk_x;
154 }
155
156 offset +=
157 bind_offset.z * depth_pitch + ((uint64_t)bind_offset.y * pitch * surface->prt_tile_depth +
158 (uint64_t)bind_offset.x * surface->prt_tile_height * surface->prt_tile_depth) *
159 bs;
160
161 uint32_t aligned_extent_width = ALIGN(bind_extent.width, surface->prt_tile_width);
162 uint32_t aligned_extent_height = ALIGN(bind_extent.height, surface->prt_tile_height);
163 uint32_t aligned_extent_depth = ALIGN(bind_extent.depth, surface->prt_tile_depth);
164
165 bool whole_subres = (bind_extent.height <= surface->prt_tile_height || aligned_extent_width == pitch) &&
166 (bind_extent.depth <= surface->prt_tile_depth ||
167 (uint64_t)aligned_extent_width * aligned_extent_height * bs == depth_pitch);
168
169 if (whole_subres) {
170 uint64_t size = (uint64_t)aligned_extent_width * aligned_extent_height * aligned_extent_depth * bs;
171 result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, offset, size,
172 mem ? mem->bo : NULL, mem_offset);
173 if (result != VK_SUCCESS)
174 return result;
175 } else {
176 uint32_t img_y_increment = pitch * bs * surface->prt_tile_depth;
177 uint32_t mem_y_increment = aligned_extent_width * bs * surface->prt_tile_depth;
178 uint64_t mem_z_increment = (uint64_t)aligned_extent_width * aligned_extent_height * bs;
179 uint64_t size = mem_y_increment * surface->prt_tile_height;
180 for (unsigned z = 0; z < bind_extent.depth;
181 z += surface->prt_tile_depth, offset += depth_pitch * surface->prt_tile_depth) {
182 for (unsigned y = 0; y < bind_extent.height; y += surface->prt_tile_height) {
183 uint64_t bo_offset = offset + (uint64_t)img_y_increment * y;
184
185 result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, bo_offset, size,
186 mem ? mem->bo : NULL,
187 mem_offset + (uint64_t)mem_y_increment * y + mem_z_increment * z);
188 if (result != VK_SUCCESS)
189 return result;
190 }
191 }
192 }
193 }
194
195 return VK_SUCCESS;
196 }
197
198 static VkResult
199 radv_queue_submit_bind_sparse_memory(struct radv_device *device, struct vk_queue_submit *submission)
200 {
201 for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
202 VkResult result = radv_sparse_buffer_bind_memory(device, submission->buffer_binds + i);
203 if (result != VK_SUCCESS)
204 return result;
205 }
206
207 for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
208 VkResult result = radv_sparse_image_opaque_bind_memory(device, submission->image_opaque_binds + i);
209 if (result != VK_SUCCESS)
210 return result;
211 }
212
213 for (uint32_t i = 0; i < submission->image_bind_count; ++i) {
214 VkResult result = radv_sparse_image_bind_memory(device, submission->image_binds + i);
215 if (result != VK_SUCCESS)
216 return result;
217 }
218
219 return VK_SUCCESS;
220 }
221
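/* Submit just the wait and signal operations of a submission, without any command buffers. */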
222 static VkResult
223 radv_queue_submit_empty(struct radv_queue *queue, struct vk_queue_submit *submission)
224 {
225 struct radv_device *device = radv_queue_device(queue);
226 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
227 struct radv_winsys_submit_info submit = {
228 .ip_type = radv_queue_ring(queue),
229 .queue_index = queue->vk.index_in_family,
230 };
231
232 return device->ws->cs_submit(ctx, &submit, submission->wait_count, submission->waits, submission->signal_count,
233 submission->signals);
234 }
235
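/* Build a 4-dword buffer descriptor for one of the internal shader rings. When
 * oob_select_raw is set the descriptor uses RAW out-of-bounds checking, otherwise the OOB
 * check is disabled.
 */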
236 static void
237 radv_set_ring_buffer(const struct radv_physical_device *pdev, struct radeon_winsys_bo *bo, uint32_t offset,
238 uint32_t ring_size, bool add_tid, bool swizzle_enable, bool oob_select_raw, uint32_t element_size,
239 uint32_t index_stride, uint32_t desc[4])
240 {
241 const uint8_t oob_select = oob_select_raw ? V_008F0C_OOB_SELECT_RAW : V_008F0C_OOB_SELECT_DISABLED;
242 const uint64_t va = radv_buffer_get_va(bo) + offset;
243 const struct ac_buffer_state ac_state = {
244 .va = va,
245 .size = ring_size,
246 .format = PIPE_FORMAT_R32_FLOAT,
247 .swizzle =
248 {
249 PIPE_SWIZZLE_X,
250 PIPE_SWIZZLE_Y,
251 PIPE_SWIZZLE_Z,
252 PIPE_SWIZZLE_W,
253 },
254 .swizzle_enable = swizzle_enable,
255 .element_size = element_size,
256 .index_stride = index_stride,
257 .add_tid = add_tid,
258 .gfx10_oob_select = oob_select,
259 };
260
261 ac_build_buffer_descriptor(pdev->info.gfx_level, &ac_state, desc);
262 }
263
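/* Write the ring descriptors consumed by shaders into the descriptor BO: scratch, ES->GS,
 * GS->VS, tess factor + off-chip, task draw/payload, mesh scratch and the GFX11+ attribute
 * ring, followed by the 1x/2x/4x/8x default sample positions. Shaders address this data by
 * fixed offsets, hence the fixed desc += N increments even for rings that are absent.
 */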
264 static void
265 radv_fill_shader_rings(struct radv_device *device, uint32_t *desc, struct radeon_winsys_bo *scratch_bo,
266 uint32_t esgs_ring_size, struct radeon_winsys_bo *esgs_ring_bo, uint32_t gsvs_ring_size,
267 struct radeon_winsys_bo *gsvs_ring_bo, struct radeon_winsys_bo *tess_rings_bo,
268 struct radeon_winsys_bo *task_rings_bo, struct radeon_winsys_bo *mesh_scratch_ring_bo,
269 struct radeon_winsys_bo *ge_rings_bo)
270 {
271 const struct radv_physical_device *pdev = radv_device_physical(device);
272
273 if (scratch_bo) {
274 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
275 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
276
277 if (pdev->info.gfx_level >= GFX11)
278 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
279 else
280 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
281
282 desc[0] = scratch_va;
283 desc[1] = rsrc1;
284 }
285
286 desc += 4;
287
288 if (esgs_ring_bo) {
289 /* stride 0, num records - size, add tid, swizzle, elsize4,
290 index stride 64 */
291 radv_set_ring_buffer(pdev, esgs_ring_bo, 0, esgs_ring_size, true, true, false, 1, 3, &desc[0]);
292
293 /* GS entry for ES->GS ring */
294 /* stride 0, num records - size, elsize0,
295 index stride 0 */
296 radv_set_ring_buffer(pdev, esgs_ring_bo, 0, esgs_ring_size, false, false, false, 0, 0, &desc[4]);
297 }
298
299 desc += 8;
300
301 if (gsvs_ring_bo) {
302 /* VS entry for GS->VS ring */
303 /* stride 0, num records - size, elsize0,
304 index stride 0 */
305 radv_set_ring_buffer(pdev, gsvs_ring_bo, 0, gsvs_ring_size, false, false, false, 0, 0, &desc[0]);
306
307 /* stride gsvs_itemsize, num records 64
308 elsize 4, index stride 16 */
309 /* shader will patch stride and desc[2] */
310 radv_set_ring_buffer(pdev, gsvs_ring_bo, 0, 0, true, true, false, 1, 1, &desc[4]);
311 }
312
313 desc += 8;
314
315 if (tess_rings_bo) {
316 radv_set_ring_buffer(pdev, tess_rings_bo, 0, pdev->hs.tess_factor_ring_size, false, false, true, 0, 0, &desc[0]);
317
318 radv_set_ring_buffer(pdev, tess_rings_bo, pdev->hs.tess_offchip_ring_offset, pdev->hs.tess_offchip_ring_size,
319 false, false, true, 0, 0, &desc[4]);
320 }
321
322 desc += 8;
323
324 if (task_rings_bo) {
325 radv_set_ring_buffer(pdev, task_rings_bo, pdev->task_info.draw_ring_offset,
326 pdev->task_info.num_entries * AC_TASK_DRAW_ENTRY_BYTES, false, false, false, 0, 0, &desc[0]);
327
328 radv_set_ring_buffer(pdev, task_rings_bo, pdev->task_info.payload_ring_offset,
329 pdev->task_info.num_entries * AC_TASK_PAYLOAD_ENTRY_BYTES, false, false, false, 0, 0,
330 &desc[4]);
331 }
332
333 desc += 8;
334
335 if (mesh_scratch_ring_bo) {
336 radv_set_ring_buffer(pdev, mesh_scratch_ring_bo, 0, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES,
337 false, false, false, 0, 0, &desc[0]);
338 }
339
340 desc += 4;
341
342 if (ge_rings_bo) {
343 assert(pdev->info.gfx_level >= GFX11);
344
345 ac_build_attr_ring_descriptor(pdev->info.gfx_level, radv_buffer_get_va(ge_rings_bo),
346 pdev->info.total_attribute_pos_prim_ring_size, 0, &desc[0]);
347 }
348
349 desc += 4;
350
351 /* add sample positions after all rings */
352 memcpy(desc, device->sample_locations_1x, 8);
353 desc += 2;
354 memcpy(desc, device->sample_locations_2x, 16);
355 desc += 4;
356 memcpy(desc, device->sample_locations_4x, 32);
357 desc += 8;
358 memcpy(desc, device->sample_locations_8x, 64);
359 }
360
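/* Program the ESGS/GSVS ring sizes; these registers live in UCONFIG space on GFX7 and newer
 * and in CONFIG space on older chips.
 */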
361 static void
362 radv_emit_gs_ring_sizes(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *esgs_ring_bo,
363 uint32_t esgs_ring_size, struct radeon_winsys_bo *gsvs_ring_bo, uint32_t gsvs_ring_size)
364 {
365 const struct radv_physical_device *pdev = radv_device_physical(device);
366
367 if (!esgs_ring_bo && !gsvs_ring_bo)
368 return;
369
370 if (esgs_ring_bo)
371 radv_cs_add_buffer(device->ws, cs, esgs_ring_bo);
372
373 if (gsvs_ring_bo)
374 radv_cs_add_buffer(device->ws, cs, gsvs_ring_bo);
375
376 if (pdev->info.gfx_level >= GFX7) {
377 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
378 radeon_emit(cs, esgs_ring_size >> 8);
379 radeon_emit(cs, gsvs_ring_size >> 8);
380 } else {
381 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
382 radeon_emit(cs, esgs_ring_size >> 8);
383 radeon_emit(cs, gsvs_ring_size >> 8);
384 }
385 }
386
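/* Program the tessellation factor ring base/size and the off-chip HS parameters. */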
387 static void
388 radv_emit_tess_factor_ring(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *tess_rings_bo)
389 {
390 const struct radv_physical_device *pdev = radv_device_physical(device);
391 uint64_t tf_va;
392 uint32_t tf_ring_size;
393 if (!tess_rings_bo)
394 return;
395
396 tf_ring_size = pdev->hs.tess_factor_ring_size / 4;
397 tf_va = radv_buffer_get_va(tess_rings_bo);
398
399 radv_cs_add_buffer(device->ws, cs, tess_rings_bo);
400
401 if (pdev->info.gfx_level >= GFX7) {
402 if (pdev->info.gfx_level >= GFX11) {
403 /* TF_RING_SIZE is per SE on GFX11. */
404 tf_ring_size /= pdev->info.max_se;
405 }
406
407 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE, S_030938_SIZE(tf_ring_size));
408 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE, tf_va >> 8);
409
410 if (pdev->info.gfx_level >= GFX12) {
411 radeon_set_uconfig_reg(cs, R_03099C_VGT_TF_MEMORY_BASE_HI, S_03099C_BASE_HI(tf_va >> 40));
412 } else if (pdev->info.gfx_level >= GFX10) {
413 radeon_set_uconfig_reg(cs, R_030984_VGT_TF_MEMORY_BASE_HI, S_030984_BASE_HI(tf_va >> 40));
414 } else if (pdev->info.gfx_level == GFX9) {
415 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI, S_030944_BASE_HI(tf_va >> 40));
416 }
417
418 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, pdev->hs.hs_offchip_param);
419 } else {
420 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE, S_008988_SIZE(tf_ring_size));
421 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE, tf_va >> 8);
422 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM, pdev->hs.hs_offchip_param);
423 }
424 }
425
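/* Initialize the task shader control buffer from the CPU: the write/read/dealloc ring
 * pointers all start at num_entries, followed by the entry count and the address of the
 * draw ring.
 */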
426 static VkResult
427 radv_initialise_task_control_buffer(struct radv_device *device, struct radeon_winsys_bo *task_rings_bo)
428 {
429 const struct radv_physical_device *pdev = radv_device_physical(device);
430 uint32_t *ptr = (uint32_t *)radv_buffer_map(device->ws, task_rings_bo);
431 if (!ptr)
432 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
433
434 const uint32_t num_entries = pdev->task_info.num_entries;
435 const uint64_t task_va = radv_buffer_get_va(task_rings_bo);
436 const uint64_t task_draw_ring_va = task_va + pdev->task_info.draw_ring_offset;
437 assert((task_draw_ring_va & 0xFFFFFF00) == (task_draw_ring_va & 0xFFFFFFFF));
438
439 /* 64-bit write_ptr */
440 ptr[0] = num_entries;
441 ptr[1] = 0;
442 /* 64-bit read_ptr */
443 ptr[2] = num_entries;
444 ptr[3] = 0;
445 /* 64-bit dealloc_ptr */
446 ptr[4] = num_entries;
447 ptr[5] = 0;
448 /* num_entries */
449 ptr[6] = num_entries;
450 /* 64-bit draw ring address */
451 ptr[7] = task_draw_ring_va;
452 ptr[8] = task_draw_ring_va >> 32;
453
454 device->ws->buffer_unmap(device->ws, task_rings_bo, false);
455 return VK_SUCCESS;
456 }
457
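/* Point the queue at the task control buffer with DISPATCH_TASK_STATE_INIT. The packet is
 * marked as a compute packet when it is emitted on a compute queue.
 */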
458 static void
459 radv_emit_task_rings(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *task_rings_bo,
460 bool compute)
461 {
462 if (!task_rings_bo)
463 return;
464
465 const uint64_t task_ctrlbuf_va = radv_buffer_get_va(task_rings_bo);
466 assert(util_is_aligned(task_ctrlbuf_va, 256));
467 radv_cs_add_buffer(device->ws, cs, task_rings_bo);
468
469 /* Tell the GPU where the task control buffer is. */
470 radeon_emit(cs, PKT3(PKT3_DISPATCH_TASK_STATE_INIT, 1, 0) | PKT3_SHADER_TYPE_S(!!compute));
471 /* bits [31:8]: control buffer address lo, bits[7:0]: reserved (set to zero) */
472 radeon_emit(cs, task_ctrlbuf_va & 0xFFFFFF00);
473 /* bits [31:0]: control buffer address hi */
474 radeon_emit(cs, task_ctrlbuf_va >> 32);
475 }
476
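/* Program the graphics scratch (spill) buffer. GFX11+ also takes the base address through
 * SPI_GFX_SCRATCH_BASE and counts WAVES per SE.
 */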
477 static void
478 radv_emit_graphics_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
479 struct radeon_winsys_bo *scratch_bo)
480 {
481 const struct radv_physical_device *pdev = radv_device_physical(device);
482 const struct radeon_info *gpu_info = &pdev->info;
483
484 if (!scratch_bo)
485 return;
486
487 radv_cs_add_buffer(device->ws, cs, scratch_bo);
488
489 if (gpu_info->gfx_level >= GFX11) {
490 uint64_t va = radv_buffer_get_va(scratch_bo);
491
492 /* WAVES is per SE for SPI_TMPRING_SIZE. */
493 waves /= gpu_info->max_se;
494
495 radeon_set_context_reg_seq(cs, R_0286E8_SPI_TMPRING_SIZE, 3);
496 radeon_emit(cs, S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 256)));
497 radeon_emit(cs, va >> 8); /* SPI_GFX_SCRATCH_BASE_LO */
498 radeon_emit(cs, va >> 40); /* SPI_GFX_SCRATCH_BASE_HI */
499 } else {
500 radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
501 S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 1024)));
502 }
503 }
504
505 static void
506 radv_emit_compute_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
507 struct radeon_winsys_bo *compute_scratch_bo)
508 {
509 const struct radv_physical_device *pdev = radv_device_physical(device);
510 const struct radeon_info *gpu_info = &pdev->info;
511 uint64_t scratch_va;
512 uint32_t rsrc1;
513
514 if (!compute_scratch_bo)
515 return;
516
517 scratch_va = radv_buffer_get_va(compute_scratch_bo);
518 rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
519
520 if (gpu_info->gfx_level >= GFX11)
521 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
522 else
523 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
524
525 radv_cs_add_buffer(device->ws, cs, compute_scratch_bo);
526
527 if (gpu_info->gfx_level >= GFX11) {
528 radeon_set_sh_reg_seq(cs, R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO, 2);
529 radeon_emit(cs, scratch_va >> 8);
530 radeon_emit(cs, scratch_va >> 40);
531
532 waves /= gpu_info->max_se;
533 }
534
535 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
536 radeon_emit(cs, scratch_va);
537 radeon_emit(cs, rsrc1);
538
539 radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
540 S_00B860_WAVES(waves) |
541 S_00B860_WAVESIZE(DIV_ROUND_UP(size_per_wave, gpu_info->gfx_level >= GFX11 ? 256 : 1024)));
542 }
543
544 static void
545 radv_emit_compute_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
546 struct radeon_winsys_bo *descriptor_bo)
547 {
548 if (!descriptor_bo)
549 return;
550
551 uint64_t va = radv_buffer_get_va(descriptor_bo);
552 radv_cs_add_buffer(device->ws, cs, descriptor_bo);
553
554 /* Compute shader user data 0-1 have the scratch pointer (unlike GFX shaders),
555 * so emit the descriptor pointer to user data 2-3 instead (task_ring_offsets arg).
556 */
557 radv_emit_shader_pointer(device, cs, R_00B908_COMPUTE_USER_DATA_2, va, true);
558 }
559
560 static void
561 radv_emit_graphics_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
562 struct radeon_winsys_bo *descriptor_bo)
563 {
564 const struct radv_physical_device *pdev = radv_device_physical(device);
565 uint64_t va;
566
567 if (!descriptor_bo)
568 return;
569
570 va = radv_buffer_get_va(descriptor_bo);
571
572 radv_cs_add_buffer(device->ws, cs, descriptor_bo);
573
574 if (pdev->info.gfx_level >= GFX12) {
575 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B410_SPI_SHADER_PGM_LO_HS,
576 R_00B210_SPI_SHADER_PGM_LO_GS};
577
578 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
579 radv_emit_shader_pointer(device, cs, regs[i], va, true);
580 }
581 } else if (pdev->info.gfx_level >= GFX11) {
582 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B420_SPI_SHADER_PGM_LO_HS,
583 R_00B220_SPI_SHADER_PGM_LO_GS};
584
585 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
586 radv_emit_shader_pointer(device, cs, regs[i], va, true);
587 }
588 } else if (pdev->info.gfx_level >= GFX10) {
589 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
590 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
591
592 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
593 radv_emit_shader_pointer(device, cs, regs[i], va, true);
594 }
595 } else if (pdev->info.gfx_level == GFX9) {
596 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
597 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
598
599 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
600 radv_emit_shader_pointer(device, cs, regs[i], va, true);
601 }
602 } else {
603 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
604 R_00B230_SPI_SHADER_USER_DATA_GS_0, R_00B330_SPI_SHADER_USER_DATA_ES_0,
605 R_00B430_SPI_SHADER_USER_DATA_HS_0, R_00B530_SPI_SHADER_USER_DATA_LS_0};
606
607 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
608 radv_emit_shader_pointer(device, cs, regs[i], va, true);
609 }
610 }
611 }
612
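/* Program the GFX11+ attribute ring registers (plus the position/primitive rings on GFX12).
 * The GE must be idle before these registers change, hence the bottom-of-pipe RELEASE_MEM
 * that is waited on through the PWS counter before the writes.
 */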
613 static void
614 radv_emit_ge_rings(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *ge_rings_bo)
615 {
616 const struct radv_physical_device *pdev = radv_device_physical(device);
617 uint64_t va;
618
619 if (!ge_rings_bo)
620 return;
621
622 assert(pdev->info.gfx_level >= GFX11);
623
624 va = radv_buffer_get_va(ge_rings_bo);
625 assert((va >> 32) == pdev->info.address32_hi);
626
627 radv_cs_add_buffer(device->ws, cs, ge_rings_bo);
628
629 /* We must wait for idle using an EOP event before changing the attribute ring registers. Use the
630 * bottom-of-pipe EOP event, but increment the PWS counter instead of writing memory.
631 */
632 radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
633 radeon_emit(cs, S_490_EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | S_490_EVENT_INDEX(5) | S_490_PWS_ENABLE(1));
634 radeon_emit(cs, 0); /* DST_SEL, INT_SEL, DATA_SEL */
635 radeon_emit(cs, 0); /* ADDRESS_LO */
636 radeon_emit(cs, 0); /* ADDRESS_HI */
637 radeon_emit(cs, 0); /* DATA_LO */
638 radeon_emit(cs, 0); /* DATA_HI */
639 radeon_emit(cs, 0); /* INT_CTXID */
640
641 /* Wait for the PWS counter. */
642 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
643 radeon_emit(cs, S_580_PWS_STAGE_SEL(V_580_CP_ME) | S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) | S_580_PWS_ENA2(1) |
644 S_580_PWS_COUNT(0));
645 radeon_emit(cs, 0xffffffff); /* GCR_SIZE */
646 radeon_emit(cs, 0x01ffffff); /* GCR_SIZE_HI */
647 radeon_emit(cs, 0); /* GCR_BASE_LO */
648 radeon_emit(cs, 0); /* GCR_BASE_HI */
649 radeon_emit(cs, S_585_PWS_ENA(1));
650 radeon_emit(cs, 0); /* GCR_CNTL */
651
652 /* The PS will read inputs from this address. */
653 radeon_set_uconfig_reg_seq(cs, R_031110_SPI_GS_THROTTLE_CNTL1, 4);
654 radeon_emit(cs, 0x12355123); /* SPI_GS_THROTTLE_CNTL1 */
655 radeon_emit(cs, 0x1544D); /* SPI_GS_THROTTLE_CNTL2 */
656 radeon_emit(cs, va >> 16); /* SPI_ATTRIBUTE_RING_BASE */
657 radeon_emit(cs, S_03111C_MEM_SIZE((pdev->info.attribute_ring_size_per_se >> 16) - 1) |
658 S_03111C_BIG_PAGE(pdev->info.discardable_allows_big_page) |
659 S_03111C_L1_POLICY(1)); /* SPI_ATTRIBUTE_RING_SIZE */
660
661 if (pdev->info.gfx_level >= GFX12) {
662 const uint64_t pos_address = va + pdev->info.pos_ring_offset;
663 const uint64_t prim_address = va + pdev->info.prim_ring_offset;
664
665 /* When one of these 4 registers is updated, all 4 must be updated. */
666 radeon_set_uconfig_reg_seq(cs, R_0309A0_GE_POS_RING_BASE, 4);
667 radeon_emit(cs, pos_address >> 16); /* R_0309A0_GE_POS_RING_BASE */
668 radeon_emit(cs, S_0309A4_MEM_SIZE(pdev->info.pos_ring_size_per_se >> 5)); /* R_0309A4_GE_POS_RING_SIZE */
669 radeon_emit(cs, prim_address >> 16); /* R_0309A8_GE_PRIM_RING_BASE */
670 radeon_emit(cs, S_0309AC_MEM_SIZE(pdev->info.prim_ring_size_per_se >> 5) | S_0309AC_SCOPE(gfx12_scope_device) |
671 S_0309AC_PAF_TEMPORAL(gfx12_store_high_temporal_stay_dirty) |
672 S_0309AC_PAB_TEMPORAL(gfx12_load_last_use_discard) |
673 S_0309AC_SPEC_DATA_READ(gfx12_spec_read_auto) | S_0309AC_FORCE_SE_SCOPE(1) |
674 S_0309AC_PAB_NOFILL(1)); /* R_0309AC_GE_PRIM_RING_SIZE */
675 }
676 }
677
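/* Emit the compute preamble: the common compute state built by ac_init_compute_preamble_state(),
 * the dispatch start offsets, and the GFX8 trap handler TBA/TMA registers when a trap handler
 * is installed.
 */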
678 static void
679 radv_emit_compute(struct radv_device *device, struct radeon_cmdbuf *cs, bool is_compute_queue)
680 {
681 const struct radv_physical_device *pdev = radv_device_physical(device);
682 const uint64_t border_color_va = device->border_color_data.bo ? radv_buffer_get_va(device->border_color_data.bo) : 0;
683
684 struct ac_pm4_state *pm4 = ac_pm4_create_sized(&pdev->info, false, 64, is_compute_queue);
685 if (!pm4)
686 return;
687
688 const struct ac_preamble_state preamble_state = {
689 .border_color_va = border_color_va,
690 .gfx11 =
691 {
692 .compute_dispatch_interleave = 64,
693 },
694 };
695
696 ac_init_compute_preamble_state(&preamble_state, pm4);
697
698 ac_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
699 ac_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
700 ac_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);
701
702 if (pdev->info.gfx_level == GFX8 && device->tma_bo) {
703 uint64_t tba_va, tma_va;
704
705 tba_va = radv_shader_get_va(device->trap_handler_shader);
706 tma_va = radv_buffer_get_va(device->tma_bo);
707
708 ac_pm4_set_reg(pm4, R_00B838_COMPUTE_TBA_LO, tba_va >> 8);
709 ac_pm4_set_reg(pm4, R_00B83C_COMPUTE_TBA_HI, tba_va >> 40);
710 ac_pm4_set_reg(pm4, R_00B840_COMPUTE_TMA_LO, tma_va >> 8);
711 ac_pm4_set_reg(pm4, R_00B844_COMPUTE_TMA_HI, tma_va >> 40);
712 }
713
714 ac_pm4_finalize(pm4);
715
716 radeon_emit_array(cs, pm4->pm4, pm4->ndw);
717
718 ac_pm4_free_state(pm4);
719 }
720
721 /* 12.4 fixed-point */
722 static unsigned
723 radv_pack_float_12p4(float x)
724 {
725 return x <= 0 ? 0 : x >= 4096 ? 0xffff : x * 16;
726 }
727
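/* Emit the graphics preamble: without register shadowing this starts with CONTEXT_CONTROL
 * and CLEAR_STATE, followed by context and SH state that stays constant across submissions,
 * and finally the compute preamble for the same queue.
 */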
728 void
729 radv_emit_graphics(struct radv_device *device, struct radeon_cmdbuf *cs)
730 {
731 struct radv_physical_device *pdev = radv_device_physical(device);
732 const uint64_t border_color_va = device->border_color_data.bo ? radv_buffer_get_va(device->border_color_data.bo) : 0;
733 bool has_clear_state = pdev->info.has_clear_state;
734 int i;
735
736 if (!device->uses_shadow_regs) {
737 radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
738 radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
739 radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
740
741 if (has_clear_state) {
742 radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
743 radeon_emit(cs, 0);
744 }
745 }
746
747 struct ac_pm4_state *pm4 = ac_pm4_create_sized(&pdev->info, false, 512, false);
748 if (!pm4)
749 return;
750
751 const struct ac_preamble_state preamble_state = {
752 .border_color_va = border_color_va,
753 };
754
755 ac_init_graphics_preamble_state(&preamble_state, pm4);
756
757 if (!has_clear_state) {
758 for (i = 0; i < 16; i++) {
759 radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i * 8, 0);
760 radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i * 8, fui(1.0));
761 }
762 }
763
764 if (!has_clear_state) {
765 radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
766 /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
767 radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
768 }
769
770 if (pdev->info.gfx_level <= GFX8)
771 radeon_set_sh_reg(cs, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(pdev->info.address32_hi >> 8));
772
773 if (pdev->info.gfx_level < GFX11)
774 radeon_set_sh_reg(cs, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(pdev->info.address32_hi >> 8));
775
776 unsigned cu_mask_ps = pdev->info.gfx_level >= GFX10_3 ? ac_gfx103_get_cu_mask_ps(&pdev->info) : ~0u;
777
778 if (pdev->info.gfx_level >= GFX12) {
779 radeon_set_sh_reg(cs, R_00B420_SPI_SHADER_PGM_RSRC4_HS,
780 S_00B420_WAVE_LIMIT(0x3ff) | S_00B420_GLG_FORCE_DISABLE(1));
781 radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC4_PS,
782 S_00B01C_WAVE_LIMIT_GFX12(0x3FF) | S_00B01C_LDS_GROUP_SIZE_GFX12(1));
783 } else if (pdev->info.gfx_level >= GFX11) {
784 radeon_set_sh_reg_idx(&pdev->info, cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS, 3,
785 ac_apply_cu_en(S_00B404_CU_EN(0xffff), C_00B404_CU_EN, 16, &pdev->info));
786 radeon_set_sh_reg_idx(&pdev->info, cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS, 3,
787 ac_apply_cu_en(S_00B004_CU_EN(cu_mask_ps >> 16), C_00B004_CU_EN, 16, &pdev->info));
788 }
789
790 if (pdev->info.gfx_level >= GFX10) {
791 /* Vulkan doesn't support user edge flags and it also doesn't
792 * need to prevent drawing lines on internal edges of
793 * decomposed primitives (such as quads) with polygon mode = lines.
794 */
795 unsigned vertex_reuse_depth = pdev->info.gfx_level >= GFX10_3 ? 30 : 0;
796 radeon_set_context_reg(cs, R_028838_PA_CL_NGG_CNTL,
797 S_028838_INDEX_BUF_EDGE_FLAG_ENA(0) | S_028838_VERTEX_REUSE_DEPTH(vertex_reuse_depth));
798
799 if (pdev->info.gfx_level >= GFX10_3) {
800 /* This allows sample shading. */
801 radeon_set_context_reg(cs, R_028848_PA_CL_VRS_CNTL,
802 S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE));
803 }
804 }
805
806 if (pdev->info.gfx_level >= GFX8) {
807 /* GFX8+ only compares the bits according to the index type by default,
808 * so we can always leave the programmed value at the maximum.
809 */
810 radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0xffffffff);
811 }
812
813 unsigned tmp = (unsigned)(1.0 * 8.0);
814 radeon_set_context_reg(cs, R_028A00_PA_SU_POINT_SIZE, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
815 radeon_set_context_reg(
816 cs, R_028A04_PA_SU_POINT_MINMAX,
817 S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) | S_028A04_MAX_SIZE(radv_pack_float_12p4(8191.875 / 2)));
818
819 /* Enable the Polaris small primitive filter control.
820 * XXX: There is possibly an issue when MSAA is off (see RadeonSI
821 * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
822 * and AMDVLK doesn't have a workaround either.
823 */
824 if (pdev->info.family >= CHIP_POLARIS10) {
825 unsigned small_prim_filter_cntl = S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
826 /* Workaround for a hw line bug. */
827 S_028830_LINE_FILTER_DISABLE(pdev->info.family <= CHIP_POLARIS12);
828
829 radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL, small_prim_filter_cntl);
830 }
831
832 if (pdev->info.gfx_level >= GFX12) {
833 radeon_set_context_reg(cs, R_028644_SPI_INTERP_CONTROL_0,
834 S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
835 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
836 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
837 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
838 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
839 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
840 } else {
841 radeon_set_context_reg(cs, R_0286D4_SPI_INTERP_CONTROL_0,
842 S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
843 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
844 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
845 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
846 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
847 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
848 }
849
850 radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
851 S_028BE4_PIX_CENTER(1) | S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
852 S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
853
854 if (pdev->info.gfx_level >= GFX12) {
855 radeon_set_context_reg(cs, R_028814_PA_CL_VTE_CNTL,
856 S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
857 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
858 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
859 } else {
860 radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
861 S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
862 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
863 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
864 }
865
866 if (pdev->info.gfx_level == GFX8 && device->tma_bo) {
867 uint64_t tba_va, tma_va;
868
869 tba_va = radv_shader_get_va(device->trap_handler_shader);
870 tma_va = radv_buffer_get_va(device->tma_bo);
871
872 uint32_t regs[] = {R_00B000_SPI_SHADER_TBA_LO_PS, R_00B100_SPI_SHADER_TBA_LO_VS, R_00B200_SPI_SHADER_TBA_LO_GS,
873 R_00B300_SPI_SHADER_TBA_LO_ES, R_00B400_SPI_SHADER_TBA_LO_HS, R_00B500_SPI_SHADER_TBA_LO_LS};
874
875 for (i = 0; i < ARRAY_SIZE(regs); ++i) {
876 radeon_set_sh_reg_seq(cs, regs[i], 4);
877 radeon_emit(cs, tba_va >> 8);
878 radeon_emit(cs, tba_va >> 40);
879 radeon_emit(cs, tma_va >> 8);
880 radeon_emit(cs, tma_va >> 40);
881 }
882 }
883
884 radeon_set_context_reg(cs, R_028828_PA_SU_LINE_STIPPLE_SCALE, 0x3f800000);
885
886 if (pdev->info.gfx_level >= GFX12) {
887 radeon_set_context_reg(cs, R_028000_DB_RENDER_CONTROL, 0);
888 }
889
890 ac_pm4_finalize(pm4);
891 radeon_emit_array(cs, pm4->pm4, pm4->ndw);
892 ac_pm4_free_state(pm4);
893
894 radv_emit_compute(device, cs, false);
895 }
896
897 static void
898 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_device *device)
899 {
900 if (device->gfx_init) {
901 struct radeon_winsys *ws = device->ws;
902
903 ws->cs_execute_ib(cs, device->gfx_init, 0, device->gfx_init_size_dw & 0xffff, false);
904
905 radv_cs_add_buffer(device->ws, cs, device->gfx_init);
906 } else {
907 radv_emit_graphics(device, cs);
908 }
909 }
910
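/* (Re)create the three per-queue preamble command streams (initial full-flush, initial and
 * continue) and grow any scratch/ring BOs to the sizes requested in 'needs'. Ring BOs are
 * only ever grown or reused, never shrunk, and the descriptor BO is rewritten whenever one
 * of the rings changed.
 */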
911 static VkResult
912 radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *device,
913 const struct radv_queue_ring_info *needs)
914 {
915 const struct radv_physical_device *pdev = radv_device_physical(device);
916 struct radeon_winsys *ws = device->ws;
917 struct radeon_winsys_bo *scratch_bo = queue->scratch_bo;
918 struct radeon_winsys_bo *descriptor_bo = queue->descriptor_bo;
919 struct radeon_winsys_bo *compute_scratch_bo = queue->compute_scratch_bo;
920 struct radeon_winsys_bo *esgs_ring_bo = queue->esgs_ring_bo;
921 struct radeon_winsys_bo *gsvs_ring_bo = queue->gsvs_ring_bo;
922 struct radeon_winsys_bo *tess_rings_bo = queue->tess_rings_bo;
923 struct radeon_winsys_bo *task_rings_bo = queue->task_rings_bo;
924 struct radeon_winsys_bo *mesh_scratch_ring_bo = queue->mesh_scratch_ring_bo;
925 struct radeon_winsys_bo *ge_rings_bo = queue->ge_rings_bo;
926 struct radeon_winsys_bo *gds_bo = queue->gds_bo;
927 struct radeon_winsys_bo *gds_oa_bo = queue->gds_oa_bo;
928 struct radeon_cmdbuf *dest_cs[3] = {0};
929 const uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
930 VkResult result = VK_SUCCESS;
931
932 const bool add_sample_positions = !queue->ring_info.sample_positions && needs->sample_positions;
933 const uint32_t scratch_size = needs->scratch_size_per_wave * needs->scratch_waves;
934 const uint32_t queue_scratch_size = queue->ring_info.scratch_size_per_wave * queue->ring_info.scratch_waves;
935
936 if (scratch_size > queue_scratch_size) {
937 result = radv_bo_create(device, NULL, scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
938 RADV_BO_PRIORITY_SCRATCH, 0, true, &scratch_bo);
939 if (result != VK_SUCCESS)
940 goto fail;
941 radv_rmv_log_command_buffer_bo_create(device, scratch_bo, 0, 0, scratch_size);
942 }
943
944 const uint32_t compute_scratch_size = needs->compute_scratch_size_per_wave * needs->compute_scratch_waves;
945 const uint32_t compute_queue_scratch_size =
946 queue->ring_info.compute_scratch_size_per_wave * queue->ring_info.compute_scratch_waves;
947 if (compute_scratch_size > compute_queue_scratch_size) {
948 result = radv_bo_create(device, NULL, compute_scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
949 RADV_BO_PRIORITY_SCRATCH, 0, true, &compute_scratch_bo);
950 if (result != VK_SUCCESS)
951 goto fail;
952 radv_rmv_log_command_buffer_bo_create(device, compute_scratch_bo, 0, 0, compute_scratch_size);
953 }
954
955 if (needs->esgs_ring_size > queue->ring_info.esgs_ring_size) {
956 result = radv_bo_create(device, NULL, needs->esgs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
957 RADV_BO_PRIORITY_SCRATCH, 0, true, &esgs_ring_bo);
958 if (result != VK_SUCCESS)
959 goto fail;
960 radv_rmv_log_command_buffer_bo_create(device, esgs_ring_bo, 0, 0, needs->esgs_ring_size);
961 }
962
963 if (needs->gsvs_ring_size > queue->ring_info.gsvs_ring_size) {
964 result = radv_bo_create(device, NULL, needs->gsvs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
965 RADV_BO_PRIORITY_SCRATCH, 0, true, &gsvs_ring_bo);
966 if (result != VK_SUCCESS)
967 goto fail;
968 radv_rmv_log_command_buffer_bo_create(device, gsvs_ring_bo, 0, 0, needs->gsvs_ring_size);
969 }
970
971 if (!queue->ring_info.tess_rings && needs->tess_rings) {
972 uint64_t tess_rings_size = pdev->hs.tess_offchip_ring_offset + pdev->hs.tess_offchip_ring_size;
973 result = radv_bo_create(device, NULL, tess_rings_size, 256, RADEON_DOMAIN_VRAM, ring_bo_flags,
974 RADV_BO_PRIORITY_SCRATCH, 0, true, &tess_rings_bo);
975 if (result != VK_SUCCESS)
976 goto fail;
977 radv_rmv_log_command_buffer_bo_create(device, tess_rings_bo, 0, 0, tess_rings_size);
978 }
979
980 if (!queue->ring_info.task_rings && needs->task_rings) {
981 assert(pdev->info.gfx_level >= GFX10_3);
982
983 /* We write the control buffer from the CPU, so we need to grant CPU access to the BO.
984 * The draw ring needs to be zero-initialized, otherwise the ready bits will be incorrect.
985 */
986 uint32_t task_rings_bo_flags =
987 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM;
988
989 result = radv_bo_create(device, NULL, pdev->task_info.bo_size_bytes, 256, RADEON_DOMAIN_VRAM, task_rings_bo_flags,
990 RADV_BO_PRIORITY_SCRATCH, 0, true, &task_rings_bo);
991 if (result != VK_SUCCESS)
992 goto fail;
993 radv_rmv_log_command_buffer_bo_create(device, task_rings_bo, 0, 0, pdev->task_info.bo_size_bytes);
994
995 result = radv_initialise_task_control_buffer(device, task_rings_bo);
996 if (result != VK_SUCCESS)
997 goto fail;
998 }
999
1000 if (!queue->ring_info.mesh_scratch_ring && needs->mesh_scratch_ring) {
1001 assert(pdev->info.gfx_level >= GFX10_3);
1002 result =
1003 radv_bo_create(device, NULL, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES, 256,
1004 RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true, &mesh_scratch_ring_bo);
1005
1006 if (result != VK_SUCCESS)
1007 goto fail;
1008 radv_rmv_log_command_buffer_bo_create(device, mesh_scratch_ring_bo, 0, 0,
1009 RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES);
1010 }
1011
1012 if (!queue->ring_info.ge_rings && needs->ge_rings) {
1013 assert(pdev->info.gfx_level >= GFX11);
1014 result = radv_bo_create(device, NULL, pdev->info.total_attribute_pos_prim_ring_size, 2 * 1024 * 1024 /* 2MiB */,
1015 RADEON_DOMAIN_VRAM, RADEON_FLAG_32BIT | RADEON_FLAG_DISCARDABLE | ring_bo_flags,
1016 RADV_BO_PRIORITY_SCRATCH, 0, true, &ge_rings_bo);
1017 if (result != VK_SUCCESS)
1018 goto fail;
1019 radv_rmv_log_command_buffer_bo_create(device, ge_rings_bo, 0, 0, pdev->info.total_attribute_pos_prim_ring_size);
1020 }
1021
1022 if (!queue->ring_info.gds && needs->gds) {
1023 assert(pdev->info.gfx_level >= GFX10 && pdev->info.gfx_level < GFX12);
1024
1025 /* 4 streamout GDS counters.
1026 * We need 256B (64 dw) of GDS, otherwise streamout hangs.
1027 */
1028 result = radv_bo_create(device, NULL, 256, 4, RADEON_DOMAIN_GDS, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true,
1029 &gds_bo);
1030 if (result != VK_SUCCESS)
1031 goto fail;
1032
1033 /* Add the GDS BO to our global BO list to prevent the kernel from emitting a GDS switch and
1034 * resetting the state when a compute queue is used.
1035 */
1036 result = device->ws->buffer_make_resident(ws, gds_bo, true);
1037 if (result != VK_SUCCESS)
1038 goto fail;
1039 }
1040
1041 if (!queue->ring_info.gds_oa && needs->gds_oa) {
1042 assert(pdev->info.gfx_level >= GFX10 && pdev->info.gfx_level < GFX12);
1043
1044 result = radv_bo_create(device, NULL, 1, 1, RADEON_DOMAIN_OA, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true,
1045 &gds_oa_bo);
1046 if (result != VK_SUCCESS)
1047 goto fail;
1048
1049 /* Add the GDS OA BO to our global BO list to prevent the kernel from emitting a GDS switch
1050 * and resetting the state when a compute queue is used.
1051 */
1052 result = device->ws->buffer_make_resident(ws, gds_oa_bo, true);
1053 if (result != VK_SUCCESS)
1054 goto fail;
1055 }
1056
1057 /* Re-initialize the descriptor BO when any ring BOs changed.
1058 *
1059 * Additionally, make sure to create the descriptor BO for the compute queue
1060 * when it uses the task shader rings. The task rings BO is shared between the
1061 * GFX and compute queues and already initialized here.
1062 */
1063 if ((queue->qf == RADV_QUEUE_COMPUTE && !descriptor_bo && task_rings_bo) || scratch_bo != queue->scratch_bo ||
1064 esgs_ring_bo != queue->esgs_ring_bo || gsvs_ring_bo != queue->gsvs_ring_bo ||
1065 tess_rings_bo != queue->tess_rings_bo || task_rings_bo != queue->task_rings_bo ||
1066 mesh_scratch_ring_bo != queue->mesh_scratch_ring_bo || ge_rings_bo != queue->ge_rings_bo ||
1067 add_sample_positions) {
1068 const uint32_t size = 304;
1069
1070 result = radv_bo_create(device, NULL, size, 4096, RADEON_DOMAIN_VRAM,
1071 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
1072 RADV_BO_PRIORITY_DESCRIPTOR, 0, true, &descriptor_bo);
1073 if (result != VK_SUCCESS)
1074 goto fail;
1075 }
1076
1077 if (descriptor_bo != queue->descriptor_bo) {
1078 uint32_t *map = (uint32_t *)radv_buffer_map(ws, descriptor_bo);
1079 if (!map) {
1080 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1081 goto fail;
1082 }
1083
1084 radv_fill_shader_rings(device, map, scratch_bo, needs->esgs_ring_size, esgs_ring_bo, needs->gsvs_ring_size,
1085 gsvs_ring_bo, tess_rings_bo, task_rings_bo, mesh_scratch_ring_bo, ge_rings_bo);
1086
1087 ws->buffer_unmap(ws, descriptor_bo, false);
1088 }
1089
1090 for (int i = 0; i < 3; ++i) {
1091 enum rgp_flush_bits sqtt_flush_bits = 0;
1092 struct radeon_cmdbuf *cs = NULL;
1093 cs = ws->cs_create(ws, radv_queue_family_to_ring(pdev, queue->qf), false);
1094 if (!cs) {
1095 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1096 goto fail;
1097 }
1098
1099 radeon_check_space(ws, cs, 512);
1100 dest_cs[i] = cs;
1101
1102 if (scratch_bo)
1103 radv_cs_add_buffer(ws, cs, scratch_bo);
1104
1105 /* Emit initial configuration. */
1106 switch (queue->qf) {
1107 case RADV_QUEUE_GENERAL:
1108 if (queue->uses_shadow_regs)
1109 radv_emit_shadow_regs_preamble(cs, device, queue);
1110 radv_init_graphics_state(cs, device);
1111
1112 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo || task_rings_bo) {
1113 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1114 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1115
1116 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1117 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1118 }
1119
1120 radv_emit_gs_ring_sizes(device, cs, esgs_ring_bo, needs->esgs_ring_size, gsvs_ring_bo, needs->gsvs_ring_size);
1121 radv_emit_tess_factor_ring(device, cs, tess_rings_bo);
1122 radv_emit_task_rings(device, cs, task_rings_bo, false);
1123 radv_emit_ge_rings(device, cs, ge_rings_bo);
1124 radv_emit_graphics_shader_pointers(device, cs, descriptor_bo);
1125 radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1126 compute_scratch_bo);
1127 radv_emit_graphics_scratch(device, cs, needs->scratch_size_per_wave, needs->scratch_waves, scratch_bo);
1128 break;
1129 case RADV_QUEUE_COMPUTE:
1130 radv_emit_compute(device, cs, true);
1131
1132 if (task_rings_bo) {
1133 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1134 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1135 }
1136
1137 radv_emit_task_rings(device, cs, task_rings_bo, true);
1138 radv_emit_compute_shader_pointers(device, cs, descriptor_bo);
1139 radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1140 compute_scratch_bo);
1141 break;
1142 default:
1143 break;
1144 }
1145
1146 if (i < 2) {
1147 /* The two initial preambles have a cache flush at the beginning. */
1148 const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
1149 enum radv_cmd_flush_bits flush_bits = RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE |
1150 RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2 |
1151 RADV_CMD_FLAG_START_PIPELINE_STATS;
1152
1153 if (i == 0) {
1154 /* The full flush preamble should also wait for previous shader work to finish. */
1155 flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1156 if (queue->qf == RADV_QUEUE_GENERAL)
1157 flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1158 }
1159
1160 radv_cs_emit_cache_flush(ws, cs, gfx_level, NULL, 0, queue->qf, flush_bits, &sqtt_flush_bits, 0);
1161 }
1162
1163 result = ws->cs_finalize(cs);
1164 if (result != VK_SUCCESS)
1165 goto fail;
1166 }
1167
1168 if (queue->initial_full_flush_preamble_cs)
1169 ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1170
1171 if (queue->initial_preamble_cs)
1172 ws->cs_destroy(queue->initial_preamble_cs);
1173
1174 if (queue->continue_preamble_cs)
1175 ws->cs_destroy(queue->continue_preamble_cs);
1176
1177 queue->initial_full_flush_preamble_cs = dest_cs[0];
1178 queue->initial_preamble_cs = dest_cs[1];
1179 queue->continue_preamble_cs = dest_cs[2];
1180
1181 if (scratch_bo != queue->scratch_bo) {
1182 if (queue->scratch_bo) {
1183 radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
1184 radv_bo_destroy(device, NULL, queue->scratch_bo);
1185 }
1186 queue->scratch_bo = scratch_bo;
1187 }
1188
1189 if (compute_scratch_bo != queue->compute_scratch_bo) {
1190 if (queue->compute_scratch_bo) {
1191 radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
1192 radv_bo_destroy(device, NULL, queue->compute_scratch_bo);
1193 }
1194 queue->compute_scratch_bo = compute_scratch_bo;
1195 }
1196
1197 if (esgs_ring_bo != queue->esgs_ring_bo) {
1198 if (queue->esgs_ring_bo) {
1199 radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
1200 radv_bo_destroy(device, NULL, queue->esgs_ring_bo);
1201 }
1202 queue->esgs_ring_bo = esgs_ring_bo;
1203 }
1204
1205 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1206 if (queue->gsvs_ring_bo) {
1207 radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
1208 radv_bo_destroy(device, NULL, queue->gsvs_ring_bo);
1209 }
1210 queue->gsvs_ring_bo = gsvs_ring_bo;
1211 }
1212
1213 if (descriptor_bo != queue->descriptor_bo) {
1214 if (queue->descriptor_bo)
1215 radv_bo_destroy(device, NULL, queue->descriptor_bo);
1216 queue->descriptor_bo = descriptor_bo;
1217 }
1218
1219 queue->tess_rings_bo = tess_rings_bo;
1220 queue->task_rings_bo = task_rings_bo;
1221 queue->mesh_scratch_ring_bo = mesh_scratch_ring_bo;
1222 queue->ge_rings_bo = ge_rings_bo;
1223 queue->gds_bo = gds_bo;
1224 queue->gds_oa_bo = gds_oa_bo;
1225 queue->ring_info = *needs;
1226 return VK_SUCCESS;
1227 fail:
1228 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1229 if (dest_cs[i])
1230 ws->cs_destroy(dest_cs[i]);
1231 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1232 radv_bo_destroy(device, NULL, descriptor_bo);
1233 if (scratch_bo && scratch_bo != queue->scratch_bo)
1234 radv_bo_destroy(device, NULL, scratch_bo);
1235 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1236 radv_bo_destroy(device, NULL, compute_scratch_bo);
1237 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1238 radv_bo_destroy(device, NULL, esgs_ring_bo);
1239 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1240 radv_bo_destroy(device, NULL, gsvs_ring_bo);
1241 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
1242 radv_bo_destroy(device, NULL, tess_rings_bo);
1243 if (task_rings_bo && task_rings_bo != queue->task_rings_bo)
1244 radv_bo_destroy(device, NULL, task_rings_bo);
1245 if (ge_rings_bo && ge_rings_bo != queue->ge_rings_bo)
1246 radv_bo_destroy(device, NULL, ge_rings_bo);
1247 if (gds_bo && gds_bo != queue->gds_bo) {
1248 ws->buffer_make_resident(ws, queue->gds_bo, false);
1249 radv_bo_destroy(device, NULL, gds_bo);
1250 }
1251 if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo) {
1252 ws->buffer_make_resident(ws, queue->gds_oa_bo, false);
1253 radv_bo_destroy(device, NULL, gds_oa_bo);
1254 }
1255
1256 return vk_error(queue, result);
1257 }
1258
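/* Gather the ring and scratch requirements of all command buffers in a submission and
 * rebuild the preambles when anything grew. For queues other than GFX/compute only the
 * presence of a gang follower is recorded.
 */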
1259 static VkResult
1260 radv_update_preambles(struct radv_queue_state *queue, struct radv_device *device,
1261 struct vk_command_buffer *const *cmd_buffers, uint32_t cmd_buffer_count, bool *use_perf_counters,
1262 bool *has_follower)
1263 {
1264 const struct radv_physical_device *pdev = radv_device_physical(device);
1265
1266 if (queue->qf != RADV_QUEUE_GENERAL && queue->qf != RADV_QUEUE_COMPUTE) {
1267 for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1268 struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1269
1270 *has_follower |= !!cmd_buffer->gang.cs;
1271 }
1272
1273 return VK_SUCCESS;
1274 }
1275
1276 /* Figure out the needs of the current submission.
1277 * Start by copying the queue's current info.
1278 * This is done because we only allow two possible behaviours for these buffers:
1279 * - Grow when the newly needed amount is larger than what we had
1280 * - Allocate the max size and reuse it, but don't free it until the queue is destroyed
1281 */
1282 struct radv_queue_ring_info needs = queue->ring_info;
1283 *use_perf_counters = false;
1284 *has_follower = false;
1285
1286 for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1287 struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1288
1289 needs.scratch_size_per_wave = MAX2(needs.scratch_size_per_wave, cmd_buffer->scratch_size_per_wave_needed);
1290 needs.scratch_waves = MAX2(needs.scratch_waves, cmd_buffer->scratch_waves_wanted);
1291 needs.compute_scratch_size_per_wave =
1292 MAX2(needs.compute_scratch_size_per_wave, cmd_buffer->compute_scratch_size_per_wave_needed);
1293 needs.compute_scratch_waves = MAX2(needs.compute_scratch_waves, cmd_buffer->compute_scratch_waves_wanted);
1294 needs.esgs_ring_size = MAX2(needs.esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1295 needs.gsvs_ring_size = MAX2(needs.gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1296 needs.tess_rings |= cmd_buffer->tess_rings_needed;
1297 needs.task_rings |= cmd_buffer->task_rings_needed;
1298 needs.mesh_scratch_ring |= cmd_buffer->mesh_scratch_ring_needed;
1299 needs.gds |= cmd_buffer->gds_needed;
1300 needs.gds_oa |= cmd_buffer->gds_oa_needed;
1301 needs.sample_positions |= cmd_buffer->sample_positions_needed;
1302 *use_perf_counters |= cmd_buffer->state.uses_perf_counters;
1303 *has_follower |= !!cmd_buffer->gang.cs;
1304 }
1305
1306 /* Sanitize scratch size information. */
1307 needs.scratch_waves =
1308 needs.scratch_size_per_wave ? MIN2(needs.scratch_waves, UINT32_MAX / needs.scratch_size_per_wave) : 0;
1309 needs.compute_scratch_waves =
1310 needs.compute_scratch_size_per_wave
1311 ? MIN2(needs.compute_scratch_waves, UINT32_MAX / needs.compute_scratch_size_per_wave)
1312 : 0;
1313
1314 if (pdev->info.gfx_level >= GFX11 && queue->qf == RADV_QUEUE_GENERAL) {
1315 needs.ge_rings = true;
1316 }
1317
1318 /* Return early if we already match these needs.
1319 * Note that it's not possible for any of the needed values to be less
1320 * than what the queue already had, because we only ever increase the allocated size.
1321 */
1322 if (queue->initial_full_flush_preamble_cs && queue->ring_info.scratch_size_per_wave == needs.scratch_size_per_wave &&
1323 queue->ring_info.scratch_waves == needs.scratch_waves &&
1324 queue->ring_info.compute_scratch_size_per_wave == needs.compute_scratch_size_per_wave &&
1325 queue->ring_info.compute_scratch_waves == needs.compute_scratch_waves &&
1326 queue->ring_info.esgs_ring_size == needs.esgs_ring_size &&
1327 queue->ring_info.gsvs_ring_size == needs.gsvs_ring_size && queue->ring_info.tess_rings == needs.tess_rings &&
1328 queue->ring_info.task_rings == needs.task_rings &&
1329 queue->ring_info.mesh_scratch_ring == needs.mesh_scratch_ring && queue->ring_info.ge_rings == needs.ge_rings &&
1330 queue->ring_info.gds == needs.gds && queue->ring_info.gds_oa == needs.gds_oa &&
1331 queue->ring_info.sample_positions == needs.sample_positions)
1332 return VK_SUCCESS;
1333
1334 return radv_update_preamble_cs(queue, device, &needs);
1335 }
1336
1337 /* Creates a postamble CS that executes cache flush commands
1338 * that we can use at the end of each submission.
1339 *
1340 * GFX6: The kernel flushes L2 before shaders are finished.
1341 * Therefore we need to wait for idle at the end of each submission.
1342 */
1343 static VkResult
1344 radv_create_flush_postamble(struct radv_queue *queue)
1345 {
1346 const struct radv_device *device = radv_queue_device(queue);
1347 const struct radv_physical_device *pdev = radv_device_physical(device);
1348 const enum amd_ip_type ip = radv_queue_family_to_ring(pdev, queue->state.qf);
1349 struct radeon_winsys *ws = device->ws;
1350
1351 struct radeon_cmdbuf *cs = ws->cs_create(ws, ip, false);
1352 if (!cs)
1353 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1354
1355 radeon_check_space(ws, cs, 256);
1356
1357 const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
1358 enum radv_cmd_flush_bits flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
1359
1360 if (ip == AMD_IP_GFX)
1361 flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1362
1363 enum rgp_flush_bits sqtt_flush_bits = 0;
1364 radv_cs_emit_cache_flush(ws, cs, gfx_level, NULL, 0, queue->state.qf, flush_bits, &sqtt_flush_bits, 0);
1365
1366 VkResult r = ws->cs_finalize(cs);
1367 if (r != VK_SUCCESS) {
1368 ws->cs_destroy(cs);
1369 return r;
1370 }
1371
1372 queue->state.flush_postamble_cs = cs;
1373 return VK_SUCCESS;
1374 }
1375
1376 static VkResult
1377 radv_create_gang_wait_preambles_postambles(struct radv_queue *queue)
1378 {
1379 struct radv_device *device = radv_queue_device(queue);
1380 const struct radv_physical_device *pdev = radv_device_physical(device);
1381
1382 if (queue->gang_sem_bo)
1383 return VK_SUCCESS;
1384
1385 VkResult r = VK_SUCCESS;
1386 struct radeon_winsys *ws = device->ws;
1387 const enum amd_ip_type leader_ip = radv_queue_family_to_ring(pdev, queue->state.qf);
1388 struct radeon_winsys_bo *gang_sem_bo = NULL;
1389
1390 /* Gang semaphores BO.
1391 * DWORD 0: used in preambles, gang leader writes, gang members wait.
1392 * DWORD 1: used in postambles, gang leader waits, gang members write.
1393 */
1394 r = radv_bo_create(device, NULL, 8, 4, RADEON_DOMAIN_VRAM,
1395 RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM, RADV_BO_PRIORITY_SCRATCH, 0, true,
1396 &gang_sem_bo);
1397 if (r != VK_SUCCESS)
1398 return r;
1399
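/* One wait preamble/postamble pair for the gang leader and one for the ACE follower. */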
1400 struct radeon_cmdbuf *leader_pre_cs = ws->cs_create(ws, leader_ip, false);
1401 struct radeon_cmdbuf *leader_post_cs = ws->cs_create(ws, leader_ip, false);
1402 struct radeon_cmdbuf *ace_pre_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1403 struct radeon_cmdbuf *ace_post_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1404
1405 if (!leader_pre_cs || !leader_post_cs || !ace_pre_cs || !ace_post_cs) {
1406 r = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1407 goto fail;
1408 }
1409
1410 radeon_check_space(ws, leader_pre_cs, 256);
1411 radeon_check_space(ws, leader_post_cs, 256);
1412 radeon_check_space(ws, ace_pre_cs, 256);
1413 radeon_check_space(ws, ace_post_cs, 256);
1414
1415 radv_cs_add_buffer(ws, leader_pre_cs, gang_sem_bo);
1416 radv_cs_add_buffer(ws, leader_post_cs, gang_sem_bo);
1417 radv_cs_add_buffer(ws, ace_pre_cs, gang_sem_bo);
1418 radv_cs_add_buffer(ws, ace_post_cs, gang_sem_bo);
1419
1420 const uint64_t ace_wait_va = radv_buffer_get_va(gang_sem_bo);
1421 const uint64_t leader_wait_va = ace_wait_va + 4;
1422 const uint32_t zero = 0;
1423 const uint32_t one = 1;
1424
1425 /* Preambles for gang submission.
1426 * Make gang members wait until the gang leader starts.
1427 * Userspace is required to emit this wait to make sure it behaves correctly
1428 * in a multi-process environment, because task shader dispatches are not
1429 * meant to be executed on multiple compute engines at the same time.
1430 */
1431 radv_cp_wait_mem(ace_pre_cs, RADV_QUEUE_COMPUTE, WAIT_REG_MEM_GREATER_OR_EQUAL, ace_wait_va, 1, 0xffffffff);
1432 radv_cs_write_data(device, ace_pre_cs, RADV_QUEUE_COMPUTE, V_370_ME, ace_wait_va, 1, &zero, false);
1433 radv_cs_write_data(device, leader_pre_cs, queue->state.qf, V_370_ME, ace_wait_va, 1, &one, false);
1434
1435 /* Create postambles for gang submission.
1436 * This ensures that the gang leader waits for the whole gang,
1437 * which is necessary because the kernel signals the userspace fence
1438 * as soon as the gang leader is done, which may lead to bugs because the
1439 * same command buffers could be submitted again while still being executed.
1440 */
1441 radv_cp_wait_mem(leader_post_cs, queue->state.qf, WAIT_REG_MEM_GREATER_OR_EQUAL, leader_wait_va, 1, 0xffffffff);
1442 radv_cs_write_data(device, leader_post_cs, queue->state.qf, V_370_ME, leader_wait_va, 1, &zero, false);
1443 radv_cs_emit_write_event_eop(ace_post_cs, pdev->info.gfx_level, RADV_QUEUE_COMPUTE, V_028A90_BOTTOM_OF_PIPE_TS, 0,
1444 EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, leader_wait_va, 1, 0);
1445
1446 r = ws->cs_finalize(leader_pre_cs);
1447 if (r != VK_SUCCESS)
1448 goto fail;
1449 r = ws->cs_finalize(leader_post_cs);
1450 if (r != VK_SUCCESS)
1451 goto fail;
1452 r = ws->cs_finalize(ace_pre_cs);
1453 if (r != VK_SUCCESS)
1454 goto fail;
1455 r = ws->cs_finalize(ace_post_cs);
1456 if (r != VK_SUCCESS)
1457 goto fail;
1458
1459 queue->gang_sem_bo = gang_sem_bo;
1460 queue->state.gang_wait_preamble_cs = leader_pre_cs;
1461 queue->state.gang_wait_postamble_cs = leader_post_cs;
1462 queue->follower_state->gang_wait_preamble_cs = ace_pre_cs;
1463 queue->follower_state->gang_wait_postamble_cs = ace_post_cs;
1464
1465 return VK_SUCCESS;
1466
1467 fail:
1468 if (leader_pre_cs)
1469 ws->cs_destroy(leader_pre_cs);
1470 if (leader_post_cs)
1471 ws->cs_destroy(leader_post_cs);
1472 if (ace_pre_cs)
1473 ws->cs_destroy(ace_pre_cs);
1474 if (ace_post_cs)
1475 ws->cs_destroy(ace_post_cs);
1476 if (gang_sem_bo)
1477 radv_bo_destroy(device, &queue->vk.base, gang_sem_bo);
1478
1479 return r;
1480 }
1481
1482 static bool
1483 radv_queue_init_follower_state(struct radv_queue *queue)
1484 {
1485 if (queue->follower_state)
1486 return true;
1487
1488 queue->follower_state = calloc(1, sizeof(struct radv_queue_state));
1489 if (!queue->follower_state)
1490 return false;
1491
1492 queue->follower_state->qf = RADV_QUEUE_COMPUTE;
1493 return true;
1494 }
1495
1496 static VkResult
1497 radv_update_gang_preambles(struct radv_queue *queue)
1498 {
1499 struct radv_device *device = radv_queue_device(queue);
1500
1501 if (!radv_queue_init_follower_state(queue))
1502 return VK_ERROR_OUT_OF_HOST_MEMORY;
1503
1504 VkResult r = VK_SUCCESS;
1505
1506 /* Copy task rings state.
1507 * Task shaders that are submitted on the ACE queue need to share
1508 * their ring buffers with the mesh shaders on the GFX queue.
1509 */
1510 queue->follower_state->ring_info.task_rings = queue->state.ring_info.task_rings;
1511 queue->follower_state->task_rings_bo = queue->state.task_rings_bo;
1512
1513 /* Copy some needed states from the parent queue state.
1514 * These can only increase so it's okay to copy them as-is without checking.
1515 * Note, task shaders use the scratch size from their graphics pipeline.
1516 */
1517 struct radv_queue_ring_info needs = queue->follower_state->ring_info;
1518 needs.compute_scratch_size_per_wave = queue->state.ring_info.scratch_size_per_wave;
1519 needs.compute_scratch_waves = queue->state.ring_info.scratch_waves;
1520 needs.task_rings = queue->state.ring_info.task_rings;
1521
1522 r = radv_update_preamble_cs(queue->follower_state, device, &needs);
1523 if (r != VK_SUCCESS)
1524 return r;
1525
1526 r = radv_create_gang_wait_preambles_postambles(queue);
1527 if (r != VK_SUCCESS)
1528 return r;
1529
1530 return VK_SUCCESS;
1531 }
1532
1533 static struct radeon_cmdbuf *
1534 radv_create_perf_counter_lock_cs(struct radv_device *device, unsigned pass, bool unlock)
1535 {
1536 struct radeon_cmdbuf **cs_ref = &device->perf_counter_lock_cs[pass * 2 + (unlock ? 1 : 0)];
1537 struct radeon_cmdbuf *cs;
1538
1539 if (*cs_ref)
1540 return *cs_ref;
1541
1542 cs = device->ws->cs_create(device->ws, AMD_IP_GFX, false);
1543 if (!cs)
1544 return NULL;
1545
1546 ASSERTED unsigned cdw = radeon_check_space(device->ws, cs, 21);
1547
1548 radv_cs_add_buffer(device->ws, cs, device->perf_counter_bo);
1549
1550 if (!unlock) {
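/* Acquire the lock: loop on a 32-bit atomic compare-swap that writes 1 once the mutex dword reads 0. */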
1551 uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1552 radeon_emit(cs, PKT3(PKT3_ATOMIC_MEM, 7, 0));
1553 radeon_emit(cs, ATOMIC_OP(TC_OP_ATOMIC_CMPSWAP_32) | ATOMIC_COMMAND(ATOMIC_COMMAND_LOOP));
1554 radeon_emit(cs, mutex_va); /* addr lo */
1555 radeon_emit(cs, mutex_va >> 32); /* addr hi */
1556 radeon_emit(cs, 1); /* data lo */
1557 radeon_emit(cs, 0); /* data hi */
1558 radeon_emit(cs, 0); /* compare data lo */
1559 radeon_emit(cs, 0); /* compare data hi */
1560 radeon_emit(cs, 10); /* loop interval */
1561 }
1562
1563 uint64_t va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_PASS_OFFSET;
1564 uint64_t unset_va = va + (unlock ? 8 * pass : 0);
1565 uint64_t set_va = va + (unlock ? 0 : 8 * pass);
1566
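/* Write 0 to one pass-selector slot and 1 to the other: locking selects the slot for this pass, unlocking reverts to slot 0. */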
1567 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1568 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1569 COPY_DATA_WR_CONFIRM);
1570 radeon_emit(cs, 0); /* immediate */
1571 radeon_emit(cs, 0);
1572 radeon_emit(cs, unset_va);
1573 radeon_emit(cs, unset_va >> 32);
1574
1575 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1576 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1577 COPY_DATA_WR_CONFIRM);
1578 radeon_emit(cs, 1); /* immediate */
1579 radeon_emit(cs, 0);
1580 radeon_emit(cs, set_va);
1581 radeon_emit(cs, set_va >> 32);
1582
1583 if (unlock) {
1584 uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1585
1586 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1587 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1588 COPY_DATA_WR_CONFIRM);
1589 radeon_emit(cs, 0); /* immediate */
1590 radeon_emit(cs, 0);
1591 radeon_emit(cs, mutex_va);
1592 radeon_emit(cs, mutex_va >> 32);
1593 }
1594
1595 assert(cs->cdw <= cdw);
1596
1597 VkResult result = device->ws->cs_finalize(cs);
1598 if (result != VK_SUCCESS) {
1599 device->ws->cs_destroy(cs);
1600 return NULL;
1601 }
1602
1603 /* All the casts are to avoid MSVC errors around pointer truncation in a non-taken
1604 * alternative.
1605 */
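/* Cache the CS; if another thread won the race, keep theirs and destroy ours. */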
1606 if (p_atomic_cmpxchg((uintptr_t *)cs_ref, 0, (uintptr_t)cs) != 0) {
1607 device->ws->cs_destroy(cs);
1608 }
1609
1610 return *cs_ref;
1611 }
1612
1613 static void
1614 radv_get_shader_upload_sync_wait(struct radv_device *device, uint64_t shader_upload_seq,
1615 struct vk_sync_wait *out_sync_wait)
1616 {
1617 struct vk_semaphore *semaphore = vk_semaphore_from_handle(device->shader_upload_sem);
1618 struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
1619 *out_sync_wait = (struct vk_sync_wait){
1620 .sync = sync,
1621 .wait_value = shader_upload_seq,
1622 .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
1623 };
1624 }
1625
1626 static VkResult
1627 radv_queue_submit_normal(struct radv_queue *queue, struct vk_queue_submit *submission)
1628 {
1629 struct radv_device *device = radv_queue_device(queue);
1630 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1631 bool use_ace = false;
1632 bool use_perf_counters = false;
1633 VkResult result;
1634 uint64_t shader_upload_seq = 0;
1635 uint32_t wait_count = submission->wait_count;
1636 struct vk_sync_wait *waits = submission->waits;
1637
1638 result = radv_update_preambles(&queue->state, device, submission->command_buffers, submission->command_buffer_count,
1639 &use_perf_counters, &use_ace);
1640 if (result != VK_SUCCESS)
1641 return result;
1642
1643 if (use_ace) {
1644 result = radv_update_gang_preambles(queue);
1645 if (result != VK_SUCCESS)
1646 return result;
1647 }
1648
1649 const unsigned cmd_buffer_count = submission->command_buffer_count;
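/* With fault detection enabled, submit one command buffer at a time so a hang can be narrowed down to a single command buffer. */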
1650 const unsigned max_cs_submission = radv_device_fault_detection_enabled(device) ? 1 : cmd_buffer_count;
1651 const unsigned cs_array_size = (use_ace ? 2 : 1) * MIN2(max_cs_submission, cmd_buffer_count);
1652
1653 struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) * cs_array_size);
1654 if (!cs_array)
1655 return VK_ERROR_OUT_OF_HOST_MEMORY;
1656
1657 if (radv_device_fault_detection_enabled(device))
1658 simple_mtx_lock(&device->trace_mtx);
1659
1660 for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
1661 struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j];
1662 shader_upload_seq = MAX2(shader_upload_seq, cmd_buffer->shader_upload_seq);
1663 }
1664
1665 if (shader_upload_seq > queue->last_shader_upload_seq) {
1666 /* Patch the wait array to add waiting for referenced shaders to upload. */
1667 struct vk_sync_wait *new_waits = malloc(sizeof(struct vk_sync_wait) * (wait_count + 1));
1668 if (!new_waits) {
1669 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1670 goto fail;
1671 }
1672
1673 memcpy(new_waits, submission->waits, sizeof(struct vk_sync_wait) * submission->wait_count);
1674 radv_get_shader_upload_sync_wait(device, shader_upload_seq, &new_waits[submission->wait_count]);
1675
1676 waits = new_waits;
1677 wait_count += 1;
1678 }
1679
1680 /* For fences on the same queue/VM, amdgpu doesn't wait until all processing is finished
1681 * before starting the next command buffer, so we need to do the wait here.
1682 */
1683 const bool need_wait = wait_count > 0;
1684 unsigned num_initial_preambles = 0;
1685 unsigned num_continue_preambles = 0;
1686 unsigned num_postambles = 0;
1687 struct radeon_cmdbuf *initial_preambles[5] = {0};
1688 struct radeon_cmdbuf *continue_preambles[5] = {0};
1689 struct radeon_cmdbuf *postambles[4] = {0};
1690
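/* Ring/scratch preambles and the perf counter lock/unlock CS only apply to GFX and compute queues. */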
1691 if (queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE) {
1692 initial_preambles[num_initial_preambles++] =
1693 need_wait ? queue->state.initial_full_flush_preamble_cs : queue->state.initial_preamble_cs;
1694
1695 continue_preambles[num_continue_preambles++] = queue->state.continue_preamble_cs;
1696
1697 if (use_perf_counters) {
1698 /* RADV only supports perf counters on the GFX queue currently. */
1699 assert(queue->state.qf == RADV_QUEUE_GENERAL);
1700
1701 /* Create the lock/unlock CS. */
1702 struct radeon_cmdbuf *perf_ctr_lock_cs =
1703 radv_create_perf_counter_lock_cs(device, submission->perf_pass_index, false);
1704 struct radeon_cmdbuf *perf_ctr_unlock_cs =
1705 radv_create_perf_counter_lock_cs(device, submission->perf_pass_index, true);
1706
1707 if (!perf_ctr_lock_cs || !perf_ctr_unlock_cs) {
1708 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1709 goto fail;
1710 }
1711
1712 initial_preambles[num_initial_preambles++] = perf_ctr_lock_cs;
1713 continue_preambles[num_continue_preambles++] = perf_ctr_lock_cs;
1714 postambles[num_postambles++] = perf_ctr_unlock_cs;
1715 }
1716 }
1717
1718 if (queue->state.flush_postamble_cs) {
1719 postambles[num_postambles++] = queue->state.flush_postamble_cs;
1720 }
1721
1722 const unsigned num_1q_initial_preambles = num_initial_preambles;
1723 const unsigned num_1q_continue_preambles = num_continue_preambles;
1724 const unsigned num_1q_postambles = num_postambles;
1725
1726 if (use_ace) {
1727 initial_preambles[num_initial_preambles++] = queue->state.gang_wait_preamble_cs;
1728 initial_preambles[num_initial_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1729 initial_preambles[num_initial_preambles++] =
1730 need_wait ? queue->follower_state->initial_full_flush_preamble_cs : queue->follower_state->initial_preamble_cs;
1731
1732 continue_preambles[num_continue_preambles++] = queue->state.gang_wait_preamble_cs;
1733 continue_preambles[num_continue_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1734 continue_preambles[num_continue_preambles++] = queue->follower_state->continue_preamble_cs;
1735
1736 postambles[num_postambles++] = queue->follower_state->gang_wait_postamble_cs;
1737 postambles[num_postambles++] = queue->state.gang_wait_postamble_cs;
1738 }
1739
1740 struct radv_winsys_submit_info submit = {
1741 .ip_type = radv_queue_ring(queue),
1742 .queue_index = queue->vk.index_in_family,
1743 .cs_array = cs_array,
1744 .cs_count = 0,
1745 .initial_preamble_count = num_1q_initial_preambles,
1746 .continue_preamble_count = num_1q_continue_preambles,
1747 .postamble_count = num_1q_postambles,
1748 .initial_preamble_cs = initial_preambles,
1749 .continue_preamble_cs = continue_preambles,
1750 .postamble_cs = postambles,
1751 .uses_shadow_regs = queue->state.uses_shadow_regs,
1752 };
1753
1754 for (uint32_t j = 0, advance; j < cmd_buffer_count; j += advance) {
1755 advance = MIN2(max_cs_submission, cmd_buffer_count - j);
1756 const bool last_submit = j + advance == cmd_buffer_count;
1757 bool submit_ace = false;
1758 unsigned num_submitted_cs = 0;
1759
1760 if (radv_device_fault_detection_enabled(device))
1761 device->trace_data->primary_id = 0;
1762
1763 struct radeon_cmdbuf *chainable = NULL;
1764 struct radeon_cmdbuf *chainable_ace = NULL;
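/* Track the last chainable CS on each queue so consecutive command buffers can be chained instead of submitted separately. */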
1765
1766 /* Add CS from submitted command buffers. */
1767 for (unsigned c = 0; c < advance; ++c) {
1768 struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j + c];
1769 assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1770 const bool can_chain_next = !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
1771
1772 /* Follower needs to be before the gang leader because the last CS must match the queue's IP type. */
1773 if (cmd_buffer->gang.cs) {
1774 device->ws->cs_unchain(cmd_buffer->gang.cs);
1775 if (!chainable_ace || !device->ws->cs_chain(chainable_ace, cmd_buffer->gang.cs, false)) {
1776 cs_array[num_submitted_cs++] = cmd_buffer->gang.cs;
1777
1778 /* Prevent chaining the gang leader when the follower couldn't be chained.
1779 * Otherwise, they would be in the wrong order.
1780 */
1781 chainable = NULL;
1782 }
1783
1784 chainable_ace = can_chain_next ? cmd_buffer->gang.cs : NULL;
1785 submit_ace = true;
1786 }
1787
1788 device->ws->cs_unchain(cmd_buffer->cs);
1789 if (!chainable || !device->ws->cs_chain(chainable, cmd_buffer->cs, queue->state.uses_shadow_regs)) {
1790 /* don't submit empty command buffers to the kernel. */
1791 if ((radv_queue_ring(queue) != AMD_IP_VCN_ENC && radv_queue_ring(queue) != AMD_IP_UVD) ||
1792 cmd_buffer->cs->cdw != 0)
1793 cs_array[num_submitted_cs++] = cmd_buffer->cs;
1794 }
1795
1796 chainable = can_chain_next ? cmd_buffer->cs : NULL;
1797 }
1798
1799 submit.cs_count = num_submitted_cs;
1800 submit.initial_preamble_count = submit_ace ? num_initial_preambles : num_1q_initial_preambles;
1801 submit.continue_preamble_count = submit_ace ? num_continue_preambles : num_1q_continue_preambles;
1802 submit.postamble_count = submit_ace ? num_postambles : num_1q_postambles;
1803
1804 result = device->ws->cs_submit(ctx, &submit, j == 0 ? wait_count : 0, waits,
1805 last_submit ? submission->signal_count : 0, submission->signals);
1806
1807 if (result != VK_SUCCESS)
1808 goto fail;
1809
1810 if (radv_device_fault_detection_enabled(device)) {
1811 result = radv_check_gpu_hangs(queue, &submit);
1812 }
1813
1814 if (device->tma_bo) {
1815 radv_check_trap_handler(queue);
1816 }
1817
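/* After the first batch, later submissions in this loop no longer need the full-flush preamble. */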
1818 initial_preambles[0] = queue->state.initial_preamble_cs;
1819 initial_preambles[1] = !use_ace ? NULL : queue->follower_state->initial_preamble_cs;
1820 }
1821
1822 queue->last_shader_upload_seq = MAX2(queue->last_shader_upload_seq, shader_upload_seq);
1823
1824 radv_dump_printf_data(device, stdout);
1825
1826 fail:
1827 free(cs_array);
1828 if (waits != submission->waits)
1829 free(waits);
1830 if (radv_device_fault_detection_enabled(device))
1831 simple_mtx_unlock(&device->trace_mtx);
1832
1833 return result;
1834 }
1835
1836 static void
1837 radv_report_gpuvm_fault(struct radv_device *device)
1838 {
1839 const struct radv_physical_device *pdev = radv_device_physical(device);
1840 struct radv_winsys_gpuvm_fault_info fault_info = {0};
1841
1842 if (!radv_vm_fault_occurred(device, &fault_info))
1843 return;
1844
1845 fprintf(stderr, "radv: GPUVM fault detected at address 0x%08" PRIx64 ".\n", fault_info.addr);
1846 ac_print_gpuvm_fault_status(stderr, pdev->info.gfx_level, fault_info.status);
1847 }
1848
1849 static VkResult
1850 radv_queue_sparse_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1851 {
1852 struct radv_queue *queue = (struct radv_queue *)vqueue;
1853 struct radv_device *device = radv_queue_device(queue);
1854 VkResult result;
1855
1856 result = radv_queue_submit_bind_sparse_memory(device, submission);
1857 if (result != VK_SUCCESS)
1858 goto fail;
1859
1860 /* We do a CPU wait here, in part to avoid adding more winsys mechanisms. With the likely kernel
1861 * explicit sync mechanism, we'd need a CPU wait anyway. This hasn't shown up as a performance issue
1862 * yet, but it does require the queue to always have its submission thread enabled. */
1863 result = vk_sync_wait_many(&device->vk, submission->wait_count, submission->waits, 0, UINT64_MAX);
1864 if (result != VK_SUCCESS)
1865 goto fail;
1866
1867 /* Ignore all the command buffers. They're necessarily empty anyway. */
1868
1869 for (unsigned i = 0; i < submission->signal_count; ++i) {
1870 result = vk_sync_signal(&device->vk, submission->signals[i].sync, submission->signals[i].signal_value);
1871 if (result != VK_SUCCESS)
1872 goto fail;
1873 }
1874
1875 fail:
1876 if (result != VK_SUCCESS) {
1877 /* When something bad happened during the submission, such as
1878 * an out of memory issue, it might be hard to recover from
1879 * this inconsistent state. To avoid this sort of problem, we
1880 * assume that we are in a really bad situation and return
1881 * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1882 * to submit the same job again to this device.
1883 */
1884 radv_report_gpuvm_fault(device);
1885 result = vk_device_set_lost(&device->vk, "vkQueueSubmit() failed");
1886 }
1887 return result;
1888 }
1889
1890 static VkResult
1891 radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1892 {
1893 struct radv_queue *queue = (struct radv_queue *)vqueue;
1894 struct radv_device *device = radv_queue_device(queue);
1895 const struct radv_physical_device *pdev = radv_device_physical(device);
1896 VkResult result;
1897
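/* Without a dedicated sparse queue, sparse binds are handled inline as part of regular submissions. */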
1898 if (!radv_sparse_queue_enabled(pdev)) {
1899 result = radv_queue_submit_bind_sparse_memory(device, submission);
1900 if (result != VK_SUCCESS)
1901 goto fail;
1902 } else {
1903 assert(!submission->buffer_bind_count && !submission->image_bind_count && !submission->image_opaque_bind_count);
1904 }
1905
1906 if (!submission->command_buffer_count && !submission->wait_count && !submission->signal_count)
1907 return VK_SUCCESS;
1908
1909 if (!submission->command_buffer_count) {
1910 result = radv_queue_submit_empty(queue, submission);
1911 } else {
1912 result = radv_queue_submit_normal(queue, submission);
1913 }
1914
1915 fail:
1916 if (result != VK_SUCCESS) {
1917 /* When something bad happened during the submission, such as
1918 * an out of memory issue, it might be hard to recover from
1919 * this inconsistent state. To avoid this sort of problem, we
1920 * assume that we are in a really bad situation and return
1921 * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1922 * to submit the same job again to this device.
1923 */
1924 radv_report_gpuvm_fault(device);
1925 result = vk_device_set_lost(&device->vk, "vkQueueSubmit() failed");
1926 }
1927 return result;
1928 }
1929
1930 bool
1931 radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
1932 {
1933 struct radv_device *device = radv_queue_device(queue);
1934 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
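/* Submit a single CS directly, with no waits, signals, preambles or postambles. */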
1935 struct radv_winsys_submit_info submit = {
1936 .ip_type = radv_queue_ring(queue),
1937 .queue_index = queue->vk.index_in_family,
1938 .cs_array = &cs,
1939 .cs_count = 1,
1940 };
1941
1942 VkResult result = device->ws->cs_submit(ctx, &submit, 0, NULL, 0, NULL);
1943 if (result != VK_SUCCESS)
1944 return false;
1945
1946 return true;
1947 }
1948
1949 int
1950 radv_queue_init(struct radv_device *device, struct radv_queue *queue, int idx,
1951 const VkDeviceQueueCreateInfo *create_info,
1952 const VkDeviceQueueGlobalPriorityCreateInfo *global_priority)
1953 {
1954 const struct radv_physical_device *pdev = radv_device_physical(device);
1955
1956 queue->priority = radv_get_queue_global_priority(global_priority);
1957 queue->hw_ctx = device->hw_ctx[queue->priority];
1958 queue->state.qf = vk_queue_to_radv(pdev, create_info->queueFamilyIndex);
1959
1960 VkResult result = vk_queue_init(&queue->vk, &device->vk, create_info, idx);
1961 if (result != VK_SUCCESS)
1962 return result;
1963
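/* Register shadowing is only used on the GFX queue, and only when it is enabled for the device. */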
1964 queue->state.uses_shadow_regs = device->uses_shadow_regs && queue->state.qf == RADV_QUEUE_GENERAL;
1965 if (queue->state.uses_shadow_regs) {
1966 result = radv_create_shadow_regs_preamble(device, &queue->state);
1967 if (result != VK_SUCCESS)
1968 goto fail;
1969 result = radv_init_shadowed_regs_buffer_state(device, queue);
1970 if (result != VK_SUCCESS)
1971 goto fail;
1972 }
1973
1974 if (pdev->info.gfx_level == GFX6 &&
1975 (queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE)) {
1976 result = radv_create_flush_postamble(queue);
1977 if (result != VK_SUCCESS)
1978 goto fail;
1979 }
1980
1981 if (queue->state.qf == RADV_QUEUE_SPARSE) {
1982 queue->vk.driver_submit = radv_queue_sparse_submit;
1983 vk_queue_enable_submit_thread(&queue->vk);
1984 } else {
1985 queue->vk.driver_submit = radv_queue_submit;
1986 }
1987 return VK_SUCCESS;
1988 fail:
1989 vk_queue_finish(&queue->vk);
1990 return result;
1991 }
1992
1993 static void
1994 radv_queue_state_finish(struct radv_queue_state *queue, struct radv_device *device)
1995 {
1996 radv_destroy_shadow_regs_preamble(device, queue, device->ws);
1997 if (queue->initial_full_flush_preamble_cs)
1998 device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1999 if (queue->initial_preamble_cs)
2000 device->ws->cs_destroy(queue->initial_preamble_cs);
2001 if (queue->continue_preamble_cs)
2002 device->ws->cs_destroy(queue->continue_preamble_cs);
2003 if (queue->gang_wait_preamble_cs)
2004 device->ws->cs_destroy(queue->gang_wait_preamble_cs);
2005 if (queue->gang_wait_postamble_cs)
2006 device->ws->cs_destroy(queue->gang_wait_postamble_cs);
2007 if (queue->flush_postamble_cs)
2008 device->ws->cs_destroy(queue->flush_postamble_cs);
2009 if (queue->descriptor_bo)
2010 radv_bo_destroy(device, NULL, queue->descriptor_bo);
2011 if (queue->scratch_bo) {
2012 radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
2013 radv_bo_destroy(device, NULL, queue->scratch_bo);
2014 }
2015 if (queue->esgs_ring_bo) {
2016 radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
2017 radv_bo_destroy(device, NULL, queue->esgs_ring_bo);
2018 }
2019 if (queue->gsvs_ring_bo) {
2020 radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
2021 radv_bo_destroy(device, NULL, queue->gsvs_ring_bo);
2022 }
2023 if (queue->tess_rings_bo) {
2024 radv_rmv_log_command_buffer_bo_destroy(device, queue->tess_rings_bo);
2025 radv_bo_destroy(device, NULL, queue->tess_rings_bo);
2026 }
2027 if (queue->task_rings_bo) {
2028 radv_rmv_log_command_buffer_bo_destroy(device, queue->task_rings_bo);
2029 radv_bo_destroy(device, NULL, queue->task_rings_bo);
2030 }
2031 if (queue->mesh_scratch_ring_bo) {
2032 radv_rmv_log_command_buffer_bo_destroy(device, queue->mesh_scratch_ring_bo);
2033 radv_bo_destroy(device, NULL, queue->mesh_scratch_ring_bo);
2034 }
2035 if (queue->ge_rings_bo) {
2036 radv_rmv_log_command_buffer_bo_destroy(device, queue->ge_rings_bo);
2037 radv_bo_destroy(device, NULL, queue->ge_rings_bo);
2038 }
2039 if (queue->gds_bo) {
2040 device->ws->buffer_make_resident(device->ws, queue->gds_bo, false);
2041 radv_bo_destroy(device, NULL, queue->gds_bo);
2042 }
2043 if (queue->gds_oa_bo) {
2044 device->ws->buffer_make_resident(device->ws, queue->gds_oa_bo, false);
2045 radv_bo_destroy(device, NULL, queue->gds_oa_bo);
2046 }
2047 if (queue->compute_scratch_bo) {
2048 radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
2049 radv_bo_destroy(device, NULL, queue->compute_scratch_bo);
2050 }
2051 }
2052
2053 void
2054 radv_queue_finish(struct radv_queue *queue)
2055 {
2056 struct radv_device *device = radv_queue_device(queue);
2057
2058 if (queue->follower_state) {
2059 /* Prevent double free */
2060 queue->follower_state->task_rings_bo = NULL;
2061
2062 /* Clean up the internal ACE queue state. */
2063 radv_queue_state_finish(queue->follower_state, device);
2064 free(queue->follower_state);
2065 }
2066
2067 if (queue->gang_sem_bo)
2068 radv_bo_destroy(device, &queue->vk.base, queue->gang_sem_bo);
2069
2070 radv_queue_state_finish(&queue->state, device);
2071 vk_queue_finish(&queue->vk);
2072 }
2073
2074 enum amd_ip_type
2075 radv_queue_ring(const struct radv_queue *queue)
2076 {
2077 struct radv_device *device = radv_queue_device(queue);
2078 const struct radv_physical_device *pdev = radv_device_physical(device);
2079 return radv_queue_family_to_ring(pdev, queue->state.qf);
2080 }
2081
2082 enum amd_ip_type
2083 radv_queue_family_to_ring(const struct radv_physical_device *pdev, enum radv_queue_family f)
2084 {
2085 switch (f) {
2086 case RADV_QUEUE_GENERAL:
2087 return AMD_IP_GFX;
2088 case RADV_QUEUE_COMPUTE:
2089 return AMD_IP_COMPUTE;
2090 case RADV_QUEUE_TRANSFER:
2091 return AMD_IP_SDMA;
2092 case RADV_QUEUE_VIDEO_DEC:
2093 return pdev->vid_decode_ip;
2094 case RADV_QUEUE_VIDEO_ENC:
2095 return AMD_IP_VCN_ENC;
2096 default:
2097 unreachable("Unknown queue family");
2098 }
2099 }
2100