1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * based in part on anv driver which is:
6  * Copyright © 2015 Intel Corporation
7  *
8  * SPDX-License-Identifier: MIT
9  */
10 
11 #include "radv_queue.h"
12 #include "radv_buffer.h"
13 #include "radv_cp_reg_shadowing.h"
14 #include "radv_cs.h"
15 #include "radv_debug.h"
16 #include "radv_device_memory.h"
17 #include "radv_image.h"
18 #include "radv_printf.h"
19 #include "radv_rmv.h"
20 #include "vk_semaphore.h"
21 #include "vk_sync.h"
22 
23 #include "ac_cmdbuf.h"
24 #include "ac_debug.h"
25 #include "ac_descriptors.h"
26 
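/* Translate the requested Vulkan global queue priority into the winsys context
 * priority used when creating the hardware context.
 */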
27 enum radeon_ctx_priority
28 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfo *pObj)
29 {
30    /* Default to MEDIUM when a specific global priority isn't requested */
31    if (!pObj)
32       return RADEON_CTX_PRIORITY_MEDIUM;
33 
34    switch (pObj->globalPriority) {
35    case VK_QUEUE_GLOBAL_PRIORITY_REALTIME:
36       return RADEON_CTX_PRIORITY_REALTIME;
37    case VK_QUEUE_GLOBAL_PRIORITY_HIGH:
38       return RADEON_CTX_PRIORITY_HIGH;
39    case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM:
40       return RADEON_CTX_PRIORITY_MEDIUM;
41    case VK_QUEUE_GLOBAL_PRIORITY_LOW:
42       return RADEON_CTX_PRIORITY_LOW;
43    default:
44       unreachable("Illegal global priority value");
45       return RADEON_CTX_PRIORITY_INVALID;
46    }
47 }
48 
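/* Apply sparse buffer binds to the buffer's virtual BO. Consecutive pBinds
 * entries that are contiguous in resource offset (and memory offset, for
 * non-NULL binds) are merged into a single radv_bo_virtual_bind() call.
 */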
49 static VkResult
50 radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind)
51 {
52    VK_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
53    VkResult result = VK_SUCCESS;
54 
55    struct radv_device_memory *mem = NULL;
56    VkDeviceSize resourceOffset = 0;
57    VkDeviceSize size = 0;
58    VkDeviceSize memoryOffset = 0;
59    for (uint32_t i = 0; i < bind->bindCount; ++i) {
60       struct radv_device_memory *cur_mem = NULL;
61 
62       if (bind->pBinds[i].memory != VK_NULL_HANDLE)
63          cur_mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
64       if (i && mem == cur_mem) {
65          if (mem) {
66             if (bind->pBinds[i].resourceOffset == resourceOffset + size &&
67                 bind->pBinds[i].memoryOffset == memoryOffset + size) {
68                size += bind->pBinds[i].size;
69                continue;
70             }
71          } else {
72             if (bind->pBinds[i].resourceOffset == resourceOffset + size) {
73                size += bind->pBinds[i].size;
74                continue;
75             }
76          }
77       }
78       if (size) {
79          result = radv_bo_virtual_bind(device, &buffer->vk.base, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
80                                        memoryOffset);
81          if (result != VK_SUCCESS)
82             return result;
83       }
84       mem = cur_mem;
85       resourceOffset = bind->pBinds[i].resourceOffset;
86       size = bind->pBinds[i].size;
87       memoryOffset = bind->pBinds[i].memoryOffset;
88    }
89    if (size) {
90       result = radv_bo_virtual_bind(device, &buffer->vk.base, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
91                                     memoryOffset);
92    }
93 
94    return result;
95 }
96 
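/* Opaque image binds map byte ranges of the image's virtual BO directly, one
 * radv_bo_virtual_bind() call per pBinds entry.
 */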
97 static VkResult
98 radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind)
99 {
100    VK_FROM_HANDLE(radv_image, image, bind->image);
101    VkResult result;
102 
103    for (uint32_t i = 0; i < bind->bindCount; ++i) {
104       struct radv_device_memory *mem = NULL;
105 
106       if (bind->pBinds[i].memory != VK_NULL_HANDLE)
107          mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
108 
109       result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, bind->pBinds[i].resourceOffset,
110                                     bind->pBinds[i].size, mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
111       if (result != VK_SUCCESS)
112          return result;
113    }
114 
115    return VK_SUCCESS;
116 }
117 
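/* Non-opaque sparse image binds: convert the bound region from texels to
 * blocks, compute the mip/layer offset and pitch (GFX9+ uses the PRT level
 * layout, older chips the legacy surface layout), then either bind the whole
 * aligned region at once or fall back to one bind per row of tiles when the
 * region doesn't cover complete rows/slices.
 */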
118 static VkResult
119 radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMemoryBindInfo *bind)
120 {
121    VK_FROM_HANDLE(radv_image, image, bind->image);
122    const struct radv_physical_device *pdev = radv_device_physical(device);
123    struct radeon_surf *surface = &image->planes[0].surface;
124    uint32_t bs = vk_format_get_blocksize(image->vk.format);
125    VkResult result;
126 
127    for (uint32_t i = 0; i < bind->bindCount; ++i) {
128       struct radv_device_memory *mem = NULL;
129       uint64_t offset, depth_pitch;
130       uint32_t pitch;
131       uint64_t mem_offset = bind->pBinds[i].memoryOffset;
132       const uint32_t layer = bind->pBinds[i].subresource.arrayLayer;
133       const uint32_t level = bind->pBinds[i].subresource.mipLevel;
134 
135       VkExtent3D bind_extent = bind->pBinds[i].extent;
136       bind_extent.width = DIV_ROUND_UP(bind_extent.width, vk_format_get_blockwidth(image->vk.format));
137       bind_extent.height = DIV_ROUND_UP(bind_extent.height, vk_format_get_blockheight(image->vk.format));
138 
139       VkOffset3D bind_offset = bind->pBinds[i].offset;
140       bind_offset.x /= vk_format_get_blockwidth(image->vk.format);
141       bind_offset.y /= vk_format_get_blockheight(image->vk.format);
142 
143       if (bind->pBinds[i].memory != VK_NULL_HANDLE)
144          mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
145 
146       if (pdev->info.gfx_level >= GFX9) {
147          offset = surface->u.gfx9.surf_slice_size * layer + surface->u.gfx9.prt_level_offset[level];
148          pitch = surface->u.gfx9.prt_level_pitch[level];
149          depth_pitch = surface->u.gfx9.surf_slice_size;
150       } else {
151          depth_pitch = surface->u.legacy.level[level].slice_size_dw * 4;
152          offset = (uint64_t)surface->u.legacy.level[level].offset_256B * 256 + depth_pitch * layer;
153          pitch = surface->u.legacy.level[level].nblk_x;
154       }
155 
156       offset +=
157          bind_offset.z * depth_pitch + ((uint64_t)bind_offset.y * pitch * surface->prt_tile_depth +
158                                         (uint64_t)bind_offset.x * surface->prt_tile_height * surface->prt_tile_depth) *
159                                           bs;
160 
161       uint32_t aligned_extent_width = ALIGN(bind_extent.width, surface->prt_tile_width);
162       uint32_t aligned_extent_height = ALIGN(bind_extent.height, surface->prt_tile_height);
163       uint32_t aligned_extent_depth = ALIGN(bind_extent.depth, surface->prt_tile_depth);
164 
165       bool whole_subres = (bind_extent.height <= surface->prt_tile_height || aligned_extent_width == pitch) &&
166                           (bind_extent.depth <= surface->prt_tile_depth ||
167                            (uint64_t)aligned_extent_width * aligned_extent_height * bs == depth_pitch);
168 
169       if (whole_subres) {
170          uint64_t size = (uint64_t)aligned_extent_width * aligned_extent_height * aligned_extent_depth * bs;
171          result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, offset, size,
172                                        mem ? mem->bo : NULL, mem_offset);
173          if (result != VK_SUCCESS)
174             return result;
175       } else {
176          uint32_t img_y_increment = pitch * bs * surface->prt_tile_depth;
177          uint32_t mem_y_increment = aligned_extent_width * bs * surface->prt_tile_depth;
178          uint64_t mem_z_increment = (uint64_t)aligned_extent_width * aligned_extent_height * bs;
179          uint64_t size = mem_y_increment * surface->prt_tile_height;
180          for (unsigned z = 0; z < bind_extent.depth;
181               z += surface->prt_tile_depth, offset += depth_pitch * surface->prt_tile_depth) {
182             for (unsigned y = 0; y < bind_extent.height; y += surface->prt_tile_height) {
183                uint64_t bo_offset = offset + (uint64_t)img_y_increment * y;
184 
185                result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, bo_offset, size,
186                                              mem ? mem->bo : NULL,
187                                              mem_offset + (uint64_t)mem_y_increment * y + mem_z_increment * z);
188                if (result != VK_SUCCESS)
189                   return result;
190             }
191          }
192       }
193    }
194 
195    return VK_SUCCESS;
196 }
197 
198 static VkResult
199 radv_queue_submit_bind_sparse_memory(struct radv_device *device, struct vk_queue_submit *submission)
200 {
201    for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
202       VkResult result = radv_sparse_buffer_bind_memory(device, submission->buffer_binds + i);
203       if (result != VK_SUCCESS)
204          return result;
205    }
206 
207    for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
208       VkResult result = radv_sparse_image_opaque_bind_memory(device, submission->image_opaque_binds + i);
209       if (result != VK_SUCCESS)
210          return result;
211    }
212 
213    for (uint32_t i = 0; i < submission->image_bind_count; ++i) {
214       VkResult result = radv_sparse_image_bind_memory(device, submission->image_binds + i);
215       if (result != VK_SUCCESS)
216          return result;
217    }
218 
219    return VK_SUCCESS;
220 }
221 
222 static VkResult
223 radv_queue_submit_empty(struct radv_queue *queue, struct vk_queue_submit *submission)
224 {
225    struct radv_device *device = radv_queue_device(queue);
226    struct radeon_winsys_ctx *ctx = queue->hw_ctx;
227    struct radv_winsys_submit_info submit = {
228       .ip_type = radv_queue_ring(queue),
229       .queue_index = queue->vk.index_in_family,
230    };
231 
232    return device->ws->cs_submit(ctx, &submit, submission->wait_count, submission->waits, submission->signal_count,
233                                 submission->signals);
234 }
235 
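/* Build a 4-dword ring buffer descriptor via ac_build_buffer_descriptor();
 * oob_select_raw picks OOB_SELECT_RAW instead of OOB_SELECT_DISABLED, and the
 * remaining parameters map directly to ac_buffer_state fields.
 */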
236 static void
237 radv_set_ring_buffer(const struct radv_physical_device *pdev, struct radeon_winsys_bo *bo, uint32_t offset,
238                      uint32_t ring_size, bool add_tid, bool swizzle_enable, bool oob_select_raw, uint32_t element_size,
239                      uint32_t index_stride, uint32_t desc[4])
240 {
241    const uint8_t oob_select = oob_select_raw ? V_008F0C_OOB_SELECT_RAW : V_008F0C_OOB_SELECT_DISABLED;
242    const uint64_t va = radv_buffer_get_va(bo) + offset;
243    const struct ac_buffer_state ac_state = {
244       .va = va,
245       .size = ring_size,
246       .format = PIPE_FORMAT_R32_FLOAT,
247       .swizzle =
248          {
249             PIPE_SWIZZLE_X,
250             PIPE_SWIZZLE_Y,
251             PIPE_SWIZZLE_Z,
252             PIPE_SWIZZLE_W,
253          },
254       .swizzle_enable = swizzle_enable,
255       .element_size = element_size,
256       .index_stride = index_stride,
257       .add_tid = add_tid,
258       .gfx10_oob_select = oob_select,
259    };
260 
261    ac_build_buffer_descriptor(pdev->info.gfx_level, &ac_state, desc);
262 }
263 
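/* Write the shader ring descriptors into the descriptor BO, in this fixed
 * layout (in dwords): scratch rsrc (4), ES->GS ring for ES and GS (8),
 * GS->VS ring for VS and GS (8), tess factor + offchip rings (8), task draw +
 * payload rings (8), mesh scratch ring (4), attribute/pos/prim ring (4),
 * followed by the 1x/2x/4x/8x sample position tables.
 */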
264 static void
265 radv_fill_shader_rings(struct radv_device *device, uint32_t *desc, struct radeon_winsys_bo *scratch_bo,
266                        uint32_t esgs_ring_size, struct radeon_winsys_bo *esgs_ring_bo, uint32_t gsvs_ring_size,
267                        struct radeon_winsys_bo *gsvs_ring_bo, struct radeon_winsys_bo *tess_rings_bo,
268                        struct radeon_winsys_bo *task_rings_bo, struct radeon_winsys_bo *mesh_scratch_ring_bo,
269                        struct radeon_winsys_bo *ge_rings_bo)
270 {
271    const struct radv_physical_device *pdev = radv_device_physical(device);
272 
273    if (scratch_bo) {
274       uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
275       uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
276 
277       if (pdev->info.gfx_level >= GFX11)
278          rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
279       else
280          rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
281 
282       desc[0] = scratch_va;
283       desc[1] = rsrc1;
284    }
285 
286    desc += 4;
287 
288    if (esgs_ring_bo) {
289       /* stride 0, num records - size, add tid, swizzle, elsize4,
290          index stride 64 */
291       radv_set_ring_buffer(pdev, esgs_ring_bo, 0, esgs_ring_size, true, true, false, 1, 3, &desc[0]);
292 
293       /* GS entry for ES->GS ring */
294       /* stride 0, num records - size, elsize0,
295          index stride 0 */
296       radv_set_ring_buffer(pdev, esgs_ring_bo, 0, esgs_ring_size, false, false, false, 0, 0, &desc[4]);
297    }
298 
299    desc += 8;
300 
301    if (gsvs_ring_bo) {
302       /* VS entry for GS->VS ring */
303       /* stride 0, num records - size, elsize0,
304          index stride 0 */
305       radv_set_ring_buffer(pdev, gsvs_ring_bo, 0, gsvs_ring_size, false, false, false, 0, 0, &desc[0]);
306 
307       /* stride gsvs_itemsize, num records 64
308          elsize 4, index stride 16 */
309       /* shader will patch stride and desc[2] */
310       radv_set_ring_buffer(pdev, gsvs_ring_bo, 0, 0, true, true, false, 1, 1, &desc[4]);
311    }
312 
313    desc += 8;
314 
315    if (tess_rings_bo) {
316       radv_set_ring_buffer(pdev, tess_rings_bo, 0, pdev->hs.tess_factor_ring_size, false, false, true, 0, 0, &desc[0]);
317 
318       radv_set_ring_buffer(pdev, tess_rings_bo, pdev->hs.tess_offchip_ring_offset, pdev->hs.tess_offchip_ring_size,
319                            false, false, true, 0, 0, &desc[4]);
320    }
321 
322    desc += 8;
323 
324    if (task_rings_bo) {
325       radv_set_ring_buffer(pdev, task_rings_bo, pdev->task_info.draw_ring_offset,
326                            pdev->task_info.num_entries * AC_TASK_DRAW_ENTRY_BYTES, false, false, false, 0, 0, &desc[0]);
327 
328       radv_set_ring_buffer(pdev, task_rings_bo, pdev->task_info.payload_ring_offset,
329                            pdev->task_info.num_entries * AC_TASK_PAYLOAD_ENTRY_BYTES, false, false, false, 0, 0,
330                            &desc[4]);
331    }
332 
333    desc += 8;
334 
335    if (mesh_scratch_ring_bo) {
336       radv_set_ring_buffer(pdev, mesh_scratch_ring_bo, 0, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES,
337                            false, false, false, 0, 0, &desc[0]);
338    }
339 
340    desc += 4;
341 
342    if (ge_rings_bo) {
343       assert(pdev->info.gfx_level >= GFX11);
344 
345       ac_build_attr_ring_descriptor(pdev->info.gfx_level, radv_buffer_get_va(ge_rings_bo),
346                                     pdev->info.total_attribute_pos_prim_ring_size, 0, &desc[0]);
347    }
348 
349    desc += 4;
350 
351    /* add sample positions after all rings */
352    memcpy(desc, device->sample_locations_1x, 8);
353    desc += 2;
354    memcpy(desc, device->sample_locations_2x, 16);
355    desc += 4;
356    memcpy(desc, device->sample_locations_4x, 32);
357    desc += 8;
358    memcpy(desc, device->sample_locations_8x, 64);
359 }
360 }
361 static void
362 radv_emit_gs_ring_sizes(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *esgs_ring_bo,
363                         uint32_t esgs_ring_size, struct radeon_winsys_bo *gsvs_ring_bo, uint32_t gsvs_ring_size)
364 {
365    const struct radv_physical_device *pdev = radv_device_physical(device);
366 
367    if (!esgs_ring_bo && !gsvs_ring_bo)
368       return;
369 
370    if (esgs_ring_bo)
371       radv_cs_add_buffer(device->ws, cs, esgs_ring_bo);
372 
373    if (gsvs_ring_bo)
374       radv_cs_add_buffer(device->ws, cs, gsvs_ring_bo);
375 
376    if (pdev->info.gfx_level >= GFX7) {
377       radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
378       radeon_emit(cs, esgs_ring_size >> 8);
379       radeon_emit(cs, gsvs_ring_size >> 8);
380    } else {
381       radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
382       radeon_emit(cs, esgs_ring_size >> 8);
383       radeon_emit(cs, gsvs_ring_size >> 8);
384    }
385 }
386 
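/* Program the tessellation factor ring: VGT_TF_RING_SIZE (in dwords, per SE on
 * GFX11+), VGT_TF_MEMORY_BASE(_HI) and VGT_HS_OFFCHIP_PARAM.
 */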
387 static void
388 radv_emit_tess_factor_ring(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *tess_rings_bo)
389 {
390    const struct radv_physical_device *pdev = radv_device_physical(device);
391    uint64_t tf_va;
392    uint32_t tf_ring_size;
393    if (!tess_rings_bo)
394       return;
395 
396    tf_ring_size = pdev->hs.tess_factor_ring_size / 4;
397    tf_va = radv_buffer_get_va(tess_rings_bo);
398 
399    radv_cs_add_buffer(device->ws, cs, tess_rings_bo);
400 
401    if (pdev->info.gfx_level >= GFX7) {
402       if (pdev->info.gfx_level >= GFX11) {
403          /* TF_RING_SIZE is per SE on GFX11. */
404          tf_ring_size /= pdev->info.max_se;
405       }
406 
407       radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE, S_030938_SIZE(tf_ring_size));
408       radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE, tf_va >> 8);
409 
410       if (pdev->info.gfx_level >= GFX12) {
411          radeon_set_uconfig_reg(cs, R_03099C_VGT_TF_MEMORY_BASE_HI, S_03099C_BASE_HI(tf_va >> 40));
412       } else if (pdev->info.gfx_level >= GFX10) {
413          radeon_set_uconfig_reg(cs, R_030984_VGT_TF_MEMORY_BASE_HI, S_030984_BASE_HI(tf_va >> 40));
414       } else if (pdev->info.gfx_level == GFX9) {
415          radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI, S_030944_BASE_HI(tf_va >> 40));
416       }
417 
418       radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, pdev->hs.hs_offchip_param);
419    } else {
420       radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE, S_008988_SIZE(tf_ring_size));
421       radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE, tf_va >> 8);
422       radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM, pdev->hs.hs_offchip_param);
423    }
424 }
425 
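/* CPU-initialize the task shader control buffer at the start of the task rings
 * BO: the write/read/dealloc pointers all start at num_entries, followed by
 * num_entries itself and the 64-bit draw ring address.
 */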
426 static VkResult
427 radv_initialise_task_control_buffer(struct radv_device *device, struct radeon_winsys_bo *task_rings_bo)
428 {
429    const struct radv_physical_device *pdev = radv_device_physical(device);
430    uint32_t *ptr = (uint32_t *)radv_buffer_map(device->ws, task_rings_bo);
431    if (!ptr)
432       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
433 
434    const uint32_t num_entries = pdev->task_info.num_entries;
435    const uint64_t task_va = radv_buffer_get_va(task_rings_bo);
436    const uint64_t task_draw_ring_va = task_va + pdev->task_info.draw_ring_offset;
437    assert((task_draw_ring_va & 0xFFFFFF00) == (task_draw_ring_va & 0xFFFFFFFF));
438 
439    /* 64-bit write_ptr */
440    ptr[0] = num_entries;
441    ptr[1] = 0;
442    /* 64-bit read_ptr */
443    ptr[2] = num_entries;
444    ptr[3] = 0;
445    /* 64-bit dealloc_ptr */
446    ptr[4] = num_entries;
447    ptr[5] = 0;
448    /* num_entries */
449    ptr[6] = num_entries;
450    /* 64-bit draw ring address */
451    ptr[7] = task_draw_ring_va;
452    ptr[8] = task_draw_ring_va >> 32;
453 
454    device->ws->buffer_unmap(device->ws, task_rings_bo, false);
455    return VK_SUCCESS;
456 }
457 
458 static void
459 radv_emit_task_rings(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *task_rings_bo,
460                      bool compute)
461 {
462    if (!task_rings_bo)
463       return;
464 
465    const uint64_t task_ctrlbuf_va = radv_buffer_get_va(task_rings_bo);
466    assert(util_is_aligned(task_ctrlbuf_va, 256));
467    radv_cs_add_buffer(device->ws, cs, task_rings_bo);
468 
469    /* Tell the GPU where the task control buffer is. */
470    radeon_emit(cs, PKT3(PKT3_DISPATCH_TASK_STATE_INIT, 1, 0) | PKT3_SHADER_TYPE_S(!!compute));
471    /* bits [31:8]: control buffer address lo, bits[7:0]: reserved (set to zero) */
472    radeon_emit(cs, task_ctrlbuf_va & 0xFFFFFF00);
473    /* bits [31:0]: control buffer address hi */
474    radeon_emit(cs, task_ctrlbuf_va >> 32);
475 }
476 
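/* Program graphics scratch: SPI_TMPRING_SIZE (per the DIV_ROUND_UP divisors,
 * the per-wave size granularity is 256 bytes on GFX11+ vs 1024 before), plus
 * the scratch base address and per-SE wave count on GFX11+.
 */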
477 static void
478 radv_emit_graphics_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
479                            struct radeon_winsys_bo *scratch_bo)
480 {
481    const struct radv_physical_device *pdev = radv_device_physical(device);
482    const struct radeon_info *gpu_info = &pdev->info;
483 
484    if (!scratch_bo)
485       return;
486 
487    radv_cs_add_buffer(device->ws, cs, scratch_bo);
488 
489    if (gpu_info->gfx_level >= GFX11) {
490       uint64_t va = radv_buffer_get_va(scratch_bo);
491 
492       /* WAVES is per SE for SPI_TMPRING_SIZE. */
493       waves /= gpu_info->max_se;
494 
495       radeon_set_context_reg_seq(cs, R_0286E8_SPI_TMPRING_SIZE, 3);
496       radeon_emit(cs, S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 256)));
497       radeon_emit(cs, va >> 8);  /* SPI_GFX_SCRATCH_BASE_LO */
498       radeon_emit(cs, va >> 40); /* SPI_GFX_SCRATCH_BASE_HI */
499    } else {
500       radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
501                              S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 1024)));
502    }
503 }
504 
505 static void
506 radv_emit_compute_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
507                           struct radeon_winsys_bo *compute_scratch_bo)
508 {
509    const struct radv_physical_device *pdev = radv_device_physical(device);
510    const struct radeon_info *gpu_info = &pdev->info;
511    uint64_t scratch_va;
512    uint32_t rsrc1;
513 
514    if (!compute_scratch_bo)
515       return;
516 
517    scratch_va = radv_buffer_get_va(compute_scratch_bo);
518    rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
519 
520    if (gpu_info->gfx_level >= GFX11)
521       rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
522    else
523       rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
524 
525    radv_cs_add_buffer(device->ws, cs, compute_scratch_bo);
526 
527    if (gpu_info->gfx_level >= GFX11) {
528       radeon_set_sh_reg_seq(cs, R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO, 2);
529       radeon_emit(cs, scratch_va >> 8);
530       radeon_emit(cs, scratch_va >> 40);
531 
532       waves /= gpu_info->max_se;
533    }
534 
535    radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
536    radeon_emit(cs, scratch_va);
537    radeon_emit(cs, rsrc1);
538 
539    radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
540                      S_00B860_WAVES(waves) |
541                         S_00B860_WAVESIZE(DIV_ROUND_UP(size_per_wave, gpu_info->gfx_level >= GFX11 ? 256 : 1024)));
542 }
543 
544 static void
545 radv_emit_compute_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
546                                   struct radeon_winsys_bo *descriptor_bo)
547 {
548    if (!descriptor_bo)
549       return;
550 
551    uint64_t va = radv_buffer_get_va(descriptor_bo);
552    radv_cs_add_buffer(device->ws, cs, descriptor_bo);
553 
554    /* Compute shader user data 0-1 have the scratch pointer (unlike GFX shaders),
555     * so emit the descriptor pointer to user data 2-3 instead (task_ring_offsets arg).
556     */
557    radv_emit_shader_pointer(device, cs, R_00B908_COMPUTE_USER_DATA_2, va, true);
558 }
559 
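/* Broadcast the ring descriptor BO address to the per-stage user data
 * registers; which registers are written depends on the gfx level (merged
 * shader stages on GFX9+, different register offsets on GFX11/GFX12).
 */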
560 static void
561 radv_emit_graphics_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
562                                    struct radeon_winsys_bo *descriptor_bo)
563 {
564    const struct radv_physical_device *pdev = radv_device_physical(device);
565    uint64_t va;
566 
567    if (!descriptor_bo)
568       return;
569 
570    va = radv_buffer_get_va(descriptor_bo);
571 
572    radv_cs_add_buffer(device->ws, cs, descriptor_bo);
573 
574    if (pdev->info.gfx_level >= GFX12) {
575       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B410_SPI_SHADER_PGM_LO_HS,
576                          R_00B210_SPI_SHADER_PGM_LO_GS};
577 
578       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
579          radv_emit_shader_pointer(device, cs, regs[i], va, true);
580       }
581    } else if (pdev->info.gfx_level >= GFX11) {
582       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B420_SPI_SHADER_PGM_LO_HS,
583                          R_00B220_SPI_SHADER_PGM_LO_GS};
584 
585       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
586          radv_emit_shader_pointer(device, cs, regs[i], va, true);
587       }
588    } else if (pdev->info.gfx_level >= GFX10) {
589       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
590                          R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
591 
592       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
593          radv_emit_shader_pointer(device, cs, regs[i], va, true);
594       }
595    } else if (pdev->info.gfx_level == GFX9) {
596       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
597                          R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
598 
599       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
600          radv_emit_shader_pointer(device, cs, regs[i], va, true);
601       }
602    } else {
603       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
604                          R_00B230_SPI_SHADER_USER_DATA_GS_0, R_00B330_SPI_SHADER_USER_DATA_ES_0,
605                          R_00B430_SPI_SHADER_USER_DATA_HS_0, R_00B530_SPI_SHADER_USER_DATA_LS_0};
606 
607       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
608          radv_emit_shader_pointer(device, cs, regs[i], va, true);
609       }
610    }
611 }
612 
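/* GFX11+ attribute ring setup: wait for idle with a bottom-of-pipe RELEASE_MEM
 * that bumps the PWS counter and an ACQUIRE_MEM that waits on it, then program
 * SPI_ATTRIBUTE_RING_BASE/SIZE (plus the GE position/primitive rings on GFX12).
 */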
613 static void
614 radv_emit_ge_rings(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *ge_rings_bo)
615 {
616    const struct radv_physical_device *pdev = radv_device_physical(device);
617    uint64_t va;
618 
619    if (!ge_rings_bo)
620       return;
621 
622    assert(pdev->info.gfx_level >= GFX11);
623 
624    va = radv_buffer_get_va(ge_rings_bo);
625    assert((va >> 32) == pdev->info.address32_hi);
626 
627    radv_cs_add_buffer(device->ws, cs, ge_rings_bo);
628 
629    /* We must wait for idle using an EOP event before changing the attribute ring registers. Use the
630     * bottom-of-pipe EOP event, but increment the PWS counter instead of writing memory.
631     */
632    radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
633    radeon_emit(cs, S_490_EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | S_490_EVENT_INDEX(5) | S_490_PWS_ENABLE(1));
634    radeon_emit(cs, 0); /* DST_SEL, INT_SEL, DATA_SEL */
635    radeon_emit(cs, 0); /* ADDRESS_LO */
636    radeon_emit(cs, 0); /* ADDRESS_HI */
637    radeon_emit(cs, 0); /* DATA_LO */
638    radeon_emit(cs, 0); /* DATA_HI */
639    radeon_emit(cs, 0); /* INT_CTXID */
640 
641    /* Wait for the PWS counter. */
642    radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
643    radeon_emit(cs, S_580_PWS_STAGE_SEL(V_580_CP_ME) | S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) | S_580_PWS_ENA2(1) |
644                       S_580_PWS_COUNT(0));
645    radeon_emit(cs, 0xffffffff); /* GCR_SIZE */
646    radeon_emit(cs, 0x01ffffff); /* GCR_SIZE_HI */
647    radeon_emit(cs, 0);          /* GCR_BASE_LO */
648    radeon_emit(cs, 0);          /* GCR_BASE_HI */
649    radeon_emit(cs, S_585_PWS_ENA(1));
650    radeon_emit(cs, 0); /* GCR_CNTL */
651 
652    /* The PS will read inputs from this address. */
653    radeon_set_uconfig_reg_seq(cs, R_031110_SPI_GS_THROTTLE_CNTL1, 4);
654    radeon_emit(cs, 0x12355123); /* SPI_GS_THROTTLE_CNTL1 */
655    radeon_emit(cs, 0x1544D);    /* SPI_GS_THROTTLE_CNTL2 */
656    radeon_emit(cs, va >> 16);   /* SPI_ATTRIBUTE_RING_BASE */
657    radeon_emit(cs, S_03111C_MEM_SIZE((pdev->info.attribute_ring_size_per_se >> 16) - 1) |
658                       S_03111C_BIG_PAGE(pdev->info.discardable_allows_big_page) |
659                       S_03111C_L1_POLICY(1)); /* SPI_ATTRIBUTE_RING_SIZE */
660 
661    if (pdev->info.gfx_level >= GFX12) {
662       const uint64_t pos_address = va + pdev->info.pos_ring_offset;
663       const uint64_t prim_address = va + pdev->info.prim_ring_offset;
664 
665       /* When one of these 4 registers is updated, all 4 must be updated. */
666       radeon_set_uconfig_reg_seq(cs, R_0309A0_GE_POS_RING_BASE, 4);
667       radeon_emit(cs, pos_address >> 16);                                          /* R_0309A0_GE_POS_RING_BASE */
668       radeon_emit(cs, S_0309A4_MEM_SIZE(pdev->info.pos_ring_size_per_se >> 5));    /* R_0309A4_GE_POS_RING_SIZE */
669       radeon_emit(cs, prim_address >> 16);                                         /* R_0309A8_GE_PRIM_RING_BASE */
670       radeon_emit(cs, S_0309AC_MEM_SIZE(pdev->info.prim_ring_size_per_se >> 5) | S_0309AC_SCOPE(gfx12_scope_device) |
671                          S_0309AC_PAF_TEMPORAL(gfx12_store_high_temporal_stay_dirty) |
672                          S_0309AC_PAB_TEMPORAL(gfx12_load_last_use_discard) |
673                          S_0309AC_SPEC_DATA_READ(gfx12_spec_read_auto) | S_0309AC_FORCE_SE_SCOPE(1) |
674                          S_0309AC_PAB_NOFILL(1)); /* R_0309AC_GE_PRIM_RING_SIZE */
675    }
676 }
677 
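/* Emit the compute preamble: the shared ac compute preamble PM4 state, zeroed
 * COMPUTE_START_X/Y/Z, the GFX8 trap handler TBA/TMA registers when a trap
 * handler is installed, and the dispatch interleave on GFX12.
 */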
678 static void
679 radv_emit_compute(struct radv_device *device, struct radeon_cmdbuf *cs, bool is_compute_queue)
680 {
681    const struct radv_physical_device *pdev = radv_device_physical(device);
682    const uint64_t border_color_va = device->border_color_data.bo ? radv_buffer_get_va(device->border_color_data.bo) : 0;
683 
684    struct ac_pm4_state *pm4 = ac_pm4_create_sized(&pdev->info, false, 64, is_compute_queue);
685    if (!pm4)
686       return;
687 
688    const struct ac_preamble_state preamble_state = {
689       .border_color_va = border_color_va,
690       .gfx11 =
691          {
692             .compute_dispatch_interleave = 64,
693          },
694    };
695 
696    ac_init_compute_preamble_state(&preamble_state, pm4);
697 
698    ac_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
699    ac_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
700    ac_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);
701 
702    if (pdev->info.gfx_level == GFX8 && device->tma_bo) {
703       uint64_t tba_va, tma_va;
704 
705       tba_va = radv_shader_get_va(device->trap_handler_shader);
706       tma_va = radv_buffer_get_va(device->tma_bo);
707 
708       ac_pm4_set_reg(pm4, R_00B838_COMPUTE_TBA_LO, tba_va >> 8);
709       ac_pm4_set_reg(pm4, R_00B83C_COMPUTE_TBA_HI, tba_va >> 40);
710       ac_pm4_set_reg(pm4, R_00B840_COMPUTE_TMA_LO, tma_va >> 8);
711       ac_pm4_set_reg(pm4, R_00B844_COMPUTE_TMA_HI, tma_va >> 40);
712    }
713 
714    if (pdev->info.gfx_level >= GFX12)
715       ac_pm4_set_reg(pm4, R_00B8BC_COMPUTE_DISPATCH_INTERLEAVE,
716                      S_00B8BC_INTERLEAVE_1D(preamble_state.gfx11.compute_dispatch_interleave));
717 
718    ac_pm4_finalize(pm4);
719 
720    radeon_emit_array(cs, pm4->pm4, pm4->ndw);
721 
722    ac_pm4_free_state(pm4);
723 }
724 
725 /* 12.4 fixed-point */
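/* e.g. 1.0 -> 16, 4095.9375 (= 8191.875 / 2) -> 0xffff; values >= 4096 clamp
 * to 0xffff and values <= 0 to 0.
 */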
726 static unsigned
727 radv_pack_float_12p4(float x)
728 {
729    return x <= 0 ? 0 : x >= 4096 ? 0xffff : x * 16;
730 }
731 
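/* Emit the graphics queue preamble: CONTEXT_CONTROL and CLEAR_STATE when
 * register shadowing isn't used, the shared ac graphics preamble PM4 state,
 * RADV's static register defaults, and finally the compute preamble.
 */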
732 void
733 radv_emit_graphics(struct radv_device *device, struct radeon_cmdbuf *cs)
734 {
735    struct radv_physical_device *pdev = radv_device_physical(device);
736    const uint64_t border_color_va = device->border_color_data.bo ? radv_buffer_get_va(device->border_color_data.bo) : 0;
737    bool has_clear_state = pdev->info.has_clear_state;
738    int i;
739 
740    if (!device->uses_shadow_regs) {
741       radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
742       radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
743       radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
744 
745       if (has_clear_state) {
746          radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
747          radeon_emit(cs, 0);
748       }
749    }
750 
751    struct ac_pm4_state *pm4 = ac_pm4_create_sized(&pdev->info, false, 512, false);
752    if (!pm4)
753       return;
754 
755    const struct ac_preamble_state preamble_state = {
756       .border_color_va = border_color_va,
757    };
758 
759    ac_init_graphics_preamble_state(&preamble_state, pm4);
760 
761    if (!has_clear_state) {
762       for (i = 0; i < 16; i++) {
763          radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i * 8, 0);
764          radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i * 8, fui(1.0));
765       }
766    }
767 
768    if (!has_clear_state) {
769       radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
770       /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
771       radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
772    }
773 
774    if (pdev->info.gfx_level <= GFX8)
775       radeon_set_sh_reg(cs, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(pdev->info.address32_hi >> 8));
776 
777    if (pdev->info.gfx_level < GFX11)
778       radeon_set_sh_reg(cs, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(pdev->info.address32_hi >> 8));
779 
780    unsigned cu_mask_ps = pdev->info.gfx_level >= GFX10_3 ? ac_gfx103_get_cu_mask_ps(&pdev->info) : ~0u;
781 
782    if (pdev->info.gfx_level >= GFX12) {
783       radeon_set_sh_reg(cs, R_00B420_SPI_SHADER_PGM_RSRC4_HS,
784                         S_00B420_WAVE_LIMIT(0x3ff) | S_00B420_GLG_FORCE_DISABLE(1));
785       radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC4_PS,
786                         S_00B01C_WAVE_LIMIT_GFX12(0x3FF) | S_00B01C_LDS_GROUP_SIZE_GFX12(1));
787    } else if (pdev->info.gfx_level >= GFX11) {
788       radeon_set_sh_reg_idx(&pdev->info, cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS, 3,
789                             ac_apply_cu_en(S_00B404_CU_EN(0xffff), C_00B404_CU_EN, 16, &pdev->info));
790       radeon_set_sh_reg_idx(&pdev->info, cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS, 3,
791                             ac_apply_cu_en(S_00B004_CU_EN(cu_mask_ps >> 16), C_00B004_CU_EN, 16, &pdev->info));
792    }
793 
794    if (pdev->info.gfx_level >= GFX10) {
795       /* Vulkan doesn't support user edge flags and it also doesn't
796        * need to prevent drawing lines on internal edges of
797        * decomposed primitives (such as quads) with polygon mode = lines.
798        */
799       unsigned vertex_reuse_depth = pdev->info.gfx_level >= GFX10_3 ? 30 : 0;
800       radeon_set_context_reg(cs, R_028838_PA_CL_NGG_CNTL,
801                              S_028838_INDEX_BUF_EDGE_FLAG_ENA(0) | S_028838_VERTEX_REUSE_DEPTH(vertex_reuse_depth));
802 
803       if (pdev->info.gfx_level >= GFX10_3) {
804          /* This allows sample shading. */
805          radeon_set_context_reg(cs, R_028848_PA_CL_VRS_CNTL,
806                                 S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE));
807       }
808    }
809 
810    if (pdev->info.gfx_level >= GFX8) {
811       /* GFX8+ only compares the bits according to the index type by default,
812        * so we can always leave the programmed value at the maximum.
813        */
814       radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0xffffffff);
815    }
816 
817    unsigned tmp = (unsigned)(1.0 * 8.0);
818    radeon_set_context_reg(cs, R_028A00_PA_SU_POINT_SIZE, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
819    radeon_set_context_reg(
820       cs, R_028A04_PA_SU_POINT_MINMAX,
821       S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) | S_028A04_MAX_SIZE(radv_pack_float_12p4(8191.875 / 2)));
822 
823    /* Enable the Polaris small primitive filter control.
824     * XXX: There is possibly an issue when MSAA is off (see RadeonSI
825     * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
826     * and AMDVLK doesn't have a workaround either.
827     */
828    if (pdev->info.family >= CHIP_POLARIS10) {
829       unsigned small_prim_filter_cntl = S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
830                                         /* Workaround for a hw line bug. */
831                                         S_028830_LINE_FILTER_DISABLE(pdev->info.family <= CHIP_POLARIS12);
832 
833       radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL, small_prim_filter_cntl);
834    }
835 
836    if (pdev->info.gfx_level >= GFX12) {
837       radeon_set_context_reg(cs, R_028644_SPI_INTERP_CONTROL_0,
838                              S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
839                                 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
840                                 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
841                                 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
842                                 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
843                                 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
844    } else {
845       radeon_set_context_reg(cs, R_0286D4_SPI_INTERP_CONTROL_0,
846                              S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
847                                 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
848                                 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
849                                 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
850                                 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
851                                 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
852    }
853 
854    radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
855                           S_028BE4_PIX_CENTER(1) | S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
856                              S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
857 
858    if (pdev->info.gfx_level >= GFX12) {
859       radeon_set_context_reg(cs, R_028814_PA_CL_VTE_CNTL,
860                              S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
861                                 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
862                                 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
863    } else {
864       radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
865                              S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
866                                 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
867                                 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
868    }
869 
870    if (pdev->info.gfx_level == GFX8 && device->tma_bo) {
871       uint64_t tba_va, tma_va;
872 
873       tba_va = radv_shader_get_va(device->trap_handler_shader);
874       tma_va = radv_buffer_get_va(device->tma_bo);
875 
876       uint32_t regs[] = {R_00B000_SPI_SHADER_TBA_LO_PS, R_00B100_SPI_SHADER_TBA_LO_VS, R_00B200_SPI_SHADER_TBA_LO_GS,
877                          R_00B300_SPI_SHADER_TBA_LO_ES, R_00B400_SPI_SHADER_TBA_LO_HS, R_00B500_SPI_SHADER_TBA_LO_LS};
878 
879       for (i = 0; i < ARRAY_SIZE(regs); ++i) {
880          radeon_set_sh_reg_seq(cs, regs[i], 4);
881          radeon_emit(cs, tba_va >> 8);
882          radeon_emit(cs, tba_va >> 40);
883          radeon_emit(cs, tma_va >> 8);
884          radeon_emit(cs, tma_va >> 40);
885       }
886    }
887 
888    radeon_set_context_reg(cs, R_028828_PA_SU_LINE_STIPPLE_SCALE, 0x3f800000);
889 
890    if (pdev->info.gfx_level >= GFX12) {
891       radeon_set_context_reg(cs, R_028000_DB_RENDER_CONTROL, 0);
892    }
893 
894    ac_pm4_finalize(pm4);
895    radeon_emit_array(cs, pm4->pm4, pm4->ndw);
896    ac_pm4_free_state(pm4);
897 
898    radv_emit_compute(device, cs, false);
899 }
900 
901 static void
902 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_device *device)
903 {
904    if (device->gfx_init) {
905       struct radeon_winsys *ws = device->ws;
906 
907       ws->cs_execute_ib(cs, device->gfx_init, 0, device->gfx_init_size_dw & 0xffff, false);
908 
909       radv_cs_add_buffer(device->ws, cs, device->gfx_init);
910    } else {
911       radv_emit_graphics(device, cs);
912    }
913 }
914 
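/* (Re)build the queue preambles: grow scratch and ring BOs on demand, refill
 * the descriptor BO whenever a ring BO changed, record the three preamble
 * command streams (initial with full cache flush, initial, continue) and only
 * then swap them in, destroying any replaced BOs and command streams.
 */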
915 static VkResult
916 radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *device,
917                         const struct radv_queue_ring_info *needs)
918 {
919    const struct radv_physical_device *pdev = radv_device_physical(device);
920    struct radeon_winsys *ws = device->ws;
921    struct radeon_winsys_bo *scratch_bo = queue->scratch_bo;
922    struct radeon_winsys_bo *descriptor_bo = queue->descriptor_bo;
923    struct radeon_winsys_bo *compute_scratch_bo = queue->compute_scratch_bo;
924    struct radeon_winsys_bo *esgs_ring_bo = queue->esgs_ring_bo;
925    struct radeon_winsys_bo *gsvs_ring_bo = queue->gsvs_ring_bo;
926    struct radeon_winsys_bo *tess_rings_bo = queue->tess_rings_bo;
927    struct radeon_winsys_bo *task_rings_bo = queue->task_rings_bo;
928    struct radeon_winsys_bo *mesh_scratch_ring_bo = queue->mesh_scratch_ring_bo;
929    struct radeon_winsys_bo *ge_rings_bo = queue->ge_rings_bo;
930    struct radeon_winsys_bo *gds_bo = queue->gds_bo;
931    struct radeon_winsys_bo *gds_oa_bo = queue->gds_oa_bo;
932    struct radeon_cmdbuf *dest_cs[3] = {0};
933    const uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
934    VkResult result = VK_SUCCESS;
935 
936    const bool add_sample_positions = !queue->ring_info.sample_positions && needs->sample_positions;
937    const uint32_t scratch_size = needs->scratch_size_per_wave * needs->scratch_waves;
938    const uint32_t queue_scratch_size = queue->ring_info.scratch_size_per_wave * queue->ring_info.scratch_waves;
939 
940    if (scratch_size > queue_scratch_size) {
941       result = radv_bo_create(device, NULL, scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
942                               RADV_BO_PRIORITY_SCRATCH, 0, true, &scratch_bo);
943       if (result != VK_SUCCESS)
944          goto fail;
945       radv_rmv_log_command_buffer_bo_create(device, scratch_bo, 0, 0, scratch_size);
946    }
947 
948    const uint32_t compute_scratch_size = needs->compute_scratch_size_per_wave * needs->compute_scratch_waves;
949    const uint32_t compute_queue_scratch_size =
950       queue->ring_info.compute_scratch_size_per_wave * queue->ring_info.compute_scratch_waves;
951    if (compute_scratch_size > compute_queue_scratch_size) {
952       result = radv_bo_create(device, NULL, compute_scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
953                               RADV_BO_PRIORITY_SCRATCH, 0, true, &compute_scratch_bo);
954       if (result != VK_SUCCESS)
955          goto fail;
956       radv_rmv_log_command_buffer_bo_create(device, compute_scratch_bo, 0, 0, compute_scratch_size);
957    }
958 
959    if (needs->esgs_ring_size > queue->ring_info.esgs_ring_size) {
960       result = radv_bo_create(device, NULL, needs->esgs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
961                               RADV_BO_PRIORITY_SCRATCH, 0, true, &esgs_ring_bo);
962       if (result != VK_SUCCESS)
963          goto fail;
964       radv_rmv_log_command_buffer_bo_create(device, esgs_ring_bo, 0, 0, needs->esgs_ring_size);
965    }
966 
967    if (needs->gsvs_ring_size > queue->ring_info.gsvs_ring_size) {
968       result = radv_bo_create(device, NULL, needs->gsvs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
969                               RADV_BO_PRIORITY_SCRATCH, 0, true, &gsvs_ring_bo);
970       if (result != VK_SUCCESS)
971          goto fail;
972       radv_rmv_log_command_buffer_bo_create(device, gsvs_ring_bo, 0, 0, needs->gsvs_ring_size);
973    }
974 
975    if (!queue->ring_info.tess_rings && needs->tess_rings) {
976       uint64_t tess_rings_size = pdev->hs.tess_offchip_ring_offset + pdev->hs.tess_offchip_ring_size;
977       result = radv_bo_create(device, NULL, tess_rings_size, 256, RADEON_DOMAIN_VRAM, ring_bo_flags,
978                               RADV_BO_PRIORITY_SCRATCH, 0, true, &tess_rings_bo);
979       if (result != VK_SUCCESS)
980          goto fail;
981       radv_rmv_log_command_buffer_bo_create(device, tess_rings_bo, 0, 0, tess_rings_size);
982    }
983 
984    if (!queue->ring_info.task_rings && needs->task_rings) {
985       assert(pdev->info.gfx_level >= GFX10_3);
986 
987       /* We write the control buffer from the CPU, so we need to grant CPU access to the BO.
988        * The draw ring needs to be zero-initialized otherwise the ready bits will be incorrect.
989        */
990       uint32_t task_rings_bo_flags =
991          RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM;
992 
993       result = radv_bo_create(device, NULL, pdev->task_info.bo_size_bytes, 256, RADEON_DOMAIN_VRAM, task_rings_bo_flags,
994                               RADV_BO_PRIORITY_SCRATCH, 0, true, &task_rings_bo);
995       if (result != VK_SUCCESS)
996          goto fail;
997       radv_rmv_log_command_buffer_bo_create(device, task_rings_bo, 0, 0, pdev->task_info.bo_size_bytes);
998 
999       result = radv_initialise_task_control_buffer(device, task_rings_bo);
1000       if (result != VK_SUCCESS)
1001          goto fail;
1002    }
1003 
1004    if (!queue->ring_info.mesh_scratch_ring && needs->mesh_scratch_ring) {
1005       assert(pdev->info.gfx_level >= GFX10_3);
1006       result =
1007          radv_bo_create(device, NULL, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES, 256,
1008                         RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true, &mesh_scratch_ring_bo);
1009 
1010       if (result != VK_SUCCESS)
1011          goto fail;
1012       radv_rmv_log_command_buffer_bo_create(device, mesh_scratch_ring_bo, 0, 0,
1013                                             RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES);
1014    }
1015 
1016    if (!queue->ring_info.ge_rings && needs->ge_rings) {
1017       assert(pdev->info.gfx_level >= GFX11);
1018       result = radv_bo_create(device, NULL, pdev->info.total_attribute_pos_prim_ring_size, 2 * 1024 * 1024 /* 2MiB */,
1019                               RADEON_DOMAIN_VRAM, RADEON_FLAG_32BIT | RADEON_FLAG_DISCARDABLE | ring_bo_flags,
1020                               RADV_BO_PRIORITY_SCRATCH, 0, true, &ge_rings_bo);
1021       if (result != VK_SUCCESS)
1022          goto fail;
1023       radv_rmv_log_command_buffer_bo_create(device, ge_rings_bo, 0, 0, pdev->info.total_attribute_pos_prim_ring_size);
1024    }
1025 
1026    if (!queue->ring_info.gds && needs->gds) {
1027       assert(pdev->info.gfx_level >= GFX10 && pdev->info.gfx_level < GFX12);
1028 
1029       /* 4 streamout GDS counters.
1030        * We need 256B (64 dw) of GDS, otherwise streamout hangs.
1031        */
1032       result = radv_bo_create(device, NULL, 256, 4, RADEON_DOMAIN_GDS, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true,
1033                               &gds_bo);
1034       if (result != VK_SUCCESS)
1035          goto fail;
1036 
1037       /* Add the GDS BO to our global BO list to prevent the kernel from emitting a GDS switch and
1038        * resetting the state when a compute queue is used.
1039        */
1040       result = device->ws->buffer_make_resident(ws, gds_bo, true);
1041       if (result != VK_SUCCESS)
1042          goto fail;
1043    }
1044 
1045    if (!queue->ring_info.gds_oa && needs->gds_oa) {
1046       assert(pdev->info.gfx_level >= GFX10 && pdev->info.gfx_level < GFX12);
1047 
1048       result = radv_bo_create(device, NULL, 1, 1, RADEON_DOMAIN_OA, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true,
1049                               &gds_oa_bo);
1050       if (result != VK_SUCCESS)
1051          goto fail;
1052 
1053       /* Add the GDS OA BO to our global BO list to prevent the kernel from emitting a GDS switch
1054        * and resetting the state when a compute queue is used.
1055        */
1056       result = device->ws->buffer_make_resident(ws, gds_oa_bo, true);
1057       if (result != VK_SUCCESS)
1058          goto fail;
1059    }
1060 
1061    /* Re-initialize the descriptor BO when any ring BOs changed.
1062     *
1063     * Additionally, make sure to create the descriptor BO for the compute queue
1064     * when it uses the task shader rings. The task rings BO is shared between the
1065     * GFX and compute queues and is already initialized here.
1066     */
1067    if ((queue->qf == RADV_QUEUE_COMPUTE && !descriptor_bo && task_rings_bo) || scratch_bo != queue->scratch_bo ||
1068        esgs_ring_bo != queue->esgs_ring_bo || gsvs_ring_bo != queue->gsvs_ring_bo ||
1069        tess_rings_bo != queue->tess_rings_bo || task_rings_bo != queue->task_rings_bo ||
1070        mesh_scratch_ring_bo != queue->mesh_scratch_ring_bo || ge_rings_bo != queue->ge_rings_bo ||
1071        add_sample_positions) {
1072       const uint32_t size = 304;
1073 
1074       result = radv_bo_create(device, NULL, size, 4096, RADEON_DOMAIN_VRAM,
1075                               RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
1076                               RADV_BO_PRIORITY_DESCRIPTOR, 0, true, &descriptor_bo);
1077       if (result != VK_SUCCESS)
1078          goto fail;
1079    }
1080 
1081    if (descriptor_bo != queue->descriptor_bo) {
1082       uint32_t *map = (uint32_t *)radv_buffer_map(ws, descriptor_bo);
1083       if (!map) {
1084          result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1085          goto fail;
1086       }
1087 
1088       radv_fill_shader_rings(device, map, scratch_bo, needs->esgs_ring_size, esgs_ring_bo, needs->gsvs_ring_size,
1089                              gsvs_ring_bo, tess_rings_bo, task_rings_bo, mesh_scratch_ring_bo, ge_rings_bo);
1090 
1091       ws->buffer_unmap(ws, descriptor_bo, false);
1092    }
1093 
1094    for (int i = 0; i < 3; ++i) {
1095       enum rgp_flush_bits sqtt_flush_bits = 0;
1096       struct radeon_cmdbuf *cs = NULL;
1097       cs = ws->cs_create(ws, radv_queue_family_to_ring(pdev, queue->qf), false);
1098       if (!cs) {
1099          result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1100          goto fail;
1101       }
1102 
1103       radeon_check_space(ws, cs, 512);
1104       dest_cs[i] = cs;
1105 
1106       if (scratch_bo)
1107          radv_cs_add_buffer(ws, cs, scratch_bo);
1108 
1109       /* Emit initial configuration. */
1110       switch (queue->qf) {
1111       case RADV_QUEUE_GENERAL:
1112          if (queue->uses_shadow_regs)
1113             radv_emit_shadow_regs_preamble(cs, device, queue);
1114          radv_init_graphics_state(cs, device);
1115 
1116          if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo || task_rings_bo) {
1117             radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1118             radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1119 
1120             radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1121             radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1122          }
1123 
1124          radv_emit_gs_ring_sizes(device, cs, esgs_ring_bo, needs->esgs_ring_size, gsvs_ring_bo, needs->gsvs_ring_size);
1125          radv_emit_tess_factor_ring(device, cs, tess_rings_bo);
1126          radv_emit_task_rings(device, cs, task_rings_bo, false);
1127          radv_emit_ge_rings(device, cs, ge_rings_bo);
1128          radv_emit_graphics_shader_pointers(device, cs, descriptor_bo);
1129          radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1130                                    compute_scratch_bo);
1131          radv_emit_graphics_scratch(device, cs, needs->scratch_size_per_wave, needs->scratch_waves, scratch_bo);
1132          break;
1133       case RADV_QUEUE_COMPUTE:
1134          radv_emit_compute(device, cs, true);
1135 
1136          if (task_rings_bo) {
1137             radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1138             radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1139          }
1140 
1141          radv_emit_task_rings(device, cs, task_rings_bo, true);
1142          radv_emit_compute_shader_pointers(device, cs, descriptor_bo);
1143          radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1144                                    compute_scratch_bo);
1145          break;
1146       default:
1147          break;
1148       }
1149 
1150       if (i < 2) {
1151          /* The two initial preambles have a cache flush at the beginning. */
1152          const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
1153          enum radv_cmd_flush_bits flush_bits = RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE |
1154                                                RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2 |
1155                                                RADV_CMD_FLAG_START_PIPELINE_STATS;
1156 
1157          if (i == 0) {
1158             /* The full flush preamble should also wait for previous shader work to finish. */
1159             flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1160             if (queue->qf == RADV_QUEUE_GENERAL)
1161                flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1162          }
1163 
1164          radv_cs_emit_cache_flush(ws, cs, gfx_level, NULL, 0, queue->qf, flush_bits, &sqtt_flush_bits, 0);
1165       }
1166 
1167       result = ws->cs_finalize(cs);
1168       if (result != VK_SUCCESS)
1169          goto fail;
1170    }
1171 
1172    if (queue->initial_full_flush_preamble_cs)
1173       ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1174 
1175    if (queue->initial_preamble_cs)
1176       ws->cs_destroy(queue->initial_preamble_cs);
1177 
1178    if (queue->continue_preamble_cs)
1179       ws->cs_destroy(queue->continue_preamble_cs);
1180 
1181    queue->initial_full_flush_preamble_cs = dest_cs[0];
1182    queue->initial_preamble_cs = dest_cs[1];
1183    queue->continue_preamble_cs = dest_cs[2];
1184 
1185    if (scratch_bo != queue->scratch_bo) {
1186       if (queue->scratch_bo) {
1187          radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
1188          radv_bo_destroy(device, NULL, queue->scratch_bo);
1189       }
1190       queue->scratch_bo = scratch_bo;
1191    }
1192 
1193    if (compute_scratch_bo != queue->compute_scratch_bo) {
1194       if (queue->compute_scratch_bo) {
1195          radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
1196          radv_bo_destroy(device, NULL, queue->compute_scratch_bo);
1197       }
1198       queue->compute_scratch_bo = compute_scratch_bo;
1199    }
1200 
1201    if (esgs_ring_bo != queue->esgs_ring_bo) {
1202       if (queue->esgs_ring_bo) {
1203          radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
1204          radv_bo_destroy(device, NULL, queue->esgs_ring_bo);
1205       }
1206       queue->esgs_ring_bo = esgs_ring_bo;
1207    }
1208 
1209    if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1210       if (queue->gsvs_ring_bo) {
1211          radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
1212          radv_bo_destroy(device, NULL, queue->gsvs_ring_bo);
1213       }
1214       queue->gsvs_ring_bo = gsvs_ring_bo;
1215    }
1216 
1217    if (descriptor_bo != queue->descriptor_bo) {
1218       if (queue->descriptor_bo)
1219          radv_bo_destroy(device, NULL, queue->descriptor_bo);
1220       queue->descriptor_bo = descriptor_bo;
1221    }
1222 
1223    queue->tess_rings_bo = tess_rings_bo;
1224    queue->task_rings_bo = task_rings_bo;
1225    queue->mesh_scratch_ring_bo = mesh_scratch_ring_bo;
1226    queue->ge_rings_bo = ge_rings_bo;
1227    queue->gds_bo = gds_bo;
1228    queue->gds_oa_bo = gds_oa_bo;
1229    queue->ring_info = *needs;
1230    return VK_SUCCESS;
1231 fail:
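   /* On failure, only destroy objects allocated by this update; anything the queue already
    * owns (pointer-equal to the queue's current BOs/CS) is kept so the queue stays usable.
    */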
1232    for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1233       if (dest_cs[i])
1234          ws->cs_destroy(dest_cs[i]);
1235    if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1236       radv_bo_destroy(device, NULL, descriptor_bo);
1237    if (scratch_bo && scratch_bo != queue->scratch_bo)
1238       radv_bo_destroy(device, NULL, scratch_bo);
1239    if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1240       radv_bo_destroy(device, NULL, compute_scratch_bo);
1241    if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1242       radv_bo_destroy(device, NULL, esgs_ring_bo);
1243    if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1244       radv_bo_destroy(device, NULL, gsvs_ring_bo);
1245    if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
1246       radv_bo_destroy(device, NULL, tess_rings_bo);
1247    if (task_rings_bo && task_rings_bo != queue->task_rings_bo)
1248       radv_bo_destroy(device, NULL, task_rings_bo);
1249    if (ge_rings_bo && ge_rings_bo != queue->ge_rings_bo)
1250       radv_bo_destroy(device, NULL, ge_rings_bo);
1251    if (gds_bo && gds_bo != queue->gds_bo) {
1252       ws->buffer_make_resident(ws, queue->gds_bo, false);
1253       radv_bo_destroy(device, NULL, gds_bo);
1254    }
1255    if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo) {
1256       ws->buffer_make_resident(ws, queue->gds_oa_bo, false);
1257       radv_bo_destroy(device, NULL, gds_oa_bo);
1258    }
1259 
1260    return vk_error(queue, result);
1261 }
1262 
1263 static VkResult
1264 radv_update_preambles(struct radv_queue_state *queue, struct radv_device *device,
1265                       struct vk_command_buffer *const *cmd_buffers, uint32_t cmd_buffer_count, bool *use_perf_counters,
1266                       bool *has_follower)
1267 {
1268    const struct radv_physical_device *pdev = radv_device_physical(device);
1269 
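   /* Queue families without ring preambles (e.g. transfer or video) only need to know whether
    * any command buffer brings a gang follower along.
    */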
1270    if (queue->qf != RADV_QUEUE_GENERAL && queue->qf != RADV_QUEUE_COMPUTE) {
1271       for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1272          struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1273 
1274          *has_follower |= !!cmd_buffer->gang.cs;
1275       }
1276 
1277       return VK_SUCCESS;
1278    }
1279 
1280    /* Figure out the needs of the current submission.
1281     * Start by copying the queue's current info.
1282     * This is done because we only allow two possible behaviours for these buffers:
1283     * - Grow when the newly needed amount is larger than what we had
1284     * - Allocate the max size and reuse it, but don't free it until the queue is destroyed
1285     */
1286    struct radv_queue_ring_info needs = queue->ring_info;
1287    *use_perf_counters = false;
1288    *has_follower = false;
1289 
1290    for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1291       struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1292 
1293       needs.scratch_size_per_wave = MAX2(needs.scratch_size_per_wave, cmd_buffer->scratch_size_per_wave_needed);
1294       needs.scratch_waves = MAX2(needs.scratch_waves, cmd_buffer->scratch_waves_wanted);
1295       needs.compute_scratch_size_per_wave =
1296          MAX2(needs.compute_scratch_size_per_wave, cmd_buffer->compute_scratch_size_per_wave_needed);
1297       needs.compute_scratch_waves = MAX2(needs.compute_scratch_waves, cmd_buffer->compute_scratch_waves_wanted);
1298       needs.esgs_ring_size = MAX2(needs.esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1299       needs.gsvs_ring_size = MAX2(needs.gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1300       needs.tess_rings |= cmd_buffer->tess_rings_needed;
1301       needs.task_rings |= cmd_buffer->task_rings_needed;
1302       needs.mesh_scratch_ring |= cmd_buffer->mesh_scratch_ring_needed;
1303       needs.gds |= cmd_buffer->gds_needed;
1304       needs.gds_oa |= cmd_buffer->gds_oa_needed;
1305       needs.sample_positions |= cmd_buffer->sample_positions_needed;
1306       *use_perf_counters |= cmd_buffer->state.uses_perf_counters;
1307       *has_follower |= !!cmd_buffer->gang.cs;
1308    }
1309 
1310    /* Sanitize scratch size information. */
1311    needs.scratch_waves =
1312       needs.scratch_size_per_wave ? MIN2(needs.scratch_waves, UINT32_MAX / needs.scratch_size_per_wave) : 0;
1313    needs.compute_scratch_waves =
1314       needs.compute_scratch_size_per_wave
1315          ? MIN2(needs.compute_scratch_waves, UINT32_MAX / needs.compute_scratch_size_per_wave)
1316          : 0;
1317 
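   /* GFX11+ always needs the GE rings on the graphics queue, independent of what the command
    * buffers requested.
    */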
1318    if (pdev->info.gfx_level >= GFX11 && queue->qf == RADV_QUEUE_GENERAL) {
1319       needs.ge_rings = true;
1320    }
1321 
1322    /* Return early if we already match these needs.
1323     * Note that it's not possible for any of the needed values to be less
1324     * than what the queue already had, because we only ever increase the allocated size.
1325     */
1326    if (queue->initial_full_flush_preamble_cs && queue->ring_info.scratch_size_per_wave == needs.scratch_size_per_wave &&
1327        queue->ring_info.scratch_waves == needs.scratch_waves &&
1328        queue->ring_info.compute_scratch_size_per_wave == needs.compute_scratch_size_per_wave &&
1329        queue->ring_info.compute_scratch_waves == needs.compute_scratch_waves &&
1330        queue->ring_info.esgs_ring_size == needs.esgs_ring_size &&
1331        queue->ring_info.gsvs_ring_size == needs.gsvs_ring_size && queue->ring_info.tess_rings == needs.tess_rings &&
1332        queue->ring_info.task_rings == needs.task_rings &&
1333        queue->ring_info.mesh_scratch_ring == needs.mesh_scratch_ring && queue->ring_info.ge_rings == needs.ge_rings &&
1334        queue->ring_info.gds == needs.gds && queue->ring_info.gds_oa == needs.gds_oa &&
1335        queue->ring_info.sample_positions == needs.sample_positions)
1336       return VK_SUCCESS;
1337 
1338    return radv_update_preamble_cs(queue, device, &needs);
1339 }
1340 
1341 /* Creates a postamble CS that executes cache flush commands,
1342  * for use at the end of each submission.
1343  *
1344  * GFX6: The kernel flushes L2 before shaders are finished.
1345  *       Therefore we need to wait for idle at the end of each submission.
1346  */
1347 static VkResult
1348 radv_create_flush_postamble(struct radv_queue *queue)
1349 {
1350    const struct radv_device *device = radv_queue_device(queue);
1351    const struct radv_physical_device *pdev = radv_device_physical(device);
1352    const enum amd_ip_type ip = radv_queue_family_to_ring(pdev, queue->state.qf);
1353    struct radeon_winsys *ws = device->ws;
1354 
1355    struct radeon_cmdbuf *cs = ws->cs_create(ws, ip, false);
1356    if (!cs)
1357       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1358 
1359    radeon_check_space(ws, cs, 256);
1360 
1361    const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
1362    enum radv_cmd_flush_bits flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
1363 
1364    if (ip == AMD_IP_GFX)
1365       flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1366 
1367    enum rgp_flush_bits sqtt_flush_bits = 0;
1368    radv_cs_emit_cache_flush(ws, cs, gfx_level, NULL, 0, queue->state.qf, flush_bits, &sqtt_flush_bits, 0);
1369 
1370    VkResult r = ws->cs_finalize(cs);
1371    if (r != VK_SUCCESS) {
1372       ws->cs_destroy(cs);
1373       return r;
1374    }
1375 
1376    queue->state.flush_postamble_cs = cs;
1377    return VK_SUCCESS;
1378 }
1379 
1380 static VkResult
1381 radv_create_gang_wait_preambles_postambles(struct radv_queue *queue)
1382 {
1383    struct radv_device *device = radv_queue_device(queue);
1384    const struct radv_physical_device *pdev = radv_device_physical(device);
1385 
1386    if (queue->gang_sem_bo)
1387       return VK_SUCCESS;
1388 
1389    VkResult r = VK_SUCCESS;
1390    struct radeon_winsys *ws = device->ws;
1391    const enum amd_ip_type leader_ip = radv_queue_family_to_ring(pdev, queue->state.qf);
1392    struct radeon_winsys_bo *gang_sem_bo = NULL;
1393 
1394    /* Gang semaphores BO.
1395     * DWORD 0: used in preambles, gang leader writes, gang members wait.
1396     * DWORD 1: used in postambles, gang leader waits, gang members write.
1397     */
1398    r = radv_bo_create(device, NULL, 8, 4, RADEON_DOMAIN_VRAM,
1399                       RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM, RADV_BO_PRIORITY_SCRATCH, 0, true,
1400                       &gang_sem_bo);
1401    if (r != VK_SUCCESS)
1402       return r;
1403 
1404    struct radeon_cmdbuf *leader_pre_cs = ws->cs_create(ws, leader_ip, false);
1405    struct radeon_cmdbuf *leader_post_cs = ws->cs_create(ws, leader_ip, false);
1406    struct radeon_cmdbuf *ace_pre_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1407    struct radeon_cmdbuf *ace_post_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1408 
1409    if (!leader_pre_cs || !leader_post_cs || !ace_pre_cs || !ace_post_cs) {
1410       r = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1411       goto fail;
1412    }
1413 
1414    radeon_check_space(ws, leader_pre_cs, 256);
1415    radeon_check_space(ws, leader_post_cs, 256);
1416    radeon_check_space(ws, ace_pre_cs, 256);
1417    radeon_check_space(ws, ace_post_cs, 256);
1418 
1419    radv_cs_add_buffer(ws, leader_pre_cs, gang_sem_bo);
1420    radv_cs_add_buffer(ws, leader_post_cs, gang_sem_bo);
1421    radv_cs_add_buffer(ws, ace_pre_cs, gang_sem_bo);
1422    radv_cs_add_buffer(ws, ace_post_cs, gang_sem_bo);
1423 
1424    const uint64_t ace_wait_va = radv_buffer_get_va(gang_sem_bo);
1425    const uint64_t leader_wait_va = ace_wait_va + 4;
1426    const uint32_t zero = 0;
1427    const uint32_t one = 1;
1428 
1429    /* Preambles for gang submission.
1430     * Make gang members wait until the gang leader starts.
1431     * Userspace is required to emit this wait to make sure it behaves correctly
1432     * in a multi-process environment, because task shader dispatches are not
1433     * meant to be executed on multiple compute engines at the same time.
1434     */
1435    radv_cp_wait_mem(ace_pre_cs, RADV_QUEUE_COMPUTE, WAIT_REG_MEM_GREATER_OR_EQUAL, ace_wait_va, 1, 0xffffffff);
1436    radv_cs_write_data(device, ace_pre_cs, RADV_QUEUE_COMPUTE, V_370_ME, ace_wait_va, 1, &zero, false);
1437    radv_cs_write_data(device, leader_pre_cs, queue->state.qf, V_370_ME, ace_wait_va, 1, &one, false);
1438 
1439    /* Create postambles for gang submission.
1440     * These make the gang leader wait for the whole gang, which is necessary
1441     * because the kernel signals the userspace fence as soon as the gang leader
1442     * is done. Otherwise the same command buffers could be submitted again
1443     * while they are still being executed, which would lead to bugs.
1444     */
1445    radv_cp_wait_mem(leader_post_cs, queue->state.qf, WAIT_REG_MEM_GREATER_OR_EQUAL, leader_wait_va, 1, 0xffffffff);
1446    radv_cs_write_data(device, leader_post_cs, queue->state.qf, V_370_ME, leader_wait_va, 1, &zero, false);
1447    radv_cs_emit_write_event_eop(ace_post_cs, pdev->info.gfx_level, RADV_QUEUE_COMPUTE, V_028A90_BOTTOM_OF_PIPE_TS, 0,
1448                                 EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, leader_wait_va, 1, 0);
1449 
1450    r = ws->cs_finalize(leader_pre_cs);
1451    if (r != VK_SUCCESS)
1452       goto fail;
1453    r = ws->cs_finalize(leader_post_cs);
1454    if (r != VK_SUCCESS)
1455       goto fail;
1456    r = ws->cs_finalize(ace_pre_cs);
1457    if (r != VK_SUCCESS)
1458       goto fail;
1459    r = ws->cs_finalize(ace_post_cs);
1460    if (r != VK_SUCCESS)
1461       goto fail;
1462 
1463    queue->gang_sem_bo = gang_sem_bo;
1464    queue->state.gang_wait_preamble_cs = leader_pre_cs;
1465    queue->state.gang_wait_postamble_cs = leader_post_cs;
1466    queue->follower_state->gang_wait_preamble_cs = ace_pre_cs;
1467    queue->follower_state->gang_wait_postamble_cs = ace_post_cs;
1468 
1469    return VK_SUCCESS;
1470 
1471 fail:
1472    if (leader_pre_cs)
1473       ws->cs_destroy(leader_pre_cs);
1474    if (leader_post_cs)
1475       ws->cs_destroy(leader_post_cs);
1476    if (ace_pre_cs)
1477       ws->cs_destroy(ace_pre_cs);
1478    if (ace_post_cs)
1479       ws->cs_destroy(ace_post_cs);
1480    if (gang_sem_bo)
1481       radv_bo_destroy(device, &queue->vk.base, gang_sem_bo);
1482 
1483    return r;
1484 }
1485 
1486 static bool
1487 radv_queue_init_follower_state(struct radv_queue *queue)
1488 {
1489    if (queue->follower_state)
1490       return true;
1491 
1492    queue->follower_state = calloc(1, sizeof(struct radv_queue_state));
1493    if (!queue->follower_state)
1494       return false;
1495 
1496    queue->follower_state->qf = RADV_QUEUE_COMPUTE;
1497    return true;
1498 }
1499 
1500 static VkResult
1501 radv_update_gang_preambles(struct radv_queue *queue)
1502 {
1503    struct radv_device *device = radv_queue_device(queue);
1504 
1505    if (!radv_queue_init_follower_state(queue))
1506       return VK_ERROR_OUT_OF_HOST_MEMORY;
1507 
1508    VkResult r = VK_SUCCESS;
1509 
1510    /* Copy task rings state.
1511     * Task shaders that are submitted on the ACE queue need to share
1512     * their ring buffers with the mesh shaders on the GFX queue.
1513     */
1514    queue->follower_state->ring_info.task_rings = queue->state.ring_info.task_rings;
1515    queue->follower_state->task_rings_bo = queue->state.task_rings_bo;
1516 
1517    /* Copy the needed state from the parent queue state.
1518     * These values can only increase, so it's okay to copy them as-is without checking.
1519     * Note: task shaders use the scratch size from their graphics pipeline.
1520     */
1521    struct radv_queue_ring_info needs = queue->follower_state->ring_info;
1522    needs.compute_scratch_size_per_wave = queue->state.ring_info.scratch_size_per_wave;
1523    needs.compute_scratch_waves = queue->state.ring_info.scratch_waves;
1524    needs.task_rings = queue->state.ring_info.task_rings;
1525 
1526    r = radv_update_preamble_cs(queue->follower_state, device, &needs);
1527    if (r != VK_SUCCESS)
1528       return r;
1529 
1530    r = radv_create_gang_wait_preambles_postambles(queue);
1531    if (r != VK_SUCCESS)
1532       return r;
1533 
1534    return VK_SUCCESS;
1535 }
1536 
1537 static struct radeon_cmdbuf *
1538 radv_create_perf_counter_lock_cs(struct radv_device *device, unsigned pass, bool unlock)
1539 {
1540    struct radeon_cmdbuf **cs_ref = &device->perf_counter_lock_cs[pass * 2 + (unlock ? 1 : 0)];
1541    struct radeon_cmdbuf *cs;
1542 
1543    if (*cs_ref)
1544       return *cs_ref;
1545 
1546    cs = device->ws->cs_create(device->ws, AMD_IP_GFX, false);
1547    if (!cs)
1548       return NULL;
1549 
1550    ASSERTED unsigned cdw = radeon_check_space(device->ws, cs, 21);
1551 
1552    radv_cs_add_buffer(device->ws, cs, device->perf_counter_bo);
1553 
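   /* Taking the lock: loop on an atomic compare-and-swap of the mutex dword in the perf counter
    * BO (swap in 1 while it reads 0), so only one submission programs the counters at a time.
    */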
1554    if (!unlock) {
1555       uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1556       radeon_emit(cs, PKT3(PKT3_ATOMIC_MEM, 7, 0));
1557       radeon_emit(cs, ATOMIC_OP(TC_OP_ATOMIC_CMPSWAP_32) | ATOMIC_COMMAND(ATOMIC_COMMAND_LOOP));
1558       radeon_emit(cs, mutex_va);       /* addr lo */
1559       radeon_emit(cs, mutex_va >> 32); /* addr hi */
1560       radeon_emit(cs, 1);              /* data lo */
1561       radeon_emit(cs, 0);              /* data hi */
1562       radeon_emit(cs, 0);              /* compare data lo */
1563       radeon_emit(cs, 0);              /* compare data hi */
1564       radeon_emit(cs, 10);             /* loop interval */
1565    }
1566 
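   /* Update the pass-selection words: write 0 to one 8-byte slot and 1 to another, with lock
    * and unlock swapping which slot gets set and which gets cleared (presumably so command
    * buffers can tell which pass is currently active).
    */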
1567    uint64_t va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_PASS_OFFSET;
1568    uint64_t unset_va = va + (unlock ? 8 * pass : 0);
1569    uint64_t set_va = va + (unlock ? 0 : 8 * pass);
1570 
1571    radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1572    radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1573                       COPY_DATA_WR_CONFIRM);
1574    radeon_emit(cs, 0); /* immediate */
1575    radeon_emit(cs, 0);
1576    radeon_emit(cs, unset_va);
1577    radeon_emit(cs, unset_va >> 32);
1578 
1579    radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1580    radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1581                       COPY_DATA_WR_CONFIRM);
1582    radeon_emit(cs, 1); /* immediate */
1583    radeon_emit(cs, 0);
1584    radeon_emit(cs, set_va);
1585    radeon_emit(cs, set_va >> 32);
1586 
1587    if (unlock) {
1588       uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1589 
1590       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1591       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1592                          COPY_DATA_WR_CONFIRM);
1593       radeon_emit(cs, 0); /* immediate */
1594       radeon_emit(cs, 0);
1595       radeon_emit(cs, mutex_va);
1596       radeon_emit(cs, mutex_va >> 32);
1597    }
1598 
1599    assert(cs->cdw <= cdw);
1600 
1601    VkResult result = device->ws->cs_finalize(cs);
1602    if (result != VK_SUCCESS) {
1603       device->ws->cs_destroy(cs);
1604       return NULL;
1605    }
1606 
1607    /* The casts avoid MSVC errors about pointer truncation in a branch that is
1608     * never taken.
1609     */
1610    if (p_atomic_cmpxchg((uintptr_t *)cs_ref, 0, (uintptr_t)cs) != 0) {
1611       device->ws->cs_destroy(cs);
1612    }
1613 
1614    return *cs_ref;
1615 }
1616 
1617 static void
1618 radv_get_shader_upload_sync_wait(struct radv_device *device, uint64_t shader_upload_seq,
1619                                  struct vk_sync_wait *out_sync_wait)
1620 {
1621    struct vk_semaphore *semaphore = vk_semaphore_from_handle(device->shader_upload_sem);
1622    struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
1623    *out_sync_wait = (struct vk_sync_wait){
1624       .sync = sync,
1625       .wait_value = shader_upload_seq,
1626       .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
1627    };
1628 }
1629 
1630 static VkResult
1631 radv_queue_submit_normal(struct radv_queue *queue, struct vk_queue_submit *submission)
1632 {
1633    struct radv_device *device = radv_queue_device(queue);
1634    struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1635    bool use_ace = false;
1636    bool use_perf_counters = false;
1637    VkResult result;
1638    uint64_t shader_upload_seq = 0;
1639    uint32_t wait_count = submission->wait_count;
1640    struct vk_sync_wait *waits = submission->waits;
1641 
1642    result = radv_update_preambles(&queue->state, device, submission->command_buffers, submission->command_buffer_count,
1643                                   &use_perf_counters, &use_ace);
1644    if (result != VK_SUCCESS)
1645       return result;
1646 
1647    if (use_ace) {
1648       result = radv_update_gang_preambles(queue);
1649       if (result != VK_SUCCESS)
1650          return result;
1651    }
1652 
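   /* With fault detection enabled, command buffers are submitted one at a time so a hang can be
    * pinned to a single CS. When a gang follower may be present, every entry can contribute a
    * second (ACE) CS, hence the doubled array size.
    */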
1653    const unsigned cmd_buffer_count = submission->command_buffer_count;
1654    const unsigned max_cs_submission = radv_device_fault_detection_enabled(device) ? 1 : cmd_buffer_count;
1655    const unsigned cs_array_size = (use_ace ? 2 : 1) * MIN2(max_cs_submission, cmd_buffer_count);
1656 
1657    struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) * cs_array_size);
1658    if (!cs_array)
1659       return VK_ERROR_OUT_OF_HOST_MEMORY;
1660 
1661    if (radv_device_fault_detection_enabled(device))
1662       simple_mtx_lock(&device->trace_mtx);
1663 
1664    for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
1665       struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j];
1666       shader_upload_seq = MAX2(shader_upload_seq, cmd_buffer->shader_upload_seq);
1667    }
1668 
1669    if (shader_upload_seq > queue->last_shader_upload_seq) {
1670       /* Patch the wait array to also wait for the referenced shaders to finish uploading. */
1671       struct vk_sync_wait *new_waits = malloc(sizeof(struct vk_sync_wait) * (wait_count + 1));
1672       if (!new_waits) {
1673          result = VK_ERROR_OUT_OF_HOST_MEMORY;
1674          goto fail;
1675       }
1676 
1677       memcpy(new_waits, submission->waits, sizeof(struct vk_sync_wait) * submission->wait_count);
1678       radv_get_shader_upload_sync_wait(device, shader_upload_seq, &new_waits[submission->wait_count]);
1679 
1680       waits = new_waits;
1681       wait_count += 1;
1682    }
1683 
1684    /* For fences on the same queue/VM, amdgpu doesn't wait until all processing is finished
1685     * before starting the next command buffer, so we need to do it here.
1686     */
1687    const bool need_wait = wait_count > 0;
1688    unsigned num_initial_preambles = 0;
1689    unsigned num_continue_preambles = 0;
1690    unsigned num_postambles = 0;
1691    struct radeon_cmdbuf *initial_preambles[5] = {0};
1692    struct radeon_cmdbuf *continue_preambles[5] = {0};
1693    struct radeon_cmdbuf *postambles[4] = {0};
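   /* The array sizes above cover the worst case: initial/continue preamble plus the perf counter
    * lock CS plus up to three gang/follower CS, and perf counter unlock plus flush postamble
    * plus the two gang-wait postambles.
    */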
1694 
1695    if (queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE) {
1696       initial_preambles[num_initial_preambles++] =
1697          need_wait ? queue->state.initial_full_flush_preamble_cs : queue->state.initial_preamble_cs;
1698 
1699       continue_preambles[num_continue_preambles++] = queue->state.continue_preamble_cs;
1700 
1701       if (use_perf_counters) {
1702          /* RADV only supports perf counters on the GFX queue currently. */
1703          assert(queue->state.qf == RADV_QUEUE_GENERAL);
1704 
1705          /* Create the lock/unlock CS. */
1706          struct radeon_cmdbuf *perf_ctr_lock_cs =
1707             radv_create_perf_counter_lock_cs(device, submission->perf_pass_index, false);
1708          struct radeon_cmdbuf *perf_ctr_unlock_cs =
1709             radv_create_perf_counter_lock_cs(device, submission->perf_pass_index, true);
1710 
1711          if (!perf_ctr_lock_cs || !perf_ctr_unlock_cs) {
1712             result = VK_ERROR_OUT_OF_HOST_MEMORY;
1713             goto fail;
1714          }
1715 
1716          initial_preambles[num_initial_preambles++] = perf_ctr_lock_cs;
1717          continue_preambles[num_continue_preambles++] = perf_ctr_lock_cs;
1718          postambles[num_postambles++] = perf_ctr_unlock_cs;
1719       }
1720    }
1721 
1722    if (queue->state.flush_postamble_cs) {
1723       postambles[num_postambles++] = queue->state.flush_postamble_cs;
1724    }
1725 
1726    const unsigned num_1q_initial_preambles = num_initial_preambles;
1727    const unsigned num_1q_continue_preambles = num_continue_preambles;
1728    const unsigned num_1q_postambles = num_postambles;
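   /* The counts snapshotted above are used for submissions without an ACE follower; gang
    * submissions additionally get the gang-wait and follower pre-/postambles appended below.
    */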
1729 
1730    if (use_ace) {
1731       initial_preambles[num_initial_preambles++] = queue->state.gang_wait_preamble_cs;
1732       initial_preambles[num_initial_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1733       initial_preambles[num_initial_preambles++] =
1734          need_wait ? queue->follower_state->initial_full_flush_preamble_cs : queue->follower_state->initial_preamble_cs;
1735 
1736       continue_preambles[num_continue_preambles++] = queue->state.gang_wait_preamble_cs;
1737       continue_preambles[num_continue_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1738       continue_preambles[num_continue_preambles++] = queue->follower_state->continue_preamble_cs;
1739 
1740       postambles[num_postambles++] = queue->follower_state->gang_wait_postamble_cs;
1741       postambles[num_postambles++] = queue->state.gang_wait_postamble_cs;
1742    }
1743 
1744    struct radv_winsys_submit_info submit = {
1745       .ip_type = radv_queue_ring(queue),
1746       .queue_index = queue->vk.index_in_family,
1747       .cs_array = cs_array,
1748       .cs_count = 0,
1749       .initial_preamble_count = num_1q_initial_preambles,
1750       .continue_preamble_count = num_1q_continue_preambles,
1751       .postamble_count = num_1q_postambles,
1752       .initial_preamble_cs = initial_preambles,
1753       .continue_preamble_cs = continue_preambles,
1754       .postamble_cs = postambles,
1755       .uses_shadow_regs = queue->state.uses_shadow_regs,
1756    };
1757 
1758    for (uint32_t j = 0, advance; j < cmd_buffer_count; j += advance) {
1759       advance = MIN2(max_cs_submission, cmd_buffer_count - j);
1760       const bool last_submit = j + advance == cmd_buffer_count;
1761       bool submit_ace = false;
1762       unsigned num_submitted_cs = 0;
1763 
1764       if (radv_device_fault_detection_enabled(device))
1765          device->trace_data->primary_id = 0;
1766 
1767       struct radeon_cmdbuf *chainable = NULL;
1768       struct radeon_cmdbuf *chainable_ace = NULL;
1769 
1770       /* Add CS from submitted command buffers. */
1771       for (unsigned c = 0; c < advance; ++c) {
1772          struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j + c];
1773          assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1774          const bool can_chain_next = !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
1775 
1776          /* Follower needs to be before the gang leader because the last CS must match the queue's IP type. */
1777          if (cmd_buffer->gang.cs) {
1778             device->ws->cs_unchain(cmd_buffer->gang.cs);
1779             if (!chainable_ace || !device->ws->cs_chain(chainable_ace, cmd_buffer->gang.cs, false)) {
1780                cs_array[num_submitted_cs++] = cmd_buffer->gang.cs;
1781 
1782                /* Prevent chaining the gang leader when the follower couldn't be chained.
1783                 * Otherwise, they would be in the wrong order.
1784                 */
1785                chainable = NULL;
1786             }
1787 
1788             chainable_ace = can_chain_next ? cmd_buffer->gang.cs : NULL;
1789             submit_ace = true;
1790          }
1791 
1792          device->ws->cs_unchain(cmd_buffer->cs);
1793          if (!chainable || !device->ws->cs_chain(chainable, cmd_buffer->cs, queue->state.uses_shadow_regs)) {
1794             /* Don't submit empty command buffers to the kernel. */
1795             if ((radv_queue_ring(queue) != AMD_IP_VCN_ENC && radv_queue_ring(queue) != AMD_IP_UVD) ||
1796                 cmd_buffer->cs->cdw != 0)
1797                cs_array[num_submitted_cs++] = cmd_buffer->cs;
1798          }
1799 
1800          chainable = can_chain_next ? cmd_buffer->cs : NULL;
1801       }
1802 
1803       submit.cs_count = num_submitted_cs;
1804       submit.initial_preamble_count = submit_ace ? num_initial_preambles : num_1q_initial_preambles;
1805       submit.continue_preamble_count = submit_ace ? num_continue_preambles : num_1q_continue_preambles;
1806       submit.postamble_count = submit_ace ? num_postambles : num_1q_postambles;
1807 
1808       result = device->ws->cs_submit(ctx, &submit, j == 0 ? wait_count : 0, waits,
1809                                      last_submit ? submission->signal_count : 0, submission->signals);
1810 
1811       if (result != VK_SUCCESS)
1812          goto fail;
1813 
1814       if (radv_device_fault_detection_enabled(device)) {
1815          result = radv_check_gpu_hangs(queue, &submit);
1816       }
1817 
1818       if (device->tma_bo) {
1819          radv_check_trap_handler(queue);
1820       }
1821 
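      /* Later chunks in this batch don't need the full flush (their waits were consumed by the
       * first submit), so switch to the regular initial preambles.
       */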
1822       initial_preambles[0] = queue->state.initial_preamble_cs;
1823       initial_preambles[1] = !use_ace ? NULL : queue->follower_state->initial_preamble_cs;
1824    }
1825 
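   /* Remember the highest shader upload sequence number we waited for, so future submissions
    * with the same or a lower requirement can skip the extra wait.
    */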
1826    queue->last_shader_upload_seq = MAX2(queue->last_shader_upload_seq, shader_upload_seq);
1827 
1828    radv_dump_printf_data(device, stdout);
1829 
1830 fail:
1831    free(cs_array);
1832    if (waits != submission->waits)
1833       free(waits);
1834    if (radv_device_fault_detection_enabled(device))
1835       simple_mtx_unlock(&device->trace_mtx);
1836 
1837    return result;
1838 }
1839 
1840 static void
1841 radv_report_gpuvm_fault(struct radv_device *device)
1842 {
1843    const struct radv_physical_device *pdev = radv_device_physical(device);
1844    struct radv_winsys_gpuvm_fault_info fault_info = {0};
1845 
1846    if (!radv_vm_fault_occurred(device, &fault_info))
1847       return;
1848 
1849    fprintf(stderr, "radv: GPUVM fault detected at address 0x%08" PRIx64 ".\n", fault_info.addr);
1850    ac_print_gpuvm_fault_status(stderr, pdev->info.gfx_level, fault_info.status);
1851 }
1852 
1853 static VkResult
1854 radv_queue_sparse_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1855 {
1856    struct radv_queue *queue = (struct radv_queue *)vqueue;
1857    struct radv_device *device = radv_queue_device(queue);
1858    VkResult result;
1859 
1860    result = radv_queue_submit_bind_sparse_memory(device, submission);
1861    if (result != VK_SUCCESS)
1862       goto fail;
1863 
1864    /* We do a CPU wait here, in part to avoid more winsys mechanisms. With the likely kernel
1865     * explicit-sync mechanism we would need a CPU wait anyway. This hasn't shown up as a perf
1866     * issue yet, but it does require the queue to always have its submission thread enabled. */
1867    result = vk_sync_wait_many(&device->vk, submission->wait_count, submission->waits, 0, UINT64_MAX);
1868    if (result != VK_SUCCESS)
1869       goto fail;
1870 
1871    /* Ignore all the command buffers. They're necessarily empty anyway. */
1872 
1873    for (unsigned i = 0; i < submission->signal_count; ++i) {
1874       result = vk_sync_signal(&device->vk, submission->signals[i].sync, submission->signals[i].signal_value);
1875       if (result != VK_SUCCESS)
1876          goto fail;
1877    }
1878 
1879 fail:
1880    if (result != VK_SUCCESS) {
1881       /* When something bad happened during the submission, such as
1882        * an out of memory issue, it might be hard to recover from
1883        * this inconsistent state. To avoid this sort of problem, we
1884        * assume that we are in a really bad situation and return
1885        * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1886        * to submit the same job again to this device.
1887        */
1888       radv_report_gpuvm_fault(device);
1889       result = vk_device_set_lost(&device->vk, "vkQueueSubmit() failed");
1890    }
1891    return result;
1892 }
1893 
1894 static VkResult
1895 radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1896 {
1897    struct radv_queue *queue = (struct radv_queue *)vqueue;
1898    struct radv_device *device = radv_queue_device(queue);
1899    const struct radv_physical_device *pdev = radv_device_physical(device);
1900    VkResult result;
1901 
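   /* Without a dedicated sparse queue, sparse binds ride along with regular submissions and are
    * applied here; otherwise they are routed to radv_queue_sparse_submit and must not appear.
    */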
1902    if (!radv_sparse_queue_enabled(pdev)) {
1903       result = radv_queue_submit_bind_sparse_memory(device, submission);
1904       if (result != VK_SUCCESS)
1905          goto fail;
1906    } else {
1907       assert(!submission->buffer_bind_count && !submission->image_bind_count && !submission->image_opaque_bind_count);
1908    }
1909 
1910    if (!submission->command_buffer_count && !submission->wait_count && !submission->signal_count)
1911       return VK_SUCCESS;
1912 
1913    if (!submission->command_buffer_count) {
1914       result = radv_queue_submit_empty(queue, submission);
1915    } else {
1916       result = radv_queue_submit_normal(queue, submission);
1917    }
1918 
1919 fail:
1920    if (result != VK_SUCCESS) {
1921       /* When something bad happened during the submission, such as
1922        * an out of memory issue, it might be hard to recover from
1923        * this inconsistent state. To avoid this sort of problem, we
1924        * assume that we are in a really bad situation and return
1925        * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1926        * to submit the same job again to this device.
1927        */
1928       radv_report_gpuvm_fault(device);
1929       result = vk_device_set_lost(&device->vk, "vkQueueSubmit() failed");
1930    }
1931    return result;
1932 }
1933 
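/* Submits a single, internally built CS with no preambles and no sync objects. A minimal usage
 * sketch (hypothetical caller, error handling elided):
 *
 *    struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, radv_queue_ring(queue), false);
 *    // ... emit packets into cs ...
 *    if (device->ws->cs_finalize(cs) == VK_SUCCESS)
 *       radv_queue_internal_submit(queue, cs);
 *    device->ws->cs_destroy(cs);
 */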
1934 bool
1935 radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
1936 {
1937    struct radv_device *device = radv_queue_device(queue);
1938    struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1939    struct radv_winsys_submit_info submit = {
1940       .ip_type = radv_queue_ring(queue),
1941       .queue_index = queue->vk.index_in_family,
1942       .cs_array = &cs,
1943       .cs_count = 1,
1944    };
1945 
1946    VkResult result = device->ws->cs_submit(ctx, &submit, 0, NULL, 0, NULL);
1947    if (result != VK_SUCCESS)
1948       return false;
1949 
1950    return true;
1951 }
1952 
1953 int
1954 radv_queue_init(struct radv_device *device, struct radv_queue *queue, int idx,
1955                 const VkDeviceQueueCreateInfo *create_info,
1956                 const VkDeviceQueueGlobalPriorityCreateInfo *global_priority)
1957 {
1958    const struct radv_physical_device *pdev = radv_device_physical(device);
1959 
1960    queue->priority = radv_get_queue_global_priority(global_priority);
1961    queue->hw_ctx = device->hw_ctx[queue->priority];
1962    queue->state.qf = vk_queue_to_radv(pdev, create_info->queueFamilyIndex);
1963 
1964    VkResult result = vk_queue_init(&queue->vk, &device->vk, create_info, idx);
1965    if (result != VK_SUCCESS)
1966       return result;
1967 
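   /* Register shadowing only applies to the GFX queue; it needs both a preamble that sets up
    * shadowing and an initialized shadowed-register buffer.
    */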
1968    queue->state.uses_shadow_regs = device->uses_shadow_regs && queue->state.qf == RADV_QUEUE_GENERAL;
1969    if (queue->state.uses_shadow_regs) {
1970       result = radv_create_shadow_regs_preamble(device, &queue->state);
1971       if (result != VK_SUCCESS)
1972          goto fail;
1973       result = radv_init_shadowed_regs_buffer_state(device, queue);
1974       if (result != VK_SUCCESS)
1975          goto fail;
1976    }
1977 
1978    if (pdev->info.gfx_level == GFX6 &&
1979        (queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE)) {
1980       result = radv_create_flush_postamble(queue);
1981       if (result != VK_SUCCESS)
1982          goto fail;
1983    }
1984 
1985    if (queue->state.qf == RADV_QUEUE_SPARSE) {
1986       queue->vk.driver_submit = radv_queue_sparse_submit;
1987       vk_queue_enable_submit_thread(&queue->vk);
1988    } else {
1989       queue->vk.driver_submit = radv_queue_submit;
1990    }
1991    return VK_SUCCESS;
1992 fail:
1993    vk_queue_finish(&queue->vk);
1994    return result;
1995 }
1996 
1997 static void
1998 radv_queue_state_finish(struct radv_queue_state *queue, struct radv_device *device)
1999 {
2000    radv_destroy_shadow_regs_preamble(device, queue, device->ws);
2001    if (queue->initial_full_flush_preamble_cs)
2002       device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
2003    if (queue->initial_preamble_cs)
2004       device->ws->cs_destroy(queue->initial_preamble_cs);
2005    if (queue->continue_preamble_cs)
2006       device->ws->cs_destroy(queue->continue_preamble_cs);
2007    if (queue->gang_wait_preamble_cs)
2008       device->ws->cs_destroy(queue->gang_wait_preamble_cs);
2009    if (queue->gang_wait_postamble_cs)
2010       device->ws->cs_destroy(queue->gang_wait_postamble_cs);
2011    if (queue->flush_postamble_cs)
2012       device->ws->cs_destroy(queue->flush_postamble_cs);
2013    if (queue->descriptor_bo)
2014       radv_bo_destroy(device, NULL, queue->descriptor_bo);
2015    if (queue->scratch_bo) {
2016       radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
2017       radv_bo_destroy(device, NULL, queue->scratch_bo);
2018    }
2019    if (queue->esgs_ring_bo) {
2020       radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
2021       radv_bo_destroy(device, NULL, queue->esgs_ring_bo);
2022    }
2023    if (queue->gsvs_ring_bo) {
2024       radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
2025       radv_bo_destroy(device, NULL, queue->gsvs_ring_bo);
2026    }
2027    if (queue->tess_rings_bo) {
2028       radv_rmv_log_command_buffer_bo_destroy(device, queue->tess_rings_bo);
2029       radv_bo_destroy(device, NULL, queue->tess_rings_bo);
2030    }
2031    if (queue->task_rings_bo) {
2032       radv_rmv_log_command_buffer_bo_destroy(device, queue->task_rings_bo);
2033       radv_bo_destroy(device, NULL, queue->task_rings_bo);
2034    }
2035    if (queue->mesh_scratch_ring_bo) {
2036       radv_rmv_log_command_buffer_bo_destroy(device, queue->mesh_scratch_ring_bo);
2037       radv_bo_destroy(device, NULL, queue->mesh_scratch_ring_bo);
2038    }
2039    if (queue->ge_rings_bo) {
2040       radv_rmv_log_command_buffer_bo_destroy(device, queue->ge_rings_bo);
2041       radv_bo_destroy(device, NULL, queue->ge_rings_bo);
2042    }
2043    if (queue->gds_bo) {
2044       device->ws->buffer_make_resident(device->ws, queue->gds_bo, false);
2045       radv_bo_destroy(device, NULL, queue->gds_bo);
2046    }
2047    if (queue->gds_oa_bo) {
2048       device->ws->buffer_make_resident(device->ws, queue->gds_oa_bo, false);
2049       radv_bo_destroy(device, NULL, queue->gds_oa_bo);
2050    }
2051    if (queue->compute_scratch_bo) {
2052       radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
2053       radv_bo_destroy(device, NULL, queue->compute_scratch_bo);
2054    }
2055 }
2056 
2057 void
2058 radv_queue_finish(struct radv_queue *queue)
2059 {
2060    struct radv_device *device = radv_queue_device(queue);
2061 
2062    if (queue->follower_state) {
2063       /* Prevent double free */
2064       queue->follower_state->task_rings_bo = NULL;
2065 
2066       /* Clean up the internal ACE queue state. */
2067       radv_queue_state_finish(queue->follower_state, device);
2068       free(queue->follower_state);
2069    }
2070 
2071    if (queue->gang_sem_bo)
2072       radv_bo_destroy(device, &queue->vk.base, queue->gang_sem_bo);
2073 
2074    radv_queue_state_finish(&queue->state, device);
2075    vk_queue_finish(&queue->vk);
2076 }
2077 
2078 enum amd_ip_type
2079 radv_queue_ring(const struct radv_queue *queue)
2080 {
2081    struct radv_device *device = radv_queue_device(queue);
2082    const struct radv_physical_device *pdev = radv_device_physical(device);
2083    return radv_queue_family_to_ring(pdev, queue->state.qf);
2084 }
2085 
2086 enum amd_ip_type
2087 radv_queue_family_to_ring(const struct radv_physical_device *pdev, enum radv_queue_family f)
2088 {
2089    switch (f) {
2090    case RADV_QUEUE_GENERAL:
2091       return AMD_IP_GFX;
2092    case RADV_QUEUE_COMPUTE:
2093       return AMD_IP_COMPUTE;
2094    case RADV_QUEUE_TRANSFER:
2095       return AMD_IP_SDMA;
2096    case RADV_QUEUE_VIDEO_DEC:
2097       return pdev->vid_decode_ip;
2098    case RADV_QUEUE_VIDEO_ENC:
2099       return AMD_IP_VCN_ENC;
2100    default:
2101       unreachable("Unknown queue family");
2102    }
2103 }
2104