1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_cs.h"
29 #include "radv_debug.h"
30 #include "radv_private.h"
31 #include "vk_semaphore.h"
32 #include "vk_sync.h"
33
34 #include "ac_debug.h"
35
36 enum radeon_ctx_priority
37 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoKHR *pObj)
38 {
39 /* Default to MEDIUM when a specific global priority isn't requested */
40 if (!pObj)
41 return RADEON_CTX_PRIORITY_MEDIUM;
42
43 switch (pObj->globalPriority) {
44 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR:
45 return RADEON_CTX_PRIORITY_REALTIME;
46 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR:
47 return RADEON_CTX_PRIORITY_HIGH;
48 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR:
49 return RADEON_CTX_PRIORITY_MEDIUM;
50 case VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR:
51 return RADEON_CTX_PRIORITY_LOW;
52 default:
53 unreachable("Illegal global priority value");
54 return RADEON_CTX_PRIORITY_INVALID;
55 }
56 }
57
58 static VkResult
59 radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind)
60 {
61 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
62 VkResult result = VK_SUCCESS;
63
64 struct radv_device_memory *mem = NULL;
65 VkDeviceSize resourceOffset = 0;
66 VkDeviceSize size = 0;
67 VkDeviceSize memoryOffset = 0;
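/* Coalesce consecutive binds: while the next pBind targets the same memory object and
 * continues right after the previous range, just grow the current range instead of
 * issuing another buffer_virtual_bind() call (for unbinds, only the buffer range needs
 * to be contiguous).
 */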
68 for (uint32_t i = 0; i < bind->bindCount; ++i) {
69 struct radv_device_memory *cur_mem = NULL;
70
71 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
72 cur_mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
73 if (i && mem == cur_mem) {
74 if (mem) {
75 if (bind->pBinds[i].resourceOffset == resourceOffset + size &&
76 bind->pBinds[i].memoryOffset == memoryOffset + size) {
77 size += bind->pBinds[i].size;
78 continue;
79 }
80 } else {
81 if (bind->pBinds[i].resourceOffset == resourceOffset + size) {
82 size += bind->pBinds[i].size;
83 continue;
84 }
85 }
86 }
87 if (size) {
88 result = device->ws->buffer_virtual_bind(device->ws, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
89 memoryOffset);
90 if (result != VK_SUCCESS)
91 return result;
92
93 if (bind->pBinds[i].memory)
94 radv_rmv_log_sparse_add_residency(device, buffer->bo, memoryOffset);
95 else
96 radv_rmv_log_sparse_remove_residency(device, buffer->bo, memoryOffset);
97 }
98 mem = cur_mem;
99 resourceOffset = bind->pBinds[i].resourceOffset;
100 size = bind->pBinds[i].size;
101 memoryOffset = bind->pBinds[i].memoryOffset;
102 }
103 if (size) {
104 result = device->ws->buffer_virtual_bind(device->ws, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
105 memoryOffset);
106
107 if (mem)
108 radv_rmv_log_sparse_add_residency(device, buffer->bo, memoryOffset);
109 else
110 radv_rmv_log_sparse_remove_residency(device, buffer->bo, memoryOffset);
111 }
112
113 return result;
114 }
115
116 static VkResult
117 radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind)
118 {
119 RADV_FROM_HANDLE(radv_image, image, bind->image);
120 VkResult result;
121
122 for (uint32_t i = 0; i < bind->bindCount; ++i) {
123 struct radv_device_memory *mem = NULL;
124
125 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
126 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
127
128 result =
129 device->ws->buffer_virtual_bind(device->ws, image->bindings[0].bo, bind->pBinds[i].resourceOffset,
130 bind->pBinds[i].size, mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
131 if (result != VK_SUCCESS)
132 return result;
133
134 if (bind->pBinds[i].memory)
135 radv_rmv_log_sparse_add_residency(device, image->bindings[0].bo, bind->pBinds[i].resourceOffset);
136 else
137 radv_rmv_log_sparse_remove_residency(device, image->bindings[0].bo, bind->pBinds[i].resourceOffset);
138 }
139
140 return VK_SUCCESS;
141 }
142
143 static VkResult
144 radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMemoryBindInfo *bind)
145 {
146 RADV_FROM_HANDLE(radv_image, image, bind->image);
147 struct radeon_surf *surface = &image->planes[0].surface;
148 uint32_t bs = vk_format_get_blocksize(image->vk.format);
149 VkResult result;
150
151 for (uint32_t i = 0; i < bind->bindCount; ++i) {
152 struct radv_device_memory *mem = NULL;
153 uint64_t offset, depth_pitch;
154 uint32_t pitch;
155 uint64_t mem_offset = bind->pBinds[i].memoryOffset;
156 const uint32_t layer = bind->pBinds[i].subresource.arrayLayer;
157 const uint32_t level = bind->pBinds[i].subresource.mipLevel;
158
159 VkExtent3D bind_extent = bind->pBinds[i].extent;
160 bind_extent.width = DIV_ROUND_UP(bind_extent.width, vk_format_get_blockwidth(image->vk.format));
161 bind_extent.height = DIV_ROUND_UP(bind_extent.height, vk_format_get_blockheight(image->vk.format));
162
163 VkOffset3D bind_offset = bind->pBinds[i].offset;
164 bind_offset.x /= vk_format_get_blockwidth(image->vk.format);
165 bind_offset.y /= vk_format_get_blockheight(image->vk.format);
166
167 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
168 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
169
170 if (device->physical_device->rad_info.gfx_level >= GFX9) {
171 offset = surface->u.gfx9.surf_slice_size * layer + surface->u.gfx9.prt_level_offset[level];
172 pitch = surface->u.gfx9.prt_level_pitch[level];
173 depth_pitch = surface->u.gfx9.surf_slice_size;
174 } else {
175 depth_pitch = surface->u.legacy.level[level].slice_size_dw * 4;
176 offset = (uint64_t)surface->u.legacy.level[level].offset_256B * 256 + depth_pitch * layer;
177 pitch = surface->u.legacy.level[level].nblk_x;
178 }
179
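/* Convert the tile-aligned bind offset into a byte offset inside the subresource:
 * whole slices are depth_pitch bytes apart, and the x/y terms account for the PRT
 * tile layout, where the texels of one sparse tile are stored contiguously.
 */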
180 offset +=
181 bind_offset.z * depth_pitch + ((uint64_t)bind_offset.y * pitch * surface->prt_tile_depth +
182 (uint64_t)bind_offset.x * surface->prt_tile_height * surface->prt_tile_depth) *
183 bs;
184
185 uint32_t aligned_extent_width = ALIGN(bind_extent.width, surface->prt_tile_width);
186 uint32_t aligned_extent_height = ALIGN(bind_extent.height, surface->prt_tile_height);
187 uint32_t aligned_extent_depth = ALIGN(bind_extent.depth, surface->prt_tile_depth);
188
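/* If the bound region covers whole rows (and, for 3D images, whole slices) of the
 * subresource, its GPU VA range is contiguous and a single virtual bind is enough.
 * Otherwise, fall back to binding one row of tiles at a time below.
 */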
189 bool whole_subres = (bind_extent.height <= surface->prt_tile_height || aligned_extent_width == pitch) &&
190 (bind_extent.depth <= surface->prt_tile_depth ||
191 (uint64_t)aligned_extent_width * aligned_extent_height * bs == depth_pitch);
192
193 if (whole_subres) {
194 uint64_t size = (uint64_t)aligned_extent_width * aligned_extent_height * aligned_extent_depth * bs;
195 result = device->ws->buffer_virtual_bind(device->ws, image->bindings[0].bo, offset, size, mem ? mem->bo : NULL,
196 mem_offset);
197 if (result != VK_SUCCESS)
198 return result;
199
200 if (bind->pBinds[i].memory)
201 radv_rmv_log_sparse_add_residency(device, image->bindings[0].bo, offset);
202 else
203 radv_rmv_log_sparse_remove_residency(device, image->bindings[0].bo, offset);
204
205 } else {
206 uint32_t img_y_increment = pitch * bs * surface->prt_tile_depth;
207 uint32_t mem_y_increment = aligned_extent_width * bs * surface->prt_tile_depth;
208 uint64_t mem_z_increment = (uint64_t)aligned_extent_width * aligned_extent_height * bs;
209 uint64_t size = mem_y_increment * surface->prt_tile_height;
210 for (unsigned z = 0; z < bind_extent.depth;
211 z += surface->prt_tile_depth, offset += depth_pitch * surface->prt_tile_depth) {
212 for (unsigned y = 0; y < bind_extent.height; y += surface->prt_tile_height) {
213 result = device->ws->buffer_virtual_bind(
214 device->ws, image->bindings[0].bo, offset + (uint64_t)img_y_increment * y, size, mem ? mem->bo : NULL,
215 mem_offset + (uint64_t)mem_y_increment * y + mem_z_increment * z);
216 if (result != VK_SUCCESS)
217 return result;
218
219 if (bind->pBinds[i].memory)
220 radv_rmv_log_sparse_add_residency(device, image->bindings[0].bo, offset);
221 else
222 radv_rmv_log_sparse_remove_residency(device, image->bindings[0].bo, offset);
223 }
224 }
225 }
226 }
227
228 return VK_SUCCESS;
229 }
230
231 static VkResult
232 radv_queue_submit_bind_sparse_memory(struct radv_device *device, struct vk_queue_submit *submission)
233 {
234 for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
235 VkResult result = radv_sparse_buffer_bind_memory(device, submission->buffer_binds + i);
236 if (result != VK_SUCCESS)
237 return result;
238 }
239
240 for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
241 VkResult result = radv_sparse_image_opaque_bind_memory(device, submission->image_opaque_binds + i);
242 if (result != VK_SUCCESS)
243 return result;
244 }
245
246 for (uint32_t i = 0; i < submission->image_bind_count; ++i) {
247 VkResult result = radv_sparse_image_bind_memory(device, submission->image_binds + i);
248 if (result != VK_SUCCESS)
249 return result;
250 }
251
252 return VK_SUCCESS;
253 }
254
255 static VkResult
256 radv_queue_submit_empty(struct radv_queue *queue, struct vk_queue_submit *submission)
257 {
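/* Submit without any command buffer: the winsys still processes the wait and signal
 * semaphores, which is all an empty vk_queue_submit needs.
 */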
258 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
259 struct radv_winsys_submit_info submit = {
260 .ip_type = radv_queue_ring(queue),
261 .queue_index = queue->vk.index_in_family,
262 };
263
264 return queue->device->ws->cs_submit(ctx, &submit, submission->wait_count, submission->waits,
265 submission->signal_count, submission->signals);
266 }
267
268 static void
269 radv_fill_shader_rings(struct radv_device *device, uint32_t *desc, struct radeon_winsys_bo *scratch_bo,
270 uint32_t esgs_ring_size, struct radeon_winsys_bo *esgs_ring_bo, uint32_t gsvs_ring_size,
271 struct radeon_winsys_bo *gsvs_ring_bo, struct radeon_winsys_bo *tess_rings_bo,
272 struct radeon_winsys_bo *task_rings_bo, struct radeon_winsys_bo *mesh_scratch_ring_bo,
273 uint32_t attr_ring_size, struct radeon_winsys_bo *attr_ring_bo)
274 {
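/* Fill the descriptor BO that shaders reach through the pointer emitted by
 * radv_emit_graphics_shader_pointers()/radv_emit_compute_shader_pointers(): each ring
 * below uses one or two 4-dword buffer descriptors, and the 1x/2x/4x/8x sample position
 * tables are appended after the last ring.
 */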
275 if (scratch_bo) {
276 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
277 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
278
279 if (device->physical_device->rad_info.gfx_level >= GFX11)
280 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
281 else
282 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
283
284 desc[0] = scratch_va;
285 desc[1] = rsrc1;
286 }
287
288 desc += 4;
289
290 if (esgs_ring_bo) {
291 uint64_t esgs_va = radv_buffer_get_va(esgs_ring_bo);
292
293 /* stride 0, num records - size, add tid, swizzle, elsize4,
294 index stride 64 */
295 desc[0] = esgs_va;
296 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32);
297 desc[2] = esgs_ring_size;
298 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
299 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
300 S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1);
301
302 if (device->physical_device->rad_info.gfx_level >= GFX11)
303 desc[1] |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
304 else
305 desc[1] |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
306
307 if (device->physical_device->rad_info.gfx_level >= GFX11) {
308 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
309 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
310 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
311 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
312 } else if (device->physical_device->rad_info.gfx_level >= GFX8) {
313 /* DATA_FORMAT is STRIDE[14:17] for MUBUF with ADD_TID_ENABLE=1 */
314 desc[3] |=
315 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(0) | S_008F0C_ELEMENT_SIZE(1);
316 } else {
317 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
318 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) | S_008F0C_ELEMENT_SIZE(1);
319 }
320
321 /* GS entry for ES->GS ring */
322 /* stride 0, num records - size, elsize0,
323 index stride 0 */
324 desc[4] = esgs_va;
325 desc[5] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32);
326 desc[6] = esgs_ring_size;
327 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
328 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
329
330 if (device->physical_device->rad_info.gfx_level >= GFX11) {
331 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
332 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
333 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
334 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
335 } else {
336 desc[7] |=
337 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
338 }
339 }
340
341 desc += 8;
342
343 if (gsvs_ring_bo) {
344 uint64_t gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
345
346 /* VS entry for GS->VS ring */
347 /* stride 0, num records - size, elsize0,
348 index stride 0 */
349 desc[0] = gsvs_va;
350 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32);
351 desc[2] = gsvs_ring_size;
352 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
353 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
354
355 if (device->physical_device->rad_info.gfx_level >= GFX11) {
356 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
357 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
358 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
359 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
360 } else {
361 desc[3] |=
362 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
363 }
364
365 /* stride gsvs_itemsize, num records 64
366 elsize 4, index stride 16 */
367 /* shader will patch stride and desc[2] */
368 desc[4] = gsvs_va;
369 desc[5] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32);
370 desc[6] = 0;
371 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
372 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
373 S_008F0C_INDEX_STRIDE(1) | S_008F0C_ADD_TID_ENABLE(true);
374
375 if (device->physical_device->rad_info.gfx_level >= GFX11)
376 desc[5] |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
377 else
378 desc[5] |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
379
380 if (device->physical_device->rad_info.gfx_level >= GFX11) {
381 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
382 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
383 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
384 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
385 } else if (device->physical_device->rad_info.gfx_level >= GFX8) {
386 /* DATA_FORMAT is STRIDE[14:17] for MUBUF with ADD_TID_ENABLE=1 */
387 desc[7] |=
388 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(0) | S_008F0C_ELEMENT_SIZE(1);
389 } else {
390 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
391 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) | S_008F0C_ELEMENT_SIZE(1);
392 }
393 }
394
395 desc += 8;
396
397 if (tess_rings_bo) {
398 uint64_t tess_va = radv_buffer_get_va(tess_rings_bo);
399 uint64_t tess_offchip_va = tess_va + device->physical_device->hs.tess_offchip_ring_offset;
400
401 desc[0] = tess_va;
402 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32);
403 desc[2] = device->physical_device->hs.tess_factor_ring_size;
404 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
405 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
406
407 if (device->physical_device->rad_info.gfx_level >= GFX11) {
408 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW);
409 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
410 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
411 S_008F0C_RESOURCE_LEVEL(1);
412 } else {
413 desc[3] |=
414 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
415 }
416
417 desc[4] = tess_offchip_va;
418 desc[5] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32);
419 desc[6] = device->physical_device->hs.tess_offchip_ring_size;
420 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
421 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
422
423 if (device->physical_device->rad_info.gfx_level >= GFX11) {
424 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW);
425 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
426 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
427 S_008F0C_RESOURCE_LEVEL(1);
428 } else {
429 desc[7] |=
430 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
431 }
432 }
433
434 desc += 8;
435
436 if (task_rings_bo) {
437 uint64_t task_va = radv_buffer_get_va(task_rings_bo);
438 uint64_t task_draw_ring_va = task_va + device->physical_device->task_info.draw_ring_offset;
439 uint64_t task_payload_ring_va = task_va + device->physical_device->task_info.payload_ring_offset;
440
441 desc[0] = task_draw_ring_va;
442 desc[1] = S_008F04_BASE_ADDRESS_HI(task_draw_ring_va >> 32);
443 desc[2] = device->physical_device->task_info.num_entries * AC_TASK_DRAW_ENTRY_BYTES;
444 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
445 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
446
447 if (device->physical_device->rad_info.gfx_level >= GFX11) {
448 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_UINT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
449 } else {
450 assert(device->physical_device->rad_info.gfx_level >= GFX10_3);
451 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_UINT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
452 S_008F0C_RESOURCE_LEVEL(1);
453 }
454
455 desc[4] = task_payload_ring_va;
456 desc[5] = S_008F04_BASE_ADDRESS_HI(task_payload_ring_va >> 32);
457 desc[6] = device->physical_device->task_info.num_entries * AC_TASK_PAYLOAD_ENTRY_BYTES;
458 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
459 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
460
461 if (device->physical_device->rad_info.gfx_level >= GFX11) {
462 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_UINT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
463 } else {
464 assert(device->physical_device->rad_info.gfx_level >= GFX10_3);
465 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_UINT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
466 S_008F0C_RESOURCE_LEVEL(1);
467 }
468 }
469
470 desc += 8;
471
472 if (mesh_scratch_ring_bo) {
473 uint64_t va = radv_buffer_get_va(mesh_scratch_ring_bo);
474
475 desc[0] = va;
476 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
477 desc[2] = RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES;
478 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
479 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
480
481 if (device->physical_device->rad_info.gfx_level >= GFX11) {
482 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_UINT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
483 } else {
484 assert(device->physical_device->rad_info.gfx_level >= GFX10_3);
485 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_UINT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
486 S_008F0C_RESOURCE_LEVEL(1);
487 }
488 }
489
490 desc += 4;
491
492 if (attr_ring_bo) {
493 assert(device->physical_device->rad_info.gfx_level >= GFX11);
494
495 uint64_t va = radv_buffer_get_va(attr_ring_bo);
496
497 desc[0] = va;
498 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_SWIZZLE_ENABLE_GFX11(3) /* 16B */;
499 desc[2] = attr_ring_size;
500 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
501 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
502 S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_32_32_32_FLOAT) | S_008F0C_INDEX_STRIDE(2) /* 32 elements */;
503 }
504
505 desc += 4;
506
507 /* add sample positions after all rings */
508 memcpy(desc, device->sample_locations_1x, 8);
509 desc += 2;
510 memcpy(desc, device->sample_locations_2x, 16);
511 desc += 4;
512 memcpy(desc, device->sample_locations_4x, 32);
513 desc += 8;
514 memcpy(desc, device->sample_locations_8x, 64);
515 }
516
517 static void
518 radv_emit_gs_ring_sizes(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *esgs_ring_bo,
519 uint32_t esgs_ring_size, struct radeon_winsys_bo *gsvs_ring_bo, uint32_t gsvs_ring_size)
520 {
521 if (!esgs_ring_bo && !gsvs_ring_bo)
522 return;
523
524 if (esgs_ring_bo)
525 radv_cs_add_buffer(device->ws, cs, esgs_ring_bo);
526
527 if (gsvs_ring_bo)
528 radv_cs_add_buffer(device->ws, cs, gsvs_ring_bo);
529
530 if (device->physical_device->rad_info.gfx_level >= GFX7) {
531 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
532 radeon_emit(cs, esgs_ring_size >> 8);
533 radeon_emit(cs, gsvs_ring_size >> 8);
534 } else {
535 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
536 radeon_emit(cs, esgs_ring_size >> 8);
537 radeon_emit(cs, gsvs_ring_size >> 8);
538 }
539 }
540
541 static void
542 radv_emit_tess_factor_ring(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *tess_rings_bo)
543 {
544 uint64_t tf_va;
545 uint32_t tf_ring_size;
546 if (!tess_rings_bo)
547 return;
548
549 tf_ring_size = device->physical_device->hs.tess_factor_ring_size / 4;
550 tf_va = radv_buffer_get_va(tess_rings_bo);
551
552 radv_cs_add_buffer(device->ws, cs, tess_rings_bo);
553
554 if (device->physical_device->rad_info.gfx_level >= GFX7) {
555 if (device->physical_device->rad_info.gfx_level >= GFX11) {
556 /* TF_RING_SIZE is per SE on GFX11. */
557 tf_ring_size /= device->physical_device->rad_info.max_se;
558 }
559
560 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE, S_030938_SIZE(tf_ring_size));
561 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE, tf_va >> 8);
562
563 if (device->physical_device->rad_info.gfx_level >= GFX10) {
564 radeon_set_uconfig_reg(cs, R_030984_VGT_TF_MEMORY_BASE_HI, S_030984_BASE_HI(tf_va >> 40));
565 } else if (device->physical_device->rad_info.gfx_level == GFX9) {
566 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI, S_030944_BASE_HI(tf_va >> 40));
567 }
568
569 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, device->physical_device->hs.hs_offchip_param);
570 } else {
571 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE, S_008988_SIZE(tf_ring_size));
572 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE, tf_va >> 8);
573 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM, device->physical_device->hs.hs_offchip_param);
574 }
575 }
576
577 static VkResult
578 radv_initialise_task_control_buffer(struct radv_device *device, struct radeon_winsys_bo *task_rings_bo)
579 {
580 uint32_t *ptr = (uint32_t *)device->ws->buffer_map(task_rings_bo);
581 if (!ptr)
582 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
583
584 const uint32_t num_entries = device->physical_device->task_info.num_entries;
585 const uint64_t task_va = radv_buffer_get_va(task_rings_bo);
586 const uint64_t task_draw_ring_va = task_va + device->physical_device->task_info.draw_ring_offset;
587 assert((task_draw_ring_va & 0xFFFFFF00) == (task_draw_ring_va & 0xFFFFFFFF));
588
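/* Control buffer layout consumed by the CP through the DISPATCH_TASK_STATE_INIT packet
 * (see radv_emit_task_rings): three 64-bit ring pointers, the entry count and the
 * 64-bit draw ring address.
 */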
589 /* 64-bit write_ptr */
590 ptr[0] = num_entries;
591 ptr[1] = 0;
592 /* 64-bit read_ptr */
593 ptr[2] = num_entries;
594 ptr[3] = 0;
595 /* 64-bit dealloc_ptr */
596 ptr[4] = num_entries;
597 ptr[5] = 0;
598 /* num_entries */
599 ptr[6] = num_entries;
600 /* 64-bit draw ring address */
601 ptr[7] = task_draw_ring_va;
602 ptr[8] = task_draw_ring_va >> 32;
603
604 device->ws->buffer_unmap(task_rings_bo);
605 return VK_SUCCESS;
606 }
607
608 static void
609 radv_emit_task_rings(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *task_rings_bo,
610 bool compute)
611 {
612 if (!task_rings_bo)
613 return;
614
615 const uint64_t task_ctrlbuf_va = radv_buffer_get_va(task_rings_bo);
616 assert(radv_is_aligned(task_ctrlbuf_va, 256));
617 radv_cs_add_buffer(device->ws, cs, task_rings_bo);
618
619 /* Tell the GPU where the task control buffer is. */
620 radeon_emit(cs, PKT3(PKT3_DISPATCH_TASK_STATE_INIT, 1, 0) | PKT3_SHADER_TYPE_S(!!compute));
621 /* bits [31:8]: control buffer address lo, bits[7:0]: reserved (set to zero) */
622 radeon_emit(cs, task_ctrlbuf_va & 0xFFFFFF00);
623 /* bits [31:0]: control buffer address hi */
624 radeon_emit(cs, task_ctrlbuf_va >> 32);
625 }
626
627 static void
628 radv_emit_graphics_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
629 struct radeon_winsys_bo *scratch_bo)
630 {
631 const struct radeon_info *info = &device->physical_device->rad_info;
632
633 if (!scratch_bo)
634 return;
635
636 radv_cs_add_buffer(device->ws, cs, scratch_bo);
637
638 if (info->gfx_level >= GFX11) {
639 uint64_t va = radv_buffer_get_va(scratch_bo);
640
641 /* WAVES is per SE for SPI_TMPRING_SIZE. */
642 waves /= info->num_se;
643
644 radeon_set_context_reg_seq(cs, R_0286E8_SPI_TMPRING_SIZE, 3);
645 radeon_emit(cs, S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 256)));
646 radeon_emit(cs, va >> 8); /* SPI_GFX_SCRATCH_BASE_LO */
647 radeon_emit(cs, va >> 40); /* SPI_GFX_SCRATCH_BASE_HI */
648 } else {
649 radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
650 S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 1024)));
651 }
652 }
653
654 static void
655 radv_emit_compute_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
656 struct radeon_winsys_bo *compute_scratch_bo)
657 {
658 const struct radeon_info *info = &device->physical_device->rad_info;
659 uint64_t scratch_va;
660 uint32_t rsrc1;
661
662 if (!compute_scratch_bo)
663 return;
664
665 scratch_va = radv_buffer_get_va(compute_scratch_bo);
666 rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
667
668 if (info->gfx_level >= GFX11)
669 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
670 else
671 rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
672
673 radv_cs_add_buffer(device->ws, cs, compute_scratch_bo);
674
675 if (info->gfx_level >= GFX11) {
676 radeon_set_sh_reg_seq(cs, R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO, 2);
677 radeon_emit(cs, scratch_va >> 8);
678 radeon_emit(cs, scratch_va >> 40);
679
680 waves /= info->num_se;
681 }
682
683 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
684 radeon_emit(cs, scratch_va);
685 radeon_emit(cs, rsrc1);
686
687 radeon_set_sh_reg(
688 cs, R_00B860_COMPUTE_TMPRING_SIZE,
689 S_00B860_WAVES(waves) | S_00B860_WAVESIZE(DIV_ROUND_UP(size_per_wave, info->gfx_level >= GFX11 ? 256 : 1024)));
690 }
691
692 static void
693 radv_emit_compute_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
694 struct radeon_winsys_bo *descriptor_bo)
695 {
696 if (!descriptor_bo)
697 return;
698
699 uint64_t va = radv_buffer_get_va(descriptor_bo);
700 radv_cs_add_buffer(device->ws, cs, descriptor_bo);
701
702 /* Compute shader user data 0-1 have the scratch pointer (unlike GFX shaders),
703 * so emit the descriptor pointer to user data 2-3 instead (task_ring_offsets arg).
704 */
705 radv_emit_shader_pointer(device, cs, R_00B908_COMPUTE_USER_DATA_2, va, true);
706 }
707
708 static void
709 radv_emit_graphics_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
710 struct radeon_winsys_bo *descriptor_bo)
711 {
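/* Point the user data of every hardware graphics stage at the ring descriptor BO filled
 * by radv_fill_shader_rings(). The register list differs per generation because the set
 * of hardware shader stages changed (e.g. merged stages on GFX9+).
 */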
712 uint64_t va;
713
714 if (!descriptor_bo)
715 return;
716
717 va = radv_buffer_get_va(descriptor_bo);
718
719 radv_cs_add_buffer(device->ws, cs, descriptor_bo);
720
721 if (device->physical_device->rad_info.gfx_level >= GFX11) {
722 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B420_SPI_SHADER_PGM_LO_HS,
723 R_00B220_SPI_SHADER_PGM_LO_GS};
724
725 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
726 radv_emit_shader_pointer(device, cs, regs[i], va, true);
727 }
728 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
729 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
730 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
731
732 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
733 radv_emit_shader_pointer(device, cs, regs[i], va, true);
734 }
735 } else if (device->physical_device->rad_info.gfx_level == GFX9) {
736 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
737 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
738
739 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
740 radv_emit_shader_pointer(device, cs, regs[i], va, true);
741 }
742 } else {
743 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
744 R_00B230_SPI_SHADER_USER_DATA_GS_0, R_00B330_SPI_SHADER_USER_DATA_ES_0,
745 R_00B430_SPI_SHADER_USER_DATA_HS_0, R_00B530_SPI_SHADER_USER_DATA_LS_0};
746
747 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
748 radv_emit_shader_pointer(device, cs, regs[i], va, true);
749 }
750 }
751 }
752
753 static void
754 radv_emit_attribute_ring(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *attr_ring_bo,
755 uint32_t attr_ring_size)
756 {
757 const struct radv_physical_device *pdevice = device->physical_device;
758 uint64_t va;
759
760 if (!attr_ring_bo)
761 return;
762
763 assert(pdevice->rad_info.gfx_level >= GFX11);
764
765 va = radv_buffer_get_va(attr_ring_bo);
766 assert((va >> 32) == pdevice->rad_info.address32_hi);
767
768 radv_cs_add_buffer(device->ws, cs, attr_ring_bo);
769
770 /* We must wait for idle using an EOP event before changing the attribute ring registers. Use the
771 * bottom-of-pipe EOP event, but increment the PWS counter instead of writing memory.
772 */
773 radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
774 radeon_emit(cs, S_490_EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | S_490_EVENT_INDEX(5) | S_490_PWS_ENABLE(1));
775 radeon_emit(cs, 0); /* DST_SEL, INT_SEL, DATA_SEL */
776 radeon_emit(cs, 0); /* ADDRESS_LO */
777 radeon_emit(cs, 0); /* ADDRESS_HI */
778 radeon_emit(cs, 0); /* DATA_LO */
779 radeon_emit(cs, 0); /* DATA_HI */
780 radeon_emit(cs, 0); /* INT_CTXID */
781
782 /* Wait for the PWS counter. */
783 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
784 radeon_emit(cs, S_580_PWS_STAGE_SEL(V_580_CP_ME) | S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) | S_580_PWS_ENA2(1) |
785 S_580_PWS_COUNT(0));
786 radeon_emit(cs, 0xffffffff); /* GCR_SIZE */
787 radeon_emit(cs, 0x01ffffff); /* GCR_SIZE_HI */
788 radeon_emit(cs, 0); /* GCR_BASE_LO */
789 radeon_emit(cs, 0); /* GCR_BASE_HI */
790 radeon_emit(cs, S_585_PWS_ENA(1));
791 radeon_emit(cs, 0); /* GCR_CNTL */
792
793 /* The PS will read inputs from this address. */
794 radeon_set_uconfig_reg(cs, R_031118_SPI_ATTRIBUTE_RING_BASE, va >> 16);
795 radeon_set_uconfig_reg(cs, R_03111C_SPI_ATTRIBUTE_RING_SIZE,
796 S_03111C_MEM_SIZE(((attr_ring_size / pdevice->rad_info.max_se) >> 16) - 1) |
797 S_03111C_BIG_PAGE(pdevice->rad_info.discardable_allows_big_page) | S_03111C_L1_POLICY(1));
798 }
799
800 static void
801 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_device *device)
802 {
803 if (device->gfx_init) {
804 struct radeon_winsys *ws = device->ws;
805
806 ws->cs_execute_ib(cs, device->gfx_init, 0, device->gfx_init_size_dw & 0xffff, false);
807
808 radv_cs_add_buffer(device->ws, cs, device->gfx_init);
809 } else {
810 radv_emit_graphics(device, cs);
811 }
812 }
813
814 static void
815 radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_device *device)
816 {
817 radv_emit_compute(device, cs);
818 }
819
820 static VkResult
821 radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *device,
822 const struct radv_queue_ring_info *needs)
823 {
824 struct radeon_winsys *ws = device->ws;
825 struct radeon_winsys_bo *scratch_bo = queue->scratch_bo;
826 struct radeon_winsys_bo *descriptor_bo = queue->descriptor_bo;
827 struct radeon_winsys_bo *compute_scratch_bo = queue->compute_scratch_bo;
828 struct radeon_winsys_bo *esgs_ring_bo = queue->esgs_ring_bo;
829 struct radeon_winsys_bo *gsvs_ring_bo = queue->gsvs_ring_bo;
830 struct radeon_winsys_bo *tess_rings_bo = queue->tess_rings_bo;
831 struct radeon_winsys_bo *task_rings_bo = queue->task_rings_bo;
832 struct radeon_winsys_bo *mesh_scratch_ring_bo = queue->mesh_scratch_ring_bo;
833 struct radeon_winsys_bo *attr_ring_bo = queue->attr_ring_bo;
834 struct radeon_winsys_bo *gds_bo = queue->gds_bo;
835 struct radeon_winsys_bo *gds_oa_bo = queue->gds_oa_bo;
836 struct radeon_cmdbuf *dest_cs[3] = {0};
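/* dest_cs[0..2] become the initial full-flush preamble, the initial preamble and the
 * continue preamble, respectively (see the assignments at the end of this function).
 */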
837 const uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
838 VkResult result = VK_SUCCESS;
839
840 const bool add_sample_positions = !queue->ring_info.sample_positions && needs->sample_positions;
841 const uint32_t scratch_size = needs->scratch_size_per_wave * needs->scratch_waves;
842 const uint32_t queue_scratch_size = queue->ring_info.scratch_size_per_wave * queue->ring_info.scratch_waves;
843
844 if (scratch_size > queue_scratch_size) {
845 result = ws->buffer_create(ws, scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0,
846 &scratch_bo);
847 if (result != VK_SUCCESS)
848 goto fail;
849 radv_rmv_log_command_buffer_bo_create(device, scratch_bo, 0, 0, scratch_size);
850 }
851
852 const uint32_t compute_scratch_size = needs->compute_scratch_size_per_wave * needs->compute_scratch_waves;
853 const uint32_t compute_queue_scratch_size =
854 queue->ring_info.compute_scratch_size_per_wave * queue->ring_info.compute_scratch_waves;
855 if (compute_scratch_size > compute_queue_scratch_size) {
856 result = ws->buffer_create(ws, compute_scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
857 RADV_BO_PRIORITY_SCRATCH, 0, &compute_scratch_bo);
858 if (result != VK_SUCCESS)
859 goto fail;
860 radv_rmv_log_command_buffer_bo_create(device, compute_scratch_bo, 0, 0, compute_scratch_size);
861 }
862
863 if (needs->esgs_ring_size > queue->ring_info.esgs_ring_size) {
864 result = ws->buffer_create(ws, needs->esgs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
865 RADV_BO_PRIORITY_SCRATCH, 0, &esgs_ring_bo);
866 if (result != VK_SUCCESS)
867 goto fail;
868 radv_rmv_log_command_buffer_bo_create(device, esgs_ring_bo, 0, 0, needs->esgs_ring_size);
869 }
870
871 if (needs->gsvs_ring_size > queue->ring_info.gsvs_ring_size) {
872 result = ws->buffer_create(ws, needs->gsvs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
873 RADV_BO_PRIORITY_SCRATCH, 0, &gsvs_ring_bo);
874 if (result != VK_SUCCESS)
875 goto fail;
876 radv_rmv_log_command_buffer_bo_create(device, gsvs_ring_bo, 0, 0, needs->gsvs_ring_size);
877 }
878
879 if (!queue->ring_info.tess_rings && needs->tess_rings) {
880 uint64_t tess_rings_size =
881 device->physical_device->hs.tess_offchip_ring_offset + device->physical_device->hs.tess_offchip_ring_size;
882 result = ws->buffer_create(ws, tess_rings_size, 256, RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH,
883 0, &tess_rings_bo);
884 if (result != VK_SUCCESS)
885 goto fail;
886 radv_rmv_log_command_buffer_bo_create(device, tess_rings_bo, 0, 0, tess_rings_size);
887 }
888
889 if (!queue->ring_info.task_rings && needs->task_rings) {
890 assert(device->physical_device->rad_info.gfx_level >= GFX10_3);
891
892 /* We write the control buffer from the CPU, so we need to grant CPU access to the BO.
893 * The draw ring needs to be zero-initialized, otherwise the ready bits will be incorrect.
894 */
895 uint32_t task_rings_bo_flags =
896 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM;
897
898 result = ws->buffer_create(ws, device->physical_device->task_info.bo_size_bytes, 256, RADEON_DOMAIN_VRAM,
899 task_rings_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &task_rings_bo);
900 if (result != VK_SUCCESS)
901 goto fail;
902 radv_rmv_log_command_buffer_bo_create(device, task_rings_bo, 0, 0,
903 device->physical_device->task_info.bo_size_bytes);
904
905 result = radv_initialise_task_control_buffer(device, task_rings_bo);
906 if (result != VK_SUCCESS)
907 goto fail;
908 }
909
910 if (!queue->ring_info.mesh_scratch_ring && needs->mesh_scratch_ring) {
911 assert(device->physical_device->rad_info.gfx_level >= GFX10_3);
912 result = ws->buffer_create(ws, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES, 256,
913 RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &mesh_scratch_ring_bo);
914
915 if (result != VK_SUCCESS)
916 goto fail;
917 radv_rmv_log_command_buffer_bo_create(device, mesh_scratch_ring_bo, 0, 0,
918 RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES);
919 }
920
921 if (needs->attr_ring_size > queue->ring_info.attr_ring_size) {
922 assert(device->physical_device->rad_info.gfx_level >= GFX11);
923 result = ws->buffer_create(ws, needs->attr_ring_size, 2 * 1024 * 1024 /* 2MiB */, RADEON_DOMAIN_VRAM,
924 RADEON_FLAG_32BIT | RADEON_FLAG_DISCARDABLE | ring_bo_flags, RADV_BO_PRIORITY_SCRATCH,
925 0, &attr_ring_bo);
926 if (result != VK_SUCCESS)
927 goto fail;
928 radv_rmv_log_command_buffer_bo_create(device, attr_ring_bo, 0, 0, needs->attr_ring_size);
929 }
930
931 if (!queue->ring_info.gds && needs->gds) {
932 assert(device->physical_device->rad_info.gfx_level >= GFX10);
933
934 /* 4 streamout GDS counters.
935 * We need 256B (64 dw) of GDS, otherwise streamout hangs.
936 */
937 result = ws->buffer_create(ws, 256, 4, RADEON_DOMAIN_GDS, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &gds_bo);
938 if (result != VK_SUCCESS)
939 goto fail;
940
941 /* Add the GDS BO to our global BO list to prevent the kernel from emitting a GDS switch
942 * and resetting the state when a compute queue is used.
943 */
944 result = device->ws->buffer_make_resident(ws, gds_bo, true);
945 if (result != VK_SUCCESS)
946 goto fail;
947 }
948
949 if (!queue->ring_info.gds_oa && needs->gds_oa) {
950 assert(device->physical_device->rad_info.gfx_level >= GFX10);
951
952 result = ws->buffer_create(ws, 1, 1, RADEON_DOMAIN_OA, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &gds_oa_bo);
953 if (result != VK_SUCCESS)
954 goto fail;
955
956 /* Add the GDS OA BO to our global BO list to prevent the kernel from emitting a GDS switch
957 * and resetting the state when a compute queue is used.
958 */
959 result = device->ws->buffer_make_resident(ws, gds_oa_bo, true);
960 if (result != VK_SUCCESS)
961 goto fail;
962 }
963
964 /* Re-initialize the descriptor BO when any of the ring BOs changed.
965 *
966 * Additionally, make sure to create the descriptor BO for the compute queue
967 * when it uses the task shader rings. The task rings BO is shared between the
968 * GFX and compute queues and is already initialized at this point.
969 */
970 if ((queue->qf == RADV_QUEUE_COMPUTE && !descriptor_bo && task_rings_bo) || scratch_bo != queue->scratch_bo ||
971 esgs_ring_bo != queue->esgs_ring_bo || gsvs_ring_bo != queue->gsvs_ring_bo ||
972 tess_rings_bo != queue->tess_rings_bo || task_rings_bo != queue->task_rings_bo ||
973 mesh_scratch_ring_bo != queue->mesh_scratch_ring_bo || attr_ring_bo != queue->attr_ring_bo ||
974 add_sample_positions) {
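/* 304 bytes is enough for all ring descriptors written by radv_fill_shader_rings()
 * plus the trailing sample position tables.
 */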
975 const uint32_t size = 304;
976
977 result = ws->buffer_create(ws, size, 4096, RADEON_DOMAIN_VRAM,
978 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
979 RADV_BO_PRIORITY_DESCRIPTOR, 0, &descriptor_bo);
980 if (result != VK_SUCCESS)
981 goto fail;
982 }
983
984 if (descriptor_bo != queue->descriptor_bo) {
985 uint32_t *map = (uint32_t *)ws->buffer_map(descriptor_bo);
986 if (!map) {
987 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
988 goto fail;
989 }
990
991 radv_fill_shader_rings(device, map, scratch_bo, needs->esgs_ring_size, esgs_ring_bo, needs->gsvs_ring_size,
992 gsvs_ring_bo, tess_rings_bo, task_rings_bo, mesh_scratch_ring_bo, needs->attr_ring_size,
993 attr_ring_bo);
994
995 ws->buffer_unmap(descriptor_bo);
996 }
997
998 for (int i = 0; i < 3; ++i) {
999 enum rgp_flush_bits sqtt_flush_bits = 0;
1000 struct radeon_cmdbuf *cs = NULL;
1001 cs = ws->cs_create(ws, radv_queue_family_to_ring(device->physical_device, queue->qf), false);
1002 if (!cs) {
1003 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1004 goto fail;
1005 }
1006
1007 radeon_check_space(ws, cs, 512);
1008 dest_cs[i] = cs;
1009
1010 if (scratch_bo)
1011 radv_cs_add_buffer(ws, cs, scratch_bo);
1012
1013 /* Emit initial configuration. */
1014 switch (queue->qf) {
1015 case RADV_QUEUE_GENERAL:
1016 if (queue->uses_shadow_regs)
1017 radv_emit_shadow_regs_preamble(cs, device, queue);
1018 radv_init_graphics_state(cs, device);
1019
1020 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo || task_rings_bo) {
1021 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1022 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1023
1024 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1025 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1026 }
1027
1028 radv_emit_gs_ring_sizes(device, cs, esgs_ring_bo, needs->esgs_ring_size, gsvs_ring_bo, needs->gsvs_ring_size);
1029 radv_emit_tess_factor_ring(device, cs, tess_rings_bo);
1030 radv_emit_task_rings(device, cs, task_rings_bo, false);
1031 radv_emit_attribute_ring(device, cs, attr_ring_bo, needs->attr_ring_size);
1032 radv_emit_graphics_shader_pointers(device, cs, descriptor_bo);
1033 radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1034 compute_scratch_bo);
1035 radv_emit_graphics_scratch(device, cs, needs->scratch_size_per_wave, needs->scratch_waves, scratch_bo);
1036 break;
1037 case RADV_QUEUE_COMPUTE:
1038 radv_init_compute_state(cs, device);
1039
1040 if (task_rings_bo) {
1041 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1042 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1043 }
1044
1045 radv_emit_task_rings(device, cs, task_rings_bo, true);
1046 radv_emit_compute_shader_pointers(device, cs, descriptor_bo);
1047 radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1048 compute_scratch_bo);
1049 break;
1050 default:
1051 break;
1052 }
1053
1054 if (i < 2) {
1055 /* The two initial preambles have a cache flush at the beginning. */
1056 const enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;
1057 enum radv_cmd_flush_bits flush_bits = RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE |
1058 RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2 |
1059 RADV_CMD_FLAG_START_PIPELINE_STATS;
1060
1061 if (i == 0) {
1062 /* The full flush preamble should also wait for previous shader work to finish. */
1063 flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1064 if (queue->qf == RADV_QUEUE_GENERAL)
1065 flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1066 }
1067
1068 radv_cs_emit_cache_flush(ws, cs, gfx_level, NULL, 0, queue->qf, flush_bits, &sqtt_flush_bits, 0);
1069 }
1070
1071 result = ws->cs_finalize(cs);
1072 if (result != VK_SUCCESS)
1073 goto fail;
1074 }
1075
1076 if (queue->initial_full_flush_preamble_cs)
1077 ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1078
1079 if (queue->initial_preamble_cs)
1080 ws->cs_destroy(queue->initial_preamble_cs);
1081
1082 if (queue->continue_preamble_cs)
1083 ws->cs_destroy(queue->continue_preamble_cs);
1084
1085 queue->initial_full_flush_preamble_cs = dest_cs[0];
1086 queue->initial_preamble_cs = dest_cs[1];
1087 queue->continue_preamble_cs = dest_cs[2];
1088
1089 if (scratch_bo != queue->scratch_bo) {
1090 if (queue->scratch_bo) {
1091 ws->buffer_destroy(ws, queue->scratch_bo);
1092 radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
1093 }
1094 queue->scratch_bo = scratch_bo;
1095 }
1096
1097 if (compute_scratch_bo != queue->compute_scratch_bo) {
1098 if (queue->compute_scratch_bo) {
1099 ws->buffer_destroy(ws, queue->compute_scratch_bo);
1100 radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
1101 }
1102 queue->compute_scratch_bo = compute_scratch_bo;
1103 }
1104
1105 if (esgs_ring_bo != queue->esgs_ring_bo) {
1106 if (queue->esgs_ring_bo) {
1107 ws->buffer_destroy(ws, queue->esgs_ring_bo);
1108 radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
1109 }
1110 queue->esgs_ring_bo = esgs_ring_bo;
1111 }
1112
1113 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1114 if (queue->gsvs_ring_bo) {
1115 ws->buffer_destroy(ws, queue->gsvs_ring_bo);
1116 radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
1117 }
1118 queue->gsvs_ring_bo = gsvs_ring_bo;
1119 }
1120
1121 if (descriptor_bo != queue->descriptor_bo) {
1122 if (queue->descriptor_bo)
1123 ws->buffer_destroy(ws, queue->descriptor_bo);
1124 queue->descriptor_bo = descriptor_bo;
1125 }
1126
1127 queue->tess_rings_bo = tess_rings_bo;
1128 queue->task_rings_bo = task_rings_bo;
1129 queue->mesh_scratch_ring_bo = mesh_scratch_ring_bo;
1130 queue->attr_ring_bo = attr_ring_bo;
1131 queue->gds_bo = gds_bo;
1132 queue->gds_oa_bo = gds_oa_bo;
1133 queue->ring_info = *needs;
1134 return VK_SUCCESS;
1135 fail:
1136 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1137 if (dest_cs[i])
1138 ws->cs_destroy(dest_cs[i]);
1139 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1140 ws->buffer_destroy(ws, descriptor_bo);
1141 if (scratch_bo && scratch_bo != queue->scratch_bo)
1142 ws->buffer_destroy(ws, scratch_bo);
1143 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1144 ws->buffer_destroy(ws, compute_scratch_bo);
1145 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1146 ws->buffer_destroy(ws, esgs_ring_bo);
1147 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1148 ws->buffer_destroy(ws, gsvs_ring_bo);
1149 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
1150 ws->buffer_destroy(ws, tess_rings_bo);
1151 if (task_rings_bo && task_rings_bo != queue->task_rings_bo)
1152 ws->buffer_destroy(ws, task_rings_bo);
1153 if (attr_ring_bo && attr_ring_bo != queue->attr_ring_bo)
1154 ws->buffer_destroy(ws, attr_ring_bo);
1155 if (gds_bo && gds_bo != queue->gds_bo) {
1156 ws->buffer_make_resident(ws, queue->gds_bo, false);
1157 ws->buffer_destroy(ws, gds_bo);
1158 }
1159 if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo) {
1160 ws->buffer_make_resident(ws, queue->gds_oa_bo, false);
1161 ws->buffer_destroy(ws, gds_oa_bo);
1162 }
1163
1164 return vk_error(queue, result);
1165 }
1166
1167 static VkResult
1168 radv_update_preambles(struct radv_queue_state *queue, struct radv_device *device,
1169 struct vk_command_buffer *const *cmd_buffers, uint32_t cmd_buffer_count, bool *use_perf_counters,
1170 bool *has_follower)
1171 {
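/* Only the GFX and compute queues use the shader rings and preambles; for any other
 * queue family we just need to know whether one of the command buffers has a gang
 * follower.
 */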
1172 if (queue->qf != RADV_QUEUE_GENERAL && queue->qf != RADV_QUEUE_COMPUTE) {
1173 for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1174 struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1175
1176 *has_follower |= !!cmd_buffer->gang.cs;
1177 }
1178
1179 return VK_SUCCESS;
1180 }
1181
1182 /* Figure out the needs of the current submission.
1183 * Start by copying the queue's current info.
1184 * This is done because we only allow two possible behaviours for these buffers:
1185 * - Grow when the newly needed amount is larger than what we had
1186 * - Allocate the max size and reuse it, but don't free it until the queue is destroyed
1187 */
1188 struct radv_queue_ring_info needs = queue->ring_info;
1189 *use_perf_counters = false;
1190 *has_follower = false;
1191
1192 for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1193 struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1194
1195 needs.scratch_size_per_wave = MAX2(needs.scratch_size_per_wave, cmd_buffer->scratch_size_per_wave_needed);
1196 needs.scratch_waves = MAX2(needs.scratch_waves, cmd_buffer->scratch_waves_wanted);
1197 needs.compute_scratch_size_per_wave =
1198 MAX2(needs.compute_scratch_size_per_wave, cmd_buffer->compute_scratch_size_per_wave_needed);
1199 needs.compute_scratch_waves = MAX2(needs.compute_scratch_waves, cmd_buffer->compute_scratch_waves_wanted);
1200 needs.esgs_ring_size = MAX2(needs.esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1201 needs.gsvs_ring_size = MAX2(needs.gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1202 needs.tess_rings |= cmd_buffer->tess_rings_needed;
1203 needs.task_rings |= cmd_buffer->task_rings_needed;
1204 needs.mesh_scratch_ring |= cmd_buffer->mesh_scratch_ring_needed;
1205 needs.gds |= cmd_buffer->gds_needed;
1206 needs.gds_oa |= cmd_buffer->gds_oa_needed;
1207 needs.sample_positions |= cmd_buffer->sample_positions_needed;
1208 *use_perf_counters |= cmd_buffer->state.uses_perf_counters;
1209 *has_follower |= !!cmd_buffer->gang.cs;
1210 }
1211
1212 /* Sanitize scratch size information. */
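/* Clamp the wave counts so that waves * size_per_wave cannot overflow the 32-bit total
 * scratch size computed in radv_update_preamble_cs().
 */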
1213 needs.scratch_waves =
1214 needs.scratch_size_per_wave ? MIN2(needs.scratch_waves, UINT32_MAX / needs.scratch_size_per_wave) : 0;
1215 needs.compute_scratch_waves =
1216 needs.compute_scratch_size_per_wave
1217 ? MIN2(needs.compute_scratch_waves, UINT32_MAX / needs.compute_scratch_size_per_wave)
1218 : 0;
1219
1220 if (device->physical_device->rad_info.gfx_level >= GFX11 && queue->qf == RADV_QUEUE_GENERAL) {
1221 needs.attr_ring_size =
1222 device->physical_device->rad_info.attribute_ring_size_per_se * device->physical_device->rad_info.max_se;
1223 }
1224
1225 /* Return early if we already match these needs.
1226 * Note that it's not possible for any of the needed values to be less
1227 * than what the queue already had, because we only ever increase the allocated size.
1228 */
1229 if (queue->initial_full_flush_preamble_cs && queue->ring_info.scratch_size_per_wave == needs.scratch_size_per_wave &&
1230 queue->ring_info.scratch_waves == needs.scratch_waves &&
1231 queue->ring_info.compute_scratch_size_per_wave == needs.compute_scratch_size_per_wave &&
1232 queue->ring_info.compute_scratch_waves == needs.compute_scratch_waves &&
1233 queue->ring_info.esgs_ring_size == needs.esgs_ring_size &&
1234 queue->ring_info.gsvs_ring_size == needs.gsvs_ring_size && queue->ring_info.tess_rings == needs.tess_rings &&
1235 queue->ring_info.task_rings == needs.task_rings &&
1236 queue->ring_info.mesh_scratch_ring == needs.mesh_scratch_ring &&
1237 queue->ring_info.attr_ring_size == needs.attr_ring_size && queue->ring_info.gds == needs.gds &&
1238 queue->ring_info.gds_oa == needs.gds_oa && queue->ring_info.sample_positions == needs.sample_positions)
1239 return VK_SUCCESS;
1240
1241 return radv_update_preamble_cs(queue, device, &needs);
1242 }
1243
1244 static VkResult
1245 radv_create_gang_wait_preambles_postambles(struct radv_queue *queue)
1246 {
1247 if (queue->gang_sem_bo)
1248 return VK_SUCCESS;
1249
1250 VkResult r = VK_SUCCESS;
1251 struct radv_device *device = queue->device;
1252 struct radeon_winsys *ws = device->ws;
1253 const enum amd_ip_type leader_ip = radv_queue_family_to_ring(device->physical_device, queue->state.qf);
1254 struct radeon_winsys_bo *gang_sem_bo = NULL;
1255
1256 /* Gang semaphores BO.
1257 * DWORD 0: used in preambles, gang leader writes, gang members wait.
1258 * DWORD 1: used in postambles, gang leader waits, gang members write.
1259 */
1260 r = ws->buffer_create(ws, 8, 4, RADEON_DOMAIN_VRAM, RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
1261 RADV_BO_PRIORITY_SCRATCH, 0, &gang_sem_bo);
1262 if (r != VK_SUCCESS)
1263 return r;
1264
1265 struct radeon_cmdbuf *leader_pre_cs = ws->cs_create(ws, leader_ip, false);
1266 struct radeon_cmdbuf *leader_post_cs = ws->cs_create(ws, leader_ip, false);
1267 struct radeon_cmdbuf *ace_pre_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1268 struct radeon_cmdbuf *ace_post_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1269
1270 if (!leader_pre_cs || !leader_post_cs || !ace_pre_cs || !ace_post_cs) {
1271 r = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1272 goto fail;
1273 }
1274
1275 radeon_check_space(ws, leader_pre_cs, 256);
1276 radeon_check_space(ws, leader_post_cs, 256);
1277 radeon_check_space(ws, ace_pre_cs, 256);
1278 radeon_check_space(ws, ace_post_cs, 256);
1279
1280 radv_cs_add_buffer(ws, leader_pre_cs, gang_sem_bo);
1281 radv_cs_add_buffer(ws, leader_post_cs, gang_sem_bo);
1282 radv_cs_add_buffer(ws, ace_pre_cs, gang_sem_bo);
1283 radv_cs_add_buffer(ws, ace_post_cs, gang_sem_bo);
1284
1285 const uint64_t ace_wait_va = radv_buffer_get_va(gang_sem_bo);
1286 const uint64_t leader_wait_va = ace_wait_va + 4;
1287 const uint32_t zero = 0;
1288 const uint32_t one = 1;
1289
1290 /* Preambles for gang submission.
1291 * Make gang members wait until the gang leader starts.
1292 * Userspace is required to emit this wait so that the submission behaves
1293 * correctly in a multi-process environment, because task shader dispatches
1294 * are not meant to execute on multiple compute engines at the same time.
1295 */
1296 radv_cp_wait_mem(ace_pre_cs, RADV_QUEUE_COMPUTE, WAIT_REG_MEM_GREATER_OR_EQUAL, ace_wait_va, 1, 0xffffffff);
1297 radv_cs_write_data(device, ace_pre_cs, RADV_QUEUE_COMPUTE, V_370_ME, ace_wait_va, 1, &zero, false);
1298 radv_cs_write_data(device, leader_pre_cs, queue->state.qf, V_370_ME, ace_wait_va, 1, &one, false);
1299
1300 /* Create postambles for gang submission.
1301 * These make the gang leader wait for the whole gang. This is necessary
1302 * because the kernel signals the userspace fence as soon as the gang leader
1303 * is done; without the wait, the same command buffers could be submitted
1304 * again while the followers are still executing them.
1305 */
1306 radv_cp_wait_mem(leader_post_cs, queue->state.qf, WAIT_REG_MEM_GREATER_OR_EQUAL, leader_wait_va, 1, 0xffffffff);
1307 radv_cs_write_data(device, leader_post_cs, queue->state.qf, V_370_ME, leader_wait_va, 1, &zero, false);
1308 radv_cs_emit_write_event_eop(ace_post_cs, device->physical_device->rad_info.gfx_level, RADV_QUEUE_COMPUTE,
1309 V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT,
1310 leader_wait_va, 1, 0);
1311
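/* Finalize all four command streams; bail out if any of them fails. */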
1312 r = ws->cs_finalize(leader_pre_cs);
1313 if (r != VK_SUCCESS)
1314 goto fail;
1315 r = ws->cs_finalize(leader_post_cs);
1316 if (r != VK_SUCCESS)
1317 goto fail;
1318 r = ws->cs_finalize(ace_pre_cs);
1319 if (r != VK_SUCCESS)
1320 goto fail;
1321 r = ws->cs_finalize(ace_post_cs);
1322 if (r != VK_SUCCESS)
1323 goto fail;
1324
1325 queue->gang_sem_bo = gang_sem_bo;
1326 queue->state.gang_wait_preamble_cs = leader_pre_cs;
1327 queue->state.gang_wait_postamble_cs = leader_post_cs;
1328 queue->follower_state->gang_wait_preamble_cs = ace_pre_cs;
1329 queue->follower_state->gang_wait_postamble_cs = ace_post_cs;
1330
1331 return VK_SUCCESS;
1332
1333 fail:
1334 if (leader_pre_cs)
1335 ws->cs_destroy(leader_pre_cs);
1336 if (leader_post_cs)
1337 ws->cs_destroy(leader_post_cs);
1338 if (ace_pre_cs)
1339 ws->cs_destroy(ace_pre_cs);
1340 if (ace_post_cs)
1341 ws->cs_destroy(ace_post_cs);
1342 if (gang_sem_bo)
1343 ws->buffer_destroy(ws, gang_sem_bo);
1344
1345 return r;
1346 }
1347
1348 static bool
1349 radv_queue_init_follower_state(struct radv_queue *queue)
1350 {
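/* The follower state represents the internal ACE (compute) queue used for gang
 * submissions; it is created lazily on first use.
 */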
1351 if (queue->follower_state)
1352 return true;
1353
1354 queue->follower_state = calloc(1, sizeof(struct radv_queue_state));
1355 if (!queue->follower_state)
1356 return false;
1357
1358 queue->follower_state->qf = RADV_QUEUE_COMPUTE;
1359 return true;
1360 }
1361
1362 static VkResult
1363 radv_update_gang_preambles(struct radv_queue *queue)
1364 {
1365 if (!radv_queue_init_follower_state(queue))
1366 return VK_ERROR_OUT_OF_HOST_MEMORY;
1367
1368 VkResult r = VK_SUCCESS;
1369
1370 /* Copy task rings state.
1371 * Task shaders that are submitted on the ACE queue need to share
1372 * their ring buffers with the mesh shaders on the GFX queue.
1373 */
1374 queue->follower_state->ring_info.task_rings = queue->state.ring_info.task_rings;
1375 queue->follower_state->task_rings_bo = queue->state.task_rings_bo;
1376
1377 /* Copy the needed state from the parent queue state.
1378 * These values can only increase, so it's okay to copy them as-is without checking.
1379 * Note that task shaders use the scratch size from their graphics pipeline.
1380 */
1381 struct radv_queue_ring_info needs = queue->follower_state->ring_info;
1382 needs.compute_scratch_size_per_wave = queue->state.ring_info.scratch_size_per_wave;
1383 needs.compute_scratch_waves = queue->state.ring_info.scratch_waves;
1384 needs.task_rings = queue->state.ring_info.task_rings;
1385
1386 r = radv_update_preamble_cs(queue->follower_state, queue->device, &needs);
1387 if (r != VK_SUCCESS)
1388 return r;
1389
1390 r = radv_create_gang_wait_preambles_postambles(queue);
1391 if (r != VK_SUCCESS)
1392 return r;
1393
1394 return VK_SUCCESS;
1395 }
1396
1397 static struct radeon_cmdbuf *
1398 radv_create_perf_counter_lock_cs(struct radv_device *device, unsigned pass, bool unlock)
1399 {
1400 struct radeon_cmdbuf **cs_ref = &device->perf_counter_lock_cs[pass * 2 + (unlock ? 1 : 0)];
1401 struct radeon_cmdbuf *cs;
1402
1403 if (*cs_ref)
1404 return *cs_ref;
1405
1406 cs = device->ws->cs_create(device->ws, AMD_IP_GFX, false);
1407 if (!cs)
1408 return NULL;
1409
1410 ASSERTED unsigned cdw = radeon_check_space(device->ws, cs, 21);
1411
1412 radv_cs_add_buffer(device->ws, cs, device->perf_counter_bo);
1413
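/* Acquire the lock: loop a 32-bit atomic compare-and-swap (compare 0, swap in 1)
 * on the mutex dword until it succeeds.
 */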
1414 if (!unlock) {
1415 uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1416 radeon_emit(cs, PKT3(PKT3_ATOMIC_MEM, 7, 0));
1417 radeon_emit(cs, ATOMIC_OP(TC_OP_ATOMIC_CMPSWAP_32) | ATOMIC_COMMAND(ATOMIC_COMMAND_LOOP));
1418 radeon_emit(cs, mutex_va); /* addr lo */
1419 radeon_emit(cs, mutex_va >> 32); /* addr hi */
1420 radeon_emit(cs, 1); /* data lo */
1421 radeon_emit(cs, 0); /* data hi */
1422 radeon_emit(cs, 0); /* compare data lo */
1423 radeon_emit(cs, 0); /* compare data hi */
1424 radeon_emit(cs, 10); /* loop interval */
1425 }
1426
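/* Flip the per-pass selector values: clear one slot and set the other,
 * depending on whether this is the lock or the unlock CS.
 */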
1427 uint64_t va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_PASS_OFFSET;
1428 uint64_t unset_va = va + (unlock ? 8 * pass : 0);
1429 uint64_t set_va = va + (unlock ? 0 : 8 * pass);
1430
1431 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1432 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1433 COPY_DATA_WR_CONFIRM);
1434 radeon_emit(cs, 0); /* immediate */
1435 radeon_emit(cs, 0);
1436 radeon_emit(cs, unset_va);
1437 radeon_emit(cs, unset_va >> 32);
1438
1439 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1440 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1441 COPY_DATA_WR_CONFIRM);
1442 radeon_emit(cs, 1); /* immediate */
1443 radeon_emit(cs, 0);
1444 radeon_emit(cs, set_va);
1445 radeon_emit(cs, set_va >> 32);
1446
1447 if (unlock) {
1448 uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1449
1450 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1451 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1452 COPY_DATA_WR_CONFIRM);
1453 radeon_emit(cs, 0); /* immediate */
1454 radeon_emit(cs, 0);
1455 radeon_emit(cs, mutex_va);
1456 radeon_emit(cs, mutex_va >> 32);
1457 }
1458
1459 assert(cs->cdw <= cdw);
1460
1461 VkResult result = device->ws->cs_finalize(cs);
1462 if (result != VK_SUCCESS) {
1463 device->ws->cs_destroy(cs);
1464 return NULL;
1465 }
1466
1467 /* All the casts are to avoid MSVC errors around pointer truncation in a non-taken
1468 * alternative.
1469 */
1470 if (p_atomic_cmpxchg((uintptr_t *)cs_ref, 0, (uintptr_t)cs) != 0) {
1471 device->ws->cs_destroy(cs);
1472 }
1473
1474 return *cs_ref;
1475 }
1476
1477 static void
1478 radv_get_shader_upload_sync_wait(struct radv_device *device, uint64_t shader_upload_seq,
1479 struct vk_sync_wait *out_sync_wait)
1480 {
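/* Express the shader upload dependency as a wait on the device's shader upload semaphore. */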
1481 struct vk_semaphore *semaphore = vk_semaphore_from_handle(device->shader_upload_sem);
1482 struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
1483 *out_sync_wait = (struct vk_sync_wait){
1484 .sync = sync,
1485 .wait_value = shader_upload_seq,
1486 .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
1487 };
1488 }
1489
1490 static VkResult
1491 radv_queue_submit_normal(struct radv_queue *queue, struct vk_queue_submit *submission)
1492 {
1493 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1494 bool use_ace = false;
1495 bool use_perf_counters = false;
1496 VkResult result;
1497 uint64_t shader_upload_seq = 0;
1498 uint32_t wait_count = submission->wait_count;
1499 struct vk_sync_wait *waits = submission->waits;
1500
1501 result = radv_update_preambles(&queue->state, queue->device, submission->command_buffers,
1502 submission->command_buffer_count, &use_perf_counters, &use_ace);
1503 if (result != VK_SUCCESS)
1504 return result;
1505
1506 if (use_ace) {
1507 result = radv_update_gang_preambles(queue);
1508 if (result != VK_SUCCESS)
1509 return result;
1510 }
1511
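/* With fault detection enabled, submit one command buffer at a time so that a hang
 * can be narrowed down to a single command buffer.
 */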
1512 const unsigned cmd_buffer_count = submission->command_buffer_count;
1513 const unsigned max_cs_submission = radv_device_fault_detection_enabled(queue->device) ? 1 : cmd_buffer_count;
1514 const unsigned cs_array_size = (use_ace ? 2 : 1) * MIN2(max_cs_submission, cmd_buffer_count);
1515
1516 struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) * cs_array_size);
1517 if (!cs_array)
1518 return VK_ERROR_OUT_OF_HOST_MEMORY;
1519
1520 if (radv_device_fault_detection_enabled(queue->device))
1521 simple_mtx_lock(&queue->device->trace_mtx);
1522
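/* Determine the highest shader upload sequence number required by the submitted command buffers. */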
1523 for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
1524 struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j];
1525 shader_upload_seq = MAX2(shader_upload_seq, cmd_buffer->shader_upload_seq);
1526 }
1527
1528 if (shader_upload_seq > queue->last_shader_upload_seq) {
1529 /* Patch the wait array to add waiting for referenced shaders to upload. */
1530 struct vk_sync_wait *new_waits = malloc(sizeof(struct vk_sync_wait) * (wait_count + 1));
1531 if (!new_waits) {
1532 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1533 goto fail;
1534 }
1535
1536 memcpy(new_waits, submission->waits, sizeof(struct vk_sync_wait) * submission->wait_count);
1537 radv_get_shader_upload_sync_wait(queue->device, shader_upload_seq, &new_waits[submission->wait_count]);
1538
1539 waits = new_waits;
1540 wait_count += 1;
1541 }
1542
1543 /* For fences on the same queue/VM, amdgpu doesn't wait until all processing is finished
1544 * before starting the next cmdbuffer, so we need to emit the wait here.
1545 */
1546 const bool need_wait = wait_count > 0;
1547 unsigned num_initial_preambles = 0;
1548 unsigned num_continue_preambles = 0;
1549 unsigned num_postambles = 0;
1550 struct radeon_cmdbuf *initial_preambles[5] = {0};
1551 struct radeon_cmdbuf *continue_preambles[5] = {0};
1552 struct radeon_cmdbuf *postambles[3] = {0};
1553
1554 if (queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE) {
1555 initial_preambles[num_initial_preambles++] =
1556 need_wait ? queue->state.initial_full_flush_preamble_cs : queue->state.initial_preamble_cs;
1557
1558 continue_preambles[num_continue_preambles++] = queue->state.continue_preamble_cs;
1559
1560 if (use_perf_counters) {
1561 /* RADV only supports perf counters on the GFX queue currently. */
1562 assert(queue->state.qf == RADV_QUEUE_GENERAL);
1563
1564 /* Create the lock/unlock CS. */
1565 struct radeon_cmdbuf *perf_ctr_lock_cs =
1566 radv_create_perf_counter_lock_cs(queue->device, submission->perf_pass_index, false);
1567 struct radeon_cmdbuf *perf_ctr_unlock_cs =
1568 radv_create_perf_counter_lock_cs(queue->device, submission->perf_pass_index, true);
1569
1570 if (!perf_ctr_lock_cs || !perf_ctr_unlock_cs) {
1571 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1572 goto fail;
1573 }
1574
1575 initial_preambles[num_initial_preambles++] = perf_ctr_lock_cs;
1576 continue_preambles[num_continue_preambles++] = perf_ctr_lock_cs;
1577 postambles[num_postambles++] = perf_ctr_unlock_cs;
1578 }
1579 }
1580
1581 const unsigned num_1q_initial_preambles = num_initial_preambles;
1582 const unsigned num_1q_continue_preambles = num_continue_preambles;
1583 const unsigned num_1q_postambles = num_postambles;
1584
1585 if (use_ace) {
1586 initial_preambles[num_initial_preambles++] = queue->state.gang_wait_preamble_cs;
1587 initial_preambles[num_initial_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1588 initial_preambles[num_initial_preambles++] =
1589 need_wait ? queue->follower_state->initial_full_flush_preamble_cs : queue->follower_state->initial_preamble_cs;
1590
1591 continue_preambles[num_continue_preambles++] = queue->state.gang_wait_preamble_cs;
1592 continue_preambles[num_continue_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1593 continue_preambles[num_continue_preambles++] = queue->follower_state->continue_preamble_cs;
1594
1595 postambles[num_postambles++] = queue->follower_state->gang_wait_postamble_cs;
1596 postambles[num_postambles++] = queue->state.gang_wait_postamble_cs;
1597 }
1598
1599 struct radv_winsys_submit_info submit = {
1600 .ip_type = radv_queue_ring(queue),
1601 .queue_index = queue->vk.index_in_family,
1602 .cs_array = cs_array,
1603 .cs_count = 0,
1604 .initial_preamble_count = num_1q_initial_preambles,
1605 .continue_preamble_count = num_1q_continue_preambles,
1606 .postamble_count = num_1q_postambles,
1607 .initial_preamble_cs = initial_preambles,
1608 .continue_preamble_cs = continue_preambles,
1609 .postamble_cs = postambles,
1610 .uses_shadow_regs = queue->state.uses_shadow_regs,
1611 };
1612
1613 for (uint32_t j = 0, advance; j < cmd_buffer_count; j += advance) {
1614 advance = MIN2(max_cs_submission, cmd_buffer_count - j);
1615 const bool last_submit = j + advance == cmd_buffer_count;
1616 bool submit_ace = false;
1617 unsigned num_submitted_cs = 0;
1618
1619 if (radv_device_fault_detection_enabled(queue->device))
1620 *queue->device->trace_id_ptr = 0;
1621
1622 struct radeon_cmdbuf *chainable = NULL;
1623 struct radeon_cmdbuf *chainable_ace = NULL;
1624
1625 /* Add CS from submitted command buffers. */
1626 for (unsigned c = 0; c < advance; ++c) {
1627 struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j + c];
1628 assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1629 const bool can_chain_next = !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
1630
1631 /* Follower needs to be before the gang leader because the last CS must match the queue's IP type. */
1632 if (cmd_buffer->gang.cs) {
1633 queue->device->ws->cs_unchain(cmd_buffer->gang.cs);
1634 if (!chainable_ace || !queue->device->ws->cs_chain(chainable_ace, cmd_buffer->gang.cs, false)) {
1635 cs_array[num_submitted_cs++] = cmd_buffer->gang.cs;
1636
1637 /* Prevent chaining the gang leader when the follower couldn't be chained.
1638 * Otherwise, they would be in the wrong order.
1639 */
1640 chainable = NULL;
1641 }
1642
1643 chainable_ace = can_chain_next ? cmd_buffer->gang.cs : NULL;
1644 submit_ace = true;
1645 }
1646
1647 queue->device->ws->cs_unchain(cmd_buffer->cs);
1648 if (!chainable || !queue->device->ws->cs_chain(chainable, cmd_buffer->cs, queue->state.uses_shadow_regs)) {
1649 /* Don't submit empty command buffers to the kernel. */
1650 if ((radv_queue_ring(queue) != AMD_IP_VCN_ENC && radv_queue_ring(queue) != AMD_IP_UVD) ||
1651 cmd_buffer->cs->cdw != 0)
1652 cs_array[num_submitted_cs++] = cmd_buffer->cs;
1653 }
1654
1655 chainable = can_chain_next ? cmd_buffer->cs : NULL;
1656 }
1657
1658 submit.cs_count = num_submitted_cs;
1659 submit.initial_preamble_count = submit_ace ? num_initial_preambles : num_1q_initial_preambles;
1660 submit.continue_preamble_count = submit_ace ? num_continue_preambles : num_1q_continue_preambles;
1661 submit.postamble_count = submit_ace ? num_postambles : num_1q_postambles;
1662
1663 result = queue->device->ws->cs_submit(ctx, &submit, j == 0 ? wait_count : 0, waits,
1664 last_submit ? submission->signal_count : 0, submission->signals);
1665
1666 if (result != VK_SUCCESS)
1667 goto fail;
1668
1669 if (radv_device_fault_detection_enabled(queue->device)) {
1670 radv_check_gpu_hangs(queue, &submit);
1671 }
1672
1673 if (queue->device->tma_bo) {
1674 radv_check_trap_handler(queue);
1675 }
1676
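/* After the first chunk of this submission, later chunks no longer need the full-flush preamble. */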
1677 initial_preambles[0] = queue->state.initial_preamble_cs;
1678 initial_preambles[1] = !use_ace ? NULL : queue->follower_state->initial_preamble_cs;
1679 }
1680
1681 queue->last_shader_upload_seq = MAX2(queue->last_shader_upload_seq, shader_upload_seq);
1682
1683 radv_dump_printf_data(queue->device);
1684
1685 fail:
1686 free(cs_array);
1687 if (waits != submission->waits)
1688 free(waits);
1689 if (radv_device_fault_detection_enabled(queue->device))
1690 simple_mtx_unlock(&queue->device->trace_mtx);
1691
1692 return result;
1693 }
1694
1695 static void
1696 radv_report_gpuvm_fault(struct radv_device *device)
1697 {
1698 struct radv_winsys_gpuvm_fault_info fault_info = {0};
1699
1700 if (!radv_vm_fault_occurred(device, &fault_info))
1701 return;
1702
1703 fprintf(stderr, "radv: GPUVM fault detected at address 0x%08" PRIx64 ".\n", fault_info.addr);
1704 ac_print_gpuvm_fault_status(stderr, device->physical_device->rad_info.gfx_level, fault_info.status);
1705 }
1706
1707 static VkResult
1708 radv_queue_sparse_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1709 {
1710 struct radv_queue *queue = (struct radv_queue *)vqueue;
1711 struct radv_device *device = queue->device;
1712 VkResult result;
1713
1714 result = radv_queue_submit_bind_sparse_memory(device, submission);
1715 if (result != VK_SUCCESS)
1716 goto fail;
1717
1718 /* We do a CPU wait here, in part to avoid adding more winsys mechanisms. With the likely kernel
1719 * explicit-sync mechanism, we'd need to do a CPU wait anyway. This hasn't been a perf issue yet,
1720 * but it requires the queue to always have its submission thread enabled. */
1721 result = vk_sync_wait_many(&device->vk, submission->wait_count, submission->waits, 0, UINT64_MAX);
1722 if (result != VK_SUCCESS)
1723 goto fail;
1724
1725 /* Ignore all the command buffers. They are necessarily empty anyway. */
1726
1727 for (unsigned i = 0; i < submission->signal_count; ++i) {
1728 result = vk_sync_signal(&device->vk, submission->signals[i].sync, submission->signals[i].signal_value);
1729 if (result != VK_SUCCESS)
1730 goto fail;
1731 }
1732
1733 fail:
1734 if (result != VK_SUCCESS) {
1735 /* When something bad happened during the submission, such as
1736 * an out of memory issue, it might be hard to recover from
1737 * this inconsistent state. To avoid this sort of problem, we
1738 * assume that we are in a really bad situation and return
1739 * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1740 * to submit the same job again to this device.
1741 */
1742 radv_report_gpuvm_fault(queue->device);
1743 result = vk_device_set_lost(&queue->device->vk, "vkQueueSubmit() failed");
1744 }
1745 return result;
1746 }
1747
1748 static VkResult
1749 radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1750 {
1751 struct radv_queue *queue = (struct radv_queue *)vqueue;
1752 VkResult result;
1753
1754 if (queue->device->instance->drirc.legacy_sparse_binding) {
1755 result = radv_queue_submit_bind_sparse_memory(queue->device, submission);
1756 if (result != VK_SUCCESS)
1757 goto fail;
1758 } else {
1759 assert(!submission->buffer_bind_count && !submission->image_bind_count && !submission->image_opaque_bind_count);
1760 }
1761
1762 if (!submission->command_buffer_count && !submission->wait_count && !submission->signal_count)
1763 return VK_SUCCESS;
1764
1765 if (!submission->command_buffer_count) {
1766 result = radv_queue_submit_empty(queue, submission);
1767 } else {
1768 result = radv_queue_submit_normal(queue, submission);
1769 }
1770
1771 fail:
1772 if (result != VK_SUCCESS) {
1773 /* When something bad happened during the submission, such as
1774 * an out of memory issue, it might be hard to recover from
1775 * this inconsistent state. To avoid this sort of problem, we
1776 * assume that we are in a really bad situation and return
1777 * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1778 * to submit the same job again to this device.
1779 */
1780 radv_report_gpuvm_fault(queue->device);
1781 result = vk_device_set_lost(&queue->device->vk, "vkQueueSubmit() failed");
1782 }
1783 return result;
1784 }
1785
1786 bool
1787 radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
1788 {
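/* Directly submit a single internal CS, without preambles, waits or signals. */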
1789 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1790 struct radv_winsys_submit_info submit = {
1791 .ip_type = radv_queue_ring(queue),
1792 .queue_index = queue->vk.index_in_family,
1793 .cs_array = &cs,
1794 .cs_count = 1,
1795 };
1796
1797 VkResult result = queue->device->ws->cs_submit(ctx, &submit, 0, NULL, 0, NULL);
1798 if (result != VK_SUCCESS)
1799 return false;
1800
1801 return true;
1802 }
1803
1804 int
1805 radv_queue_init(struct radv_device *device, struct radv_queue *queue, int idx,
1806 const VkDeviceQueueCreateInfo *create_info,
1807 const VkDeviceQueueGlobalPriorityCreateInfoKHR *global_priority)
1808 {
1809 queue->device = device;
1810 queue->priority = radv_get_queue_global_priority(global_priority);
1811 queue->hw_ctx = device->hw_ctx[queue->priority];
1812 queue->state.qf = vk_queue_to_radv(device->physical_device, create_info->queueFamilyIndex);
1813 queue->gang_sem_bo = NULL;
1814
1815 VkResult result = vk_queue_init(&queue->vk, &device->vk, create_info, idx);
1816 if (result != VK_SUCCESS)
1817 return result;
1818
1819 queue->state.uses_shadow_regs = device->uses_shadow_regs && queue->state.qf == RADV_QUEUE_GENERAL;
1820 if (queue->state.uses_shadow_regs) {
1821 result = radv_create_shadow_regs_preamble(device, &queue->state);
1822 if (result != VK_SUCCESS)
1823 goto fail;
1824 result = radv_init_shadowed_regs_buffer_state(device, queue);
1825 if (result != VK_SUCCESS)
1826 goto fail;
1827 }
1828
1829 if (queue->state.qf == RADV_QUEUE_SPARSE) {
1830 queue->vk.driver_submit = radv_queue_sparse_submit;
1831 vk_queue_enable_submit_thread(&queue->vk);
1832 } else {
1833 queue->vk.driver_submit = radv_queue_submit;
1834 }
1835 return VK_SUCCESS;
1836 fail:
1837 vk_queue_finish(&queue->vk);
1838 return result;
1839 }
1840
1841 static void
1842 radv_queue_state_finish(struct radv_queue_state *queue, struct radv_device *device)
1843 {
1844 radv_destroy_shadow_regs_preamble(queue, device->ws);
1845 if (queue->initial_full_flush_preamble_cs)
1846 device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1847 if (queue->initial_preamble_cs)
1848 device->ws->cs_destroy(queue->initial_preamble_cs);
1849 if (queue->continue_preamble_cs)
1850 device->ws->cs_destroy(queue->continue_preamble_cs);
1851 if (queue->gang_wait_preamble_cs)
1852 device->ws->cs_destroy(queue->gang_wait_preamble_cs);
1853 if (queue->gang_wait_postamble_cs)
1854 device->ws->cs_destroy(queue->gang_wait_postamble_cs);
1855 if (queue->descriptor_bo)
1856 device->ws->buffer_destroy(device->ws, queue->descriptor_bo);
1857 if (queue->scratch_bo) {
1858 device->ws->buffer_destroy(device->ws, queue->scratch_bo);
1859 radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
1860 }
1861 if (queue->esgs_ring_bo) {
1862 radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
1863 device->ws->buffer_destroy(device->ws, queue->esgs_ring_bo);
1864 }
1865 if (queue->gsvs_ring_bo) {
1866 radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
1867 device->ws->buffer_destroy(device->ws, queue->gsvs_ring_bo);
1868 }
1869 if (queue->tess_rings_bo) {
1870 radv_rmv_log_command_buffer_bo_destroy(device, queue->tess_rings_bo);
1871 device->ws->buffer_destroy(device->ws, queue->tess_rings_bo);
1872 }
1873 if (queue->task_rings_bo) {
1874 radv_rmv_log_command_buffer_bo_destroy(device, queue->task_rings_bo);
1875 device->ws->buffer_destroy(device->ws, queue->task_rings_bo);
1876 }
1877 if (queue->mesh_scratch_ring_bo) {
1878 radv_rmv_log_command_buffer_bo_destroy(device, queue->mesh_scratch_ring_bo);
1879 device->ws->buffer_destroy(device->ws, queue->mesh_scratch_ring_bo);
1880 }
1881 if (queue->attr_ring_bo) {
1882 radv_rmv_log_command_buffer_bo_destroy(device, queue->attr_ring_bo);
1883 device->ws->buffer_destroy(device->ws, queue->attr_ring_bo);
1884 }
1885 if (queue->gds_bo) {
1886 device->ws->buffer_make_resident(device->ws, queue->gds_bo, false);
1887 device->ws->buffer_destroy(device->ws, queue->gds_bo);
1888 }
1889 if (queue->gds_oa_bo) {
1890 device->ws->buffer_make_resident(device->ws, queue->gds_oa_bo, false);
1891 device->ws->buffer_destroy(device->ws, queue->gds_oa_bo);
1892 }
1893 if (queue->compute_scratch_bo) {
1894 radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
1895 device->ws->buffer_destroy(device->ws, queue->compute_scratch_bo);
1896 }
1897 }
1898
1899 void
1900 radv_queue_finish(struct radv_queue *queue)
1901 {
1902 if (queue->follower_state) {
1903 /* Prevent double free */
1904 /* Prevent a double free: the task rings BO is shared with the main queue state. */
1905
1906 /* Clean up the internal ACE queue state. */
1907 radv_queue_state_finish(queue->follower_state, queue->device);
1908 free(queue->follower_state);
1909 }
1910
1911 if (queue->gang_sem_bo)
1912 queue->device->ws->buffer_destroy(queue->device->ws, queue->gang_sem_bo);
1913
1914 radv_queue_state_finish(&queue->state, queue->device);
1915 vk_queue_finish(&queue->vk);
1916 }
1917