1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <amdgpu.h>
26 #include <assert.h>
27 #include <libsync.h>
28 #include <pthread.h>
29 #include <stdlib.h>
30 #include "drm-uapi/amdgpu_drm.h"
31
32 #include "util/u_memory.h"
33 #include "ac_debug.h"
34 #include "radv_amdgpu_bo.h"
35 #include "radv_amdgpu_cs.h"
36 #include "radv_amdgpu_winsys.h"
37 #include "radv_debug.h"
38 #include "radv_radeon_winsys.h"
39 #include "sid.h"
40 #include "vk_alloc.h"
41 #include "vk_drm_syncobj.h"
42 #include "vk_sync.h"
43 #include "vk_sync_dummy.h"
44
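/*
 * Command stream (CS) handling for the amdgpu winsys.
 *
 * Rough usage sketch (illustrative only; the real driver drives these entry
 * points through the struct radeon_winsys vtable, and the exact member names
 * below are assumptions for illustration):
 *
 *   struct radeon_cmdbuf *cs = ws->cs_create(ws, AMD_IP_GFX);
 *   ws->cs_add_buffer(cs, bo);      // track every BO the IB references
 *   radeon_emit(cs, ...);           // write packets, growing the IB on demand
 *   ws->cs_finalize(cs);            // pad and patch the final IB size
 *   ...submit through radv_amdgpu_winsys_cs_submit()...
 */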
45 #define GFX6_MAX_CS_SIZE 0xffff8 /* in dwords */
46
47 enum { VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024 };
48
49 struct radv_amdgpu_ib {
50 struct radeon_winsys_bo *bo;
51 unsigned cdw;
52 };
53
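/*
 * A CS either lives in a GPU-visible IB buffer that is grown by chaining to a
 * fresh BO (use_ib == true, see radv_amdgpu_cs_grow()), or in plain malloc'ed
 * memory for rings that cannot use IB BOs, in which case filled buffers are
 * stashed in old_cs_buffers and copied into GPU buffers at submit time
 * (see radv_amdgpu_winsys_cs_submit_sysmem()).
 */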
54 struct radv_amdgpu_cs {
55 struct radeon_cmdbuf base;
56 struct radv_amdgpu_winsys *ws;
57
58 struct amdgpu_cs_ib_info ib;
59
60 struct radeon_winsys_bo *ib_buffer;
61 uint8_t *ib_mapped;
62 unsigned max_num_buffers;
63 unsigned num_buffers;
64 struct drm_amdgpu_bo_list_entry *handles;
65
66 struct radv_amdgpu_ib *old_ib_buffers;
67 unsigned num_old_ib_buffers;
68 unsigned max_num_old_ib_buffers;
69 unsigned *ib_size_ptr;
70 VkResult status;
71 bool is_chained;
72 bool use_ib;
73
74 int buffer_hash_table[1024];
75 unsigned hw_ip;
76
77 unsigned num_virtual_buffers;
78 unsigned max_num_virtual_buffers;
79 struct radeon_winsys_bo **virtual_buffers;
80 int *virtual_buffer_hash_table;
81
82 /* For chips that don't support chaining. */
83 struct radeon_cmdbuf *old_cs_buffers;
84 unsigned num_old_cs_buffers;
85 };
86
87 struct radv_winsys_sem_counts {
88 uint32_t syncobj_count;
89 uint32_t timeline_syncobj_count;
90 uint32_t *syncobj;
91 uint64_t *points;
92 };
93
94 struct radv_winsys_sem_info {
95 bool cs_emit_signal;
96 bool cs_emit_wait;
97 struct radv_winsys_sem_counts wait;
98 struct radv_winsys_sem_counts signal;
99
100 /* Expresses a scheduled dependency, meaning that the submission of the
101 * referenced fence must be scheduled before the current submission.
102 */
103 struct radv_amdgpu_fence *scheduled_dependency;
104 };
105
106 static uint32_t radv_amdgpu_ctx_queue_syncobj(struct radv_amdgpu_ctx *ctx, unsigned ip,
107 unsigned ring);
108
109 static inline struct radv_amdgpu_cs *
110 radv_amdgpu_cs(struct radeon_cmdbuf *base)
111 {
112 return (struct radv_amdgpu_cs *)base;
113 }
114
115 static bool
116 ring_can_use_ib_bos(const struct radv_amdgpu_winsys *ws,
117 enum amd_ip_type ip_type)
118 {
119 if (ip_type == AMD_IP_UVD ||
120 ip_type == AMD_IP_VCE ||
121 ip_type == AMD_IP_UVD_ENC ||
122 ip_type == AMD_IP_VCN_DEC ||
123 ip_type == AMD_IP_VCN_ENC)
124 return false;
125 return ws->use_ib_bos;
126 }
127
128 struct radv_amdgpu_cs_request {
129 /** Specify HW IP block type to which to send the IB. */
130 unsigned ip_type;
131
132 /** IP instance index if there are several IPs of the same type. */
133 unsigned ip_instance;
134
135 /**
136 * Specify ring index of the IP. We could have several rings
137 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
138 */
139 uint32_t ring;
140
141 /**
142 * BO list handles used by this request.
143 */
144 struct drm_amdgpu_bo_list_entry *handles;
145 uint32_t num_handles;
146
147 /** Number of IBs to submit in the field ibs. */
148 uint32_t number_of_ibs;
149
150 /**
151 * IBs to submit. These IBs will be submitted together as a single entity.
152 */
153 struct amdgpu_cs_ib_info *ibs;
154
155 /**
156 * The returned sequence number for the command submission
157 */
158 uint64_t seq_no;
159 };
160
161 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
162 struct radv_amdgpu_cs_request *request,
163 struct radv_winsys_sem_info *sem_info);
164
165 static void
166 radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_fence *fence,
167 struct radv_amdgpu_cs_request *req)
168 {
169 fence->fence.context = ctx->ctx;
170 fence->fence.ip_type = req->ip_type;
171 fence->fence.ip_instance = req->ip_instance;
172 fence->fence.ring = req->ring;
173 fence->fence.fence = req->seq_no;
174 }
175
176 static void
177 radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
178 {
179 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
180
181 if (cs->ib_buffer)
182 cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);
183 else
184 free(cs->base.buf);
185
186 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
187 cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i].bo);
188
189 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
190 free(cs->old_cs_buffers[i].buf);
191 }
192
193 free(cs->old_cs_buffers);
194 free(cs->old_ib_buffers);
195 free(cs->virtual_buffers);
196 free(cs->virtual_buffer_hash_table);
197 free(cs->handles);
198 free(cs);
199 }
200
201 static void
202 radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs, enum amd_ip_type ip_type)
203 {
204 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
205 cs->buffer_hash_table[i] = -1;
206
207 cs->hw_ip = ip_type;
208 }
209
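/*
 * Pick the memory domain for IB buffers. SAM (Smart Access Memory) places IBs
 * in CPU-visible VRAM when there is headroom: e.g. with a 256 MiB visible
 * window, VRAM is used only while visible-VRAM allocations stay below half of
 * it (allocated_vram_vis * 2 <= vram_vis_size), unless the whole VRAM heap is
 * CPU-visible. The RADV_PERFTEST_SAM / RADV_PERFTEST_NO_SAM flags force or
 * disable this behaviour.
 */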
210 static enum radeon_bo_domain
211 radv_amdgpu_cs_domain(const struct radeon_winsys *_ws)
212 {
213 const struct radv_amdgpu_winsys *ws = (const struct radv_amdgpu_winsys *)_ws;
214
215 bool enough_vram = ws->info.all_vram_visible ||
216 p_atomic_read_relaxed(&ws->allocated_vram_vis) * 2 <= (uint64_t)ws->info.vram_vis_size_kb * 1024;
217 bool use_sam =
218 (enough_vram && ws->info.has_dedicated_vram && !(ws->perftest & RADV_PERFTEST_NO_SAM)) ||
219 (ws->perftest & RADV_PERFTEST_SAM);
220 return use_sam ? RADEON_DOMAIN_VRAM : RADEON_DOMAIN_GTT;
221 }
222
223 static struct radeon_cmdbuf *
224 radv_amdgpu_cs_create(struct radeon_winsys *ws, enum amd_ip_type ip_type)
225 {
226 struct radv_amdgpu_cs *cs;
227 uint32_t ib_pad_dw_mask = MAX2(3, radv_amdgpu_winsys(ws)->info.ib_pad_dw_mask[ip_type]);
228 uint32_t ib_size = align(20 * 1024 * 4, ib_pad_dw_mask + 1);
229 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
230 if (!cs)
231 return NULL;
232
233 cs->ws = radv_amdgpu_winsys(ws);
234 radv_amdgpu_init_cs(cs, ip_type);
235
236 cs->use_ib = ring_can_use_ib_bos(cs->ws, ip_type);
237
238 if (cs->use_ib) {
239 VkResult result =
240 ws->buffer_create(ws, ib_size, 0, radv_amdgpu_cs_domain(ws),
241 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
242 RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
243 RADV_BO_PRIORITY_CS, 0, &cs->ib_buffer);
244 if (result != VK_SUCCESS) {
245 free(cs);
246 return NULL;
247 }
248
249 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
250 if (!cs->ib_mapped) {
251 ws->buffer_destroy(ws, cs->ib_buffer);
252 free(cs);
253 return NULL;
254 }
255
256 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
257 cs->base.buf = (uint32_t *)cs->ib_mapped;
258 cs->base.max_dw = ib_size / 4 - 4;
259 cs->ib_size_ptr = &cs->ib.size;
260 cs->ib.size = 0;
261
262 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
263 } else {
264 uint32_t *buf = malloc(16384);
265 if (!buf) {
266 free(cs);
267 return NULL;
268 }
269 cs->base.buf = buf;
270 cs->base.max_dw = 4096;
271 }
272
273 return &cs->base;
274 }
275
276 static bool hw_can_chain(unsigned hw_ip)
277 {
278 return hw_ip == AMDGPU_HW_IP_GFX || hw_ip == AMDGPU_HW_IP_COMPUTE;
279 }
280
281 static uint32_t get_nop_packet(struct radv_amdgpu_cs *cs)
282 {
283 switch(cs->hw_ip) {
284 case AMDGPU_HW_IP_GFX:
285 case AMDGPU_HW_IP_COMPUTE:
286 return cs->ws->info.gfx_ib_pad_with_type2 ? PKT2_NOP_PAD : PKT3_NOP_PAD;
287 case AMDGPU_HW_IP_DMA:
288 return cs->ws->info.gfx_level <= GFX6 ? 0xF0000000 : SDMA_NOP_PAD;
289 case AMDGPU_HW_IP_UVD:
290 case AMDGPU_HW_IP_UVD_ENC:
291 return PKT2_NOP_PAD;
292 case AMDGPU_HW_IP_VCN_DEC:
293 return 0x81FF;
294 default:
295 unreachable("Unknown IP type");
296 }
297 }
298
299 static void
300 radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
301 {
302 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
303
304 if (cs->status != VK_SUCCESS) {
305 cs->base.cdw = 0;
306 return;
307 }
308
309 if (!cs->use_ib) {
310 const uint64_t limit_dws = GFX6_MAX_CS_SIZE;
311 uint64_t ib_dws = MAX2(cs->base.cdw + min_size, MIN2(cs->base.max_dw * 2, limit_dws));
312
313 /* The total ib size cannot exceed limit_dws dwords. */
314 if (ib_dws > limit_dws) {
315 /* The maximum size in dwords has been reached:
316 * stash the current buffer and allocate a new one.
317 */
318 struct radeon_cmdbuf *old_cs_buffers =
319 realloc(cs->old_cs_buffers, (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
320 if (!old_cs_buffers) {
321 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
322 cs->base.cdw = 0;
323 return;
324 }
325 cs->old_cs_buffers = old_cs_buffers;
326
327 /* Store the current one for submitting it later. */
328 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
329 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
330 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
331 cs->num_old_cs_buffers++;
332
333 /* Reset the cs, it will be re-allocated below. */
334 cs->base.cdw = 0;
335 cs->base.buf = NULL;
336
337 /* Re-compute the number of dwords to allocate. */
338 ib_dws = MAX2(cs->base.cdw + min_size, MIN2(cs->base.max_dw * 2, limit_dws));
339 if (ib_dws > limit_dws) {
340 fprintf(stderr, "radv/amdgpu: too many dwords to allocate\n");
342 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
343 return;
344 }
345 }
346
347 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
348 if (new_buf) {
349 cs->base.buf = new_buf;
350 cs->base.max_dw = ib_dws;
351 } else {
352 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
353 cs->base.cdw = 0;
354 }
355 return;
356 }
357
358 enum amd_ip_type ip_type = cs->hw_ip;
359 uint32_t ib_pad_dw_mask = MAX2(3, cs->ws->info.ib_pad_dw_mask[ip_type]);
360 uint32_t nop_packet = get_nop_packet(cs);
361 while (!cs->base.cdw || (cs->base.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
362 radeon_emit(&cs->base, nop_packet);
363
364 *cs->ib_size_ptr |= cs->base.cdw + 4;
365
366 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
367 unsigned max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
368 struct radv_amdgpu_ib *old_ib_buffers =
369 realloc(cs->old_ib_buffers, max_num_old_ib_buffers * sizeof(*old_ib_buffers));
370 if (!old_ib_buffers) {
371 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
372 return;
373 }
374 cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
375 cs->old_ib_buffers = old_ib_buffers;
376 }
377
378 cs->old_ib_buffers[cs->num_old_ib_buffers].bo = cs->ib_buffer;
379 cs->old_ib_buffers[cs->num_old_ib_buffers++].cdw = cs->base.cdw;
380
381 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
382
383 /* max that fits in the chain size field. */
384 ib_size = align(MIN2(ib_size, 0xfffff), ib_pad_dw_mask + 1);
385
386 VkResult result =
387 cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0, radv_amdgpu_cs_domain(&cs->ws->base),
388 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
389 RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
390 RADV_BO_PRIORITY_CS, 0, &cs->ib_buffer);
391
392 if (result != VK_SUCCESS) {
393 cs->base.cdw = 0;
394 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
395 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers].bo;
396 }
397
398 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
399 if (!cs->ib_mapped) {
400 cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);
401 cs->base.cdw = 0;
402
403 /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
404 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
405 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers].bo;
406 }
407
408 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
409
410 assert(hw_can_chain(cs->hw_ip)); /* TODO: Implement growing other queues if needed. */
411
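/* Chain to the new IB: a 4-dword INDIRECT_BUFFER packet holding the new IB's
 * VA (low/high dwords) and a size/flags dword. The size bits are left zero
 * here and OR'ed in later through cs->ib_size_ptr, once we know how many
 * dwords the new IB actually uses.
 */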
412 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
413 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
414 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
415 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
416
417 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
418
419 cs->base.buf = (uint32_t *)cs->ib_mapped;
420 cs->base.cdw = 0;
421 cs->base.max_dw = ib_size / 4 - 4;
422 }
423
424 static VkResult
425 radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
426 {
427 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
428 enum amd_ip_type ip_type = cs->hw_ip;
429
430 if (cs->use_ib) {
431 uint32_t ib_pad_dw_mask = MAX2(3, cs->ws->info.ib_pad_dw_mask[ip_type]);
432 uint32_t nop_packet = get_nop_packet(cs);
433
434 if (hw_can_chain(cs->hw_ip)) {
435 /* Ensure that, with the 4-dword reservation we subtract from max_dw, we
436 * always have 4 NOPs at the end for chaining. */
437 while (!cs->base.cdw || (cs->base.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
438 radeon_emit(&cs->base, nop_packet);
439
440 radeon_emit(&cs->base, nop_packet);
441 radeon_emit(&cs->base, nop_packet);
442 radeon_emit(&cs->base, nop_packet);
443 radeon_emit(&cs->base, nop_packet);
444 } else {
445 while (!cs->base.cdw || (cs->base.cdw & ib_pad_dw_mask))
446 radeon_emit(&cs->base, nop_packet);
447 }
448
449 *cs->ib_size_ptr |= cs->base.cdw;
450
451 cs->is_chained = false;
452
453 assert(cs->base.cdw <= cs->base.max_dw + 4);
454 }
455
456 return cs->status;
457 }
458
459 static void
460 radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
461 {
462 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
463 cs->base.cdw = 0;
464 cs->status = VK_SUCCESS;
465
466 for (unsigned i = 0; i < cs->num_buffers; ++i) {
467 unsigned hash = cs->handles[i].bo_handle & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
468 cs->buffer_hash_table[hash] = -1;
469 }
470
471 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
472 unsigned hash =
473 ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
474 cs->virtual_buffer_hash_table[hash] = -1;
475 }
476
477 cs->num_buffers = 0;
478 cs->num_virtual_buffers = 0;
479
480 if (cs->use_ib) {
481 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
482
483 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
484 cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i].bo);
485
486 cs->num_old_ib_buffers = 0;
487 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
488 cs->ib_size_ptr = &cs->ib.size;
489 cs->ib.size = 0;
490 } else {
491 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
492 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
493 free(rcs->buf);
494 }
495
496 free(cs->old_cs_buffers);
497 cs->old_cs_buffers = NULL;
498 cs->num_old_cs_buffers = 0;
499 }
500 }
501
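/*
 * buffer_hash_table is a single-slot cache: the low bits of the BO handle map
 * to the index of the most recently seen entry in cs->handles. On a miss we
 * fall back to a linear scan and refresh the cached slot.
 */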
502 static int
503 radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs, uint32_t bo)
504 {
505 unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
506 int index = cs->buffer_hash_table[hash];
507
508 if (index == -1)
509 return -1;
510
511 if (cs->handles[index].bo_handle == bo)
512 return index;
513
514 for (unsigned i = 0; i < cs->num_buffers; ++i) {
515 if (cs->handles[i].bo_handle == bo) {
516 cs->buffer_hash_table[hash] = i;
517 return i;
518 }
519 }
520
521 return -1;
522 }
523
524 static void
525 radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs, uint32_t bo, uint8_t priority)
526 {
527 unsigned hash;
528 int index = radv_amdgpu_cs_find_buffer(cs, bo);
529
530 if (index != -1)
531 return;
532
533 if (cs->num_buffers == cs->max_num_buffers) {
534 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
535 struct drm_amdgpu_bo_list_entry *new_entries =
536 realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
537 if (new_entries) {
538 cs->max_num_buffers = new_count;
539 cs->handles = new_entries;
540 } else {
541 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
542 return;
543 }
544 }
545
546 cs->handles[cs->num_buffers].bo_handle = bo;
547 cs->handles[cs->num_buffers].bo_priority = priority;
548
549 hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
550 cs->buffer_hash_table[hash] = cs->num_buffers;
551
552 ++cs->num_buffers;
553 }
554
555 static void
556 radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs, struct radeon_winsys_bo *bo)
557 {
558 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
559 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
560
561 if (!cs->virtual_buffer_hash_table) {
562 int *virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
563 if (!virtual_buffer_hash_table) {
564 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
565 return;
566 }
567 cs->virtual_buffer_hash_table = virtual_buffer_hash_table;
568
569 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
570 cs->virtual_buffer_hash_table[i] = -1;
571 }
572
573 if (cs->virtual_buffer_hash_table[hash] >= 0) {
574 int idx = cs->virtual_buffer_hash_table[hash];
575 if (cs->virtual_buffers[idx] == bo) {
576 return;
577 }
578 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
579 if (cs->virtual_buffers[i] == bo) {
580 cs->virtual_buffer_hash_table[hash] = i;
581 return;
582 }
583 }
584 }
585
586 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
587 unsigned max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
588 struct radeon_winsys_bo **virtual_buffers =
589 realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * max_num_virtual_buffers);
590 if (!virtual_buffers) {
591 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
592 return;
593 }
594 cs->max_num_virtual_buffers = max_num_virtual_buffers;
595 cs->virtual_buffers = virtual_buffers;
596 }
597
598 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
599
600 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
601 ++cs->num_virtual_buffers;
602 }
603
604 static void
605 radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs, struct radeon_winsys_bo *_bo)
606 {
607 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
608 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
609
610 if (cs->status != VK_SUCCESS)
611 return;
612
613 if (bo->is_virtual) {
614 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
615 return;
616 }
617
618 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
619 }
620
621 static void
622 radv_amdgpu_cs_add_buffers(struct radeon_cmdbuf *_to, struct radeon_cmdbuf *_from)
623 {
624 struct radv_amdgpu_cs *from = radv_amdgpu_cs(_from);
625 struct radv_amdgpu_cs *to = radv_amdgpu_cs(_to);
626
627 for (unsigned i = 0; i < from->num_buffers; ++i) {
628 radv_amdgpu_cs_add_buffer_internal(to, from->handles[i].bo_handle,
629 from->handles[i].bo_priority);
630 }
631
632 for (unsigned i = 0; i < from->num_virtual_buffers; ++i) {
633 radv_amdgpu_cs_add_buffer(&to->base, from->virtual_buffers[i]);
634 }
635 }
636
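/*
 * Execute a secondary command buffer from a primary one. Three strategies:
 *  - IB2: emit an INDIRECT_BUFFER packet without the CHAIN bit so the CP
 *    launches the child IB and returns (only when allow_ib2 and IB BOs are
 *    in use),
 *  - otherwise copy the child's old IB buffers and current IB into the parent,
 *  - or, on the sysmem path, append the child's CS buffers to the parent's
 *    old_cs_buffers list so they get submitted as separate IBs.
 */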
637 static void
638 radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent, struct radeon_cmdbuf *_child,
639 bool allow_ib2)
640 {
641 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
642 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
643 struct radv_amdgpu_winsys *ws = parent->ws;
644 bool use_ib2 = parent->use_ib && allow_ib2;
645
646 if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
647 return;
648
649 for (unsigned i = 0; i < child->num_buffers; ++i) {
650 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i].bo_handle,
651 child->handles[i].bo_priority);
652 }
653
654 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
655 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
656 }
657
658 if (use_ib2) {
659 if (parent->base.cdw + 4 > parent->base.max_dw)
660 radv_amdgpu_cs_grow(&parent->base, 4);
661
662 /* Not setting the CHAIN bit will launch an IB2. */
663 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
664 radeon_emit(&parent->base, child->ib.ib_mc_address);
665 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
666 radeon_emit(&parent->base, child->ib.size);
667 } else {
668 if (parent->use_ib) {
669 /* Copy and chain old IB buffers from the child to the parent IB. */
670 for (unsigned i = 0; i < child->num_old_ib_buffers; i++) {
671 struct radv_amdgpu_ib *ib = &child->old_ib_buffers[i];
672 uint8_t *mapped;
673
674 if (parent->base.cdw + ib->cdw > parent->base.max_dw)
675 radv_amdgpu_cs_grow(&parent->base, ib->cdw);
676
677 mapped = ws->base.buffer_map(ib->bo);
678 if (!mapped) {
679 parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
680 return;
681 }
682
683 /* Copy the IB data without the original chain link. */
684 memcpy(parent->base.buf + parent->base.cdw, mapped, 4 * ib->cdw);
685 parent->base.cdw += ib->cdw;
686 }
687 } else {
688 /* When the secondary command buffer is huge we have to copy the list of CS buffers to the
689 * parent to submit multiple IBs.
690 */
691 if (child->num_old_cs_buffers > 0) {
692 unsigned num_cs_buffers;
693 uint32_t *new_buf;
694
695 /* Compute the total number of CS buffers needed. */
696 num_cs_buffers = parent->num_old_cs_buffers + child->num_old_cs_buffers + 1;
697
698 struct radeon_cmdbuf *old_cs_buffers =
699 realloc(parent->old_cs_buffers, num_cs_buffers * sizeof(*parent->old_cs_buffers));
700 if (!old_cs_buffers) {
701 parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
702 parent->base.cdw = 0;
703 return;
704 }
705 parent->old_cs_buffers = old_cs_buffers;
706
707 /* Copy the parent CS to its list of CS buffers, so submission ordering is maintained. */
708 new_buf = malloc(parent->base.max_dw * 4);
709 if (!new_buf) {
710 parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
711 parent->base.cdw = 0;
712 return;
713 }
714 memcpy(new_buf, parent->base.buf, parent->base.max_dw * 4);
715
716 parent->old_cs_buffers[parent->num_old_cs_buffers].cdw = parent->base.cdw;
717 parent->old_cs_buffers[parent->num_old_cs_buffers].max_dw = parent->base.max_dw;
718 parent->old_cs_buffers[parent->num_old_cs_buffers].buf = new_buf;
719 parent->num_old_cs_buffers++;
720
721 /* Then, copy all child CS buffers to the parent list. */
722 for (unsigned i = 0; i < child->num_old_cs_buffers; i++) {
723 new_buf = malloc(child->old_cs_buffers[i].max_dw * 4);
724 if (!new_buf) {
725 parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
726 parent->base.cdw = 0;
727 return;
728 }
729 memcpy(new_buf, child->old_cs_buffers[i].buf, child->old_cs_buffers[i].max_dw * 4);
730
731 parent->old_cs_buffers[parent->num_old_cs_buffers].cdw = child->old_cs_buffers[i].cdw;
732 parent->old_cs_buffers[parent->num_old_cs_buffers].max_dw = child->old_cs_buffers[i].max_dw;
733 parent->old_cs_buffers[parent->num_old_cs_buffers].buf = new_buf;
734 parent->num_old_cs_buffers++;
735 }
736
737 /* Reset the parent CS before copying the child CS into it. */
738 parent->base.cdw = 0;
739 }
740 }
741
742 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
743 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
744
745 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
746 parent->base.cdw += child->base.cdw;
747 }
748 }
749
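/*
 * Build the flat BO list for a submission. Fast paths: the global BO list when
 * ws->debug_all_bos is set, or a plain copy of the handles when a single CS
 * without virtual buffers is submitted. Otherwise the per-CS handle lists, the
 * BOs backing virtual (sparse) buffers and the global BO list are merged with
 * duplicates removed.
 */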
750 static VkResult
751 radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws, struct radeon_cmdbuf **cs_array,
752 unsigned count, struct radv_amdgpu_winsys_bo **extra_bo_array,
753 unsigned num_extra_bo, struct radeon_cmdbuf *extra_cs,
754 unsigned *rnum_handles, struct drm_amdgpu_bo_list_entry **rhandles)
755 {
756 struct drm_amdgpu_bo_list_entry *handles = NULL;
757 unsigned num_handles = 0;
758
759 if (ws->debug_all_bos) {
760 handles = malloc(sizeof(handles[0]) * ws->global_bo_list.count);
761 if (!handles) {
762 return VK_ERROR_OUT_OF_HOST_MEMORY;
763 }
764
765 for (uint32_t i = 0; i < ws->global_bo_list.count; i++) {
766 handles[i].bo_handle = ws->global_bo_list.bos[i]->bo_handle;
767 handles[i].bo_priority = ws->global_bo_list.bos[i]->priority;
768 num_handles++;
769 }
770 } else if (count == 1 && !num_extra_bo && !extra_cs &&
771 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers && !ws->global_bo_list.count) {
772 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[0];
773 if (cs->num_buffers == 0)
774 return VK_SUCCESS;
775
776 handles = malloc(sizeof(handles[0]) * cs->num_buffers);
777 if (!handles)
778 return VK_ERROR_OUT_OF_HOST_MEMORY;
779
780 memcpy(handles, cs->handles, sizeof(handles[0]) * cs->num_buffers);
781 num_handles = cs->num_buffers;
782 } else {
783 unsigned total_buffer_count = num_extra_bo;
784 num_handles = num_extra_bo;
785 for (unsigned i = 0; i < count; ++i) {
786 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[i];
787 total_buffer_count += cs->num_buffers;
788 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
789 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
790 }
791
792 if (extra_cs) {
793 total_buffer_count += ((struct radv_amdgpu_cs *)extra_cs)->num_buffers;
794 }
795
796 total_buffer_count += ws->global_bo_list.count;
797
798 if (total_buffer_count == 0)
799 return VK_SUCCESS;
800
801 handles = malloc(sizeof(handles[0]) * total_buffer_count);
802 if (!handles)
803 return VK_ERROR_OUT_OF_HOST_MEMORY;
804
805 for (unsigned i = 0; i < num_extra_bo; i++) {
806 handles[i].bo_handle = extra_bo_array[i]->bo_handle;
807 handles[i].bo_priority = extra_bo_array[i]->priority;
808 }
809
810 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
811 struct radv_amdgpu_cs *cs;
812
813 if (i == count)
814 cs = (struct radv_amdgpu_cs *)extra_cs;
815 else
816 cs = (struct radv_amdgpu_cs *)cs_array[i];
817
818 if (!cs->num_buffers)
819 continue;
820
821 if (num_handles == 0 && !cs->num_virtual_buffers) {
822 memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
823 num_handles = cs->num_buffers;
824 continue;
825 }
826 int unique_bo_so_far = num_handles;
827 for (unsigned j = 0; j < cs->num_buffers; ++j) {
828 bool found = false;
829 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
830 if (handles[k].bo_handle == cs->handles[j].bo_handle) {
831 found = true;
832 break;
833 }
834 }
835 if (!found) {
836 handles[num_handles] = cs->handles[j];
837 ++num_handles;
838 }
839 }
840 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
841 struct radv_amdgpu_winsys_bo *virtual_bo =
842 radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
843 for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
844 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
845 bool found = false;
846 for (unsigned m = 0; m < num_handles; ++m) {
847 if (handles[m].bo_handle == bo->bo_handle) {
848 found = true;
849 break;
850 }
851 }
852 if (!found) {
853 handles[num_handles].bo_handle = bo->bo_handle;
854 handles[num_handles].bo_priority = bo->priority;
855 ++num_handles;
856 }
857 }
858 }
859 }
860
861 unsigned unique_bo_so_far = num_handles;
862 for (unsigned i = 0; i < ws->global_bo_list.count; ++i) {
863 struct radv_amdgpu_winsys_bo *bo = ws->global_bo_list.bos[i];
864 bool found = false;
865 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
866 if (bo->bo_handle == handles[j].bo_handle) {
867 found = true;
868 break;
869 }
870 }
871 if (!found) {
872 handles[num_handles].bo_handle = bo->bo_handle;
873 handles[num_handles].bo_priority = bo->priority;
874 ++num_handles;
875 }
876 }
877 }
878
879 *rhandles = handles;
880 *rnum_handles = num_handles;
881
882 return VK_SUCCESS;
883 }
884
885 static void
886 radv_assign_last_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request)
887 {
888 radv_amdgpu_request_to_fence(ctx, &ctx->last_submission[request->ip_type][request->ring],
889 request);
890 }
891
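/*
 * Submit a sequence of command buffers by chaining them: the 4 trailing NOPs
 * reserved by radv_amdgpu_cs_finalize() in each CS are patched into an
 * INDIRECT_BUFFER packet that points at the next CS, so the whole sequence is
 * submitted as a single IB (plus an optional preamble IB).
 */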
892 static VkResult
893 radv_amdgpu_winsys_cs_submit_chained(struct radv_amdgpu_ctx *ctx, int queue_idx,
894 struct radv_winsys_sem_info *sem_info,
895 struct radeon_cmdbuf **cs_array, unsigned cs_count,
896 struct radeon_cmdbuf *initial_preamble_cs)
897 {
898 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
899 struct radv_amdgpu_winsys *aws = cs0->ws;
900 struct drm_amdgpu_bo_list_entry *handles = NULL;
901 struct radv_amdgpu_cs_request request;
902 struct amdgpu_cs_ib_info ibs[2];
903 unsigned number_of_ibs = 1;
904 unsigned num_handles = 0;
905 VkResult result;
906
907 for (unsigned i = cs_count; i--;) {
908 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
909
910 if (cs->is_chained) {
911 assert(cs->base.cdw <= cs->base.max_dw + 4);
912 assert(get_nop_packet(cs) == PKT3_NOP_PAD); /* Other IP types shouldn't chain. */
913
914 cs->is_chained = false;
915 cs->base.buf[cs->base.cdw - 4] = PKT3_NOP_PAD;
916 cs->base.buf[cs->base.cdw - 3] = PKT3_NOP_PAD;
917 cs->base.buf[cs->base.cdw - 2] = PKT3_NOP_PAD;
918 cs->base.buf[cs->base.cdw - 1] = PKT3_NOP_PAD;
919 }
920
921 if (i + 1 < cs_count) {
922 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
923 assert(cs->base.cdw <= cs->base.max_dw + 4);
924 assert(get_nop_packet(cs) == PKT3_NOP_PAD); /* Other IP types shouldn't chain. */
925
926 cs->is_chained = true;
927
928 cs->base.buf[cs->base.cdw - 4] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
929 cs->base.buf[cs->base.cdw - 3] = next->ib.ib_mc_address;
930 cs->base.buf[cs->base.cdw - 2] = next->ib.ib_mc_address >> 32;
931 cs->base.buf[cs->base.cdw - 1] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
932 }
933 }
934
935 u_rwlock_rdlock(&aws->global_bo_list.lock);
936
937 /* Get the BO list. */
938 result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0, initial_preamble_cs,
939 &num_handles, &handles);
940 if (result != VK_SUCCESS)
941 goto fail;
942
943 /* Configure the CS request. */
944 if (initial_preamble_cs) {
945 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
946 ibs[1] = cs0->ib;
947 number_of_ibs++;
948 } else {
949 ibs[0] = cs0->ib;
950 }
951
952 request.ip_type = cs0->hw_ip;
953 request.ip_instance = 0;
954 request.ring = queue_idx;
955 request.number_of_ibs = number_of_ibs;
956 request.ibs = ibs;
957 request.handles = handles;
958 request.num_handles = num_handles;
959
960 /* Submit the CS. */
961 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
962
963 free(request.handles);
964
965 if (result != VK_SUCCESS)
966 goto fail;
967
968 radv_assign_last_submit(ctx, &request);
969
970 fail:
971 u_rwlock_rdunlock(&aws->global_bo_list.lock);
972 return result;
973 }
974
975 static VkResult
976 radv_amdgpu_winsys_cs_submit_fallback(struct radv_amdgpu_ctx *ctx, int queue_idx,
977 struct radv_winsys_sem_info *sem_info,
978 struct radeon_cmdbuf **cs_array, unsigned cs_count,
979 struct radeon_cmdbuf *initial_preamble_cs)
980 {
981 struct drm_amdgpu_bo_list_entry *handles = NULL;
982 struct radv_amdgpu_cs_request request;
983 struct amdgpu_cs_ib_info *ibs;
984 struct radv_amdgpu_cs *cs0;
985 struct radv_amdgpu_winsys *aws;
986 unsigned num_handles = 0;
987 unsigned number_of_ibs;
988 VkResult result;
989
990 assert(cs_count);
991 cs0 = radv_amdgpu_cs(cs_array[0]);
992 aws = cs0->ws;
993
994 /* Compute the number of IBs for this submit. */
995 number_of_ibs = cs_count + !!initial_preamble_cs;
996
997 u_rwlock_rdlock(&aws->global_bo_list.lock);
998
999 /* Get the BO list. */
1000 result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0, initial_preamble_cs,
1001 &num_handles, &handles);
1002 if (result != VK_SUCCESS) {
1003 goto fail;
1004 }
1005
1006 ibs = malloc(number_of_ibs * sizeof(*ibs));
1007 if (!ibs) {
1008 free(handles);
1009 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1010 goto fail;
1011 }
1012
1013 /* Configure the CS request. */
1014 if (initial_preamble_cs)
1015 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
1016
1017 for (unsigned i = 0; i < cs_count; i++) {
1018 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1019
1020 ibs[i + !!initial_preamble_cs] = cs->ib;
1021
1022 if (cs->is_chained) {
1023 assert(get_nop_packet(cs) == PKT3_NOP_PAD); /* Other IP types shouldn't chain. */
1024
1025 cs->base.buf[cs->base.cdw - 4] = PKT3_NOP_PAD;
1026 cs->base.buf[cs->base.cdw - 3] = PKT3_NOP_PAD;
1027 cs->base.buf[cs->base.cdw - 2] = PKT3_NOP_PAD;
1028 cs->base.buf[cs->base.cdw - 1] = PKT3_NOP_PAD;
1029 cs->is_chained = false;
1030 }
1031 }
1032
1033 request.ip_type = cs0->hw_ip;
1034 request.ip_instance = 0;
1035 request.ring = queue_idx;
1036 request.handles = handles;
1037 request.num_handles = num_handles;
1038 request.number_of_ibs = number_of_ibs;
1039 request.ibs = ibs;
1040
1041 /* Submit the CS. */
1042 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1043
1044 free(request.handles);
1045 free(ibs);
1046
1047 if (result != VK_SUCCESS)
1048 goto fail;
1049
1050 radv_assign_last_submit(ctx, &request);
1051
1052 fail:
1053 u_rwlock_rdunlock(&aws->global_bo_list.lock);
1054 return result;
1055 }
1056
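/*
 * Submission path when IB BOs cannot be used (see ring_can_use_ib_bos()): the
 * CS contents live in system memory, so they are copied into freshly allocated
 * GPU buffers here. Small CSes are packed together into one IB (up to
 * GFX6_MAX_CS_SIZE dwords), while a CS that overflowed into old_cs_buffers is
 * split across several IBs within a single request.
 */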
1057 static VkResult
1058 radv_amdgpu_winsys_cs_submit_sysmem(struct radv_amdgpu_ctx *ctx, int queue_idx,
1059 struct radv_winsys_sem_info *sem_info,
1060 struct radeon_cmdbuf **cs_array, unsigned cs_count,
1061 struct radeon_cmdbuf *initial_preamble_cs,
1062 struct radeon_cmdbuf *continue_preamble_cs)
1063 {
1064 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
1065 struct radeon_winsys *ws = (struct radeon_winsys *)cs0->ws;
1066 struct radv_amdgpu_winsys *aws = cs0->ws;
1067 struct radv_amdgpu_cs_request request;
1068 uint32_t pad_word = get_nop_packet(cs0);
1069 enum amd_ip_type ip_type = cs0->hw_ip;
1070 uint32_t ib_pad_dw_mask = cs0->ws->info.ib_pad_dw_mask[ip_type];
1071 bool emit_signal_sem = sem_info->cs_emit_signal;
1072 VkResult result;
1073
1074 assert(cs_count);
1075
1076 for (unsigned i = 0; i < cs_count;) {
1077 struct amdgpu_cs_ib_info *ibs;
1078 struct radeon_winsys_bo **bos;
1079 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
1080 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1081 struct drm_amdgpu_bo_list_entry *handles = NULL;
1082 unsigned num_handles = 0;
1083 unsigned number_of_ibs;
1084 uint32_t *ptr;
1085 unsigned cnt = 0;
1086
1087 /* Compute the number of IBs for this submit. */
1088 number_of_ibs = cs->num_old_cs_buffers + 1;
1089
1090 ibs = malloc(number_of_ibs * sizeof(*ibs));
1091 if (!ibs)
1092 return VK_ERROR_OUT_OF_HOST_MEMORY;
1093
1094 bos = malloc(number_of_ibs * sizeof(*bos));
1095 if (!bos) {
1096 free(ibs);
1097 return VK_ERROR_OUT_OF_HOST_MEMORY;
1098 }
1099
1100 if (number_of_ibs > 1) {
1101 /* Special path when the maximum size in dwords has
1102 * been reached because we need to handle more than one
1103 * IB per submit.
1104 */
1105 struct radeon_cmdbuf **new_cs_array;
1106 unsigned idx = 0;
1107
1108 new_cs_array = malloc(number_of_ibs * sizeof(*new_cs_array));
1109 assert(new_cs_array);
1110
1111 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
1112 new_cs_array[idx++] = &cs->old_cs_buffers[j];
1113 new_cs_array[idx++] = cs_array[i];
1114
1115 for (unsigned j = 0; j < number_of_ibs; j++) {
1116 struct radeon_cmdbuf *rcs = new_cs_array[j];
1117 bool needs_preamble = preamble_cs && j == 0;
1118 unsigned pad_words = 0;
1119 unsigned size = 0;
1120
1121 if (needs_preamble)
1122 size += preamble_cs->cdw;
1123 size += rcs->cdw;
1124
1125 assert(size < GFX6_MAX_CS_SIZE);
1126
1127 while (!size || (size & ib_pad_dw_mask)) {
1128 size++;
1129 pad_words++;
1130 }
1131
1132 ws->buffer_create(
1133 ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
1134 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY |
1135 RADEON_FLAG_GTT_WC, RADV_BO_PRIORITY_CS, 0, &bos[j]);
1136 ptr = ws->buffer_map(bos[j]);
1137
1138 if (needs_preamble) {
1139 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1140 ptr += preamble_cs->cdw;
1141 }
1142
1143 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1144 ptr += rcs->cdw;
1145
1146 for (unsigned k = 0; k < pad_words; ++k)
1147 *ptr++ = pad_word;
1148
1149 ibs[j].size = size;
1150 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1151 ibs[j].flags = 0;
1152 }
1153
1154 cnt++;
1155 free(new_cs_array);
1156 } else {
1157 unsigned pad_words = 0;
1158 unsigned size = 0;
1159
1160 if (preamble_cs)
1161 size += preamble_cs->cdw;
1162
1163 while (i + cnt < cs_count &&
1164 GFX6_MAX_CS_SIZE - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1165 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1166 ++cnt;
1167 }
1168
1169 while (!size || (size & ib_pad_dw_mask)) {
1170 size++;
1171 pad_words++;
1172 }
1173 assert(cnt);
1174
1175 ws->buffer_create(
1176 ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
1177 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY |
1178 RADEON_FLAG_GTT_WC, RADV_BO_PRIORITY_CS, 0, &bos[0]);
1179 ptr = ws->buffer_map(bos[0]);
1180
1181 if (preamble_cs) {
1182 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1183 ptr += preamble_cs->cdw;
1184 }
1185
1186 for (unsigned j = 0; j < cnt; ++j) {
1187 struct radv_amdgpu_cs *cs2 = radv_amdgpu_cs(cs_array[i + j]);
1188 memcpy(ptr, cs2->base.buf, 4 * cs2->base.cdw);
1189 ptr += cs2->base.cdw;
1190 }
1191
1192 for (unsigned j = 0; j < pad_words; ++j)
1193 *ptr++ = pad_word;
1194
1195 ibs[0].size = size;
1196 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1197 ibs[0].flags = 0;
1198 }
1199
1200 u_rwlock_rdlock(&aws->global_bo_list.lock);
1201
1202 result =
1203 radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt, (struct radv_amdgpu_winsys_bo **)bos,
1204 number_of_ibs, preamble_cs, &num_handles, &handles);
1205 if (result != VK_SUCCESS) {
1206 free(ibs);
1207 free(bos);
1208 u_rwlock_rdunlock(&aws->global_bo_list.lock);
1209 return result;
1210 }
1211
1212 request.ip_type = cs0->hw_ip;
1213 request.ip_instance = 0;
1214 request.ring = queue_idx;
1215 request.handles = handles;
1216 request.num_handles = num_handles;
1217 request.number_of_ibs = number_of_ibs;
1218 request.ibs = ibs;
1219
1220 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1221 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1222
1223 free(request.handles);
1224 u_rwlock_rdunlock(&aws->global_bo_list.lock);
1225
1226 for (unsigned j = 0; j < number_of_ibs; j++) {
1227 ws->buffer_destroy(ws, bos[j]);
1228 }
1229
1230 free(ibs);
1231 free(bos);
1232
1233 if (result != VK_SUCCESS)
1234 return result;
1235
1236 i += cnt;
1237 }
1238
1239 radv_assign_last_submit(ctx, &request);
1240
1241 return VK_SUCCESS;
1242 }
1243
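/*
 * "Empty" submission: no command buffers, only synchronization. All wait
 * syncobjs are merged into the per-queue syncobj via sync-file export/import,
 * and that syncobj is then transferred or exported into every signal syncobj,
 * so waiters observe the correct ordering without a real CS submit.
 */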
1244 static VkResult
1245 radv_amdgpu_cs_submit_zero(struct radv_amdgpu_ctx *ctx, enum amd_ip_type ip_type, int queue_idx,
1246 struct radv_winsys_sem_info *sem_info)
1247 {
1248 unsigned hw_ip = ip_type;
1249 unsigned queue_syncobj = radv_amdgpu_ctx_queue_syncobj(ctx, hw_ip, queue_idx);
1250 int ret;
1251
1252 if (!queue_syncobj)
1253 return VK_ERROR_OUT_OF_HOST_MEMORY;
1254
1255 if (sem_info->wait.syncobj_count || sem_info->wait.timeline_syncobj_count) {
1256 int fd;
1257 ret = amdgpu_cs_syncobj_export_sync_file(ctx->ws->dev, queue_syncobj, &fd);
1258 if (ret < 0)
1259 return VK_ERROR_DEVICE_LOST;
1260
1261 for (unsigned i = 0; i < sem_info->wait.syncobj_count; ++i) {
1262 int fd2;
1263 ret = amdgpu_cs_syncobj_export_sync_file(ctx->ws->dev, sem_info->wait.syncobj[i], &fd2);
1264 if (ret < 0) {
1265 close(fd);
1266 return VK_ERROR_DEVICE_LOST;
1267 }
1268
1269 sync_accumulate("radv", &fd, fd2);
1270 close(fd2);
1271 }
1272 for (unsigned i = 0; i < sem_info->wait.timeline_syncobj_count; ++i) {
1273 int fd2;
1274 ret = amdgpu_cs_syncobj_export_sync_file2(
1275 ctx->ws->dev, sem_info->wait.syncobj[i + sem_info->wait.syncobj_count],
1276 sem_info->wait.points[i], 0, &fd2);
1277 if (ret < 0) {
1278 /* This works around a kernel bug where the fence isn't copied if it is already
1279 * signalled. Since it is already signalled it is totally fine to not wait on it.
1280 *
1281 * kernel patch: https://patchwork.freedesktop.org/patch/465583/ */
1282 uint64_t point;
1283 ret = amdgpu_cs_syncobj_query2(
1284 ctx->ws->dev, &sem_info->wait.syncobj[i + sem_info->wait.syncobj_count], &point, 1,
1285 0);
1286 if (!ret && point >= sem_info->wait.points[i])
1287 continue;
1288
1289 close(fd);
1290 return VK_ERROR_DEVICE_LOST;
1291 }
1292
1293 sync_accumulate("radv", &fd, fd2);
1294 close(fd2);
1295 }
1296 ret = amdgpu_cs_syncobj_import_sync_file(ctx->ws->dev, queue_syncobj, fd);
1297 close(fd);
1298 if (ret < 0)
1299 return VK_ERROR_DEVICE_LOST;
1300
1301 ctx->queue_syncobj_wait[hw_ip][queue_idx] = true;
1302 }
1303
1304 for (unsigned i = 0; i < sem_info->signal.syncobj_count; ++i) {
1305 uint32_t dst_handle = sem_info->signal.syncobj[i];
1306 uint32_t src_handle = queue_syncobj;
1307
1308 if (ctx->ws->info.has_timeline_syncobj) {
1309 ret = amdgpu_cs_syncobj_transfer(ctx->ws->dev, dst_handle, 0, src_handle, 0, 0);
1310 if (ret < 0)
1311 return VK_ERROR_DEVICE_LOST;
1312 } else {
1313 int fd;
1314 ret = amdgpu_cs_syncobj_export_sync_file(ctx->ws->dev, src_handle, &fd);
1315 if (ret < 0)
1316 return VK_ERROR_DEVICE_LOST;
1317
1318 ret = amdgpu_cs_syncobj_import_sync_file(ctx->ws->dev, dst_handle, fd);
1319 close(fd);
1320 if (ret < 0)
1321 return VK_ERROR_DEVICE_LOST;
1322 }
1323 }
1324 for (unsigned i = 0; i < sem_info->signal.timeline_syncobj_count; ++i) {
1325 ret = amdgpu_cs_syncobj_transfer(ctx->ws->dev,
1326 sem_info->signal.syncobj[i + sem_info->signal.syncobj_count],
1327 sem_info->signal.points[i], queue_syncobj, 0, 0);
1328 if (ret < 0)
1329 return VK_ERROR_DEVICE_LOST;
1330 }
1331 return VK_SUCCESS;
1332 }
1333
1334 static VkResult
1335 radv_amdgpu_winsys_cs_submit_internal(struct radv_amdgpu_ctx *ctx,
1336 const struct radv_winsys_submit_info *submit,
1337 struct radv_winsys_sem_info *sem_info, bool can_patch)
1338 {
1339 VkResult result;
1340
1341 assert(sem_info);
1342 if (!submit->cs_count) {
1343 result = radv_amdgpu_cs_submit_zero(ctx, submit->ip_type, submit->queue_index, sem_info);
1344 } else if (!ring_can_use_ib_bos(ctx->ws, submit->ip_type)) {
1345 result = radv_amdgpu_winsys_cs_submit_sysmem(
1346 ctx, submit->queue_index, sem_info, submit->cs_array, submit->cs_count,
1347 submit->initial_preamble_cs, submit->continue_preamble_cs);
1348 } else if (can_patch) {
1349 result =
1350 radv_amdgpu_winsys_cs_submit_chained(ctx, submit->queue_index, sem_info, submit->cs_array,
1351 submit->cs_count, submit->initial_preamble_cs);
1352 } else {
1353 result =
1354 radv_amdgpu_winsys_cs_submit_fallback(ctx, submit->queue_index, sem_info, submit->cs_array,
1355 submit->cs_count, submit->initial_preamble_cs);
1356 }
1357
1358 return result;
1359 }
1360
1361 static VkResult
1362 radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx, uint32_t submit_count,
1363 const struct radv_winsys_submit_info *submits, uint32_t wait_count,
1364 const struct vk_sync_wait *waits, uint32_t signal_count,
1365 const struct vk_sync_signal *signals, bool can_patch)
1366 {
1367 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1368 struct radv_amdgpu_winsys *ws = ctx->ws;
1369 VkResult result;
1370 unsigned wait_idx = 0, signal_idx = 0;
1371
1372 STACK_ARRAY(uint64_t, wait_points, wait_count);
1373 STACK_ARRAY(uint32_t, wait_syncobj, wait_count);
1374 STACK_ARRAY(uint64_t, signal_points, signal_count);
1375 STACK_ARRAY(uint32_t, signal_syncobj, signal_count);
1376
1377 if (!wait_points || !wait_syncobj || !signal_points || !signal_syncobj) {
1378 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1379 goto out;
1380 }
1381
1382 for (uint32_t i = 0; i < wait_count; ++i) {
1383 if (waits[i].sync->type == &vk_sync_dummy_type)
1384 continue;
1385
1386 assert(waits[i].sync->type == &ws->syncobj_sync_type);
1387 wait_syncobj[wait_idx] = ((struct vk_drm_syncobj *)waits[i].sync)->syncobj;
1388 wait_points[wait_idx] = waits[i].wait_value;
1389 ++wait_idx;
1390 }
1391
1392 for (uint32_t i = 0; i < signal_count; ++i) {
1393 if (signals[i].sync->type == &vk_sync_dummy_type)
1394 continue;
1395
1396 assert(signals[i].sync->type == &ws->syncobj_sync_type);
1397 signal_syncobj[signal_idx] = ((struct vk_drm_syncobj *)signals[i].sync)->syncobj;
1398 signal_points[signal_idx] = signals[i].signal_value;
1399 ++signal_idx;
1400 }
1401
1402 assert(signal_idx <= signal_count);
1403 assert(wait_idx <= wait_count);
1404
1405 const uint32_t wait_timeline_syncobj_count =
1406 (ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE) ? wait_idx : 0;
1407 const uint32_t signal_timeline_syncobj_count =
1408 (ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE) ? signal_idx : 0;
1409
1410 struct radv_winsys_sem_info sem_info = {
1411 .wait =
1412 {
1413 .points = wait_points,
1414 .syncobj = wait_syncobj,
1415 .timeline_syncobj_count = wait_timeline_syncobj_count,
1416 .syncobj_count = wait_idx - wait_timeline_syncobj_count,
1417 },
1418 .signal =
1419 {
1420 .points = signal_points,
1421 .syncobj = signal_syncobj,
1422 .timeline_syncobj_count = signal_timeline_syncobj_count,
1423 .syncobj_count = signal_idx - signal_timeline_syncobj_count,
1424 },
1425 .cs_emit_wait = true,
1426 .cs_emit_signal = true,
1427 };
1428
1429 /* Should submit to at least 1 queue. */
1430 assert(submit_count);
1431
1432 if (submit_count == 1) {
1433 result = radv_amdgpu_winsys_cs_submit_internal(ctx, &submits[0], &sem_info, can_patch);
1434 } else {
1435 /* Multiple queue submissions without gang submit.
1436 * This code path will submit each item separately and add the
1437 * previous submission as a scheduled dependency to the next one.
1438 */
1439
1440 assert(ws->info.has_scheduled_fence_dependency);
1441 struct radv_amdgpu_fence *next_dependency = NULL;
1442
1443 for (unsigned i = 0; i < submit_count; ++i) {
1444 sem_info.scheduled_dependency = next_dependency;
1445 sem_info.cs_emit_wait = i == 0;
1446 sem_info.cs_emit_signal = i == submit_count - 1;
1447
1448 result = radv_amdgpu_winsys_cs_submit_internal(ctx, &submits[i], &sem_info, can_patch);
1449
1450 if (result != VK_SUCCESS)
1451 goto out;
1452
1453 next_dependency = &ctx->last_submission[submits[i].ip_type][submits[i].queue_index];
1454 }
1455 }
1456
1457 out:
1458 STACK_ARRAY_FINISH(wait_points);
1459 STACK_ARRAY_FINISH(wait_syncobj);
1460 STACK_ARRAY_FINISH(signal_points);
1461 STACK_ARRAY_FINISH(signal_syncobj);
1462 return result;
1463 }
1464
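/*
 * Resolve a GPU VA to a CPU pointer for the IB parser: first scan the CS's
 * current and old IB buffers, then the global BO list, and CPU-map the BO that
 * contains the address. Returns NULL if no mapping is found.
 */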
1465 static void *
1466 radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1467 {
1468 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1469 void *ret = NULL;
1470
1471 if (!cs->ib_buffer)
1472 return NULL;
1473 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1474 struct radv_amdgpu_winsys_bo *bo;
1475
1476 bo = (struct radv_amdgpu_winsys_bo *)(i == cs->num_old_ib_buffers ? cs->ib_buffer
1477 : cs->old_ib_buffers[i].bo);
1478 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1479 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1480 return (char *)ret + (addr - bo->base.va);
1481 }
1482 }
1483 u_rwlock_rdlock(&cs->ws->global_bo_list.lock);
1484 for (uint32_t i = 0; i < cs->ws->global_bo_list.count; i++) {
1485 struct radv_amdgpu_winsys_bo *bo = cs->ws->global_bo_list.bos[i];
1486 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1487 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1488 u_rwlock_rdunlock(&cs->ws->global_bo_list.lock);
1489 return (char *)ret + (addr - bo->base.va);
1490 }
1491 }
1492 }
1493 u_rwlock_rdunlock(&cs->ws->global_bo_list.lock);
1494
1495 return ret;
1496 }
1497
1498 static void
1499 radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs, FILE *file, const int *trace_ids,
1500 int trace_id_count)
1501 {
1502 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1503 void *ib = cs->base.buf;
1504 int num_dw = cs->base.cdw;
1505
1506 if (cs->use_ib) {
1507 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1508 num_dw = cs->ib.size;
1509 }
1510 assert(ib);
1511 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB", cs->ws->info.gfx_level,
1512 radv_amdgpu_winsys_get_cpu_addr, cs);
1513 }
1514
1515 static uint32_t
1516 radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1517 {
1518 switch (radv_priority) {
1519 case RADEON_CTX_PRIORITY_REALTIME:
1520 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1521 case RADEON_CTX_PRIORITY_HIGH:
1522 return AMDGPU_CTX_PRIORITY_HIGH;
1523 case RADEON_CTX_PRIORITY_MEDIUM:
1524 return AMDGPU_CTX_PRIORITY_NORMAL;
1525 case RADEON_CTX_PRIORITY_LOW:
1526 return AMDGPU_CTX_PRIORITY_LOW;
1527 default:
1528 unreachable("Invalid context priority");
1529 }
1530 }
1531
1532 static VkResult
1533 radv_amdgpu_ctx_create(struct radeon_winsys *_ws, enum radeon_ctx_priority priority,
1534 struct radeon_winsys_ctx **rctx)
1535 {
1536 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1537 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1538 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1539 VkResult result;
1540 int r;
1541
1542 if (!ctx)
1543 return VK_ERROR_OUT_OF_HOST_MEMORY;
1544
1545 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1546 if (r && r == -EACCES) {
1547 result = VK_ERROR_NOT_PERMITTED_KHR;
1548 goto fail_create;
1549 } else if (r) {
1550 fprintf(stderr, "radv/amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1551 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1552 goto fail_create;
1553 }
1554 ctx->ws = ws;
1555
1556 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1557 result = ws->base.buffer_create(&ws->base, 4096, 8, RADEON_DOMAIN_GTT,
1558 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
1559 RADV_BO_PRIORITY_CS, 0, &ctx->fence_bo);
1560 if (result != VK_SUCCESS) {
1561 goto fail_alloc;
1562 }
1563
1564 *rctx = (struct radeon_winsys_ctx *)ctx;
1565 return VK_SUCCESS;
1566
1567 fail_alloc:
1568 amdgpu_cs_ctx_free(ctx->ctx);
1569 fail_create:
1570 FREE(ctx);
1571 return result;
1572 }
1573
1574 static void
1575 radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1576 {
1577 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1578
1579 for (unsigned ip = 0; ip <= AMDGPU_HW_IP_NUM; ++ip) {
1580 for (unsigned ring = 0; ring < MAX_RINGS_PER_TYPE; ++ring) {
1581 if (ctx->queue_syncobj[ip][ring])
1582 amdgpu_cs_destroy_syncobj(ctx->ws->dev, ctx->queue_syncobj[ip][ring]);
1583 }
1584 }
1585
1586 ctx->ws->base.buffer_destroy(&ctx->ws->base, ctx->fence_bo);
1587 amdgpu_cs_ctx_free(ctx->ctx);
1588 FREE(ctx);
1589 }
1590
1591 static uint32_t
1592 radv_amdgpu_ctx_queue_syncobj(struct radv_amdgpu_ctx *ctx, unsigned ip, unsigned ring)
1593 {
1594 uint32_t *syncobj = &ctx->queue_syncobj[ip][ring];
1595 if (!*syncobj) {
1596 amdgpu_cs_create_syncobj2(ctx->ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED, syncobj);
1597 }
1598 return *syncobj;
1599 }
1600
1601 static bool
1602 radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx, enum amd_ip_type ip_type, int ring_index)
1603 {
1604 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1605
1606 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1607 uint32_t expired;
1608 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1609 1000000000ull, 0, &expired);
1610
1611 if (ret || !expired)
1612 return false;
1613 }
1614
1615 return true;
1616 }
1617
1618 static uint32_t
1619 radv_to_amdgpu_pstate(enum radeon_ctx_pstate radv_pstate)
1620 {
1621 switch (radv_pstate) {
1622 case RADEON_CTX_PSTATE_NONE:
1623 return AMDGPU_CTX_STABLE_PSTATE_NONE;
1624 case RADEON_CTX_PSTATE_STANDARD:
1625 return AMDGPU_CTX_STABLE_PSTATE_STANDARD;
1626 case RADEON_CTX_PSTATE_MIN_SCLK:
1627 return AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
1628 case RADEON_CTX_PSTATE_MIN_MCLK:
1629 return AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
1630 case RADEON_CTX_PSTATE_PEAK:
1631 return AMDGPU_CTX_STABLE_PSTATE_PEAK;
1632 default:
1633 unreachable("Invalid pstate");
1634 }
1635 }
1636
1637 static int
1638 radv_amdgpu_ctx_set_pstate(struct radeon_winsys_ctx *rwctx, enum radeon_ctx_pstate pstate)
1639 {
1640 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1641 uint32_t amdgpu_pstate = radv_to_amdgpu_pstate(pstate);
1642 return amdgpu_cs_ctx_stable_pstate(ctx->ctx, AMDGPU_CTX_OP_SET_STABLE_PSTATE, amdgpu_pstate, NULL);
1643 }
1644
1645 static void *
1646 radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts, uint32_t queue_syncobj,
1647 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1648 {
1649 unsigned count = counts->syncobj_count + (queue_syncobj ? 1 : 0);
1650 struct drm_amdgpu_cs_chunk_sem *syncobj =
1651 malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * count);
1652 if (!syncobj)
1653 return NULL;
1654
1655 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1656 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1657 sem->handle = counts->syncobj[i];
1658 }
1659
1660 if (queue_syncobj)
1661 syncobj[counts->syncobj_count].handle = queue_syncobj;
1662
1663 chunk->chunk_id = chunk_id;
1664 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * count;
1665 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1666 return syncobj;
1667 }
1668
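/* Same as above, but for kernels with timeline syncobj support: binary
 * syncobjs are stored first with point = 0, followed by the timeline syncobjs
 * with their points, and finally the optional per-queue syncobj. Timeline
 * entries carry DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, which (for wait
 * chunks) allows waiting on points whose signal has not been submitted yet.
 */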
static void *
radv_amdgpu_cs_alloc_timeline_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                            uint32_t queue_syncobj,
                                            struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
   uint32_t count =
      counts->syncobj_count + counts->timeline_syncobj_count + (queue_syncobj ? 1 : 0);
   struct drm_amdgpu_cs_chunk_syncobj *syncobj =
      malloc(sizeof(struct drm_amdgpu_cs_chunk_syncobj) * count);
   if (!syncobj)
      return NULL;

   for (unsigned i = 0; i < counts->syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i];
      sem->handle = counts->syncobj[i];
      sem->flags = 0;
      sem->point = 0;
   }

   for (unsigned i = 0; i < counts->timeline_syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i + counts->syncobj_count];
      sem->handle = counts->syncobj[i + counts->syncobj_count];
      sem->flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
      sem->point = counts->points[i];
   }

   if (queue_syncobj) {
      syncobj[count - 1].handle = queue_syncobj;
      syncobj[count - 1].flags = 0;
      syncobj[count - 1].point = 0;
   }

   chunk->chunk_id = chunk_id;
   chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4 * count;
   chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
   return syncobj;
}

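/* Submissions to the multimedia IPs (UVD/VCE/VCN/JPEG) do not use a user
 * fence, so no AMDGPU_CHUNK_ID_FENCE chunk is emitted for them.
 */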
static bool
radv_amdgpu_cs_has_user_fence(struct radv_amdgpu_cs_request *request)
{
   return request->ip_type != AMDGPU_HW_IP_UVD &&
          request->ip_type != AMDGPU_HW_IP_VCE &&
          request->ip_type != AMDGPU_HW_IP_UVD_ENC &&
          request->ip_type != AMDGPU_HW_IP_VCN_DEC &&
          request->ip_type != AMDGPU_HW_IP_VCN_ENC &&
          request->ip_type != AMDGPU_HW_IP_VCN_JPEG;
}

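/* Submit a set of IBs on the given context by assembling the raw CS ioctl
 * chunks by hand: one IB chunk per IB, an optional user-fence chunk, an
 * optional scheduled-dependency chunk, optional syncobj wait/signal chunks
 * and, on recent kernels, a BO-handles chunk describing the buffer list.
 */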
static VkResult
radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request,
                      struct radv_winsys_sem_info *sem_info)
{
   int r;
   int num_chunks;
   int size;
   struct drm_amdgpu_cs_chunk *chunks;
   struct drm_amdgpu_cs_chunk_data *chunk_data;
   struct drm_amdgpu_cs_chunk_dep chunk_dep;
   bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
   struct drm_amdgpu_bo_list_in bo_list_in;
   void *wait_syncobj = NULL, *signal_syncobj = NULL;
   int i;
   uint32_t bo_list = 0;
   VkResult result = VK_SUCCESS;
   bool has_user_fence = radv_amdgpu_cs_has_user_fence(request);
   uint32_t queue_syncobj = radv_amdgpu_ctx_queue_syncobj(ctx, request->ip_type, request->ring);
   bool *queue_syncobj_wait = &ctx->queue_syncobj_wait[request->ip_type][request->ring];

   if (!queue_syncobj)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   size = request->number_of_ibs + 1 + (has_user_fence ? 1 : 0) + (!use_bo_list_create ? 1 : 0) +
          3 + !!sem_info->scheduled_dependency;

   chunks = malloc(sizeof(chunks[0]) * size);
   if (!chunks)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   size = request->number_of_ibs + (has_user_fence ? 1 : 0);

   chunk_data = malloc(sizeof(chunk_data[0]) * size);
   if (!chunk_data) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto error_out;
   }

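   /* One AMDGPU_CHUNK_ID_IB chunk per IB: each points at a
    * drm_amdgpu_cs_chunk_ib in chunk_data describing the IB's GPU address,
    * size in bytes and target IP/ring.
    */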
   num_chunks = request->number_of_ibs;
   for (i = 0; i < request->number_of_ibs; i++) {
      struct amdgpu_cs_ib_info *ib;
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

      ib = &request->ibs[i];

      chunk_data[i].ib_data._pad = 0;
      chunk_data[i].ib_data.va_start = ib->ib_mc_address;
      chunk_data[i].ib_data.ib_bytes = ib->size * 4;
      chunk_data[i].ib_data.ip_type = request->ip_type;
      chunk_data[i].ib_data.ip_instance = request->ip_instance;
      chunk_data[i].ib_data.ring = request->ring;
      chunk_data[i].ib_data.flags = ib->flags;
   }

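   /* Optional user fence: the kernel writes the fence value at the given
    * offset inside fence_bo, one 64-bit slot per (IP type, ring) pair.
    */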
   if (has_user_fence) {
      i = num_chunks++;
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

      struct amdgpu_cs_fence_info fence_info;
      fence_info.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
      fence_info.offset = (request->ip_type * MAX_RINGS_PER_TYPE + request->ring) * sizeof(uint64_t);
      amdgpu_cs_chunk_fence_info_to_data(&fence_info, &chunk_data[i]);
   }

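   /* Optional AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES chunk, built from the
    * scheduled-dependency fence when one was requested for this submission.
    */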
   if (sem_info->scheduled_dependency) {
      amdgpu_cs_chunk_fence_to_dep(&sem_info->scheduled_dependency->fence, &chunk_dep);
      i = num_chunks++;
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_dep;
   }

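   /* Syncobj waits: a wait chunk is emitted if there are semaphores to wait
    * on, or if a previous submission flagged the per-queue syncobj as needing
    * a wait. The per-queue syncobj is always folded into the chunk, and both
    * flags are cleared so the same waits are not emitted again for follow-up
    * submissions that reuse this sem_info.
    */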
   if (sem_info->cs_emit_wait && (sem_info->wait.timeline_syncobj_count ||
                                  sem_info->wait.syncobj_count || *queue_syncobj_wait)) {

      if (ctx->ws->info.has_timeline_syncobj) {
         wait_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(
            &sem_info->wait, queue_syncobj, &chunks[num_chunks],
            AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT);
      } else {
         wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(
            &sem_info->wait, queue_syncobj, &chunks[num_chunks], AMDGPU_CHUNK_ID_SYNCOBJ_IN);
      }
      if (!wait_syncobj) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }
      num_chunks++;

      sem_info->cs_emit_wait = false;
      *queue_syncobj_wait = false;
   }

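   /* Syncobj signals: the per-queue syncobj is signaled together with the
    * user-provided semaphores so later submissions can synchronize on it.
    */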
   if (sem_info->cs_emit_signal) {
      if (ctx->ws->info.has_timeline_syncobj) {
         signal_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(
            &sem_info->signal, queue_syncobj, &chunks[num_chunks],
            AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL);
      } else {
         signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(
            &sem_info->signal, queue_syncobj, &chunks[num_chunks], AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
      }
      if (!signal_syncobj) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }
      num_chunks++;
   }

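   /* The buffer list can be provided in two ways: older kernels (amdgpu DRM
    * minor < 27) need an explicit bo_list handle created up front, while
    * newer ones accept an AMDGPU_CHUNK_ID_BO_HANDLES chunk inline in the CS
    * ioctl.
    */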
   if (use_bo_list_create) {
      /* Legacy path creating the buffer list handle and passing it
       * to the CS ioctl.
       */
      r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
                                    request->handles, &bo_list);
      if (r) {
         if (r == -ENOMEM) {
            fprintf(stderr, "radv/amdgpu: Not enough memory for buffer list creation.\n");
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
         } else {
            fprintf(stderr, "radv/amdgpu: buffer list creation failed (%d).\n", r);
            result = VK_ERROR_UNKNOWN;
         }
         goto error_out;
      }
   } else {
      /* Standard path passing the buffer list via the CS ioctl. */
      bo_list_in.operation = ~0;
      bo_list_in.list_handle = ~0;
      bo_list_in.bo_number = request->num_handles;
      bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
      bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;

      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
      num_chunks++;
   }

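   /* Everything is in place: hand the chunk array to the kernel. Common
    * errnos are mapped to Vulkan errors below: -ENOMEM becomes
    * VK_ERROR_OUT_OF_HOST_MEMORY and -ECANCELED (lost context) becomes
    * VK_ERROR_DEVICE_LOST.
    */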
   r = amdgpu_cs_submit_raw2(ctx->ws->dev, ctx->ctx, bo_list, num_chunks, chunks, &request->seq_no);

   if (r) {
      if (r == -ENOMEM) {
         fprintf(stderr, "radv/amdgpu: Not enough memory for command submission.\n");
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else if (r == -ECANCELED) {
         fprintf(stderr, "radv/amdgpu: The CS has been cancelled because the context is lost.\n");
         result = VK_ERROR_DEVICE_LOST;
      } else {
         fprintf(stderr,
                 "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n",
                 r);
         result = VK_ERROR_UNKNOWN;
      }
   }

   if (bo_list)
      amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);

error_out:
   free(chunks);
   free(chunk_data);
   free(wait_syncobj);
   free(signal_syncobj);
   return result;
}

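/* Plug the amdgpu CS/context implementation into the generic winsys vtable. */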
void
radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
   ws->base.ctx_create = radv_amdgpu_ctx_create;
   ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
   ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
   ws->base.ctx_set_pstate = radv_amdgpu_ctx_set_pstate;
   ws->base.cs_domain = radv_amdgpu_cs_domain;
   ws->base.cs_create = radv_amdgpu_cs_create;
   ws->base.cs_destroy = radv_amdgpu_cs_destroy;
   ws->base.cs_grow = radv_amdgpu_cs_grow;
   ws->base.cs_finalize = radv_amdgpu_cs_finalize;
   ws->base.cs_reset = radv_amdgpu_cs_reset;
   ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
   ws->base.cs_add_buffers = radv_amdgpu_cs_add_buffers;
   ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
   ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
   ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
}