1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include "drm-uapi/amdgpu_drm.h"
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31 
32 #include "util/u_memory.h"
33 #include "ac_debug.h"
34 #include "radv_radeon_winsys.h"
35 #include "radv_amdgpu_cs.h"
36 #include "radv_amdgpu_bo.h"
37 #include "sid.h"
38 
39 
40 enum {
41 	VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
42 };
43 
44 struct radv_amdgpu_cs {
45 	struct radeon_cmdbuf base;
46 	struct radv_amdgpu_winsys *ws;
47 
48 	struct amdgpu_cs_ib_info    ib;
49 
50 	struct radeon_winsys_bo     *ib_buffer;
51 	uint8_t                 *ib_mapped;
52 	unsigned                    max_num_buffers;
53 	unsigned                    num_buffers;
54 	struct drm_amdgpu_bo_list_entry *handles;
55 
56 	struct radeon_winsys_bo     **old_ib_buffers;
57 	unsigned                    num_old_ib_buffers;
58 	unsigned                    max_num_old_ib_buffers;
59 	unsigned                    *ib_size_ptr;
60 	VkResult                    status;
61 	bool                        is_chained;
62 
63 	int                         buffer_hash_table[1024];
64 	unsigned                    hw_ip;
65 
66 	unsigned                    num_virtual_buffers;
67 	unsigned                    max_num_virtual_buffers;
68 	struct radeon_winsys_bo     **virtual_buffers;
69 	int                         *virtual_buffer_hash_table;
70 
71 	/* For chips that don't support chaining. */
72 	struct radeon_cmdbuf     *old_cs_buffers;
73 	unsigned                    num_old_cs_buffers;
74 };
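/* Editorial note (not in the original source): this struct backs both command
 * recording modes used below. When the winsys sets use_ib_bos, commands are
 * written directly into ib_buffer (a GPU-visible BO mapped at ib_mapped) and
 * grown by chaining to a new BO; old BOs are kept in old_ib_buffers until the
 * CS is reset. Without IB BOs (chips that cannot chain), commands go into a
 * malloc'ed buffer, and full buffers are parked in old_cs_buffers so they can
 * be copied into GPU memory at submit time.
 */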
75 
76 static inline struct radv_amdgpu_cs *
77 radv_amdgpu_cs(struct radeon_cmdbuf *base)
78 {
79 	return (struct radv_amdgpu_cs*)base;
80 }
81 
82 static int ring_to_hw_ip(enum ring_type ring)
83 {
84 	switch (ring) {
85 	case RING_GFX:
86 		return AMDGPU_HW_IP_GFX;
87 	case RING_DMA:
88 		return AMDGPU_HW_IP_DMA;
89 	case RING_COMPUTE:
90 		return AMDGPU_HW_IP_COMPUTE;
91 	default:
92 		unreachable("unsupported ring");
93 	}
94 }
95 
96 struct radv_amdgpu_cs_request {
97 	/** Specify flags with additional information */
98 	uint64_t flags;
99 
100 	/** Specify HW IP block type to which to send the IB. */
101 	unsigned ip_type;
102 
103 	/** IP instance index if there are several IPs of the same type. */
104 	unsigned ip_instance;
105 
106 	/**
107 	 * Specify the ring index of the IP. There can be several rings
108 	 * in the same IP, e.g. 0 for SDMA0 and 1 for SDMA1.
109 	 */
110 	uint32_t ring;
111 
112 	/**
113 	 * BO list handles used by this request.
114 	 */
115 	struct drm_amdgpu_bo_list_entry *handles;
116 	uint32_t num_handles;
117 
118 	/**
119 	 * Number of dependencies this command submission needs to
120 	 * wait for before starting execution.
121 	 */
122 	uint32_t number_of_dependencies;
123 
124 	/**
125 	 * Array of dependencies which need to be met before
126 	 * execution can start.
127 	 */
128 	struct amdgpu_cs_fence *dependencies;
129 
130 	/** Number of IBs to submit in the field ibs. */
131 	uint32_t number_of_ibs;
132 
133 	/**
134 	 * IBs to submit. These IBs will be submitted together as a single entity.
135 	 */
136 	struct amdgpu_cs_ib_info *ibs;
137 
138 	/**
139 	 * The returned sequence number for the command submission
140 	 */
141 	uint64_t seq_no;
142 
143 	/**
144 	 * The fence information
145 	 */
146 	struct amdgpu_cs_fence_info fence_info;
147 };
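/* Illustrative sketch (not in the original source): the submit helpers below
 * fill a request roughly like this, assuming cs0 is a finalized radv_amdgpu_cs
 * and handles/num_handles come from radv_amdgpu_get_bo_list():
 *
 *	struct radv_amdgpu_cs_request request = {0};
 *	request.ip_type       = cs0->hw_ip;
 *	request.ring          = queue_idx;
 *	request.number_of_ibs = 1;
 *	request.ibs           = &cs0->ib;
 *	request.handles       = handles;
 *	request.num_handles   = num_handles;
 *	request.fence_info    = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
 *	result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
 */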
148 
149 
150 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
151 				   uint32_t ip_type,
152 				   uint32_t ring,
153 				   struct radv_winsys_sem_info *sem_info);
154 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
155 				 struct radv_amdgpu_cs_request *request,
156 				 struct radv_winsys_sem_info *sem_info);
157 
158 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
159 					 struct radv_amdgpu_fence *fence,
160 					 struct radv_amdgpu_cs_request *req)
161 {
162 	fence->fence.context = ctx->ctx;
163 	fence->fence.ip_type = req->ip_type;
164 	fence->fence.ip_instance = req->ip_instance;
165 	fence->fence.ring = req->ring;
166 	fence->fence.fence = req->seq_no;
167 	fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
168 }
169 
170 static struct radeon_winsys_fence *radv_amdgpu_create_fence()
171 {
172 	struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
173 	if (!fence)
174 		return NULL;
175 
176 	fence->fence.fence = UINT64_MAX;
177 	return (struct radeon_winsys_fence*)fence;
178 }
179 
180 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
181 {
182 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
183 	free(fence);
184 }
185 
186 static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
187 {
188 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
189 	fence->fence.fence = UINT64_MAX;
190 }
191 
192 static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
193 {
194 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
195 	fence->fence.fence = 0;
196 }
197 
198 static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
199 {
200 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
201 	return fence->fence.fence < UINT64_MAX;
202 }
203 
204 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
205 			      struct radeon_winsys_fence *_fence,
206 			      bool absolute,
207 			      uint64_t timeout)
208 {
209 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
210 	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
211 	int r;
212 	uint32_t expired = 0;
213 
214 	/* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
215 	if (fence->fence.fence == UINT64_MAX)
216 		return false;
217 
218 	if (fence->fence.fence == 0)
219 		return true;
220 
221 	if (fence->user_ptr) {
222 		if (*fence->user_ptr >= fence->fence.fence)
223 			return true;
224 		if (!absolute && !timeout)
225 			return false;
226 	}
227 
228 	/* Now use the libdrm query. */
229 	r = amdgpu_cs_query_fence_status(&fence->fence,
230 	                                 timeout,
231 	                                 flags,
232 	                                 &expired);
233 
234 	if (r) {
235 		fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
236 		return false;
237 	}
238 
239 	if (expired)
240 		return true;
241 
242 	return false;
243 }
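/* Editorial note (not in the original source): fence.fence doubles as a state
 * marker. UINT64_MAX means "never submitted" (freshly created or reset), so a
 * wait fails immediately; 0 means "already signalled" (see
 * radv_amdgpu_signal_fence); any other value is a sequence number that is
 * first checked against the GPU-written value at user_ptr before falling back
 * to the libdrm query amdgpu_cs_query_fence_status().
 */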
244 
245 
246 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
247 			      struct radeon_winsys_fence *const *_fences,
248 			      uint32_t fence_count,
249 			      bool wait_all,
250 			      uint64_t timeout)
251 {
252 	struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
253 	int r;
254 	uint32_t expired = 0, first = 0;
255 
256 	if (!fences)
257 		return false;
258 
259 	for (uint32_t i = 0; i < fence_count; ++i)
260 		fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
261 
262 	/* Now use the libdrm query. */
263 	r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
264 	                          timeout, &expired, &first);
265 
266 	free(fences);
267 	if (r) {
268 		fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
269 		return false;
270 	}
271 
272 	if (expired)
273 		return true;
274 
275 	return false;
276 }
277 
278 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
279 {
280 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
281 
282 	if (cs->ib_buffer)
283 		cs->ws->base.buffer_destroy(cs->ib_buffer);
284 	else
285 		free(cs->base.buf);
286 
287 	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
288 		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
289 
290 	for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
291 		struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
292 		free(rcs->buf);
293 	}
294 
295 	free(cs->old_cs_buffers);
296 	free(cs->old_ib_buffers);
297 	free(cs->virtual_buffers);
298 	free(cs->virtual_buffer_hash_table);
299 	free(cs->handles);
300 	free(cs);
301 }
302 
303 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
304 				enum ring_type ring_type)
305 {
306 	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
307 		cs->buffer_hash_table[i] = -1;
308 
309 	cs->hw_ip = ring_to_hw_ip(ring_type);
310 }
311 
312 static struct radeon_cmdbuf *
313 radv_amdgpu_cs_create(struct radeon_winsys *ws,
314 		      enum ring_type ring_type)
315 {
316 	struct radv_amdgpu_cs *cs;
317 	uint32_t ib_size = 20 * 1024 * 4;
318 	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
319 	if (!cs)
320 		return NULL;
321 
322 	cs->ws = radv_amdgpu_winsys(ws);
323 	radv_amdgpu_init_cs(cs, ring_type);
324 
325 	if (cs->ws->use_ib_bos) {
326 		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
327 						  RADEON_DOMAIN_GTT,
328 						  RADEON_FLAG_CPU_ACCESS |
329 						  RADEON_FLAG_NO_INTERPROCESS_SHARING |
330 						  RADEON_FLAG_READ_ONLY |
331 						  RADEON_FLAG_GTT_WC,
332 						  RADV_BO_PRIORITY_CS);
333 		if (!cs->ib_buffer) {
334 			free(cs);
335 			return NULL;
336 		}
337 
338 		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
339 		if (!cs->ib_mapped) {
340 			ws->buffer_destroy(cs->ib_buffer);
341 			free(cs);
342 			return NULL;
343 		}
344 
345 		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
346 		cs->base.buf = (uint32_t *)cs->ib_mapped;
347 		cs->base.max_dw = ib_size / 4 - 4;
348 		cs->ib_size_ptr = &cs->ib.size;
349 		cs->ib.size = 0;
350 
351 		ws->cs_add_buffer(&cs->base, cs->ib_buffer);
352 	} else {
353 		uint32_t *buf = malloc(16384);
354 		if (!buf) {
355 			free(cs);
356 			return NULL;
357 		}
358 		cs->base.buf = buf;
359 		cs->base.max_dw = 4096;
360 	}
361 
362 	return &cs->base;
363 }
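/* Editorial note (not in the original source): max_dw is set to ib_size/4 - 4
 * so that four dwords always remain free at the end of the IB. They are used
 * by radv_amdgpu_cs_grow() and by the chained submit path to append the
 * PKT3_INDIRECT_BUFFER_CIK packet (header + 64-bit address + size/flags
 * control word) that chains this IB to the next one.
 */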
364 
365 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
366 {
367 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
368 
369 	if (cs->status != VK_SUCCESS) {
370 		cs->base.cdw = 0;
371 		return;
372 	}
373 
374 	if (!cs->ws->use_ib_bos) {
375 		const uint64_t limit_dws = 0xffff8;
376 		uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
377 				       MIN2(cs->base.max_dw * 2, limit_dws));
378 
379 		/* The total ib size cannot exceed limit_dws dwords. */
380 		if (ib_dws > limit_dws)
381 		{
382 			/* The maximum size in dwords has been reached,
383 			 * try to allocate a new one.
384 			 */
385 			struct radeon_cmdbuf *old_cs_buffers =
386 				realloc(cs->old_cs_buffers,
387 					(cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
388 			if (!old_cs_buffers) {
389 				cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
390 				cs->base.cdw = 0;
391 				return;
392 			}
393 			cs->old_cs_buffers = old_cs_buffers;
394 
395 			/* Store the current one for submitting it later. */
396 			cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
397 			cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
398 			cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
399 			cs->num_old_cs_buffers++;
400 
401 			/* Reset the cs, it will be re-allocated below. */
402 			cs->base.cdw = 0;
403 			cs->base.buf = NULL;
404 
405 			/* Re-compute the number of dwords to allocate. */
406 			ib_dws = MAX2(cs->base.cdw + min_size,
407 				      MIN2(cs->base.max_dw * 2, limit_dws));
408 			if (ib_dws > limit_dws) {
409 				fprintf(stderr, "amdgpu: Too high number of "
410 						"dwords to allocate\n");
411 				cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
412 				return;
413 			}
414 		}
415 
416 		uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
417 		if (new_buf) {
418 			cs->base.buf = new_buf;
419 			cs->base.max_dw = ib_dws;
420 		} else {
421 			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
422 			cs->base.cdw = 0;
423 		}
424 		return;
425 	}
426 
427 	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
428 
429 	/* max that fits in the chain size field. */
430 	ib_size = MIN2(ib_size, 0xfffff);
431 
432 	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
433 		radeon_emit(&cs->base, PKT3_NOP_PAD);
434 
435 	*cs->ib_size_ptr |= cs->base.cdw + 4;
436 
437 	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
438 		unsigned max_num_old_ib_buffers =
439 			MAX2(1, cs->max_num_old_ib_buffers * 2);
440 		struct radeon_winsys_bo **old_ib_buffers =
441 			realloc(cs->old_ib_buffers,
442 				max_num_old_ib_buffers * sizeof(void*));
443 		if (!old_ib_buffers) {
444 			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
445 			return;
446 		}
447 		cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
448 		cs->old_ib_buffers = old_ib_buffers;
449 	}
450 
451 	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
452 
453 	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
454 						   RADEON_DOMAIN_GTT,
455 						   RADEON_FLAG_CPU_ACCESS |
456 						   RADEON_FLAG_NO_INTERPROCESS_SHARING |
457 						   RADEON_FLAG_READ_ONLY |
458 						   RADEON_FLAG_GTT_WC,
459 						   RADV_BO_PRIORITY_CS);
460 
461 	if (!cs->ib_buffer) {
462 		cs->base.cdw = 0;
463 		cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
464 		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
465 	}
466 
467 	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
468 	if (!cs->ib_mapped) {
469 		cs->ws->base.buffer_destroy(cs->ib_buffer);
470 		cs->base.cdw = 0;
471 
472 		/* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
473 		cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
474 		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
475 	}
476 
477 	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
478 
479 	radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
480 	radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
481 	radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
482 	radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
483 
484 	cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
485 
486 	cs->base.buf = (uint32_t *)cs->ib_mapped;
487 	cs->base.cdw = 0;
488 	cs->base.max_dw = ib_size / 4 - 4;
489 
490 }
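/* Illustrative sketch (not in the original source): the chaining packet
 * emitted above occupies exactly the four reserved dwords, assuming new_ib_va
 * is the VA of the freshly allocated IB buffer:
 *
 *	radeon_emit(cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));   // header
 *	radeon_emit(cs, new_ib_va);                               // VA bits 31:0
 *	radeon_emit(cs, new_ib_va >> 32);                         // VA bits 63:32
 *	radeon_emit(cs, S_3F2_CHAIN(1) | S_3F2_VALID(1));         // control word
 *
 * The size field of the control word is left 0 here and patched in later
 * through ib_size_ptr once the size of the new IB is known.
 */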
491 
492 static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
493 {
494 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
495 
496 	if (cs->ws->use_ib_bos) {
497 		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
498 			radeon_emit(&cs->base, PKT3_NOP_PAD);
499 
500 		*cs->ib_size_ptr |= cs->base.cdw;
501 
502 		cs->is_chained = false;
503 	}
504 
505 	return cs->status;
506 }
507 
508 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
509 {
510 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
511 	cs->base.cdw = 0;
512 	cs->status = VK_SUCCESS;
513 
514 	for (unsigned i = 0; i < cs->num_buffers; ++i) {
515 		unsigned hash = cs->handles[i].bo_handle &
516 		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
517 		cs->buffer_hash_table[hash] = -1;
518 	}
519 
520 	for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
521 		unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
522 		cs->virtual_buffer_hash_table[hash] = -1;
523 	}
524 
525 	cs->num_buffers = 0;
526 	cs->num_virtual_buffers = 0;
527 
528 	if (cs->ws->use_ib_bos) {
529 		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
530 
531 		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
532 			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
533 
534 		cs->num_old_ib_buffers = 0;
535 		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
536 		cs->ib_size_ptr = &cs->ib.size;
537 		cs->ib.size = 0;
538 	} else {
539 		for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
540 			struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
541 			free(rcs->buf);
542 		}
543 
544 		free(cs->old_cs_buffers);
545 		cs->old_cs_buffers = NULL;
546 		cs->num_old_cs_buffers = 0;
547 	}
548 }
549 
550 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
551 				      uint32_t bo)
552 {
553 	unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
554 	int index = cs->buffer_hash_table[hash];
555 
556 	if (index == -1)
557 		return -1;
558 
559 	if (cs->handles[index].bo_handle == bo)
560 		return index;
561 
562 	for (unsigned i = 0; i < cs->num_buffers; ++i) {
563 		if (cs->handles[i].bo_handle == bo) {
564 			cs->buffer_hash_table[hash] = i;
565 			return i;
566 		}
567 	}
568 
569 	return -1;
570 }
571 
572 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
573 					       uint32_t bo, uint8_t priority)
574 {
575 	unsigned hash;
576 	int index = radv_amdgpu_cs_find_buffer(cs, bo);
577 
578 	if (index != -1)
579 		return;
580 
581 	if (cs->num_buffers == cs->max_num_buffers) {
582 		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
583 		struct drm_amdgpu_bo_list_entry *new_entries =
584 			realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
585 		if (new_entries) {
586 			cs->max_num_buffers = new_count;
587 			cs->handles = new_entries;
588 		} else {
589 			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
590 			return;
591 		}
592 	}
593 
594 	cs->handles[cs->num_buffers].bo_handle = bo;
595 	cs->handles[cs->num_buffers].bo_priority = priority;
596 
597 	hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
598 	cs->buffer_hash_table[hash] = cs->num_buffers;
599 
600 	++cs->num_buffers;
601 }
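/* Editorial note (not in the original source): buffer_hash_table is a small
 * direct-mapped cache over the handles array (hash = bo_handle & 1023). On a
 * collision, radv_amdgpu_cs_find_buffer() falls back to a linear scan and
 * refreshes the cached index, so stale entries only cost time, never
 * correctness; radv_amdgpu_cs_reset() clears the slots that were used.
 */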
602 
603 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
604                                               struct radeon_winsys_bo *bo)
605 {
606 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
607 	unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
608 
609 
610 	if (!cs->virtual_buffer_hash_table) {
611 		int *virtual_buffer_hash_table =
612 			malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
613 		if (!virtual_buffer_hash_table) {
614 			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
615 			return;
616 		}
617 		cs->virtual_buffer_hash_table = virtual_buffer_hash_table;
618 
619 		for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
620 			cs->virtual_buffer_hash_table[i] = -1;
621 	}
622 
623 	if (cs->virtual_buffer_hash_table[hash] >= 0) {
624 		int idx = cs->virtual_buffer_hash_table[hash];
625 		if (cs->virtual_buffers[idx] == bo) {
626 			return;
627 		}
628 		for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
629 			if (cs->virtual_buffers[i] == bo) {
630 				cs->virtual_buffer_hash_table[hash] = i;
631 				return;
632 			}
633 		}
634 	}
635 
636 	if(cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
637 		unsigned max_num_virtual_buffers =
638 			MAX2(2, cs->max_num_virtual_buffers * 2);
639 		struct radeon_winsys_bo **virtual_buffers =
640 			realloc(cs->virtual_buffers,
641 				sizeof(struct radv_amdgpu_virtual_virtual_buffer*) * max_num_virtual_buffers);
642 		if (!virtual_buffers) {
643 			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
644 			return;
645 		}
646 		cs->max_num_virtual_buffers = max_num_virtual_buffers;
647 		cs->virtual_buffers = virtual_buffers;
648 	}
649 
650 	cs->virtual_buffers[cs->num_virtual_buffers] = bo;
651 
652 	cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
653 	++cs->num_virtual_buffers;
654 
655 }
656 
657 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
658 				      struct radeon_winsys_bo *_bo)
659 {
660 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
661 	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
662 
663 	if (cs->status != VK_SUCCESS)
664 		return;
665 
666 	if (bo->is_virtual)  {
667 		radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
668 		return;
669 	}
670 
671 	if (bo->base.is_local)
672 		return;
673 
674 	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
675 }
676 
677 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
678 					     struct radeon_cmdbuf *_child)
679 {
680 	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
681 	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
682 
683 	if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
684 		return;
685 
686 	for (unsigned i = 0; i < child->num_buffers; ++i) {
687 		radv_amdgpu_cs_add_buffer_internal(parent,
688 		                                   child->handles[i].bo_handle,
689 		                                   child->handles[i].bo_priority);
690 	}
691 
692 	for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
693 		radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
694 	}
695 
696 	if (parent->ws->use_ib_bos) {
697 		if (parent->base.cdw + 4 > parent->base.max_dw)
698 			radv_amdgpu_cs_grow(&parent->base, 4);
699 
700 		radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
701 		radeon_emit(&parent->base, child->ib.ib_mc_address);
702 		radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
703 		radeon_emit(&parent->base, child->ib.size);
704 	} else {
705 		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
706 			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
707 
708 		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
709 		parent->base.cdw += child->base.cdw;
710 	}
711 }
712 
713 static VkResult
714 radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
715 			struct radeon_cmdbuf **cs_array,
716 			unsigned count,
717 			struct radv_amdgpu_winsys_bo **extra_bo_array,
718 			unsigned num_extra_bo,
719 			struct radeon_cmdbuf *extra_cs,
720 			const struct radv_winsys_bo_list *radv_bo_list,
721 			unsigned *rnum_handles,
722 			struct drm_amdgpu_bo_list_entry **rhandles)
723 {
724 	struct drm_amdgpu_bo_list_entry *handles = NULL;
725 	unsigned num_handles = 0;
726 
727 	if (ws->debug_all_bos) {
728 		struct radv_amdgpu_winsys_bo *bo;
729 
730 		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
731 		if (!handles) {
732 			return VK_ERROR_OUT_OF_HOST_MEMORY;
733 		}
734 
735 		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
736 			assert(num_handles < ws->num_buffers);
737 			handles[num_handles].bo_handle = bo->bo_handle;
738 			handles[num_handles].bo_priority = bo->priority;
739 			num_handles++;
740 		}
741 	} else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
742 	           !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
743 		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
744 		if (cs->num_buffers == 0)
745 			return VK_SUCCESS;
746 
747 		handles = malloc(sizeof(handles[0]) * cs->num_buffers);
748 		if (!handles)
749 			return VK_ERROR_OUT_OF_HOST_MEMORY;
750 
751 		memcpy(handles, cs->handles,
752 		       sizeof(handles[0]) * cs->num_buffers);
753 		num_handles = cs->num_buffers;
754 	} else {
755 		unsigned total_buffer_count = num_extra_bo;
756 		num_handles = num_extra_bo;
757 		for (unsigned i = 0; i < count; ++i) {
758 			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
759 			total_buffer_count += cs->num_buffers;
760 			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
761 				total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
762 		}
763 
764 		if (extra_cs) {
765 			total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
766 		}
767 
768 		if (radv_bo_list) {
769 			total_buffer_count += radv_bo_list->count;
770 		}
771 
772 		if (total_buffer_count == 0)
773 			return VK_SUCCESS;
774 
775 		handles = malloc(sizeof(handles[0]) * total_buffer_count);
776 		if (!handles)
777 			return VK_ERROR_OUT_OF_HOST_MEMORY;
778 
779 		for (unsigned i = 0; i < num_extra_bo; i++) {
780 			handles[i].bo_handle = extra_bo_array[i]->bo_handle;
781 			handles[i].bo_priority = extra_bo_array[i]->priority;
782 		}
783 
784 		for (unsigned i = 0; i < count + !!extra_cs; ++i) {
785 			struct radv_amdgpu_cs *cs;
786 
787 			if (i == count)
788 				cs = (struct radv_amdgpu_cs*)extra_cs;
789 			else
790 				cs = (struct radv_amdgpu_cs*)cs_array[i];
791 
792 			if (!cs->num_buffers)
793 				continue;
794 
795 			if (num_handles == 0 && !cs->num_virtual_buffers) {
796 				memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
797 				num_handles = cs->num_buffers;
798 				continue;
799 			}
800 			int unique_bo_so_far = num_handles;
801 			for (unsigned j = 0; j < cs->num_buffers; ++j) {
802 				bool found = false;
803 				for (unsigned k = 0; k < unique_bo_so_far; ++k) {
804 					if (handles[k].bo_handle == cs->handles[j].bo_handle) {
805 						found = true;
806 						break;
807 					}
808 				}
809 				if (!found) {
810 					handles[num_handles] = cs->handles[j];
811 					++num_handles;
812 				}
813 			}
814 			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
815 				struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
816 				for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
817 					struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
818 					bool found = false;
819 					for (unsigned m = 0; m < num_handles; ++m) {
820 						if (handles[m].bo_handle == bo->bo_handle) {
821 							found = true;
822 							break;
823 						}
824 					}
825 					if (!found) {
826 						handles[num_handles].bo_handle = bo->bo_handle;
827 						handles[num_handles].bo_priority = bo->priority;
828 						++num_handles;
829 					}
830 				}
831 			}
832 		}
833 
834 		if (radv_bo_list) {
835 			unsigned unique_bo_so_far = num_handles;
836 			for (unsigned i = 0; i < radv_bo_list->count; ++i) {
837 				struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
838 				bool found = false;
839 				for (unsigned j = 0; j < unique_bo_so_far; ++j) {
840 					if (bo->bo_handle == handles[j].bo_handle) {
841 						found = true;
842 						break;
843 					}
844 				}
845 				if (!found) {
846 					handles[num_handles].bo_handle = bo->bo_handle;
847 					handles[num_handles].bo_priority = bo->priority;
848 					++num_handles;
849 				}
850 			}
851 		}
852 	}
853 
854 	*rhandles = handles;
855 	*rnum_handles = num_handles;
856 
857 	return VK_SUCCESS;
858 }
859 
860 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
861 {
862 	struct amdgpu_cs_fence_info ret = {0};
863 	if (ctx->fence_map) {
864 		ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
865 		ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
866 	}
867 	return ret;
868 }
869 
870 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
871 				    struct radv_amdgpu_cs_request *request)
872 {
873 	radv_amdgpu_request_to_fence(ctx,
874 	                             &ctx->last_submission[request->ip_type][request->ring],
875 	                             request);
876 }
877 
878 static VkResult
879 radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
880 				     int queue_idx,
881 				     struct radv_winsys_sem_info *sem_info,
882 				     const struct radv_winsys_bo_list *radv_bo_list,
883 				     struct radeon_cmdbuf **cs_array,
884 				     unsigned cs_count,
885 				     struct radeon_cmdbuf *initial_preamble_cs,
886 				     struct radeon_cmdbuf *continue_preamble_cs,
887 				     struct radeon_winsys_fence *_fence)
888 {
889 	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
890 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
891 	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
892 	struct radv_amdgpu_winsys *aws = cs0->ws;
893 	struct drm_amdgpu_bo_list_entry *handles = NULL;
894 	struct radv_amdgpu_cs_request request = {0};
895 	struct amdgpu_cs_ib_info ibs[2];
896 	unsigned number_of_ibs = 1;
897 	unsigned num_handles = 0;
898 	VkResult result;
899 
900 	for (unsigned i = cs_count; i--;) {
901 		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
902 
903 		if (cs->is_chained) {
904 			*cs->ib_size_ptr -= 4;
905 			cs->is_chained = false;
906 		}
907 
908 		if (i + 1 < cs_count) {
909 			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
910 			assert(cs->base.cdw + 4 <= cs->base.max_dw);
911 
912 			cs->is_chained = true;
913 			*cs->ib_size_ptr += 4;
914 
915 			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
916 			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
917 			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
918 			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
919 		}
920 	}
921 
922 	if (aws->debug_all_bos)
923 		u_rwlock_rdlock(&aws->global_bo_list_lock);
924 
925 	/* Get the BO list. */
926 	result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
927 					 initial_preamble_cs, radv_bo_list,
928 					 &num_handles, &handles);
929 	if (result != VK_SUCCESS)
930 		goto fail;
931 
932 	/* Configure the CS request. */
933 	if (initial_preamble_cs) {
934 		ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
935 		ibs[1] = cs0->ib;
936 		number_of_ibs++;
937 	} else {
938 		ibs[0] = cs0->ib;
939 	}
940 
941 	request.ip_type = cs0->hw_ip;
942 	request.ring = queue_idx;
943 	request.number_of_ibs = number_of_ibs;
944 	request.ibs = ibs;
945 	request.handles = handles;
946 	request.num_handles = num_handles;
947 	request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
948 
949 	/* Submit the CS. */
950 	result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
951 
952 	free(request.handles);
953 
954 	if (result != VK_SUCCESS)
955 		goto fail;
956 
957 	if (fence)
958 		radv_amdgpu_request_to_fence(ctx, fence, &request);
959 
960 	radv_assign_last_submit(ctx, &request);
961 
962 fail:
963 	if (aws->debug_all_bos)
964 		u_rwlock_rdunlock(&aws->global_bo_list_lock);
965 	return result;
966 }
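/* Editorial note (not in the original source): the loop at the top of
 * radv_amdgpu_winsys_cs_submit_chained() walks the CS array backwards, undoing
 * any chain left over from a previous submit (is_chained) and then writing a
 * fresh PKT3_INDIRECT_BUFFER_CIK packet into the four reserved dwords of each
 * CS so that it jumps to the next one. Only cs_array[0] (plus the optional
 * preamble) is handed to the kernel; the rest execute through the chain.
 */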
967 
968 static VkResult
969 radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
970 				      int queue_idx,
971 				      struct radv_winsys_sem_info *sem_info,
972 				      const struct radv_winsys_bo_list *radv_bo_list,
973 				      struct radeon_cmdbuf **cs_array,
974 				      unsigned cs_count,
975 				      struct radeon_cmdbuf *initial_preamble_cs,
976 				      struct radeon_cmdbuf *continue_preamble_cs,
977 				      struct radeon_winsys_fence *_fence)
978 {
979 	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
980 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
981 	struct drm_amdgpu_bo_list_entry *handles = NULL;
982 	struct radv_amdgpu_cs_request request = {0};
983 	struct amdgpu_cs_ib_info *ibs;
984 	struct radv_amdgpu_cs *cs0;
985 	struct radv_amdgpu_winsys *aws;
986 	unsigned num_handles = 0;
987 	unsigned number_of_ibs;
988 	VkResult result;
989 
990 	assert(cs_count);
991 	cs0 = radv_amdgpu_cs(cs_array[0]);
992 	aws = cs0->ws;
993 
994 	/* Compute the number of IBs for this submit. */
995 	number_of_ibs = cs_count + !!initial_preamble_cs;
996 
997 	if (aws->debug_all_bos)
998 		u_rwlock_rdlock(&aws->global_bo_list_lock);
999 
1000 	/* Get the BO list. */
1001 	result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
1002 					 initial_preamble_cs, radv_bo_list,
1003 					 &num_handles, &handles);
1004 	if (result != VK_SUCCESS) {
1005 		goto fail;
1006 	}
1007 
1008 	ibs = malloc(number_of_ibs * sizeof(*ibs));
1009 	if (!ibs) {
1010 		free(handles);
1011 		result = VK_ERROR_OUT_OF_HOST_MEMORY;
1012 		goto fail;
1013 	}
1014 
1015 	/* Configure the CS request. */
1016 	if (initial_preamble_cs)
1017 		ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
1018 
1019 	for (unsigned i = 0; i < cs_count; i++) {
1020 		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1021 
1022 		ibs[i + !!initial_preamble_cs] = cs->ib;
1023 
1024 		if (cs->is_chained) {
1025 			*cs->ib_size_ptr -= 4;
1026 			cs->is_chained = false;
1027 		}
1028 	}
1029 
1030 	request.ip_type = cs0->hw_ip;
1031 	request.ring = queue_idx;
1032 	request.handles = handles;
1033 	request.num_handles = num_handles;
1034 	request.number_of_ibs = number_of_ibs;
1035 	request.ibs = ibs;
1036 	request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1037 
1038 	/* Submit the CS. */
1039 	result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1040 
1041 	free(request.handles);
1042 	free(ibs);
1043 
1044 	if (result != VK_SUCCESS)
1045 		goto fail;
1046 
1047 	if (fence)
1048 		radv_amdgpu_request_to_fence(ctx, fence, &request);
1049 
1050 	radv_assign_last_submit(ctx, &request);
1051 
1052 fail:
1053 	if (aws->debug_all_bos)
1054 		u_rwlock_rdunlock(&aws->global_bo_list_lock);
1055 	return result;
1056 }
1057 
1058 static VkResult
1059 radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
1060 				    int queue_idx,
1061 				    struct radv_winsys_sem_info *sem_info,
1062 				    const struct radv_winsys_bo_list *radv_bo_list,
1063 				    struct radeon_cmdbuf **cs_array,
1064 				    unsigned cs_count,
1065 				    struct radeon_cmdbuf *initial_preamble_cs,
1066 				    struct radeon_cmdbuf *continue_preamble_cs,
1067 				    struct radeon_winsys_fence *_fence)
1068 {
1069 	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1070 	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
1071 	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
1072 	struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
1073 	struct radv_amdgpu_winsys *aws = cs0->ws;
1074 	struct radv_amdgpu_cs_request request;
1075 	uint32_t pad_word = PKT3_NOP_PAD;
1076 	bool emit_signal_sem = sem_info->cs_emit_signal;
1077 	VkResult result;
1078 
1079 	if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
1080 		pad_word = 0x80000000;
1081 
1082 	assert(cs_count);
1083 
1084 	for (unsigned i = 0; i < cs_count;) {
1085 		struct amdgpu_cs_ib_info *ibs;
1086 		struct radeon_winsys_bo **bos;
1087 		struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
1088 		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1089 		struct drm_amdgpu_bo_list_entry *handles = NULL;
1090 		unsigned num_handles = 0;
1091 		unsigned number_of_ibs;
1092 		uint32_t *ptr;
1093 		unsigned cnt = 0;
1094 		unsigned size = 0;
1095 		unsigned pad_words = 0;
1096 
1097 		/* Compute the number of IBs for this submit. */
1098 		number_of_ibs = cs->num_old_cs_buffers + 1;
1099 
1100 		ibs = malloc(number_of_ibs * sizeof(*ibs));
1101 		if (!ibs)
1102 			return VK_ERROR_OUT_OF_HOST_MEMORY;
1103 
1104 		bos = malloc(number_of_ibs * sizeof(*bos));
1105 		if (!bos) {
1106 			free(ibs);
1107 			return VK_ERROR_OUT_OF_HOST_MEMORY;
1108 		}
1109 
1110 		if (number_of_ibs > 1) {
1111 			/* Special path when the maximum size in dwords has
1112 			 * been reached because we need to handle more than one
1113 			 * IB per submit.
1114 			 */
1115 			struct radeon_cmdbuf **new_cs_array;
1116 			unsigned idx = 0;
1117 
1118 			new_cs_array = malloc(cs->num_old_cs_buffers *
1119 					      sizeof(*new_cs_array));
1120 			assert(new_cs_array);
1121 
1122 			for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
1123 				new_cs_array[idx++] = &cs->old_cs_buffers[j];
1124 			new_cs_array[idx++] = cs_array[i];
1125 
1126 			for (unsigned j = 0; j < number_of_ibs; j++) {
1127 				struct radeon_cmdbuf *rcs = new_cs_array[j];
1128 				bool needs_preamble = preamble_cs && j == 0;
1129 				unsigned size = 0;
1130 
1131 				if (needs_preamble)
1132 					size += preamble_cs->cdw;
1133 				size += rcs->cdw;
1134 
1135 				assert(size < 0xffff8);
1136 
1137 				while (!size || (size & 7)) {
1138 					size++;
1139 					pad_words++;
1140 				}
1141 
1142 				bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1143 							   RADEON_DOMAIN_GTT,
1144 							   RADEON_FLAG_CPU_ACCESS |
1145 							   RADEON_FLAG_NO_INTERPROCESS_SHARING |
1146 							   RADEON_FLAG_READ_ONLY,
1147 							   RADV_BO_PRIORITY_CS);
1148 				ptr = ws->buffer_map(bos[j]);
1149 
1150 				if (needs_preamble) {
1151 					memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1152 					ptr += preamble_cs->cdw;
1153 				}
1154 
1155 				memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1156 				ptr += rcs->cdw;
1157 
1158 				for (unsigned k = 0; k < pad_words; ++k)
1159 					*ptr++ = pad_word;
1160 
1161 				ibs[j].size = size;
1162 				ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1163 				ibs[j].flags = 0;
1164 			}
1165 
1166 			cnt++;
1167 			free(new_cs_array);
1168 		} else {
1169 			if (preamble_cs)
1170 				size += preamble_cs->cdw;
1171 
1172 			while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1173 				size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1174 				++cnt;
1175 			}
1176 
1177 			while (!size || (size & 7)) {
1178 				size++;
1179 				pad_words++;
1180 			}
1181 			assert(cnt);
1182 
1183 			bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1184 						   RADEON_DOMAIN_GTT,
1185 						   RADEON_FLAG_CPU_ACCESS |
1186 						   RADEON_FLAG_NO_INTERPROCESS_SHARING |
1187 						   RADEON_FLAG_READ_ONLY,
1188 						   RADV_BO_PRIORITY_CS);
1189 			ptr = ws->buffer_map(bos[0]);
1190 
1191 			if (preamble_cs) {
1192 				memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1193 				ptr += preamble_cs->cdw;
1194 			}
1195 
1196 			for (unsigned j = 0; j < cnt; ++j) {
1197 				struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1198 				memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1199 				ptr += cs->base.cdw;
1200 
1201 			}
1202 
1203 			for (unsigned j = 0; j < pad_words; ++j)
1204 				*ptr++ = pad_word;
1205 
1206 			ibs[0].size = size;
1207 			ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1208 			ibs[0].flags = 0;
1209 		}
1210 
1211 		if (aws->debug_all_bos)
1212 			u_rwlock_rdlock(&aws->global_bo_list_lock);
1213 
1214 		result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
1215 						 (struct radv_amdgpu_winsys_bo **)bos,
1216 						 number_of_ibs, preamble_cs,
1217 						 radv_bo_list,
1218 						 &num_handles, &handles);
1219 		if (result != VK_SUCCESS) {
1220 			free(ibs);
1221 			free(bos);
1222 			if (aws->debug_all_bos)
1223 				u_rwlock_rdunlock(&aws->global_bo_list_lock);
1224 			return result;
1225 		}
1226 
1227 		memset(&request, 0, sizeof(request));
1228 
1229 		request.ip_type = cs0->hw_ip;
1230 		request.ring = queue_idx;
1231 		request.handles = handles;
1232 		request.num_handles = num_handles;
1233 		request.number_of_ibs = number_of_ibs;
1234 		request.ibs = ibs;
1235 		request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1236 
1237 		sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1238 		result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1239 
1240 		free(request.handles);
1241 		if (aws->debug_all_bos)
1242 			u_rwlock_rdunlock(&aws->global_bo_list_lock);
1243 
1244 		for (unsigned j = 0; j < number_of_ibs; j++) {
1245 			ws->buffer_destroy(bos[j]);
1246 		}
1247 
1248 		free(ibs);
1249 		free(bos);
1250 
1251 		if (result != VK_SUCCESS)
1252 			return result;
1253 
1254 		i += cnt;
1255 	}
1256 	if (fence)
1257 		radv_amdgpu_request_to_fence(ctx, fence, &request);
1258 
1259 	radv_assign_last_submit(ctx, &request);
1260 
1261 	return VK_SUCCESS;
1262 }
1263 
1264 static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1265 					     int queue_idx,
1266 					     struct radeon_cmdbuf **cs_array,
1267 					     unsigned cs_count,
1268 					     struct radeon_cmdbuf *initial_preamble_cs,
1269 					     struct radeon_cmdbuf *continue_preamble_cs,
1270 					     struct radv_winsys_sem_info *sem_info,
1271 					     const struct radv_winsys_bo_list *bo_list,
1272 					     bool can_patch,
1273 					     struct radeon_winsys_fence *_fence)
1274 {
1275 	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1276 	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1277 	VkResult result;
1278 
1279 	assert(sem_info);
1280 	if (!cs->ws->use_ib_bos) {
1281 		result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1282 							     cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1283 	} else if (can_patch) {
1284 		result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1285 							      cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1286 	} else {
1287 		result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1288 							       cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1289 	}
1290 
1291 	radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1292 	return result;
1293 }
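/* Editorial note (not in the original source): submission picks one of three
 * strategies: the sysmem path when IB BOs are unavailable (command data is
 * copied into freshly created GPU buffers), the chained path when the CSes
 * may be patched to jump into each other (a single IB per kernel submission),
 * and the fallback path that passes every CS as a separate IB in one request.
 */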
1294 
1295 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1296 {
1297 	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1298 	void *ret = NULL;
1299 
1300 	if (!cs->ib_buffer)
1301 		return NULL;
1302 	for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1303 		struct radv_amdgpu_winsys_bo *bo;
1304 
1305 		bo = (struct radv_amdgpu_winsys_bo*)
1306 		       (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1307 		if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1308 			if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1309 				return (char *)ret + (addr - bo->base.va);
1310 		}
1311 	}
1312 	if(cs->ws->debug_all_bos) {
1313 		u_rwlock_rdlock(&cs->ws->global_bo_list_lock);
1314 		list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1315 		                    &cs->ws->global_bo_list, global_list_item) {
1316 			if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1317 				if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1318 					u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
1319 					return (char *)ret + (addr - bo->base.va);
1320 				}
1321 			}
1322 		}
1323 		u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
1324 	}
1325 	return ret;
1326 }
1327 
1328 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1329                                        FILE* file,
1330                                        const int *trace_ids, int trace_id_count)
1331 {
1332 	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1333 	void *ib = cs->base.buf;
1334 	int num_dw = cs->base.cdw;
1335 
1336 	if (cs->ws->use_ib_bos) {
1337 		ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1338 		num_dw = cs->ib.size;
1339 	}
1340 	assert(ib);
1341 	ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count,  "main IB",
1342 		    cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1343 }
1344 
1345 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1346 {
1347 	switch (radv_priority) {
1348 		case RADEON_CTX_PRIORITY_REALTIME:
1349 			return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1350 		case RADEON_CTX_PRIORITY_HIGH:
1351 			return AMDGPU_CTX_PRIORITY_HIGH;
1352 		case RADEON_CTX_PRIORITY_MEDIUM:
1353 			return AMDGPU_CTX_PRIORITY_NORMAL;
1354 		case RADEON_CTX_PRIORITY_LOW:
1355 			return AMDGPU_CTX_PRIORITY_LOW;
1356 		default:
1357 			unreachable("Invalid context priority");
1358 	}
1359 }
1360 
1361 static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1362                                        enum radeon_ctx_priority priority,
1363                                        struct radeon_winsys_ctx **rctx)
1364 {
1365 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1366 	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1367 	uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1368 	VkResult result;
1369 	int r;
1370 
1371 	if (!ctx)
1372 		return VK_ERROR_OUT_OF_HOST_MEMORY;
1373 
1374 	r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1375 	if (r && r == -EACCES) {
1376 		result = VK_ERROR_NOT_PERMITTED_EXT;
1377 		goto fail_create;
1378 	} else if (r) {
1379 		fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1380 		result = VK_ERROR_OUT_OF_HOST_MEMORY;
1381 		goto fail_create;
1382 	}
1383 	ctx->ws = ws;
1384 
1385 	assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1386 	ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1387 	                                      RADEON_DOMAIN_GTT,
1388 	                                      RADEON_FLAG_CPU_ACCESS |
1389 					      RADEON_FLAG_NO_INTERPROCESS_SHARING,
1390 					      RADV_BO_PRIORITY_CS);
1391 	if (!ctx->fence_bo) {
1392 		result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1393 		goto fail_alloc;
1394 	}
1395 
1396 	ctx->fence_map = (uint64_t *)ws->base.buffer_map(ctx->fence_bo);
1397 	if (!ctx->fence_map) {
1398 		result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1399 		goto fail_map;
1400 	}
1401 
1402 	memset(ctx->fence_map, 0, 4096);
1403 
1404 	*rctx = (struct radeon_winsys_ctx *)ctx;
1405 	return VK_SUCCESS;
1406 
1407 fail_map:
1408 	ws->base.buffer_destroy(ctx->fence_bo);
1409 fail_alloc:
1410 	amdgpu_cs_ctx_free(ctx->ctx);
1411 fail_create:
1412 	FREE(ctx);
1413 	return result;
1414 }
1415 
1416 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1417 {
1418 	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1419 	ctx->ws->base.buffer_destroy(ctx->fence_bo);
1420 	amdgpu_cs_ctx_free(ctx->ctx);
1421 	FREE(ctx);
1422 }
1423 
1424 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1425                                       enum ring_type ring_type, int ring_index)
1426 {
1427 	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1428 	int ip_type = ring_to_hw_ip(ring_type);
1429 
1430 	if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1431 		uint32_t expired;
1432 		int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1433 		                                       1000000000ull, 0, &expired);
1434 
1435 		if (ret || !expired)
1436 			return false;
1437 	}
1438 
1439 	return true;
1440 }
1441 
1442 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1443 {
1444 	struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1445 	if (!sem)
1446 		return NULL;
1447 
1448 	return (struct radeon_winsys_sem *)sem;
1449 }
1450 
1451 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1452 {
1453 	struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1454 	FREE(sem);
1455 }
1456 
1457 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1458 				   uint32_t ip_type,
1459 				   uint32_t ring,
1460 				   struct radv_winsys_sem_info *sem_info)
1461 {
1462 	for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1463 		struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1464 
1465 		if (sem->context)
1466 			return -EINVAL;
1467 
1468 		*sem = ctx->last_submission[ip_type][ring].fence;
1469 	}
1470 	return 0;
1471 }
1472 
1473 static void *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1474 						const uint32_t *syncobj_override,
1475 						struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1476 {
1477 	const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
1478 	struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1479 	if (!syncobj)
1480 		return NULL;
1481 
1482 	for (unsigned i = 0; i < counts->syncobj_count; i++) {
1483 		struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1484 		sem->handle = src[i];
1485 	}
1486 
1487 	chunk->chunk_id = chunk_id;
1488 	chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1489 	chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1490 	return syncobj;
1491 }
1492 
1493 static void *
1494 radv_amdgpu_cs_alloc_timeline_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1495                                             const uint32_t *syncobj_override,
1496                                             struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1497 {
1498 	const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
1499 	struct drm_amdgpu_cs_chunk_syncobj *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_syncobj) *
1500 	                                                     (counts->syncobj_count + counts->timeline_syncobj_count));
1501 	if (!syncobj)
1502 		return NULL;
1503 
1504 	for (unsigned i = 0; i < counts->syncobj_count; i++) {
1505 		struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i];
1506 		sem->handle = src[i];
1507 		sem->flags = 0;
1508 		sem->point = 0;
1509 	}
1510 
1511 	for (unsigned i = 0; i < counts->timeline_syncobj_count; i++) {
1512 		struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i + counts->syncobj_count];
1513 		sem->handle = counts->syncobj[i + counts->syncobj_count];
1514 		sem->flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
1515 		sem->point = counts->points[i];
1516 	}
1517 
1518 	chunk->chunk_id = chunk_id;
1519 	chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4 *
1520 		(counts->syncobj_count + counts->timeline_syncobj_count);
1521 	chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1522 	return syncobj;
1523 }
1524 
1525 static int radv_amdgpu_cache_alloc_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *dst)
1526 {
1527 	pthread_mutex_lock(&ws->syncobj_lock);
1528 	if (count > ws->syncobj_capacity) {
1529 		if (ws->syncobj_capacity > UINT32_MAX / 2)
1530 			goto fail;
1531 
1532 		unsigned new_capacity = MAX2(count, ws->syncobj_capacity * 2);
1533 		uint32_t *n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
1534 		if (!n)
1535 			goto fail;
1536 		ws->syncobj_capacity = new_capacity;
1537 		ws->syncobj = n;
1538 	}
1539 
1540 	while(ws->syncobj_count < count) {
1541 		int r = amdgpu_cs_create_syncobj(ws->dev, ws->syncobj + ws->syncobj_count);
1542 		if (r)
1543 			goto fail;
1544 		++ws->syncobj_count;
1545 	}
1546 
1547 	for (unsigned i = 0; i < count; ++i)
1548 		dst[i] = ws->syncobj[--ws->syncobj_count];
1549 
1550 	pthread_mutex_unlock(&ws->syncobj_lock);
1551 	return 0;
1552 
1553 fail:
1554 	pthread_mutex_unlock(&ws->syncobj_lock);
1555 	return -ENOMEM;
1556 }
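/* Editorial note (not in the original source): syncobjs are relatively costly
 * to create, so the winsys keeps a LIFO cache in ws->syncobj guarded by
 * syncobj_lock. Allocation pops cached handles (creating more on demand), and
 * radv_amdgpu_cache_free_syncobjs() pushes them back, destroying handles only
 * when the cache cannot grow.
 */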
1557 
1558 static void radv_amdgpu_cache_free_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *src)
1559 {
1560 	pthread_mutex_lock(&ws->syncobj_lock);
1561 
1562 	uint32_t cache_count = MIN2(count, UINT32_MAX - ws->syncobj_count);
1563 	if (cache_count + ws->syncobj_count > ws->syncobj_capacity) {
1564 		unsigned new_capacity = MAX2(ws->syncobj_count + cache_count, ws->syncobj_capacity * 2);
1565 		uint32_t* n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
1566 		if (n) {
1567 			ws->syncobj_capacity = new_capacity;
1568 			ws->syncobj = n;
1569 		}
1570 	}
1571 
1572 	for (unsigned i = 0; i < count; ++i) {
1573 		if (ws->syncobj_count < ws->syncobj_capacity)
1574 			ws->syncobj[ws->syncobj_count++] = src[i];
1575 		else
1576 			amdgpu_cs_destroy_syncobj(ws->dev, src[i]);
1577 	}
1578 
1579 	pthread_mutex_unlock(&ws->syncobj_lock);
1580 
1581 }
1582 
1583 static int radv_amdgpu_cs_prepare_syncobjs(struct radv_amdgpu_winsys *ws,
1584                                            struct radv_winsys_sem_counts *counts,
1585                                            uint32_t **out_syncobjs)
1586 {
1587 	int r = 0;
1588 
1589 	if (!ws->info.has_timeline_syncobj || !counts->syncobj_count) {
1590 		*out_syncobjs = NULL;
1591 		return 0;
1592 	}
1593 
1594 	*out_syncobjs = malloc(counts->syncobj_count * sizeof(**out_syncobjs));
1595 	if (!*out_syncobjs)
1596 		return -ENOMEM;
1597 
1598 	r = radv_amdgpu_cache_alloc_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
1599 	if (r) {
		free(*out_syncobjs);
		*out_syncobjs = NULL;
1600 		return r;
	}
1601 
1602 	for (unsigned i = 0; i < counts->syncobj_count; ++i) {
1603 		r = amdgpu_cs_syncobj_transfer(ws->dev, (*out_syncobjs)[i], 0, counts->syncobj[i], 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT);
1604 		if (r)
1605 			goto fail;
1606 	}
1607 
1608 	r = amdgpu_cs_syncobj_reset(ws->dev, counts->syncobj, counts->syncobj_reset_count);
1609 	if (r)
1610 		goto fail;
1611 
1612 	return 0;
1613 fail:
1614 	radv_amdgpu_cache_free_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
1615 	free(*out_syncobjs);
1616 	*out_syncobjs = NULL;
1617 	return r;
1618 }
1619 
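/* Translate a radv_amdgpu_cs_request plus its semaphore info into a list of
 * CS chunks (IBs, optional user fence, syncobj waits/signals, fence
 * dependencies and the BO list) and submit it with amdgpu_cs_submit_raw2().
 */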
1620 static VkResult
1621 radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1622 		      struct radv_amdgpu_cs_request *request,
1623 		      struct radv_winsys_sem_info *sem_info)
1624 {
1625 	int r;
1626 	int num_chunks;
1627 	int size;
1628 	bool user_fence;
1629 	struct drm_amdgpu_cs_chunk *chunks;
1630 	struct drm_amdgpu_cs_chunk_data *chunk_data;
1631 	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1632 	bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
1633 	struct drm_amdgpu_bo_list_in bo_list_in;
1634 	void *wait_syncobj = NULL, *signal_syncobj = NULL;
1635 	uint32_t *in_syncobjs = NULL;
1636 	int i;
1637 	struct amdgpu_cs_fence *sem;
1638 	uint32_t bo_list = 0;
1639 	VkResult result = VK_SUCCESS;
1640 
1641 	user_fence = (request->fence_info.handle != NULL);
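	/* Upper bound on the chunk count: one chunk per IB, the optional user
	 * fence, wait/signal/dependency chunks and, outside the legacy path,
	 * a BO handles chunk. */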
1642 	size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;
1643 
1644 	chunks = malloc(sizeof(chunks[0]) * size);
1645 	if (!chunks)
1646 		return VK_ERROR_OUT_OF_HOST_MEMORY;
1647 
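	/* One drm_amdgpu_cs_chunk_data entry per IB, plus one for the optional user fence. */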
1648 	size = request->number_of_ibs + (user_fence ? 1 : 0);
1649 
1650 	chunk_data = malloc(sizeof(chunk_data[0]) * size);
1651 	if (!chunk_data) {
1652 		result = VK_ERROR_OUT_OF_HOST_MEMORY;
1653 		goto error_out;
1654 	}
1655 
1656 	num_chunks = request->number_of_ibs;
1657 	for (i = 0; i < request->number_of_ibs; i++) {
1658 		struct amdgpu_cs_ib_info *ib;
1659 		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1660 		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1661 		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1662 
1663 		ib = &request->ibs[i];
1664 
1665 		chunk_data[i].ib_data._pad = 0;
1666 		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1667 		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1668 		chunk_data[i].ib_data.ip_type = request->ip_type;
1669 		chunk_data[i].ib_data.ip_instance = request->ip_instance;
1670 		chunk_data[i].ib_data.ring = request->ring;
1671 		chunk_data[i].ib_data.flags = ib->flags;
1672 	}
1673 
1674 	if (user_fence) {
1675 		i = num_chunks++;
1676 
1677 		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1678 		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1679 		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1680 
1681 		amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1682 						   &chunk_data[i]);
1683 	}
1684 
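	/* Wait semaphores: with timeline syncobj support the waits consume the
	 * temporary syncobjs prepared above; otherwise the application's binary
	 * syncobjs are used directly. */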
1685 	if ((sem_info->wait.syncobj_count || sem_info->wait.timeline_syncobj_count) && sem_info->cs_emit_wait) {
1686 		r = radv_amdgpu_cs_prepare_syncobjs(ctx->ws, &sem_info->wait, &in_syncobjs);
1687 		if (r)
1688 			goto error_out;
1689 
1690 		if (ctx->ws->info.has_timeline_syncobj) {
1691 			wait_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->wait,
1692 										   in_syncobjs,
1693 										   &chunks[num_chunks],
1694 										   AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT);
1695 		} else {
1696 			wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1697 									  in_syncobjs,
1698 									  &chunks[num_chunks],
1699 									  AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1700 		}
1701 		if (!wait_syncobj) {
1702 			result = VK_ERROR_OUT_OF_HOST_MEMORY;
1703 			goto error_out;
1704 		}
1705 		num_chunks++;
1706 
1707 		if (sem_info->wait.sem_count == 0)
1708 			sem_info->cs_emit_wait = false;
1709 
1710 	}
1711 
1712 	if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1713 		sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
1714 		if (!sem_dependencies) {
1715 			result = VK_ERROR_OUT_OF_HOST_MEMORY;
1716 			goto error_out;
1717 		}
1718 
1719 		int sem_count = 0;
1720 
1721 		for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1722 			sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1723 			if (!sem->context)
1724 				continue;
1725 			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1726 
1727 			amdgpu_cs_chunk_fence_to_dep(sem, dep);
1728 
1729 			sem->context = NULL;
1730 		}
1731 		i = num_chunks++;
1732 
1733 		/* dependencies chunk */
1734 		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1735 		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1736 		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1737 
1738 		sem_info->cs_emit_wait = false;
1739 	}
1740 
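	/* Signal semaphores, emitted as a syncobj (or timeline syncobj) chunk. */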
1741 	if ((sem_info->signal.syncobj_count || sem_info->signal.timeline_syncobj_count) && sem_info->cs_emit_signal) {
1742 		if (ctx->ws->info.has_timeline_syncobj) {
1743 			signal_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->signal,
1744 										     NULL,
1745 										     &chunks[num_chunks],
1746 										     AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL);
1747 		} else {
1748 			signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1749 									    NULL,
1750 									    &chunks[num_chunks],
1751 									    AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1752 		}
1753 		if (!signal_syncobj) {
1754 			result = VK_ERROR_OUT_OF_HOST_MEMORY;
1755 			goto error_out;
1756 		}
1757 		num_chunks++;
1758 	}
1759 
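	/* Kernels with DRM minor version < 27 need the BO list created with a
	 * separate ioctl; newer kernels take it in-line as an
	 * AMDGPU_CHUNK_ID_BO_HANDLES chunk. */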
1760 	if (use_bo_list_create) {
1761 		/* Legacy path creating the buffer list handle and passing it
1762 		 * to the CS ioctl.
1763 		 */
1764 		r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
1765 					      request->handles, &bo_list);
1766 		if (r) {
1767 			if (r == -ENOMEM) {
1768 				fprintf(stderr, "amdgpu: Not enough memory for buffer list creation.\n");
1769 				result = VK_ERROR_OUT_OF_HOST_MEMORY;
1770 			} else {
1771 				fprintf(stderr, "amdgpu: buffer list creation failed (%d).\n", r);
1772 				result = VK_ERROR_UNKNOWN;
1773 			}
1774 			goto error_out;
1775 		}
1776 	} else {
1777 		/* Standard path passing the buffer list via the CS ioctl. */
1778 		bo_list_in.operation = ~0;
1779 		bo_list_in.list_handle = ~0;
1780 		bo_list_in.bo_number = request->num_handles;
1781 		bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
1782 		bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;
1783 
1784 		chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
1785 		chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
1786 		chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
1787 		num_chunks++;
1788 	}
1789 
1790 	r = amdgpu_cs_submit_raw2(ctx->ws->dev,
1791 				 ctx->ctx,
1792 				 bo_list,
1793 				 num_chunks,
1794 				 chunks,
1795 				 &request->seq_no);
1796 
1797 	if (r) {
1798 		if (r == -ENOMEM) {
1799 			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1800 			result = VK_ERROR_OUT_OF_HOST_MEMORY;
1801 		} else if (r == -ECANCELED) {
1802 			fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
1803 			result = VK_ERROR_DEVICE_LOST;
1804 		} else {
1805 			fprintf(stderr, "amdgpu: The CS has been rejected, "
1806 					"see dmesg for more information (%i).\n", r);
1807 			result = VK_ERROR_UNKNOWN;
1808 		}
1809 	}
1810 
1811 	if (bo_list)
1812 		amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
1813 
1814 error_out:
1815 	if (in_syncobjs) {
1816 		radv_amdgpu_cache_free_syncobjs(ctx->ws, sem_info->wait.syncobj_count, in_syncobjs);
1817 		free(in_syncobjs);
1818 	}
1819 	free(chunks);
1820 	free(chunk_data);
1821 	free(sem_dependencies);
1822 	free(wait_syncobj);
1823 	free(signal_syncobj);
1824 	return result;
1825 }
1826 
1827 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1828 				      bool create_signaled,
1829 				      uint32_t *handle)
1830 {
1831 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1832 	uint32_t flags = 0;
1833 
1834 	if (create_signaled)
1835 		flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
1836 
1837 	return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
1838 }
1839 
1840 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1841 				    uint32_t handle)
1842 {
1843 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1844 	amdgpu_cs_destroy_syncobj(ws->dev, handle);
1845 }
1846 
1847 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1848 				    uint32_t handle)
1849 {
1850 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1851 	amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1852 }
1853 
1854 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1855 				    uint32_t handle, uint64_t point)
1856 {
1857 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1858 	if (point)
1859 		amdgpu_cs_syncobj_timeline_signal(ws->dev, &handle, &point, 1);
1860 	else
1861 		amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1862 }
1863 
1864 static VkResult radv_amdgpu_query_syncobj(struct radeon_winsys *_ws,
1865                                       uint32_t handle, uint64_t *point)
1866 {
1867 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1868 	int ret = amdgpu_cs_syncobj_query(ws->dev, &handle, point, 1);
1869 	if (ret == 0)
1870 		return VK_SUCCESS;
1871 	else if (ret == -ENOMEM)
1872 		return VK_ERROR_OUT_OF_HOST_MEMORY;
1873 	else {
1874 		/* The remaining errors are driver-internal issues: EFAULT for
1875 		 * dangling pointers and ENOENT for a non-existing syncobj. */
1876 		fprintf(stderr, "amdgpu: internal error in radv_amdgpu_query_syncobj. (%d)\n", ret);
1877 		return VK_ERROR_UNKNOWN;
1878 	}
1879 }
1880 
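/* Wait for binary syncobjs; returns true once they have signalled, false on
 * timeout or error. */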
1881 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1882                                      uint32_t handle_count, bool wait_all, uint64_t timeout)
1883 {
1884 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1885 	uint32_t tmp;
1886 
1887 	/* The kernel timeout is signed, while Vulkan timeouts are unsigned, so clamp to INT64_MAX. */
1888 	timeout = MIN2(timeout, INT64_MAX);
1889 
1890 	int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1891 					 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1892 					 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1893 					 &tmp);
1894 	if (ret == 0) {
1895 		return true;
1896 	} else if (ret == -ETIME) {
1897 		return false;
1898 	} else {
1899 		fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed! (%d)\n", ret);
1900 		return false;
1901 	}
1902 }
1903 
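/* Same as above, but waiting for specific timeline points to signal (or, with
 * "available" set, only for the point's fence to be submitted). */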
1904 static bool radv_amdgpu_wait_timeline_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1905                                               const uint64_t *points, uint32_t handle_count,
1906                                               bool wait_all, bool available, uint64_t timeout)
1907 {
1908 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1909 
1910 	/* The kernel timeout is signed, while Vulkan timeouts are unsigned, so clamp to INT64_MAX. */
1911 	timeout = MIN2(timeout, INT64_MAX);
1912 
1913 	int ret = amdgpu_cs_syncobj_timeline_wait(ws->dev, (uint32_t*)handles, (uint64_t*)points,
1914 	                                          handle_count, timeout,
1915 	                                          DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1916 	                                          (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0) |
1917 	                                          (available ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE : 0),
1918 	                                          NULL);
1919 	if (ret == 0) {
1920 		return true;
1921 	} else if (ret == -ETIME) {
1922 		return false;
1923 	} else {
1924 		fprintf(stderr, "amdgpu: radv_amdgpu_wait_timeline_syncobj failed! (%d)\n", ret);
1925 		return false;
1926 	}
1927 }
1928 
1929 
1930 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1931 				      uint32_t syncobj,
1932 				      int *fd)
1933 {
1934 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1935 
1936 	return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1937 }
1938 
1939 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1940 				      int fd,
1941 				      uint32_t *syncobj)
1942 {
1943 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1944 
1945 	return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1946 }
1947 
1948 
1949 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1950                                                    uint32_t syncobj,
1951                                                    int *fd)
1952 {
1953 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1954 
1955 	return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1956 }
1957 
1958 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1959                                                      uint32_t syncobj,
1960                                                      int fd)
1961 {
1962 	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1963 
1964 	return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1965 }
1966 
1967 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1968 {
1969 	ws->base.ctx_create = radv_amdgpu_ctx_create;
1970 	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1971 	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1972 	ws->base.cs_create = radv_amdgpu_cs_create;
1973 	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1974 	ws->base.cs_grow = radv_amdgpu_cs_grow;
1975 	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1976 	ws->base.cs_reset = radv_amdgpu_cs_reset;
1977 	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1978 	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1979 	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1980 	ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1981 	ws->base.create_fence = radv_amdgpu_create_fence;
1982 	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1983 	ws->base.reset_fence = radv_amdgpu_reset_fence;
1984 	ws->base.signal_fence = radv_amdgpu_signal_fence;
1985 	ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
1986 	ws->base.create_sem = radv_amdgpu_create_sem;
1987 	ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1988 	ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1989 	ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1990 	ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1991 	ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1992 	ws->base.query_syncobj = radv_amdgpu_query_syncobj;
1993 	ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1994 	ws->base.wait_timeline_syncobj = radv_amdgpu_wait_timeline_syncobj;
1995 	ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1996 	ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1997 	ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1998 	ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1999 	ws->base.fence_wait = radv_amdgpu_fence_wait;
2000 	ws->base.fences_wait = radv_amdgpu_fences_wait;
2001 }
2002