/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static int amdgpu_close_kms_handle(int fd, uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
}

static int amdgpu_bo_create(amdgpu_device_handle dev,
			    uint64_t size,
			    uint32_t handle,
			    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;
	int r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	r = handle_table_insert(&dev->bo_handles, handle, bo);
	if (r) {
		free(bo);
		return r;
	}

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = handle;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
			       struct amdgpu_bo_alloc_request *alloc_buffer,
			       amdgpu_bo_handle *buf_handle)
{
	union drm_amdgpu_gem_create args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = alloc_buffer->preferred_heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r)
		goto out;

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
			     buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_close_kms_handle(dev->fd, args.out.handle);

out:
	return r;
}
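
/* Usage sketch (illustrative only, not part of the library): allocate a
 * 1 MiB, 4 KiB-aligned buffer in GTT and release it again. `dev` is assumed
 * to come from amdgpu_device_initialize(); error handling is minimal.
 *
 *	struct amdgpu_bo_alloc_request req = {
 *		.alloc_size = 1024 * 1024,
 *		.phys_alignment = 4096,
 *		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *	};
 *	amdgpu_bo_handle buf;
 *
 *	if (!amdgpu_bo_alloc(dev, &req, &buf))
 *		amdgpu_bo_free(buf);
 */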

drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
				      struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
				    struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in. */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}
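
/* Usage sketch (illustrative only): read back the allocation parameters and
 * UMD metadata of an existing BO `buf`. Needs <inttypes.h> for PRIu64.
 *
 *	struct amdgpu_bo_info info;
 *
 *	if (!amdgpu_bo_query_info(buf, &info))
 *		printf("size %" PRIu64 ", heap 0x%x, %u metadata bytes\n",
 *		       info.alloc_size, info.preferred_heap,
 *		       info.metadata.size_metadata);
 */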

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd)
		amdgpu_close_kms_handle(bo->dev->flink_fd, handle);

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
				enum amdgpu_bo_handle_type type,
				uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
					  DRM_CLOEXEC | DRM_RDWR,
					  (int*)shared_handle);
	}
	return -EINVAL;
}
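
/* Usage sketch (illustrative only): share a BO with another process through
 * a dma-buf file descriptor. The fd can then be passed over a UNIX domain
 * socket and re-imported on the other side with amdgpu_bo_import().
 *
 *	uint32_t shared;
 *
 *	if (!amdgpu_bo_export(buf, amdgpu_bo_handle_type_dma_buf_fd, &shared))
 *		send_fd_to_peer((int)shared);  // hypothetical IPC helper
 */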

drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
				enum amdgpu_bo_handle_type type,
				uint32_t shared_handle,
				struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	uint32_t handle = 0, flink_name = 0;
	uint64_t alloc_size = 0;
	int r = 0;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			goto unlock;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			r = -errno;
			goto free_bo_handle;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = handle_table_lookup(&dev->bo_handles, shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		/* Importing a KMS handle is not allowed. */
		r = -EPERM;
		goto unlock;

	default:
		r = -EINVAL;
		goto unlock;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r)
			goto unlock;

		flink_name = shared_handle;
		handle = open_arg.handle;
		alloc_size = open_arg.size;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r)
				goto free_bo_handle;
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
			close(dma_fd);
			if (r)
				goto free_bo_handle;
			r = amdgpu_close_kms_handle(dev->flink_fd,
						    open_arg.handle);
			if (r)
				goto free_bo_handle;
		}
		open_arg.handle = 0;
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		handle = shared_handle;
		alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
	if (r)
		goto free_bo_handle;

	if (flink_name) {
		bo->flink_name = flink_name;
		r = handle_table_insert(&dev->bo_flink_names, flink_name,
					bo);
		if (r)
			goto free_bo_handle;
	}

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;

free_bo_handle:
	if (flink_name && open_arg.handle)
		amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);

	if (bo)
		amdgpu_bo_free(bo);
	else
		amdgpu_close_kms_handle(dev->fd, handle);
unlock:
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return r;
}
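
/* Usage sketch (illustrative only): import a buffer exported by another
 * process. `dma_fd` is assumed to be a dma-buf file descriptor received
 * over IPC; flink names work the same way with
 * amdgpu_bo_handle_type_gem_flink_name.
 *
 *	struct amdgpu_bo_import_result res;
 *
 *	if (!amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *			      (uint32_t)dma_fd, &res))
 *		printf("imported %" PRIu64 " bytes\n", res.alloc_size);
 */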

drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		handle_table_remove(&dev->bo_handles, bo->handle);

		if (bo->flink_name)
			handle_table_remove(&dev->bo_flink_names,
					    bo->flink_name);

		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev->fd, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);

	return 0;
}

drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
	atomic_inc(&bo->refcount);
}

drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}
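
/* Usage sketch (illustrative only): map a BO for CPU access, clear it, and
 * drop the mapping again. Mappings are reference counted, so nested
 * map/unmap pairs on the same BO are fine.
 *
 *	void *ptr;
 *
 *	if (!amdgpu_bo_cpu_map(buf, &ptr)) {
 *		memset(ptr, 0, info.alloc_size);
 *		amdgpu_bo_cpu_unmap(buf);
 *	}
 */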

drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
				       uint64_t timeout_ns,
				       bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}
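
/* Usage sketch (illustrative only): wait until the GPU is done with a BO.
 * AMDGPU_TIMEOUT_INFINITE is the timeout constant from amdgpu.h; with a
 * finite timeout, *busy reports whether the BO was still in use when the
 * wait returned.
 *
 *	bool busy;
 *
 *	if (!amdgpu_bo_wait_for_idle(buf, AMDGPU_TIMEOUT_INFINITE, &busy))
 *		assert(!busy);
 */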

drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
					     void *cpu,
					     uint64_t size,
					     amdgpu_bo_handle *buf_handle,
					     uint64_t *offset_in_bo)
{
	struct amdgpu_bo *bo;
	uint32_t i;
	int r = 0;

	if (cpu == NULL || size == 0)
		return -EINVAL;

	/*
	 * Workaround for a buggy application which tries to import previously
	 * exposed CPU pointers. If we find a real world use case we should
	 * improve that by asking the kernel for the right handle.
	 */
	pthread_mutex_lock(&dev->bo_table_mutex);
	for (i = 0; i < dev->bo_handles.max_key; i++) {
		bo = handle_table_lookup(&dev->bo_handles, i);
		if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
			continue;
		if (cpu >= bo->cpu_ptr &&
		    cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
			break;
	}

	if (i < dev->bo_handles.max_key) {
		atomic_inc(&bo->refcount);
		*buf_handle = bo;
		*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
	} else {
		*buf_handle = NULL;
		*offset_in_bo = 0;
		r = -ENXIO;
	}
	pthread_mutex_unlock(&dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
					      void *cpu,
					      uint64_t size,
					      amdgpu_bo_handle *buf_handle)
{
	int r;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		goto out;

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_close_kms_handle(dev->fd, args.handle);

out:
	return r;
}
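
/* Usage sketch (illustrative only): wrap anonymous, page-aligned user memory
 * in a BO. The ANONONLY flag means file-backed mappings are rejected, and
 * the memory must stay valid for the lifetime of the BO.
 *
 *	size_t sz = 2 * 1024 * 1024;
 *	void *mem = aligned_alloc(getpagesize(), sz);
 *	amdgpu_bo_handle ubo;
 *
 *	if (mem && !amdgpu_create_bo_from_user_mem(dev, mem, sz, &ubo))
 *		amdgpu_bo_free(ubo);
 */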

drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
					 uint32_t number_of_buffers,
					 struct drm_amdgpu_bo_list_entry *buffers,
					 uint32_t *result)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_buffers;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	if (!r)
		*result = args.out.list_handle;
	return r;
}

drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
					  uint32_t bo_list)
{
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = bo_list;

	return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				   &args, sizeof(args));
}

drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios,
				     amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}
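
/* Usage sketch (illustrative only): build a BO list for command submission
 * from two buffers with default priority, then destroy it. `buf_a` and
 * `buf_b` are assumed to be existing BO handles.
 *
 *	amdgpu_bo_handle bos[] = { buf_a, buf_b };
 *	amdgpu_bo_list_handle bo_list;
 *
 *	if (!amdgpu_bo_list_create(dev, 2, bos, NULL, &bo_list))
 *		amdgpu_bo_list_destroy(bo_list);
 */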

drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	/* Zero the ioctl argument for consistency with the other BO-list
	 * entry points before filling in the update request. */
	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
			       uint64_t offset,
			       uint64_t size,
			       uint64_t addr,
			       uint64_t flags,
			       uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

	/* Note: the flags argument is ignored here; the mapping is always
	 * created with the default readable/writeable/executable flags. Use
	 * amdgpu_bo_va_op_raw() for full control over the flags. */
	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}
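
/* Usage sketch (illustrative only): map a BO into the GPU virtual address
 * space. The VA range is reserved first with amdgpu_va_range_alloc() from
 * amdgpu.h; `va_handle` keeps the reservation alive until it is freed.
 *
 *	uint64_t va;
 *	amdgpu_va_handle va_handle;
 *
 *	if (!amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				   info.alloc_size, 4096, 0, &va,
 *				   &va_handle, 0))
 *		amdgpu_bo_va_op(buf, 0, info.alloc_size, va, 0,
 *				AMDGPU_VA_OP_MAP);
 */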

drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
				   amdgpu_bo_handle bo,
				   uint64_t offset,
				   uint64_t size,
				   uint64_t addr,
				   uint64_t flags,
				   uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}