/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

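/* Close a GEM handle on the device's KMS fd.  This is best-effort cleanup,
 * so the DRM_IOCTL_GEM_CLOSE return value is ignored. */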
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				    uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

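/* Final teardown of a buffer object once its reference count drops to zero:
 * unregister it from the handle/flink hash tables, drop any remaining CPU
 * mapping, close the GEM handle and free the wrapper struct. */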
drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
	/* Remove the buffer from the hash tables. */
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_remove(bo->dev->bo_handles,
			       (void*)(uintptr_t)bo->handle);
	if (bo->flink_name) {
		util_hash_table_remove(bo->dev->bo_flink_names,
				       (void*)(uintptr_t)bo->flink_name);
	}
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	/* Release CPU access. */
	if (bo->cpu_map_count > 0) {
		bo->cpu_map_count = 1;
		amdgpu_bo_cpu_unmap(bo);
	}

	amdgpu_close_kms_handle(bo->dev, bo->handle);
	pthread_mutex_destroy(&bo->cpu_access_mutex);
	free(bo);
}

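/* Allocate a new buffer object in the requested heap (GTT and/or VRAM).
 * On success the BO is returned with a reference count of one and no CPU
 * mapping. */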
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;
	union drm_amdgpu_gem_create args;
	unsigned heap = alloc_buffer->preferred_heap;
	int r = 0;

	/* It's an error if the heap is not specified */
	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = alloc_buffer->alloc_size;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r) {
		free(bo);
		return r;
	}

	bo->handle = args.out.handle;

	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

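/* Attach UMD metadata and tiling info to a BO.  The opaque metadata blob
 * must fit into the kernel's fixed-size buffer (sizeof(args.data.data)),
 * otherwise -EINVAL is returned. */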
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

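/* Query allocation parameters and previously set metadata for a BO,
 * combining the GEM_METADATA and GEM_OP ioctls into one result. */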
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

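/* Record a KMS handle -> BO mapping so that a later import of the same
 * handle returns the existing amdgpu_bo instance. */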
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_handles,
			    (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

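/* Create a GEM flink name for the BO.  If the device's flink fd differs
 * from its main fd, the BO is first moved onto the flink fd through a
 * temporary dma-buf fd before flinking, and the temporary GEM handle on
 * the flink fd is closed again afterwards. */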
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}

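/* Export a BO as a flink name, raw KMS handle or dma-buf fd.  KMS and
 * dma-buf exports also register the handle in the device table so that a
 * later import maps back to this BO. */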
int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
		amdgpu_add_handle_to_table(bo);
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		amdgpu_add_handle_to_table(bo);
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
					  (int*)shared_handle);
	}
	return -EINVAL;
}

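/* Import a BO from a flink name or dma-buf fd.  If the underlying buffer
 * was already imported or exported on this device, the existing amdgpu_bo
 * is returned with its reference count bumped instead of creating a second
 * wrapper for the same kernel object. */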
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	/* Just drop the reference. */
	amdgpu_bo_reference(&buf_handle, NULL);
	return 0;
}

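/* Map a BO for CPU access.  Mappings are reference counted per BO: the
 * first caller mmaps the buffer through DRM_AMDGPU_GEM_MMAP, later callers
 * simply receive the cached pointer. */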
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

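/* Drop one CPU-mapping reference; the buffer is only munmapped when the
 * last mapping is released.  Returns -EINVAL if the BO is not mapped. */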
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

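/* Fill in the recommended buffer size alignments: the PTE fragment size
 * for local (VRAM) allocations and the GART page size for remote (GTT)
 * allocations, both taken from the cached device info. */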
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				       struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

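/* Wait for all pending GPU work on a BO to finish, up to timeout_ns.
 * On success, *busy reports whether the buffer is still in use. */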
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}

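/* Wrap an existing anonymous CPU allocation in a BO via the userptr
 * interface.  The address and size are expanded to full page boundaries
 * before registration, so the resulting alloc_size may be larger than the
 * size requested by the caller. */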
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				   void *cpu,
				   uint64_t size,
				   amdgpu_bo_handle *buf_handle)
{
	int r;
	struct amdgpu_bo *bo;
	struct drm_amdgpu_gem_userptr args;
	uintptr_t cpu0;
	uint32_t ps, off;

	memset(&args, 0, sizeof(args));
	ps = getpagesize();

	cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
	off = (uintptr_t)cpu - cpu0;
	size = ROUND_UP(size + off, ps);

	args.addr = cpu0;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		return r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		/* Don't leak the GEM handle created above. */
		amdgpu_close_kms_handle(dev, args.handle);
		return -ENOMEM;
	}

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = args.handle;

	*buf_handle = bo;

	return r;
}

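/* Build a kernel BO list from an array of BO handles and optional
 * per-buffer priorities (priority 0 is used when resource_prios is NULL). */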
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}

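/* Destroy a BO list on the kernel side and free the wrapper on success. */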
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

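/* Replace the contents of an existing BO list with a new set of buffers
 * and priorities, using AMDGPU_BO_LIST_OP_UPDATE. */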
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (list == NULL)
		return -ENOMEM;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

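/* Map or unmap a range of the BO in the GPU virtual address space.
 * Note that the flags argument is currently ignored: the mapping is always
 * created readable, writeable and executable, and map_size is rounded up
 * to a full page. */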
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		    uint64_t offset,
		    uint64_t size,
		    uint64_t addr,
		    uint64_t flags,
		    uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo->handle;
	va.operation = ops;
	va.flags = AMDGPU_VM_PAGE_READABLE |
		   AMDGPU_VM_PAGE_WRITEABLE |
		   AMDGPU_VM_PAGE_EXECUTABLE;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = ALIGN(size, getpagesize());

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}