/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "util/os_drm.h"
#include "ac_linux_drm.h"
#include "util/u_math.h"

#include <stdlib.h>
#include <time.h>
#include <unistd.h>

#ifdef HAVE_AMDGPU_VIRTIO
#include "virtio/amdgpu_virtio.h"
#endif

struct ac_drm_device {
   union {
      amdgpu_device_handle adev;
#ifdef HAVE_AMDGPU_VIRTIO
      amdvgpu_device_handle vdev;
#endif
   };
   int fd;
   bool is_virtio;
};

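/* Open the device. When is_virtio is set (and HAVE_AMDGPU_VIRTIO is built in)
 * this goes through the amdvgpu wrapper, otherwise through libdrm_amdgpu
 * directly; on success the returned ac_drm_device owns the underlying handle
 * and caches the DRM fd for the raw ioctl paths below.
 */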
int ac_drm_device_initialize(int fd, bool is_virtio,
                             uint32_t *major_version, uint32_t *minor_version,
                             ac_drm_device **dev)
{
   int r;

   *dev = malloc(sizeof(ac_drm_device));
   if (!(*dev))
      return -1;

#ifdef HAVE_AMDGPU_VIRTIO
   if (is_virtio) {
      amdvgpu_device_handle vdev;
      r = amdvgpu_device_initialize(fd, major_version, minor_version,
                                    &vdev);
      if (r == 0) {
         (*dev)->vdev = vdev;
         (*dev)->fd = amdvgpu_device_get_fd(vdev);
      }
   } else
#endif
   {
      amdgpu_device_handle adev;
      r = amdgpu_device_initialize(fd, major_version, minor_version,
                                   &adev);
      if (r == 0) {
         (*dev)->adev = adev;
         (*dev)->fd = amdgpu_device_get_fd(adev);
      }
   }

   if (r == 0)
      (*dev)->is_virtio = is_virtio;
   else
      free(*dev);

   return r;
}

uintptr_t ac_drm_device_get_cookie(ac_drm_device *dev)
{
   return (uintptr_t) dev->adev;
}

void ac_drm_device_deinitialize(ac_drm_device *dev)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      amdvgpu_device_deinitialize(dev->vdev);
   else
#endif
      amdgpu_device_deinitialize(dev->adev);
   free(dev);
}

int ac_drm_device_get_fd(ac_drm_device *device_handle)
{
   return device_handle->fd;
}

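/* Attach UMD metadata (flags, tiling info, opaque blob) to a GEM buffer via
 * the GEM_METADATA ioctl; blobs larger than the kernel struct are rejected.
 */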
int ac_drm_bo_set_metadata(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_metadata *info)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_set_metadata(dev->vdev, bo_handle, info);
#endif
   struct drm_amdgpu_gem_metadata args = {};

   args.handle = bo_handle;
   args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
   args.data.flags = info->flags;
   args.data.tiling_info = info->tiling_info;

   if (info->size_metadata > sizeof(args.data.data))
      return -EINVAL;

   if (info->size_metadata) {
      args.data.data_size_bytes = info->size_metadata;
      memcpy(args.data.data, info->umd_metadata, info->size_metadata);
   }

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
}

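/* Reconstruct amdgpu_bo_info for a GEM handle from two ioctls: GEM_METADATA
 * for the UMD metadata, and GEM_OP(GET_GEM_CREATE_INFO) for size, alignment,
 * domains and allocation flags.
 */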
int ac_drm_bo_query_info(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_info *info)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_query_info(dev->vdev, bo_handle, info);
#endif
   struct drm_amdgpu_gem_metadata metadata = {};
   struct drm_amdgpu_gem_create_in bo_info = {};
   struct drm_amdgpu_gem_op gem_op = {};
   int r;

   /* Validate the BO passed in */
   if (!bo_handle)
      return -EINVAL;

   /* Query metadata. */
   metadata.handle = bo_handle;
   metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_METADATA, &metadata, sizeof(metadata));
   if (r)
      return r;

   if (metadata.data.data_size_bytes > sizeof(info->metadata.umd_metadata))
      return -EINVAL;

   /* Query buffer info. */
   gem_op.handle = bo_handle;
   gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
   gem_op.value = (uintptr_t)&bo_info;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
   if (r)
      return r;

   memset(info, 0, sizeof(*info));
   info->alloc_size = bo_info.bo_size;
   info->phys_alignment = bo_info.alignment;
   info->preferred_heap = bo_info.domains;
   info->alloc_flags = bo_info.domain_flags;
   info->metadata.flags = metadata.data.flags;
   info->metadata.tiling_info = metadata.data.tiling_info;

   info->metadata.size_metadata = metadata.data.data_size_bytes;
   if (metadata.data.data_size_bytes > 0)
      memcpy(info->metadata.umd_metadata, metadata.data.data, metadata.data.data_size_bytes);

   return 0;
}

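/* Convert a relative timeout in ns into the absolute CLOCK_MONOTONIC deadline
 * that the wait ioctls expect; overflow or a clock_gettime failure degrades
 * to an infinite wait.
 */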
static uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
   int r;

   if (timeout != AMDGPU_TIMEOUT_INFINITE) {
      struct timespec current;
      uint64_t current_ns;
      r = clock_gettime(CLOCK_MONOTONIC, &current);
      if (r) {
         fprintf(stderr, "clock_gettime() returned error (%d)!", errno);
         return AMDGPU_TIMEOUT_INFINITE;
      }

      current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
      current_ns += current.tv_nsec;
      timeout += current_ns;
      if (timeout < current_ns)
         timeout = AMDGPU_TIMEOUT_INFINITE;
   }
   return timeout;
}

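/* Block until the BO is idle or the timeout expires; *busy reports whether
 * the kernel still considered the buffer busy when the call returned.
 */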
int ac_drm_bo_wait_for_idle(ac_drm_device *dev, ac_drm_bo bo, uint64_t timeout_ns, bool *busy)
{
   int r;
   union drm_amdgpu_gem_wait_idle args;

   memset(&args, 0, sizeof(args));
   args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      r = amdvgpu_bo_wait_for_idle(dev->vdev, bo.vbo, args.in.timeout);
   } else
#endif
   {
      ac_drm_bo_export(dev, bo, amdgpu_bo_handle_type_kms,
                       &args.in.handle);
      r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &args, sizeof(args));
   }

   if (r == 0) {
      *busy = args.out.status;
      return 0;
   } else {
      fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
      return r;
   }
}

int ac_drm_bo_va_op(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                    uint64_t addr, uint64_t flags, uint32_t ops)
{
   size = ALIGN(size, getpagesize());

   return ac_drm_bo_va_op_raw(
      dev, bo_handle, offset, size, addr,
      AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

int ac_drm_bo_va_op_raw(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                        uint64_t addr, uint64_t flags, uint32_t ops)
{
   struct drm_amdgpu_gem_va va;
   int r;

   if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP && ops != AMDGPU_VA_OP_REPLACE &&
       ops != AMDGPU_VA_OP_CLEAR)
      return -EINVAL;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_va_op_raw(dev->vdev, bo_handle, offset, size, addr, flags, ops);
#endif

   memset(&va, 0, sizeof(va));
   va.handle = bo_handle;
   va.operation = ops;
   va.flags = flags;
   va.va_address = addr;
   va.offset_in_bo = offset;
   va.map_size = size;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

   return r;
}

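/* Same as ac_drm_bo_va_op_raw, but also fills the timeline-syncobj fields of
 * drm_amdgpu_gem_va (vm_timeline_syncobj_out/point and the input fence
 * syncobj handle array) so VA updates can be ordered against fences.
 */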
int ac_drm_bo_va_op_raw2(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                         uint64_t addr, uint64_t flags, uint32_t ops,
                         uint32_t vm_timeline_syncobj_out, uint64_t vm_timeline_point,
                         uint64_t input_fence_syncobj_handles, uint32_t num_syncobj_handles)
{
   struct drm_amdgpu_gem_va va;
   int r;

   if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
       ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
      return -EINVAL;

   memset(&va, 0, sizeof(va));
   va.handle = bo_handle;
   va.operation = ops;
   va.flags = flags;
   va.va_address = addr;
   va.offset_in_bo = offset;
   va.map_size = size;
   va.vm_timeline_syncobj_out = vm_timeline_syncobj_out;
   va.vm_timeline_point = vm_timeline_point;
   va.input_fence_syncobj_handles = input_fence_syncobj_handles;
   va.num_syncobj_handles = num_syncobj_handles;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

   return r;
}

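/* Allocate a command submission context. The AMD_PRIORITY environment
 * variable, if set and parseable, overrides the requested priority.
 */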
int ac_drm_cs_ctx_create2(ac_drm_device *dev, uint32_t priority, uint32_t *ctx_id)
{
   int r;
   union drm_amdgpu_ctx args;
   char *override_priority;

   override_priority = getenv("AMD_PRIORITY");
   if (override_priority) {
      /* The priority is a signed integer. The variable type is
       * wrong. If parsing fails, priority is unchanged.
       */
      if (sscanf(override_priority, "%i", &priority) == 1) {
         printf("amdgpu: context priority changed to %i\n", priority);
      }
   }

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_ctx_create2(dev->vdev, priority, ctx_id);
#endif
   /* Create the context */
   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
   args.in.priority = priority;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));

   if (r)
      return r;

   *ctx_id = args.out.alloc.ctx_id;

   return 0;
}

int ac_drm_cs_ctx_free(ac_drm_device *dev, uint32_t ctx_id)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_ctx_free(dev->vdev, ctx_id);
#endif
   union drm_amdgpu_ctx args;

   /* now deal with kernel side */
   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_FREE_CTX;
   args.in.ctx_id = ctx_id;
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}

int ac_drm_cs_ctx_stable_pstate(ac_drm_device *dev, uint32_t ctx_id, uint32_t op, uint32_t flags,
                                uint32_t *out_flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_ctx_stable_pstate(dev->vdev, ctx_id, op, flags, out_flags);
#endif
   union drm_amdgpu_ctx args;
   int r;

   if (!ctx_id)
      return -EINVAL;

   memset(&args, 0, sizeof(args));
   args.in.op = op;
   args.in.ctx_id = ctx_id;
   args.in.flags = flags;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   if (!r && out_flags)
      *out_flags = args.out.pstate.flags;
   return r;
}

int ac_drm_cs_query_reset_state2(ac_drm_device *dev, uint32_t ctx_id, uint64_t *flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_query_reset_state2(dev->vdev, ctx_id, flags);
#endif

   union drm_amdgpu_ctx args;
   int r;

   if (!ctx_id)
      return -EINVAL;

   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
   args.in.ctx_id = ctx_id;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   if (!r)
      *flags = args.out.state.flags;
   return r;
}

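/* Thin wrapper around DRM_IOCTL_AMDGPU_WAIT_CS: waits for the fence
 * identified by (ctx, ip, ring, handle) and reports its busy status.
 */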
static int amdgpu_ioctl_wait_cs(int device_fd, uint32_t ctx_handle, unsigned ip,
                                unsigned ip_instance, uint32_t ring, uint64_t handle,
                                uint64_t timeout_ns, uint64_t flags, bool *busy)
{
   union drm_amdgpu_wait_cs args;
   int r;

   memset(&args, 0, sizeof(args));
   args.in.handle = handle;
   args.in.ip_type = ip;
   args.in.ip_instance = ip_instance;
   args.in.ring = ring;
   args.in.ctx_id = ctx_handle;

   if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
      args.in.timeout = timeout_ns;
   else
      args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

   r = drm_ioctl(device_fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
   if (r)
      return -errno;

   *busy = args.out.status;
   return 0;
}

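/* Check whether a submission's fence has signaled. A zero sequence number is
 * treated as already expired; otherwise the wait path (virtio or native
 * WAIT_CS) decides based on the busy status it returns.
 */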
int ac_drm_cs_query_fence_status(ac_drm_device *dev, uint32_t ctx_id, uint32_t ip_type,
                                 uint32_t ip_instance, uint32_t ring, uint64_t fence_seq_no,
                                 uint64_t timeout_ns, uint64_t flags, uint32_t *expired)
{
   bool busy = true;
   int r;

   if (!fence_seq_no) {
      *expired = true;
      return 0;
   }

   *expired = false;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      r = amdvgpu_cs_query_fence_status(dev->vdev, ctx_id, ip_type, ip_instance, ring, fence_seq_no,
                                        timeout_ns, flags, expired);
   else
#endif
      r = amdgpu_ioctl_wait_cs(dev->fd, ctx_id, ip_type, ip_instance, ring, fence_seq_no,
                               timeout_ns, flags, &busy);

   if (!r && !busy)
      *expired = true;

   return r;
}

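/* The syncobj helpers below operate on a raw DRM fd and forward directly to
 * the libdrm drmSyncobj* entry points; export_sync_file2 additionally
 * materializes a timeline point into a temporary binary syncobj first.
 */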
int ac_drm_cs_create_syncobj2(int device_fd, uint32_t flags, uint32_t *handle)
{
   return drmSyncobjCreate(device_fd, flags, handle);
}

int ac_drm_cs_create_syncobj(int device_fd, uint32_t *handle)
{
   return drmSyncobjCreate(device_fd, 0, handle);
}

int ac_drm_cs_destroy_syncobj(int device_fd, uint32_t handle)
{
   return drmSyncobjDestroy(device_fd, handle);
}

int ac_drm_cs_syncobj_wait(int device_fd, uint32_t *handles, unsigned num_handles,
                           int64_t timeout_nsec, unsigned flags, uint32_t *first_signaled)
{
   return drmSyncobjWait(device_fd, handles, num_handles, timeout_nsec, flags, first_signaled);
}

int ac_drm_cs_syncobj_query2(int device_fd, uint32_t *handles, uint64_t *points,
                             unsigned num_handles, uint32_t flags)
{
   return drmSyncobjQuery2(device_fd, handles, points, num_handles, flags);
}

int ac_drm_cs_import_syncobj(int device_fd, int shared_fd, uint32_t *handle)
{
   return drmSyncobjFDToHandle(device_fd, shared_fd, handle);
}

int ac_drm_cs_syncobj_export_sync_file(int device_fd, uint32_t syncobj, int *sync_file_fd)
{
   return drmSyncobjExportSyncFile(device_fd, syncobj, sync_file_fd);
}

int ac_drm_cs_syncobj_import_sync_file(int device_fd, uint32_t syncobj, int sync_file_fd)
{
   return drmSyncobjImportSyncFile(device_fd, syncobj, sync_file_fd);
}

int ac_drm_cs_syncobj_export_sync_file2(int device_fd, uint32_t syncobj, uint64_t point,
                                        uint32_t flags, int *sync_file_fd)
{
   uint32_t binary_handle;
   int ret;

   if (!point)
      return drmSyncobjExportSyncFile(device_fd, syncobj, sync_file_fd);

   ret = drmSyncobjCreate(device_fd, 0, &binary_handle);
   if (ret)
      return ret;

   ret = drmSyncobjTransfer(device_fd, binary_handle, 0, syncobj, point, flags);
   if (ret)
      goto out;
   ret = drmSyncobjExportSyncFile(device_fd, binary_handle, sync_file_fd);
out:
   drmSyncobjDestroy(device_fd, binary_handle);
   return ret;
}

int ac_drm_cs_syncobj_transfer(int device_fd, uint32_t dst_handle, uint64_t dst_point,
                               uint32_t src_handle, uint64_t src_point, uint32_t flags)
{
   return drmSyncobjTransfer(device_fd, dst_handle, dst_point, src_handle, src_point, flags);
}

int ac_drm_cs_syncobj_timeline_wait(int device_fd, uint32_t *handles, uint64_t *points,
                                    unsigned num_handles, int64_t timeout_nsec, unsigned flags,
                                    uint32_t *first_signaled)
{
   return drmSyncobjTimelineWait(device_fd, handles, points, num_handles, timeout_nsec, flags,
                                 first_signaled);
}

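/* Submit a command stream built from raw drm_amdgpu_cs_chunk descriptors.
 * The CS ioctl takes an array of pointers to the chunks, so one is built on
 * the stack; the resulting fence sequence number is returned in seq_no.
 */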
int ac_drm_cs_submit_raw2(ac_drm_device *dev, uint32_t ctx_id, uint32_t bo_list_handle,
                          int num_chunks, struct drm_amdgpu_cs_chunk *chunks, uint64_t *seq_no)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_submit_raw2(dev->vdev, ctx_id, bo_list_handle, num_chunks, chunks, seq_no);
#endif

   union drm_amdgpu_cs cs;
   uint64_t *chunk_array;
   int i, r;

   memset(&cs, 0, sizeof(cs));
   chunk_array = alloca(sizeof(uint64_t) * num_chunks);
   for (i = 0; i < num_chunks; i++)
      chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
   cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
   cs.in.ctx_id = ctx_id;
   cs.in.bo_list_handle = bo_list_handle;
   cs.in.num_chunks = num_chunks;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
   if (!r && seq_no)
      *seq_no = cs.out.handle;
   return r;
}

void ac_drm_cs_chunk_fence_info_to_data(uint32_t bo_handle, uint64_t offset,
                                        struct drm_amdgpu_cs_chunk_data *data)
{
   data->fence_data.handle = bo_handle;
   data->fence_data.offset = offset * sizeof(uint64_t);
}

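/* Generic AMDGPU_INFO query: the kernel writes up to `size` bytes for the
 * requested info_id into `value`. The more specific query helpers below
 * follow the same pattern with additional request fields filled in.
 */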
int ac_drm_query_info(ac_drm_device *dev, unsigned info_id, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = info_id;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_read_mm_registers(ac_drm_device *dev, unsigned dword_offset, unsigned count,
                             uint32_t instance, uint32_t flags, uint32_t *values)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)values;
   request.return_size = count * sizeof(uint32_t);
   request.query = AMDGPU_INFO_READ_MMR_REG;
   request.read_mmr_reg.dword_offset = dword_offset;
   request.read_mmr_reg.count = count;
   request.read_mmr_reg.instance = instance;
   request.read_mmr_reg.flags = flags;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_hw_ip_count(ac_drm_device *dev, unsigned type, uint32_t *count)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)count;
   request.return_size = sizeof(*count);
   request.query = AMDGPU_INFO_HW_IP_COUNT;
   request.query_hw_ip.type = type;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_hw_ip_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                            struct drm_amdgpu_info_hw_ip *info)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)info;
   request.return_size = sizeof(*info);
   request.query = AMDGPU_INFO_HW_IP_INFO;
   request.query_hw_ip.type = type;
   request.query_hw_ip.ip_instance = ip_instance;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_firmware_version(ac_drm_device *dev, unsigned fw_type, unsigned ip_instance,
                                  unsigned index, uint32_t *version, uint32_t *feature)
{
   struct drm_amdgpu_info request;
   struct drm_amdgpu_info_firmware firmware = {};
   int r;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)&firmware;
   request.return_size = sizeof(firmware);
   request.query = AMDGPU_INFO_FW_VERSION;
   request.query_fw.fw_type = fw_type;
   request.query_fw.ip_instance = ip_instance;
   request.query_fw.index = index;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      r = amdvgpu_query_info(dev->vdev, &request);
   else
#endif
      r = drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   if (r)
      return r;

   *version = firmware.ver;
   *feature = firmware.feature;
   return 0;
}

int ac_drm_query_uq_fw_area_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                                 struct drm_amdgpu_info_uq_fw_areas *info)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)info;
   request.return_size = sizeof(*info);
   request.query = AMDGPU_INFO_UQ_FW_AREAS;
   request.query_hw_ip.type = type;
   request.query_hw_ip.ip_instance = ip_instance;

   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

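/* Populate the legacy amdgpu_gpu_info struct from AMDGPU_INFO_DEV_INFO, then
 * read the tiling/raster config registers through the MMR query for families
 * older than AMDGPU_FAMILY_AI.
 */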
int ac_drm_query_gpu_info(ac_drm_device *dev, struct amdgpu_gpu_info *info)
{
   struct drm_amdgpu_info_device dev_info = {0};
   int r, i;

   r = ac_drm_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev_info), &dev_info);
   if (r)
      return r;

   memset(info, 0, sizeof(*info));
   info->asic_id = dev_info.device_id;
   info->chip_rev = dev_info.chip_rev;
   info->chip_external_rev = dev_info.external_rev;
   info->family_id = dev_info.family;
   info->max_engine_clk = dev_info.max_engine_clock;
   info->max_memory_clk = dev_info.max_memory_clock;
   info->gpu_counter_freq = dev_info.gpu_counter_freq;
   info->enabled_rb_pipes_mask = dev_info.enabled_rb_pipes_mask;
   info->rb_pipes = dev_info.num_rb_pipes;
   info->ids_flags = dev_info.ids_flags;
   info->num_hw_gfx_contexts = dev_info.num_hw_gfx_contexts;
   info->num_shader_engines = dev_info.num_shader_engines;
   info->num_shader_arrays_per_engine = dev_info.num_shader_arrays_per_engine;
   info->vram_type = dev_info.vram_type;
   info->vram_bit_width = dev_info.vram_bit_width;
   info->ce_ram_size = dev_info.ce_ram_size;
   info->vce_harvest_config = dev_info.vce_harvest_config;
   info->pci_rev_id = dev_info.pci_rev;

   if (info->family_id < AMDGPU_FAMILY_AI) {
      for (i = 0; i < (int)info->num_shader_engines; i++) {
         unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
                             (AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);

         r = ac_drm_read_mm_registers(dev, 0x263d, 1, instance, 0, &info->backend_disable[i]);
         if (r)
            return r;
         /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
         info->backend_disable[i] = (info->backend_disable[i] >> 16) & 0xff;

         r = ac_drm_read_mm_registers(dev, 0xa0d4, 1, instance, 0, &info->pa_sc_raster_cfg[i]);
         if (r)
            return r;

         if (info->family_id >= AMDGPU_FAMILY_CI) {
            r = ac_drm_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
                                         &info->pa_sc_raster_cfg1[i]);
            if (r)
               return r;
         }
      }
   }

   r = ac_drm_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0, &info->gb_addr_cfg);
   if (r)
      return r;

   if (info->family_id < AMDGPU_FAMILY_AI) {
      r = ac_drm_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0, info->gb_tile_mode);
      if (r)
         return r;

      if (info->family_id >= AMDGPU_FAMILY_CI) {
         r = ac_drm_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
                                      info->gb_macro_tile_mode);
         if (r)
            return r;
      }

      r = ac_drm_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0, &info->mc_arb_ramcfg);
      if (r)
         return r;
   }

   info->cu_active_number = dev_info.cu_active_number;
   info->cu_ao_mask = dev_info.cu_ao_mask;
   memcpy(&info->cu_bitmap[0][0], &dev_info.cu_bitmap[0][0], sizeof(info->cu_bitmap));
   return 0;
}

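/* Report size, usage and max allocation for the VRAM or GTT heap; for VRAM,
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED selects the CPU-visible portion.
 */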
int ac_drm_query_heap_info(ac_drm_device *dev, uint32_t heap, uint32_t flags,
                           struct amdgpu_heap_info *info)
{
   struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
   int r;

   r = ac_drm_query_info(dev, AMDGPU_INFO_VRAM_GTT, sizeof(vram_gtt_info), &vram_gtt_info);
   if (r)
      return r;

   /* Get heap information */
   switch (heap) {
   case AMDGPU_GEM_DOMAIN_VRAM:
      /* query visible only vram heap */
      if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
         info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
      else /* query total vram heap */
         info->heap_size = vram_gtt_info.vram_size;

      info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

      if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
         r = ac_drm_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE, sizeof(info->heap_usage),
                               &info->heap_usage);
      else
         r = ac_drm_query_info(dev, AMDGPU_INFO_VRAM_USAGE, sizeof(info->heap_usage),
                               &info->heap_usage);
      if (r)
         return r;
      break;
   case AMDGPU_GEM_DOMAIN_GTT:
      info->heap_size = vram_gtt_info.gtt_size;
      info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

      r = ac_drm_query_info(dev, AMDGPU_INFO_GTT_USAGE, sizeof(info->heap_usage),
                            &info->heap_usage);
      if (r)
         return r;
      break;
   default:
      return -EINVAL;
   }

   return 0;
}

int ac_drm_query_sensor_info(ac_drm_device *dev, unsigned sensor_type, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_SENSOR;
   request.sensor_info.type = sensor_type;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_video_caps_info(ac_drm_device *dev, unsigned cap_type, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_VIDEO_CAPS;
   request.sensor_info.type = cap_type;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_gpuvm_fault_info(ac_drm_device *dev, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_GPUVM_FAULT;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_vm_reserve_vmid(ac_drm_device *dev, uint32_t flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(flags == 0);
      return amdvgpu_vm_reserve_vmid(dev->vdev, 1);
   }
#endif
   union drm_amdgpu_vm vm;

   vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
   vm.in.flags = flags;

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
}

int ac_drm_vm_unreserve_vmid(ac_drm_device *dev, uint32_t flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(flags == 0);
      return amdvgpu_vm_reserve_vmid(dev->vdev, 0);
   }
#endif
   union drm_amdgpu_vm vm;

   vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
   vm.in.flags = flags;

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
}

const char *ac_drm_get_marketing_name(ac_drm_device *dev)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_get_marketing_name(dev->vdev);
#endif
   return amdgpu_get_marketing_name(dev->adev);
}

int ac_drm_query_sw_info(ac_drm_device *dev,
                         enum amdgpu_sw_info info, void *value)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(info == amdgpu_sw_info_address32_hi);
      return amdvgpu_query_sw_info(dev->vdev, info, value);
   }
#endif
   return amdgpu_query_sw_info(dev->adev, info, value);
}

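/* BO management wrappers: ac_drm_bo carries either a native amdgpu_bo_handle
 * (bo.abo) or a virtio handle (bo.vbo), so the same call sites work against
 * both backends.
 */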
int ac_drm_bo_alloc(ac_drm_device *dev, struct amdgpu_bo_alloc_request *alloc_buffer,
                    ac_drm_bo *bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_alloc(dev->vdev, alloc_buffer, &bo->vbo);
#endif
   return amdgpu_bo_alloc(dev->adev, alloc_buffer, &bo->abo);
}

int ac_drm_bo_export(ac_drm_device *dev, ac_drm_bo bo,
                     enum amdgpu_bo_handle_type type, uint32_t *shared_handle)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_export(dev->vdev, bo.vbo, type, shared_handle);
#endif
   return amdgpu_bo_export(bo.abo, type, shared_handle);
}

int ac_drm_bo_import(ac_drm_device *dev, enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle, struct ac_drm_bo_import_result *output)
{
   int r;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      struct amdvgpu_bo_import_result result;
      r = amdvgpu_bo_import(dev->vdev, type, shared_handle, &result);
      if (r == 0) {
         output->bo.vbo = result.buf_handle;
         output->alloc_size = result.alloc_size;
      }
   } else
#endif
   {
      struct amdgpu_bo_import_result result;
      r = amdgpu_bo_import(dev->adev, type, shared_handle, &result);
      if (r == 0) {
         output->bo.abo = result.buf_handle;
         output->alloc_size = result.alloc_size;
      }
   }

   return r;
}

int ac_drm_create_bo_from_user_mem(ac_drm_device *dev, void *cpu,
                                   uint64_t size, ac_drm_bo *bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(false);
      return -1;
   }
#endif
   return amdgpu_create_bo_from_user_mem(dev->adev, cpu, size, &bo->abo);
}

int ac_drm_bo_free(ac_drm_device *dev, ac_drm_bo bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_free(dev->vdev, bo.vbo);
#endif
   return amdgpu_bo_free(bo.abo);
}

int ac_drm_bo_cpu_map(ac_drm_device *dev, ac_drm_bo bo,
                      void **cpu)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_cpu_map(dev->vdev, bo.vbo, cpu);
#endif
   return amdgpu_bo_cpu_map(bo.abo, cpu);
}

int ac_drm_bo_cpu_unmap(ac_drm_device *dev, ac_drm_bo bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_cpu_unmap(dev->vdev, bo.vbo);
#endif
   return amdgpu_bo_cpu_unmap(bo.abo);
}

int ac_drm_va_range_alloc(ac_drm_device *dev, enum amdgpu_gpu_va_range va_range_type,
                          uint64_t size, uint64_t va_base_alignment, uint64_t va_base_required,
                          uint64_t *va_base_allocated, amdgpu_va_handle *va_range_handle,
                          uint64_t flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_va_range_alloc(dev->vdev, va_range_type, size, va_base_alignment,
                                    va_base_required, va_base_allocated,
                                    va_range_handle, flags);
#endif
   return amdgpu_va_range_alloc(dev->adev, va_range_type, size, va_base_alignment,
                                va_base_required, va_base_allocated,
                                va_range_handle, flags);
}

int ac_drm_va_range_free(amdgpu_va_handle va_range_handle)
{
   return amdgpu_va_range_free(va_range_handle);
}

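/* Create a user-mode queue (gfx, compute or SDMA): pick the MQD descriptor
 * size for the IP type and issue AMDGPU_USERQ_OP_CREATE. Not implemented for
 * the virtio path yet.
 */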
int ac_drm_create_userqueue(ac_drm_device *dev, uint32_t ip_type, uint32_t doorbell_handle,
                            uint32_t doorbell_offset, uint64_t queue_va, uint64_t queue_size,
                            uint64_t wptr_va, uint64_t rptr_va, void *mqd_in, uint32_t *queue_id)
{
   int ret;
   union drm_amdgpu_userq userq;
   uint64_t mqd_size;

#ifdef HAVE_AMDGPU_VIRTIO
   /* Not supported yet. */
   if (dev->is_virtio)
      return -1;
#endif

   switch (ip_type) {
   case AMDGPU_HW_IP_GFX:
      mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11);
      break;
   case AMDGPU_HW_IP_DMA:
      mqd_size = sizeof(struct drm_amdgpu_userq_mqd_sdma_gfx11);
      break;
   case AMDGPU_HW_IP_COMPUTE:
      mqd_size = sizeof(struct drm_amdgpu_userq_mqd_compute_gfx11);
      break;
   default:
      return -EINVAL;
   }

   memset(&userq, 0, sizeof(userq));

   userq.in.op = AMDGPU_USERQ_OP_CREATE;
   userq.in.ip_type = ip_type;

   userq.in.doorbell_handle = doorbell_handle;
   userq.in.doorbell_offset = doorbell_offset;

   userq.in.queue_va = queue_va;
   userq.in.queue_size = queue_size;
   userq.in.wptr_va = wptr_va;
   userq.in.rptr_va = rptr_va;

   userq.in.mqd = (uintptr_t)mqd_in;
   userq.in.mqd_size = mqd_size;

   ret = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ,
                              &userq, sizeof(userq));
   *queue_id = userq.out.queue_id;

   return ret;
}

int ac_drm_free_userqueue(ac_drm_device *dev, uint32_t queue_id)
{
   union drm_amdgpu_userq userq;

   memset(&userq, 0, sizeof(userq));
   userq.in.op = AMDGPU_USERQ_OP_FREE;
   userq.in.queue_id = queue_id;

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ, &userq, sizeof(userq));
}

int ac_drm_userq_signal(ac_drm_device *dev, struct drm_amdgpu_userq_signal *signal_data)
{
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
                               signal_data, sizeof(struct drm_amdgpu_userq_signal));
}

int ac_drm_userq_wait(ac_drm_device *dev, struct drm_amdgpu_userq_wait *wait_data)
{
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ_WAIT, wait_data,
                               sizeof(struct drm_amdgpu_userq_wait));
}