/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "util/os_drm.h"
#include "ac_linux_drm.h"
#include "util/u_math.h"

#include <stdlib.h>
#include <time.h>
#include <unistd.h>

#ifdef HAVE_AMDGPU_VIRTIO
#include "virtio/amdgpu_virtio.h"
#endif
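
/* Wrapper around either a native libdrm_amdgpu device or a virtio
 * native-context device. Every entry point below dispatches on is_virtio
 * when HAVE_AMDGPU_VIRTIO is defined, and otherwise falls through to the
 * kernel ioctl or libdrm_amdgpu path.
 */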
struct ac_drm_device {
   union {
      amdgpu_device_handle adev;
#ifdef HAVE_AMDGPU_VIRTIO
      amdvgpu_device_handle vdev;
#endif
   };
   int fd;
   bool is_virtio;
};
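
/* Allocate a wrapper and initialize the underlying device. On success the
 * wrapper owns the backend device handle and caches the fd used for direct
 * ioctls.
 */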
int ac_drm_device_initialize(int fd, bool is_virtio,
                             uint32_t *major_version, uint32_t *minor_version,
                             ac_drm_device **dev)
{
   int r;

   *dev = malloc(sizeof(ac_drm_device));
   if (!(*dev))
      return -1;

#ifdef HAVE_AMDGPU_VIRTIO
   if (is_virtio) {
      amdvgpu_device_handle vdev;
      r = amdvgpu_device_initialize(fd, major_version, minor_version,
                                    &vdev);
      if (r == 0) {
         (*dev)->vdev = vdev;
         (*dev)->fd = amdvgpu_device_get_fd(vdev);
      }
   } else
#endif
   {
      amdgpu_device_handle adev;
      r = amdgpu_device_initialize(fd, major_version, minor_version,
                                   &adev);
      if (r == 0) {
         (*dev)->adev = adev;
         (*dev)->fd = amdgpu_device_get_fd(adev);
      }
   }

   if (r == 0)
      (*dev)->is_virtio = is_virtio;
   else
      free(*dev);

   return r;
}
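
/* Minimal usage sketch, assuming `fd` is a valid amdgpu render-node fd:
 *
 *    ac_drm_device *dev;
 *    uint32_t major, minor;
 *    if (!ac_drm_device_initialize(fd, false, &major, &minor, &dev)) {
 *       ...use the ac_drm_* entry points...
 *       ac_drm_device_deinitialize(dev);
 *    }
 */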

void ac_drm_device_deinitialize(ac_drm_device *dev)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      amdvgpu_device_deinitialize(dev->vdev);
   else
#endif
      amdgpu_device_deinitialize(dev->adev);
   free(dev);
}

int ac_drm_device_get_fd(ac_drm_device *device_handle)
{
   return device_handle->fd;
}

int ac_drm_bo_set_metadata(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_metadata *info)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_set_metadata(dev->vdev, bo_handle, info);
#endif
   struct drm_amdgpu_gem_metadata args = {};

   args.handle = bo_handle;
   args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
   args.data.flags = info->flags;
   args.data.tiling_info = info->tiling_info;

   if (info->size_metadata > sizeof(args.data.data))
      return -EINVAL;

   if (info->size_metadata) {
      args.data.data_size_bytes = info->size_metadata;
      memcpy(args.data.data, info->umd_metadata, info->size_metadata);
   }

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
}
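
/* Retrieve allocation parameters and UMD metadata for a GEM handle by
 * combining the GEM_METADATA and GEM_OP(GET_GEM_CREATE_INFO) ioctls.
 */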
int ac_drm_bo_query_info(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_info *info)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_query_info(dev->vdev, bo_handle, info);
#endif
   struct drm_amdgpu_gem_metadata metadata = {};
   struct drm_amdgpu_gem_create_in bo_info = {};
   struct drm_amdgpu_gem_op gem_op = {};
   int r;

   /* Validate the BO passed in */
   if (!bo_handle)
      return -EINVAL;

   /* Query metadata. */
   metadata.handle = bo_handle;
   metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_METADATA, &metadata, sizeof(metadata));
   if (r)
      return r;

   if (metadata.data.data_size_bytes > sizeof(info->metadata.umd_metadata))
      return -EINVAL;

   /* Query buffer info. */
   gem_op.handle = bo_handle;
   gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
   gem_op.value = (uintptr_t)&bo_info;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
   if (r)
      return r;

   memset(info, 0, sizeof(*info));
   info->alloc_size = bo_info.bo_size;
   info->phys_alignment = bo_info.alignment;
   info->preferred_heap = bo_info.domains;
   info->alloc_flags = bo_info.domain_flags;
   info->metadata.flags = metadata.data.flags;
   info->metadata.tiling_info = metadata.data.tiling_info;

   info->metadata.size_metadata = metadata.data.data_size_bytes;
   if (metadata.data.data_size_bytes > 0)
      memcpy(info->metadata.umd_metadata, metadata.data.data, metadata.data.data_size_bytes);

   return 0;
}
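
/* Convert a relative timeout into the absolute CLOCK_MONOTONIC deadline the
 * kernel expects; on overflow (or clock failure) fall back to an infinite
 * timeout.
 */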
static uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
   int r;

   if (timeout != AMDGPU_TIMEOUT_INFINITE) {
      struct timespec current;
      uint64_t current_ns;
      r = clock_gettime(CLOCK_MONOTONIC, &current);
      if (r) {
         fprintf(stderr, "clock_gettime() returned error (%d)!\n", errno);
         return AMDGPU_TIMEOUT_INFINITE;
      }

      current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
      current_ns += current.tv_nsec;
      timeout += current_ns;
      if (timeout < current_ns)
         timeout = AMDGPU_TIMEOUT_INFINITE;
   }
   return timeout;
}

int ac_drm_bo_wait_for_idle(ac_drm_device *dev, ac_drm_bo bo, uint64_t timeout_ns, bool *busy)
{
   int r;
   union drm_amdgpu_gem_wait_idle args;

   memset(&args, 0, sizeof(args));
   args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      r = amdvgpu_bo_wait_for_idle(dev->vdev, bo.vbo, args.in.timeout);
   } else
#endif
   {
      ac_drm_bo_export(dev, bo, amdgpu_bo_handle_type_kms,
                       &args.in.handle);
      r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &args, sizeof(args));
   }

   if (r == 0) {
      *busy = args.out.status;
      return 0;
   } else {
      fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
      return r;
   }
}
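
/* Convenience VA map/unmap that page-aligns the size and applies the default
 * RWX mapping flags; note the flags parameter is accepted but not forwarded.
 */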
int ac_drm_bo_va_op(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                    uint64_t addr, uint64_t flags, uint32_t ops)
{
   size = ALIGN(size, getpagesize());

   return ac_drm_bo_va_op_raw(
      dev, bo_handle, offset, size, addr,
      AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

int ac_drm_bo_va_op_raw(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                        uint64_t addr, uint64_t flags, uint32_t ops)
{
   struct drm_amdgpu_gem_va va;
   int r;

   if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP && ops != AMDGPU_VA_OP_REPLACE &&
       ops != AMDGPU_VA_OP_CLEAR)
      return -EINVAL;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_va_op_raw(dev->vdev, bo_handle, offset, size, addr, flags, ops);
#endif

   memset(&va, 0, sizeof(va));
   va.handle = bo_handle;
   va.operation = ops;
   va.flags = flags;
   va.va_address = addr;
   va.offset_in_bo = offset;
   va.map_size = size;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

   return r;
}
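
/* Like ac_drm_bo_va_op_raw, but additionally carries timeline-syncobj
 * parameters so the VA update can be fenced; this variant has no virtio
 * dispatch path.
 */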
int ac_drm_bo_va_op_raw2(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                         uint64_t addr, uint64_t flags, uint32_t ops,
                         uint32_t vm_timeline_syncobj_out, uint64_t vm_timeline_point,
                         uint64_t input_fence_syncobj_handles, uint32_t num_syncobj_handles)
{
   struct drm_amdgpu_gem_va va;
   int r;

   if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
       ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
      return -EINVAL;

   memset(&va, 0, sizeof(va));
   va.handle = bo_handle;
   va.operation = ops;
   va.flags = flags;
   va.va_address = addr;
   va.offset_in_bo = offset;
   va.map_size = size;
   va.vm_timeline_syncobj_out = vm_timeline_syncobj_out;
   va.vm_timeline_point = vm_timeline_point;
   va.input_fence_syncobj_handles = input_fence_syncobj_handles;
   va.num_syncobj_handles = num_syncobj_handles;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

   return r;
}
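
/* Create a command submission context. The AMD_PRIORITY environment
 * variable, when parsable as an integer, overrides the requested priority.
 */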
int ac_drm_cs_ctx_create2(ac_drm_device *dev, uint32_t priority, uint32_t *ctx_id)
{
   int r;
   union drm_amdgpu_ctx args;
   char *override_priority;

   override_priority = getenv("AMD_PRIORITY");
   if (override_priority) {
      /* The priority is a signed integer. The variable type is
       * wrong. If parsing fails, priority is unchanged.
       */
      if (sscanf(override_priority, "%i", &priority) == 1) {
         printf("amdgpu: context priority changed to %i\n", priority);
      }
   }

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_ctx_create2(dev->vdev, priority, ctx_id);
#endif
   /* Create the context */
   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
   args.in.priority = priority;

   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));

   if (r)
      return r;

   *ctx_id = args.out.alloc.ctx_id;

   return 0;
}

int ac_drm_cs_ctx_free(ac_drm_device *dev, uint32_t ctx_id)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_ctx_free(dev->vdev, ctx_id);
#endif
   union drm_amdgpu_ctx args;

   /* now deal with kernel side */
   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_FREE_CTX;
   args.in.ctx_id = ctx_id;
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}

int ac_drm_cs_ctx_stable_pstate(ac_drm_device *dev, uint32_t ctx_id, uint32_t op, uint32_t flags,
                                uint32_t *out_flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_ctx_stable_pstate(dev->vdev, ctx_id, op, flags, out_flags);
#endif
   union drm_amdgpu_ctx args;
   int r;

   if (!ctx_id)
      return -EINVAL;

   memset(&args, 0, sizeof(args));
   args.in.op = op;
   args.in.ctx_id = ctx_id;
   args.in.flags = flags;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   if (!r && out_flags)
      *out_flags = args.out.pstate.flags;
   return r;
}

int ac_drm_cs_query_reset_state2(ac_drm_device *dev, uint32_t ctx_id, uint64_t *flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_query_reset_state2(dev->vdev, ctx_id, flags);
#endif

   union drm_amdgpu_ctx args;
   int r;

   if (!ctx_id)
      return -EINVAL;

   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
   args.in.ctx_id = ctx_id;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   if (!r)
      *flags = args.out.state.flags;
   return r;
}
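
/* Wait on a single command submission fence via DRM_IOCTL_AMDGPU_WAIT_CS.
 * Returns -errno on ioctl failure and reports completion through *busy.
 */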
static int amdgpu_ioctl_wait_cs(int device_fd, uint32_t ctx_handle, unsigned ip,
                                unsigned ip_instance, uint32_t ring, uint64_t handle,
                                uint64_t timeout_ns, uint64_t flags, bool *busy)
{
   union drm_amdgpu_wait_cs args;
   int r;

   memset(&args, 0, sizeof(args));
   args.in.handle = handle;
   args.in.ip_type = ip;
   args.in.ip_instance = ip_instance;
   args.in.ring = ring;
   args.in.ctx_id = ctx_handle;

   if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
      args.in.timeout = timeout_ns;
   else
      args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

   r = drm_ioctl(device_fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
   if (r)
      return -errno;

   *busy = args.out.status;
   return 0;
}

int ac_drm_cs_query_fence_status(ac_drm_device *dev, uint32_t ctx_id, uint32_t ip_type,
                                 uint32_t ip_instance, uint32_t ring, uint64_t fence_seq_no,
                                 uint64_t timeout_ns, uint64_t flags, uint32_t *expired)
{
   bool busy = true;
   int r;

   if (!fence_seq_no) {
      *expired = true;
      return 0;
   }

   *expired = false;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      r = amdvgpu_cs_query_fence_status(dev->vdev, ctx_id, ip_type, ip_instance, ring, fence_seq_no,
                                        timeout_ns, flags, expired);
   else
#endif
      r = amdgpu_ioctl_wait_cs(dev->fd, ctx_id, ip_type, ip_instance, ring, fence_seq_no,
                               timeout_ns, flags, &busy);

   if (!r && !busy)
      *expired = true;

   return r;
}
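
/* The following are thin wrappers over the generic libdrm syncobj helpers;
 * they only need the raw DRM fd, not the device wrapper.
 */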
int ac_drm_cs_create_syncobj2(int device_fd, uint32_t flags, uint32_t *handle)
{
   return drmSyncobjCreate(device_fd, flags, handle);
}

int ac_drm_cs_create_syncobj(int device_fd, uint32_t *handle)
{
   return drmSyncobjCreate(device_fd, 0, handle);
}

int ac_drm_cs_destroy_syncobj(int device_fd, uint32_t handle)
{
   return drmSyncobjDestroy(device_fd, handle);
}

int ac_drm_cs_syncobj_wait(int device_fd, uint32_t *handles, unsigned num_handles,
                           int64_t timeout_nsec, unsigned flags, uint32_t *first_signaled)
{
   return drmSyncobjWait(device_fd, handles, num_handles, timeout_nsec, flags, first_signaled);
}

int ac_drm_cs_syncobj_query2(int device_fd, uint32_t *handles, uint64_t *points,
                             unsigned num_handles, uint32_t flags)
{
   return drmSyncobjQuery2(device_fd, handles, points, num_handles, flags);
}

int ac_drm_cs_import_syncobj(int device_fd, int shared_fd, uint32_t *handle)
{
   return drmSyncobjFDToHandle(device_fd, shared_fd, handle);
}

int ac_drm_cs_syncobj_export_sync_file(int device_fd, uint32_t syncobj, int *sync_file_fd)
{
   return drmSyncobjExportSyncFile(device_fd, syncobj, sync_file_fd);
}

int ac_drm_cs_syncobj_import_sync_file(int device_fd, uint32_t syncobj, int sync_file_fd)
{
   return drmSyncobjImportSyncFile(device_fd, syncobj, sync_file_fd);
}
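
/* Exporting a timeline point as a sync file goes through a temporary binary
 * syncobj: the point is transferred into it, exported, and the temporary
 * object destroyed.
 */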
int ac_drm_cs_syncobj_export_sync_file2(int device_fd, uint32_t syncobj, uint64_t point,
                                        uint32_t flags, int *sync_file_fd)
{
   uint32_t binary_handle;
   int ret;

   if (!point)
      return drmSyncobjExportSyncFile(device_fd, syncobj, sync_file_fd);

   ret = drmSyncobjCreate(device_fd, 0, &binary_handle);
   if (ret)
      return ret;

   ret = drmSyncobjTransfer(device_fd, binary_handle, 0, syncobj, point, flags);
   if (ret)
      goto out;
   ret = drmSyncobjExportSyncFile(device_fd, binary_handle, sync_file_fd);
out:
   drmSyncobjDestroy(device_fd, binary_handle);
   return ret;
}

int ac_drm_cs_syncobj_transfer(int device_fd, uint32_t dst_handle, uint64_t dst_point,
                               uint32_t src_handle, uint64_t src_point, uint32_t flags)
{
   return drmSyncobjTransfer(device_fd, dst_handle, dst_point, src_handle, src_point, flags);
}

int ac_drm_cs_syncobj_timeline_wait(int device_fd, uint32_t *handles, uint64_t *points,
                                    unsigned num_handles, int64_t timeout_nsec, unsigned flags,
                                    uint32_t *first_signaled)
{
   return drmSyncobjTimelineWait(device_fd, handles, points, num_handles, timeout_nsec, flags,
                                 first_signaled);
}
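
/* Submit raw CS chunks. The kernel takes an array of pointers to chunks, so
 * build it on the stack with alloca before issuing the ioctl.
 */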
int ac_drm_cs_submit_raw2(ac_drm_device *dev, uint32_t ctx_id, uint32_t bo_list_handle,
                          int num_chunks, struct drm_amdgpu_cs_chunk *chunks, uint64_t *seq_no)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_cs_submit_raw2(dev->vdev, ctx_id, bo_list_handle, num_chunks, chunks, seq_no);
#endif

   union drm_amdgpu_cs cs;
   uint64_t *chunk_array;
   int i, r;

   memset(&cs, 0, sizeof(cs));
   chunk_array = alloca(sizeof(uint64_t) * num_chunks);
   for (i = 0; i < num_chunks; i++)
      chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
   cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
   cs.in.ctx_id = ctx_id;
   cs.in.bo_list_handle = bo_list_handle;
   cs.in.num_chunks = num_chunks;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
   if (!r && seq_no)
      *seq_no = cs.out.handle;
   return r;
}
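
/* The fence offset is given in units of 64-bit fence slots and converted to
 * a byte offset for the kernel.
 */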
void ac_drm_cs_chunk_fence_info_to_data(uint32_t bo_handle, uint64_t offset,
                                        struct drm_amdgpu_cs_chunk_data *data)
{
   data->fence_data.handle = bo_handle;
   data->fence_data.offset = offset * sizeof(uint64_t);
}
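
/* Generic AMDGPU_INFO query: the kernel writes up to `size` bytes into
 * `value`. Used by the more specific query helpers below.
 */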
int ac_drm_query_info(ac_drm_device *dev, unsigned info_id, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = info_id;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_read_mm_registers(ac_drm_device *dev, unsigned dword_offset, unsigned count,
                             uint32_t instance, uint32_t flags, uint32_t *values)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)values;
   request.return_size = count * sizeof(uint32_t);
   request.query = AMDGPU_INFO_READ_MMR_REG;
   request.read_mmr_reg.dword_offset = dword_offset;
   request.read_mmr_reg.count = count;
   request.read_mmr_reg.instance = instance;
   request.read_mmr_reg.flags = flags;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_hw_ip_count(ac_drm_device *dev, unsigned type, uint32_t *count)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)count;
   request.return_size = sizeof(*count);
   request.query = AMDGPU_INFO_HW_IP_COUNT;
   request.query_hw_ip.type = type;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_hw_ip_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                            struct drm_amdgpu_info_hw_ip *info)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)info;
   request.return_size = sizeof(*info);
   request.query = AMDGPU_INFO_HW_IP_INFO;
   request.query_hw_ip.type = type;
   request.query_hw_ip.ip_instance = ip_instance;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_firmware_version(ac_drm_device *dev, unsigned fw_type, unsigned ip_instance,
                                  unsigned index, uint32_t *version, uint32_t *feature)
{
   struct drm_amdgpu_info request;
   struct drm_amdgpu_info_firmware firmware = {};
   int r;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)&firmware;
   request.return_size = sizeof(firmware);
   request.query = AMDGPU_INFO_FW_VERSION;
   request.query_fw.fw_type = fw_type;
   request.query_fw.ip_instance = ip_instance;
   request.query_fw.index = index;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      r = amdvgpu_query_info(dev->vdev, &request);
   else
#endif
      r = drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   if (r)
      return r;

   *version = firmware.ver;
   *feature = firmware.feature;
   return 0;
}
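
/* Query user-queue firmware area sizes; note this query has no virtio
 * dispatch path.
 */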
int ac_drm_query_uq_fw_area_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                                 struct drm_amdgpu_info_uq_fw_areas *info)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)info;
   request.return_size = sizeof(*info);
   request.query = AMDGPU_INFO_UQ_FW_AREAS;
   request.query_hw_ip.type = type;
   request.query_hw_ip.ip_instance = ip_instance;

   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}
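
/* Populate amdgpu_gpu_info from the device info query, supplemented by
 * memory-mapped register reads on pre-AI (pre-GFX9) families where the
 * tiling configuration is not reported directly.
 */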
int ac_drm_query_gpu_info(ac_drm_device *dev, struct amdgpu_gpu_info *info)
{
   struct drm_amdgpu_info_device dev_info = {0};
   int r, i;

   r = ac_drm_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev_info), &dev_info);
   if (r)
      return r;

   memset(info, 0, sizeof(*info));
   info->asic_id = dev_info.device_id;
   info->chip_rev = dev_info.chip_rev;
   info->chip_external_rev = dev_info.external_rev;
   info->family_id = dev_info.family;
   info->max_engine_clk = dev_info.max_engine_clock;
   info->max_memory_clk = dev_info.max_memory_clock;
   info->gpu_counter_freq = dev_info.gpu_counter_freq;
   info->enabled_rb_pipes_mask = dev_info.enabled_rb_pipes_mask;
   info->rb_pipes = dev_info.num_rb_pipes;
   info->ids_flags = dev_info.ids_flags;
   info->num_hw_gfx_contexts = dev_info.num_hw_gfx_contexts;
   info->num_shader_engines = dev_info.num_shader_engines;
   info->num_shader_arrays_per_engine = dev_info.num_shader_arrays_per_engine;
   info->vram_type = dev_info.vram_type;
   info->vram_bit_width = dev_info.vram_bit_width;
   info->ce_ram_size = dev_info.ce_ram_size;
   info->vce_harvest_config = dev_info.vce_harvest_config;
   info->pci_rev_id = dev_info.pci_rev;

   if (info->family_id < AMDGPU_FAMILY_AI) {
      for (i = 0; i < (int)info->num_shader_engines; i++) {
         unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
                             (AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);

         r = ac_drm_read_mm_registers(dev, 0x263d, 1, instance, 0, &info->backend_disable[i]);
         if (r)
            return r;
         /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
         info->backend_disable[i] = (info->backend_disable[i] >> 16) & 0xff;

         r = ac_drm_read_mm_registers(dev, 0xa0d4, 1, instance, 0, &info->pa_sc_raster_cfg[i]);
         if (r)
            return r;

         if (info->family_id >= AMDGPU_FAMILY_CI) {
            r = ac_drm_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
                                         &info->pa_sc_raster_cfg1[i]);
            if (r)
               return r;
         }
      }
   }

   r = ac_drm_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0, &info->gb_addr_cfg);
   if (r)
      return r;

   if (info->family_id < AMDGPU_FAMILY_AI) {
      r = ac_drm_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0, info->gb_tile_mode);
      if (r)
         return r;

      if (info->family_id >= AMDGPU_FAMILY_CI) {
         r = ac_drm_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
                                      info->gb_macro_tile_mode);
         if (r)
            return r;
      }

      r = ac_drm_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0, &info->mc_arb_ramcfg);
      if (r)
         return r;
   }

   info->cu_active_number = dev_info.cu_active_number;
   info->cu_ao_mask = dev_info.cu_ao_mask;
   memcpy(&info->cu_bitmap[0][0], &dev_info.cu_bitmap[0][0], sizeof(info->cu_bitmap));
   return 0;
}

int ac_drm_query_heap_info(ac_drm_device *dev, uint32_t heap, uint32_t flags,
                           struct amdgpu_heap_info *info)
{
   struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
   int r;

   r = ac_drm_query_info(dev, AMDGPU_INFO_VRAM_GTT, sizeof(vram_gtt_info), &vram_gtt_info);
   if (r)
      return r;

   /* Get heap information */
   switch (heap) {
   case AMDGPU_GEM_DOMAIN_VRAM:
      /* query visible only vram heap */
      if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
         info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
      else /* query total vram heap */
         info->heap_size = vram_gtt_info.vram_size;

      info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

      if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
         r = ac_drm_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE, sizeof(info->heap_usage),
                               &info->heap_usage);
      else
         r = ac_drm_query_info(dev, AMDGPU_INFO_VRAM_USAGE, sizeof(info->heap_usage),
                               &info->heap_usage);
      if (r)
         return r;
      break;
   case AMDGPU_GEM_DOMAIN_GTT:
      info->heap_size = vram_gtt_info.gtt_size;
      info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

      r = ac_drm_query_info(dev, AMDGPU_INFO_GTT_USAGE, sizeof(info->heap_usage),
                            &info->heap_usage);
      if (r)
         return r;
      break;
   default:
      return -EINVAL;
   }

   return 0;
}

int ac_drm_query_sensor_info(ac_drm_device *dev, unsigned sensor_type, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_SENSOR;
   request.sensor_info.type = sensor_type;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_video_caps_info(ac_drm_device *dev, unsigned cap_type, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_VIDEO_CAPS;
   request.sensor_info.type = cap_type;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_gpuvm_fault_info(ac_drm_device *dev, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_GPUVM_FAULT;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_query_info(dev->vdev, &request);
#endif
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}
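
/* Reserve/unreserve a dedicated VMID for this process. The virtio backend
 * exposes a single entry point taking an enable flag and supports no other
 * flags.
 */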
int ac_drm_vm_reserve_vmid(ac_drm_device *dev, uint32_t flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(flags == 0);
      return amdvgpu_vm_reserve_vmid(dev->vdev, 1);
   }
#endif
   union drm_amdgpu_vm vm;

   vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
   vm.in.flags = flags;

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
}

int ac_drm_vm_unreserve_vmid(ac_drm_device *dev, uint32_t flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(flags == 0);
      return amdvgpu_vm_reserve_vmid(dev->vdev, 0);
   }
#endif
   union drm_amdgpu_vm vm;

   vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
   vm.in.flags = flags;

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
}

const char *ac_drm_get_marketing_name(ac_drm_device *dev)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_get_marketing_name(dev->vdev);
#endif
   return amdgpu_get_marketing_name(dev->adev);
}

int ac_drm_query_sw_info(ac_drm_device *dev,
                         enum amdgpu_sw_info info, void *value)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(info == amdgpu_sw_info_address32_hi);
      return amdvgpu_query_sw_info(dev->vdev, info, value);
   }
#endif
   return amdgpu_query_sw_info(dev->adev, info, value);
}

int ac_drm_bo_alloc(ac_drm_device *dev, struct amdgpu_bo_alloc_request *alloc_buffer,
                    ac_drm_bo *bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_alloc(dev->vdev, alloc_buffer, &bo->vbo);
#endif
   return amdgpu_bo_alloc(dev->adev, alloc_buffer, &bo->abo);
}

int ac_drm_bo_export(ac_drm_device *dev, ac_drm_bo bo,
                     enum amdgpu_bo_handle_type type, uint32_t *shared_handle)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_export(dev->vdev, bo.vbo, type, shared_handle);
#endif
   return amdgpu_bo_export(bo.abo, type, shared_handle);
}

int ac_drm_bo_import(ac_drm_device *dev, enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle, struct ac_drm_bo_import_result *output)
{
   int r;

#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      struct amdvgpu_bo_import_result result;
      r = amdvgpu_bo_import(dev->vdev, type, shared_handle, &result);
      if (r == 0) {
         output->bo.vbo = result.buf_handle;
         output->alloc_size = result.alloc_size;
      }
   } else
#endif
   {
      struct amdgpu_bo_import_result result;
      r = amdgpu_bo_import(dev->adev, type, shared_handle, &result);
      if (r == 0) {
         output->bo.abo = result.buf_handle;
         output->alloc_size = result.alloc_size;
      }
   }

   return r;
}
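
/* Userptr import is not implemented for the virtio backend, so that path
 * asserts and returns an error.
 */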
int ac_drm_create_bo_from_user_mem(ac_drm_device *dev, void *cpu,
                                   uint64_t size, ac_drm_bo *bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio) {
      assert(false);
      return -1;
   }
#endif
   return amdgpu_create_bo_from_user_mem(dev->adev, cpu, size, &bo->abo);
}

int ac_drm_bo_free(ac_drm_device *dev, ac_drm_bo bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_free(dev->vdev, bo.vbo);
#endif
   return amdgpu_bo_free(bo.abo);
}

int ac_drm_bo_cpu_map(ac_drm_device *dev, ac_drm_bo bo,
                      void **cpu)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_cpu_map(dev->vdev, bo.vbo, cpu);
#endif
   return amdgpu_bo_cpu_map(bo.abo, cpu);
}

int ac_drm_bo_cpu_unmap(ac_drm_device *dev, ac_drm_bo bo)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_bo_cpu_unmap(dev->vdev, bo.vbo);
#endif
   return amdgpu_bo_cpu_unmap(bo.abo);
}

int ac_drm_va_range_alloc(ac_drm_device *dev, enum amdgpu_gpu_va_range va_range_type,
                          uint64_t size, uint64_t va_base_alignment, uint64_t va_base_required,
                          uint64_t *va_base_allocated, amdgpu_va_handle *va_range_handle,
                          uint64_t flags)
{
#ifdef HAVE_AMDGPU_VIRTIO
   if (dev->is_virtio)
      return amdvgpu_va_range_alloc(dev->vdev, va_range_type, size, va_base_alignment,
                                    va_base_required, va_base_allocated,
                                    va_range_handle, flags);
#endif
   return amdgpu_va_range_alloc(dev->adev, va_range_type, size, va_base_alignment,
                                va_base_required, va_base_allocated,
                                va_range_handle, flags);
}

int ac_drm_va_range_free(amdgpu_va_handle va_range_handle)
{
   return amdgpu_va_range_free(va_range_handle);
}
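
/* Create a user-mode queue. The MQD size depends on the IP type; only the
 * gfx11 MQD layouts are handled here, and virtio is not supported yet.
 */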
int ac_drm_create_userqueue(ac_drm_device *dev, uint32_t ip_type, uint32_t doorbell_handle,
                            uint32_t doorbell_offset, uint64_t queue_va, uint64_t queue_size,
                            uint64_t wptr_va, uint64_t rptr_va, void *mqd_in, uint32_t *queue_id)
{
   int ret;
   union drm_amdgpu_userq userq;
   uint64_t mqd_size;

#ifdef HAVE_AMDGPU_VIRTIO
   /* Not supported yet. */
   if (dev->is_virtio)
      return -1;
#endif

   switch (ip_type) {
   case AMDGPU_HW_IP_GFX:
      mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11);
      break;
   case AMDGPU_HW_IP_DMA:
      mqd_size = sizeof(struct drm_amdgpu_userq_mqd_sdma_gfx11);
      break;
   case AMDGPU_HW_IP_COMPUTE:
      mqd_size = sizeof(struct drm_amdgpu_userq_mqd_compute_gfx11);
      break;
   default:
      return -EINVAL;
   }

   memset(&userq, 0, sizeof(userq));

   userq.in.op = AMDGPU_USERQ_OP_CREATE;
   userq.in.ip_type = ip_type;

   userq.in.doorbell_handle = doorbell_handle;
   userq.in.doorbell_offset = doorbell_offset;

   userq.in.queue_va = queue_va;
   userq.in.queue_size = queue_size;
   userq.in.wptr_va = wptr_va;
   userq.in.rptr_va = rptr_va;

   userq.in.mqd = (uintptr_t)mqd_in;
   userq.in.mqd_size = mqd_size;

   ret = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ,
                              &userq, sizeof(userq));
   *queue_id = userq.out.queue_id;

   return ret;
}

int ac_drm_free_userqueue(ac_drm_device *dev, uint32_t queue_id)
{
   union drm_amdgpu_userq userq;

   memset(&userq, 0, sizeof(userq));
   userq.in.op = AMDGPU_USERQ_OP_FREE;
   userq.in.queue_id = queue_id;

   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ, &userq, sizeof(userq));
}

int ac_drm_userq_signal(ac_drm_device *dev, struct drm_amdgpu_userq_signal *signal_data)
{
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
                               signal_data, sizeof(struct drm_amdgpu_userq_signal));
}

int ac_drm_userq_wait(ac_drm_device *dev, struct drm_amdgpu_userq_wait *wait_data)
{
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ_WAIT, wait_data,
                               sizeof(struct drm_amdgpu_userq_wait));
}