/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <xf86drm.h>
#include <libsync.h>

#include <dlfcn.h>
#include <libdrm/amdgpu.h>

#include "amdgpu_virtio_private.h"

#include "util/log.h"

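/* Forward a DRM_AMDGPU_INFO query to the host and copy the returned payload
 * back into the buffer the caller passed via info->return_pointer.
 */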
int
amdvgpu_query_info(amdvgpu_device_handle dev, struct drm_amdgpu_info *info)
{
   unsigned req_len = sizeof(struct amdgpu_ccmd_query_info_req);
   unsigned rsp_len = sizeof(struct amdgpu_ccmd_query_info_rsp) + info->return_size;

   uint8_t buf[req_len];
   struct amdgpu_ccmd_query_info_req *req = (void *)buf;
   struct amdgpu_ccmd_query_info_rsp *rsp;
   assert(0 == (offsetof(struct amdgpu_ccmd_query_info_rsp, payload) % 8));

   req->hdr = AMDGPU_CCMD(QUERY_INFO, req_len);
   memcpy(&req->info, info, sizeof(struct drm_amdgpu_info));

   rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, rsp_len);

   int r = vdrm_send_req_wrapper(dev, &req->hdr, &rsp->hdr, true);
   if (r)
      return r;

   memcpy((void*)(uintptr_t)info->return_pointer, rsp->payload, info->return_size);

   return 0;
}

static int
amdvgpu_query_info_simple(amdvgpu_device_handle dev, unsigned info_id, unsigned size, void *out)
{
   if (info_id == AMDGPU_INFO_DEV_INFO) {
      assert(size == sizeof(dev->dev_info));
      memcpy(out, &dev->dev_info, size);
      return 0;
   }
   struct drm_amdgpu_info info;
   info.return_pointer = (uintptr_t)out;
   info.query = info_id;
   info.return_size = size;
   return amdvgpu_query_info(dev, &info);
}

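/* Heap usage is read from the shared memory page exported by the host, so
 * this query is answered locally without a host round-trip.
 */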
static int
amdvgpu_query_heap_info(amdvgpu_device_handle dev, unsigned heap, unsigned flags, struct amdgpu_heap_info *info)
{
   struct amdvgpu_shmem *shmem = to_amdvgpu_shmem(dev->vdev->shmem);
   /* Get heap information from shared memory */
   switch (heap) {
   case AMDGPU_GEM_DOMAIN_VRAM:
      if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
         memcpy(info, &shmem->vis_vram, sizeof(*info));
      else
         memcpy(info, &shmem->vram, sizeof(*info));
      break;
   case AMDGPU_GEM_DOMAIN_GTT:
      memcpy(info, &shmem->gtt, sizeof(*info));
      break;
   default:
      return -EINVAL;
   }

   return 0;
}

static int
amdvgpu_query_hw_ip_count(amdvgpu_device_handle dev, unsigned type, uint32_t *count)
{
   struct drm_amdgpu_info request;
   request.return_pointer = (uintptr_t) count;
   request.return_size = sizeof(*count);
   request.query = AMDGPU_INFO_HW_IP_COUNT;
   request.query_hw_ip.type = type;
   return amdvgpu_query_info(dev, &request);
}

static int
amdvgpu_query_video_caps_info(amdvgpu_device_handle dev, unsigned cap_type,
                              unsigned size, void *value)
{
   struct drm_amdgpu_info request;
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_VIDEO_CAPS;
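   /* sensor_info.type and video_cap.type occupy the same union slot in
    * struct drm_amdgpu_info; writing sensor_info here mirrors libdrm's
    * amdgpu_query_video_caps_info().
    */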
   request.sensor_info.type = cap_type;

   return amdvgpu_query_info(dev, &request);
}

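/* Only amdgpu_sw_info_address32_hi is supported; the value is read from the
 * cached device capabilities (dev->vdev->caps).
 */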
int
amdvgpu_query_sw_info(amdvgpu_device_handle dev, enum amdgpu_sw_info info, void *value)
{
   if (info != amdgpu_sw_info_address32_hi)
      return -EINVAL;
   memcpy(value, &dev->vdev->caps.u.amdgpu.address32_hi, 4);
   return 0;
}

static int
amdvgpu_query_firmware_version(amdvgpu_device_handle dev, unsigned fw_type, unsigned ip_instance, unsigned index,
                               uint32_t *version, uint32_t *feature)
{
   struct drm_amdgpu_info request;
   struct drm_amdgpu_info_firmware firmware = {};
   int r;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)&firmware;
   request.return_size = sizeof(firmware);
   request.query = AMDGPU_INFO_FW_VERSION;
   request.query_fw.fw_type = fw_type;
   request.query_fw.ip_instance = ip_instance;
   request.query_fw.index = index;

   r = amdvgpu_query_info(dev, &request);

   *version = firmware.ver;
   *feature = firmware.feature;
   return r;
}

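/* The alignment and GPU info queries below are answered from the cached
 * capabilities (dev->vdev->caps) rather than by a host round-trip.
 */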
static int
amdvgpu_query_buffer_size_alignment(amdvgpu_device_handle dev,
                                    struct amdgpu_buffer_size_alignments *info)
{
   memcpy(info, &dev->vdev->caps.u.amdgpu.alignments, sizeof(*info));
   return 0;
}

static int
amdvgpu_query_gpu_info(amdvgpu_device_handle dev, struct amdgpu_gpu_info *info)
{
   memcpy(info, &dev->vdev->caps.u.amdgpu.gpu_info, sizeof(*info));
   return 0;
}

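/* Send SET_METADATA for the BO identified by its virtio resource id; the
 * variable-size UMD metadata blob is appended after the fixed request.
 */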
int
amdvgpu_bo_set_metadata(amdvgpu_device_handle dev, uint32_t res_id,
                        struct amdgpu_bo_metadata *info)
{
   unsigned req_len = sizeof(struct amdgpu_ccmd_set_metadata_req) + info->size_metadata;
   unsigned rsp_len = sizeof(struct amdgpu_ccmd_rsp);

   uint8_t buf[req_len];
   struct amdgpu_ccmd_set_metadata_req *req = (void *)buf;
   struct amdgpu_ccmd_rsp *rsp;

   req->hdr = AMDGPU_CCMD(SET_METADATA, req_len);
   req->res_id = res_id;
   req->flags = info->flags;
   req->tiling_info = info->tiling_info;
   req->size_metadata = info->size_metadata;
   memcpy(req->umd_metadata, info->umd_metadata, info->size_metadata);

   rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, rsp_len);
   return vdrm_send_req_wrapper(dev, &req->hdr, rsp, true);
}

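/* Ask the host for the allocation parameters and metadata of the BO backing
 * the given virtio resource id.
 */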
int amdvgpu_bo_query_info(amdvgpu_device_handle dev, uint32_t res_id, struct amdgpu_bo_info *info) {
   unsigned req_len = sizeof(struct amdgpu_ccmd_bo_query_info_req);
   unsigned rsp_len = sizeof(struct amdgpu_ccmd_bo_query_info_rsp);

   uint8_t buf[req_len];
   struct amdgpu_ccmd_bo_query_info_req *req = (void *)buf;
   struct amdgpu_ccmd_bo_query_info_rsp *rsp;

   req->hdr = AMDGPU_CCMD(BO_QUERY_INFO, req_len);
   req->res_id = res_id;
   req->pad = 0;

   rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, rsp_len);

   int r = vdrm_send_req_wrapper(dev, &req->hdr, &rsp->hdr, true);
   if (r)
      return r;

   info->alloc_size = rsp->info.alloc_size;
   info->phys_alignment = rsp->info.phys_alignment;
   info->preferred_heap = rsp->info.preferred_heap;
   info->alloc_flags = rsp->info.alloc_flags;

   info->metadata.flags = rsp->info.metadata.flags;
   info->metadata.tiling_info = rsp->info.metadata.tiling_info;
   info->metadata.size_metadata = rsp->info.metadata.size_metadata;
   memcpy(info->metadata.umd_metadata, rsp->info.metadata.umd_metadata,
          MIN2(sizeof(info->metadata.umd_metadata), rsp->info.metadata.size_metadata));

   return 0;
}

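/* Create a host amdgpu context. When multiple contexts are not allowed,
 * reuse the existing one and bump its refcount instead of asking the host
 * for a new context.
 */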
int amdvgpu_cs_ctx_create2(amdvgpu_device_handle dev, int32_t priority,
                           uint32_t *ctx_virtio) {
   simple_mtx_lock(&dev->contexts_mutex);
   if (!dev->allow_multiple_amdgpu_ctx && _mesa_hash_table_num_entries(&dev->contexts)) {
      assert(_mesa_hash_table_num_entries(&dev->contexts) == 1);
      struct hash_entry *he = _mesa_hash_table_random_entry(&dev->contexts, NULL);
      struct amdvgpu_context *ctx = he->data;
      p_atomic_inc(&ctx->refcount);
      *ctx_virtio = (uint32_t)(uintptr_t)he->key;
      simple_mtx_unlock(&dev->contexts_mutex);
      return 0;
   }

   struct amdgpu_ccmd_create_ctx_req req = {
      .priority = priority,
      .flags = 0,
   };
   struct amdgpu_ccmd_create_ctx_rsp *rsp;

   req.hdr = AMDGPU_CCMD(CREATE_CTX, sizeof(req));

   rsp = vdrm_alloc_rsp(dev->vdev, &req.hdr, sizeof(struct amdgpu_ccmd_create_ctx_rsp));
   int r = vdrm_send_req_wrapper(dev, &req.hdr, &rsp->hdr, true);

   if (r)
      goto unlock;

   if (rsp->ctx_id == 0) {
      r = -ENOTSUP;
      goto unlock;
   }

   struct amdvgpu_context *ctx = calloc(1, sizeof(struct amdvgpu_context) + dev->num_virtio_rings * sizeof(uint64_t));
   if (ctx == NULL) {
      r = -ENOMEM;
      goto unlock;
   }

   p_atomic_inc(&ctx->refcount);
   ctx->host_context_id = rsp->ctx_id;
   for (int i = 0; i < dev->num_virtio_rings; i++)
      ctx->ring_next_seqno[i] = 1;
   *ctx_virtio = ctx->host_context_id;

   _mesa_hash_table_insert(&dev->contexts, (void*)(uintptr_t)ctx->host_context_id, ctx);

unlock:
   simple_mtx_unlock(&dev->contexts_mutex);

   return r;
}

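/* Drop a reference to a context and, once it is no longer used, ask the host
 * to destroy it (CREATE_CTX with the DESTROY flag).
 */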
int amdvgpu_cs_ctx_free(amdvgpu_device_handle dev, uint32_t ctx_id)
{
   struct hash_entry *he = _mesa_hash_table_search(&dev->contexts,
                                                   (void*)(uintptr_t)ctx_id);

   if (!he)
      return -1;

   if (!dev->allow_multiple_amdgpu_ctx) {
      struct amdvgpu_context *ctx = he->data;
      if (p_atomic_dec_return(&ctx->refcount))
         return 0;
   }

   struct amdgpu_ccmd_create_ctx_req req = {
      .id = ctx_id,
      .flags = AMDGPU_CCMD_CREATE_CTX_DESTROY,
   };
   req.hdr = AMDGPU_CCMD(CREATE_CTX, sizeof(req));

   _mesa_hash_table_remove(&dev->contexts, he);

   free(he->data);

   struct amdgpu_ccmd_create_ctx_rsp *rsp;
   rsp = vdrm_alloc_rsp(dev->vdev, &req.hdr, sizeof(struct amdgpu_ccmd_create_ctx_rsp));

   return vdrm_send_req_wrapper(dev, &req.hdr, &rsp->hdr, false);
}

int
amdvgpu_device_get_fd(amdvgpu_device_handle dev) {
   return dev->fd;
}

const char *
amdvgpu_get_marketing_name(amdvgpu_device_handle dev) {
   return dev->vdev->caps.u.amdgpu.marketing_name;
}

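/* Map an IB chunk's (ip_type, ring) pair to the virtio ring index used for
 * this device.
 */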
static uint32_t cs_chunk_ib_to_virtio_ring_idx(amdvgpu_device_handle dev,
                                               struct drm_amdgpu_cs_chunk_ib *ib) {
   assert(dev->virtio_ring_mapping[ib->ip_type] != 0);
   return dev->virtio_ring_mapping[ib->ip_type] + ib->ring;
}

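/* Submit a command stream. The chunks are flattened into a single CS_SUBMIT
 * request (descriptor table followed by each chunk's payload), while syncobj
 * chunks are translated into virtgpu execbuffer in/out syncobjs.
 */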
int
amdvgpu_cs_submit_raw2(amdvgpu_device_handle dev, uint32_t ctx_id,
                       uint32_t bo_list_handle,
                       int num_chunks, struct drm_amdgpu_cs_chunk *chunks,
                       uint64_t *seqno)
{
   unsigned rsp_len = sizeof(struct amdgpu_ccmd_rsp);

   struct extra_data_info {
      const void *ptr;
      uint32_t size;
   } extra[1 + num_chunks];

   int chunk_count = 0;
   unsigned offset = 0;

   struct desc {
      uint16_t chunk_id;
      uint16_t length_dw;
      uint32_t offset;
   };
   struct desc descriptors[num_chunks];

   unsigned virtio_ring_idx = 0xffffffff;

   uint32_t syncobj_in_count = 0, syncobj_out_count = 0;
   struct drm_virtgpu_execbuffer_syncobj *syncobj_in = NULL;
   struct drm_virtgpu_execbuffer_syncobj *syncobj_out = NULL;
   uint8_t *buf = NULL;
   int ret;

   const bool sync_submit = dev->sync_cmd & (1u << AMDGPU_CCMD_CS_SUBMIT);

   struct hash_entry *he = _mesa_hash_table_search(&dev->contexts, (void*)(uintptr_t)ctx_id);
   if (!he)
      return -1;

   struct amdvgpu_context *vctx = he->data;

   /* Extract pointers from each chunk and copy them to the payload. */
   for (int i = 0; i < num_chunks; i++) {
      int extra_idx = 1 + chunk_count;
      if (chunks[i].chunk_id == AMDGPU_CHUNK_ID_BO_HANDLES) {
         struct drm_amdgpu_bo_list_in *list_in = (void*) (uintptr_t)chunks[i].chunk_data;
         extra[extra_idx].ptr = (void*) (uintptr_t)list_in->bo_info_ptr;
         extra[extra_idx].size = list_in->bo_info_size * list_in->bo_number;
      } else if (chunks[i].chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
                 chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE ||
                 chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB) {
         extra[extra_idx].ptr = (void*)(uintptr_t)chunks[i].chunk_data;
         extra[extra_idx].size = chunks[i].length_dw * 4;

         if (chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB) {
            struct drm_amdgpu_cs_chunk_ib *ib = (void*)(uintptr_t)chunks[i].chunk_data;
            virtio_ring_idx = cs_chunk_ib_to_virtio_ring_idx(dev, ib);
         }
      } else if (chunks[i].chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT ||
                 chunks[i].chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
         /* Translate from amdgpu CHUNK_ID_SYNCOBJ_* to drm_virtgpu_execbuffer_syncobj */
         struct drm_amdgpu_cs_chunk_sem *amd_syncobj = (void*) (uintptr_t)chunks[i].chunk_data;
         unsigned syncobj_count = (chunks[i].length_dw * 4) / sizeof(struct drm_amdgpu_cs_chunk_sem);
         struct drm_virtgpu_execbuffer_syncobj *syncobjs =
            calloc(syncobj_count, sizeof(struct drm_virtgpu_execbuffer_syncobj));

         if (syncobjs == NULL) {
            ret = -ENOMEM;
            goto error;
         }

         for (int j = 0; j < syncobj_count; j++)
            syncobjs[j].handle = amd_syncobj[j].handle;

         if (chunks[i].chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
            syncobj_in_count = syncobj_count;
            syncobj_in = syncobjs;
         } else {
            syncobj_out_count = syncobj_count;
            syncobj_out = syncobjs;
         }

         /* This chunk was converted to virtgpu UAPI so we don't need to forward it
          * to the host.
          */
         continue;
      } else {
         mesa_loge("Unhandled chunk_id: %d\n", chunks[i].chunk_id);
         continue;
      }
      descriptors[chunk_count].chunk_id = chunks[i].chunk_id;
      descriptors[chunk_count].offset = offset;
      descriptors[chunk_count].length_dw = extra[extra_idx].size / 4;
      offset += extra[extra_idx].size;
      chunk_count++;
   }
   assert(virtio_ring_idx != 0xffffffff);

   /* Copy the descriptors at the beginning. */
   extra[0].ptr = descriptors;
   extra[0].size = chunk_count * sizeof(struct desc);

   /* Determine how much extra space we need. */
   uint32_t req_len = sizeof(struct amdgpu_ccmd_cs_submit_req);
   uint32_t e_offset = req_len;
   for (unsigned i = 0; i < 1 + chunk_count; i++)
      req_len += extra[i].size;

   /* Allocate the command buffer. */
   buf = malloc(req_len);
   if (buf == NULL) {
      ret = -ENOMEM;
      goto error;
   }
   struct amdgpu_ccmd_cs_submit_req *req = (void*)buf;
   req->hdr = AMDGPU_CCMD(CS_SUBMIT, req_len);
   req->ctx_id = ctx_id;
   req->num_chunks = chunk_count;
   req->ring_idx = virtio_ring_idx;
   req->pad = 0;

   UNUSED struct amdgpu_ccmd_rsp *rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, rsp_len);

   /* Copy varying data after the fixed part of cs_submit_req. */
   for (unsigned i = 0; i < 1 + chunk_count; i++) {
      if (extra[i].size) {
         memcpy(&buf[e_offset], extra[i].ptr, extra[i].size);
         e_offset += extra[i].size;
      }
   }

   /* Optional fence out (if we want synchronous submits). */
   int *fence_fd_ptr = NULL;

   struct vdrm_execbuf_params vdrm_execbuf_p = {
      .ring_idx = virtio_ring_idx,
      .req = &req->hdr,
      .handles = NULL,
      .num_handles = 0,
      .in_syncobjs = syncobj_in,
      .out_syncobjs = syncobj_out,
      .has_in_fence_fd = 0,
      .needs_out_fence_fd = sync_submit,
      .fence_fd = 0,
      .num_in_syncobjs = syncobj_in_count,
      .num_out_syncobjs = syncobj_out_count,
   };

   if (sync_submit)
      fence_fd_ptr = &vdrm_execbuf_p.fence_fd;

   /* Push job to the host. */
   ret = vdrm_execbuf(dev->vdev, &vdrm_execbuf_p);

   /* Determine the host seqno for this job. */
   *seqno = vctx->ring_next_seqno[virtio_ring_idx - 1]++;

   if (ret == 0 && fence_fd_ptr) {
      /* Sync execution */
      sync_wait(*fence_fd_ptr, -1);
      close(*fence_fd_ptr);
      vdrm_host_sync(dev->vdev, &req->hdr);
   }

error:
   free(buf);
   free(syncobj_in);
   free(syncobj_out);

   return ret;
}

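/* The host reports GPU resets through the async_error field of the shared
 * memory page; any pending error is reported as a reset with VRAM lost.
 */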
int amdvgpu_cs_query_reset_state2(amdvgpu_device_handle dev, uint32_t ctx_id,
                                  uint64_t *flags)
{
   *flags = 0;

   if (to_amdvgpu_shmem(dev->vdev->shmem)->async_error > 0)
      *flags = AMDGPU_CTX_QUERY2_FLAGS_RESET | AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

   return 0;
}

int amdvgpu_cs_query_fence_status(amdvgpu_device_handle dev,
                                  uint32_t ctx_id,
                                  uint32_t ip_type,
                                  uint32_t ip_instance, uint32_t ring,
                                  uint64_t fence_seq_no,
                                  uint64_t timeout_ns, uint64_t flags,
                                  uint32_t *expired)
{
   unsigned req_len = sizeof(struct amdgpu_ccmd_cs_query_fence_status_req);
   unsigned rsp_len = sizeof(struct amdgpu_ccmd_cs_query_fence_status_rsp);

   uint8_t buf[req_len];
   struct amdgpu_ccmd_cs_query_fence_status_req *req = (void *)buf;
   struct amdgpu_ccmd_cs_query_fence_status_rsp *rsp;

   req->hdr = AMDGPU_CCMD(CS_QUERY_FENCE_STATUS, req_len);
   req->ctx_id = ctx_id;
   req->ip_type = ip_type;
   req->ip_instance = ip_instance;
   req->ring = ring;
   req->fence = fence_seq_no;
   req->timeout_ns = timeout_ns;
   req->flags = flags;

   rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, rsp_len);

   int r = vdrm_send_req_wrapper(dev, &req->hdr, &rsp->hdr, true);

   if (r == 0)
      *expired = rsp->expired;

   return r;
}

int amdvgpu_vm_reserve_vmid(amdvgpu_device_handle dev, int reserve) {
   unsigned req_len = sizeof(struct amdgpu_ccmd_reserve_vmid_req);

   uint8_t buf[req_len];
   struct amdgpu_ccmd_reserve_vmid_req *req = (void *)buf;
   struct amdgpu_ccmd_rsp *rsp;

   /* Fill the header before calling vdrm_alloc_rsp(), which records the
    * response offset in it (same ordering as the other commands in this file).
    */
   req->hdr = AMDGPU_CCMD(RESERVE_VMID, req_len);
   req->flags = reserve ? 0 : AMDGPU_CCMD_RESERVE_VMID_UNRESERVE;

   rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, sizeof(struct amdgpu_ccmd_rsp));

   return vdrm_send_req_wrapper(dev, &req->hdr, rsp, true);
}

int amdvgpu_cs_ctx_stable_pstate(amdvgpu_device_handle dev,
                                 uint32_t ctx_id,
                                 uint32_t op,
                                 uint32_t flags,
                                 uint32_t *out_flags) {
   unsigned req_len = sizeof(struct amdgpu_ccmd_set_pstate_req);
   unsigned rsp_len = sizeof(struct amdgpu_ccmd_set_pstate_rsp);

   uint8_t buf[req_len];
   struct amdgpu_ccmd_set_pstate_req *req = (void *)buf;
   struct amdgpu_ccmd_set_pstate_rsp *rsp;

   req->hdr = AMDGPU_CCMD(SET_PSTATE, req_len);
   req->ctx_id = ctx_id;
   req->op = op;
   req->flags = flags;
   req->pad = 0;

   rsp = vdrm_alloc_rsp(dev->vdev, &req->hdr, rsp_len);

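   /* Only wait for the host reply when the caller asked for out_flags. */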
   int r = vdrm_send_req_wrapper(dev, &req->hdr, &rsp->hdr, out_flags);

   if (r == 0 && out_flags)
      *out_flags = rsp->out_flags;

   return r;
}

int
amdvgpu_va_range_alloc(amdvgpu_device_handle dev,
                       enum amdgpu_gpu_va_range va_range_type,
                       uint64_t size,
                       uint64_t va_base_alignment,
                       uint64_t va_base_required,
                       uint64_t *va_base_allocated,
                       amdgpu_va_handle *va_range_handle,
                       uint64_t flags)
{
   return amdgpu_va_range_alloc2(dev->va_mgr, va_range_type, size,
                                 va_base_alignment, va_base_required,
                                 va_base_allocated, va_range_handle,
                                 flags);
}