1 /*
2 * Copyright 2024 Advanced Micro Devices, Inc.
3 *
4 * SPDX-License-Identifier: MIT
5 */
6
7 #include "amdgpu_virtio_private.h"
8
9 #include "util/bitscan.h"
10 #include "util/log.h"
11 #include "util/os_file.h"
12 #include "util/u_debug.h"
13
14 #include <xf86drm.h>
15
/* amdvgpu_device manages the virtual GPU.
 *
 * It owns a vdrm_device instance and the rings, and manages seqnos.
 * Since it's a drop-in replacement for libdrm_amdgpu's amdgpu_device,
 * it follows its behavior: if the same device is opened multiple times,
 * the same amdvgpu_device will be used.
 */
23 static simple_mtx_t dev_mutex = SIMPLE_MTX_INITIALIZER;
24 static amdvgpu_device_handle dev_list;
25
/* Compare two DRM fds by the primary device node they refer to.
 *
 * Returns strcmp()-style semantics: 0 when both fds name the same device.
 * If either name cannot be resolved the fds are treated as equal
 * (return 0), mirroring libdrm_amdgpu's behavior.
 */
static int fd_compare(int fd1, int fd2)
{
   int ret = 0;
   char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
   char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);

   if (name1 && name2)
      ret = strcmp(name1, name2);

   /* free(NULL) is a no-op, so no need to guard. */
   free(name1);
   free(name2);

   return ret;
}
44
/* Atomically move the reference held in *dst onto src.
 *
 * Follows the libdrm_amdgpu update_references() pattern: the old
 * reference in *dst is dropped, and if its refcount reaches zero the
 * device is torn down. Callers must hold dev_mutex, since this walks
 * and modifies the global dev_list.
 */
static void amdvgpu_device_reference(struct amdvgpu_device **dst,
                                     struct amdvgpu_device *src)
{
   /* update_references() returns true when the old object's refcount
    * dropped to zero and it must be destroyed.
    */
   if (update_references(*dst ? &(*dst)->refcount : NULL,
                         src ? &src->refcount : NULL)) {
      /* Unlink the dying device from the global singly-linked dev_list. */
      struct amdvgpu_device *dev, *prev = NULL;
      for (dev = dev_list; dev; dev = dev->next) {
         if (dev == (*dst)) {
            if (prev == NULL)
               dev_list = dev->next;
            else
               prev->next = dev->next;
            break;
         }
         prev = dev;
      }

      dev = *dst;

      /* Destroy BOs before closing vdrm */
      hash_table_foreach(dev->handle_to_vbo, entry) {
         struct amdvgpu_bo *bo = entry->data;
         amdvgpu_bo_free(dev, bo);
      }
      _mesa_hash_table_destroy(dev->handle_to_vbo, NULL);
      /* Destroy contexts. */
      hash_table_foreach(&dev->contexts, entry)
         amdvgpu_cs_ctx_free(dev, (uint32_t)(uintptr_t)entry->key);
      _mesa_hash_table_clear(&dev->contexts, NULL);

      simple_mtx_destroy(&dev->handle_to_vbo_mutex);
      simple_mtx_destroy(&dev->contexts_mutex);

      amdgpu_va_manager_deinit(dev->va_mgr);

      /* Close the virtio-gpu connection before the dup'd DRM fd it wraps. */
      vdrm_device_close(dev->vdev);

      close(dev->fd);
      free(dev);
   }

   *dst = src;
}
88
/* Drop one reference on the device.
 *
 * The device (and its slot in the global dev_list) is destroyed when the
 * last reference goes away. Always returns 0, matching the libdrm_amdgpu
 * entry point it replaces.
 */
int amdvgpu_device_deinitialize(amdvgpu_device_handle dev)
{
   simple_mtx_lock(&dev_mutex);
   amdvgpu_device_reference(&dev, NULL);
   simple_mtx_unlock(&dev_mutex);
   return 0;
}
95
/* Open (or reuse) the amdvgpu device backing the given DRM fd.
 *
 * If a device for the same GPU was already opened, a new reference to it
 * is returned instead (libdrm_amdgpu semantics). On success returns 0 and
 * stores the device and the virtio caps version in the out parameters;
 * on failure returns -1 with no resources leaked.
 */
int amdvgpu_device_initialize(int fd, uint32_t *drm_major, uint32_t *drm_minor,
                              amdvgpu_device_handle* dev_out) {
   simple_mtx_lock(&dev_mutex);
   amdvgpu_device_handle dev;

   /* Reuse an existing device if fd refers to the same GPU node. */
   for (dev = dev_list; dev; dev = dev->next)
      if (fd_compare(dev->fd, fd) == 0)
         break;

   if (dev) {
      *dev_out = NULL;
      amdvgpu_device_reference(dev_out, dev);
      *drm_major = dev->vdev->caps.version_major;
      *drm_minor = dev->vdev->caps.version_minor;
      simple_mtx_unlock(&dev_mutex);
      return 0;
   }

   /* fd is owned by the amdgpu_screen_winsys that called this function.
    * amdgpu_screen_winsys' lifetime may be shorter than the device's one,
    * so dup fd to tie its lifetime to the device's one.
    */
   fd = os_dupfd_cloexec(fd);
   if (fd < 0) {
      mesa_loge("os_dupfd_cloexec failed\n");
      simple_mtx_unlock(&dev_mutex);
      return -1;
   }

   struct vdrm_device *vdev = vdrm_device_connect(fd, VIRTGPU_DRM_CONTEXT_AMDGPU);
   if (vdev == NULL) {
      mesa_loge("vdrm_device_connect failed\n");
      /* Don't leak the fd we just dup'd. */
      close(fd);
      simple_mtx_unlock(&dev_mutex);
      return -1;
   }

   dev = calloc(1, sizeof(struct amdvgpu_device));
   if (dev == NULL) {
      vdrm_device_close(vdev);
      close(fd);
      simple_mtx_unlock(&dev_mutex);
      return -1;
   }
   dev->refcount = 1;
   dev->next = dev_list;
   dev_list = dev;
   dev->fd = fd;
   dev->vdev = vdev;

   simple_mtx_init(&dev->handle_to_vbo_mutex, mtx_plain);
   simple_mtx_init(&dev->contexts_mutex, mtx_plain);

   dev->handle_to_vbo = _mesa_hash_table_create_u32_keys(NULL);

   /* Blob id 0 is reserved; start handing out ids at 1. */
   p_atomic_set(&dev->next_blob_id, 1);

   *dev_out = dev;

   simple_mtx_unlock(&dev_mutex);

   /* Zero-init so padding/unused fields don't carry stack garbage. */
   struct drm_amdgpu_info info = {0};
   info.return_pointer = (uintptr_t)&dev->dev_info;
   info.query = AMDGPU_INFO_DEV_INFO;
   info.return_size = sizeof(dev->dev_info);
   int r = amdvgpu_query_info(dev, &info);
   assert(r == 0);
   (void)r; /* silence unused warning in NDEBUG builds */

   /* Ring idx 0 is reserved for commands running on CPU. */
   unsigned next_ring_idx = 1;
   for (unsigned i = 0; i < AMD_NUM_IP_TYPES; ++i) {
      struct drm_amdgpu_info_hw_ip ip_info = {0};
      struct drm_amdgpu_info request = {0};
      request.return_pointer = (uintptr_t)&ip_info;
      request.return_size = sizeof(ip_info);
      request.query = AMDGPU_INFO_HW_IP_INFO;
      request.query_hw_ip.type = i;
      request.query_hw_ip.ip_instance = 0;
      r = amdvgpu_query_info(dev, &request);
      if (r == 0 && ip_info.available_rings) {
         /* One virtio ring per available hw ring of this IP type. */
         int count = util_bitcount(ip_info.available_rings);
         dev->virtio_ring_mapping[i] = next_ring_idx;
         next_ring_idx += count;
      }
   }
   /* VIRTGPU_CONTEXT_PARAM_NUM_RINGS is hardcoded for now. */
   assert(next_ring_idx <= 64);
   dev->num_virtio_rings = next_ring_idx - 1;

   dev->va_mgr = amdgpu_va_manager_alloc();
   amdgpu_va_manager_init(dev->va_mgr,
      dev->dev_info.virtual_address_offset, dev->dev_info.virtual_address_max,
      dev->dev_info.high_va_offset, dev->dev_info.high_va_max,
      dev->dev_info.virtual_address_alignment);

   _mesa_hash_table_init(&dev->contexts, NULL,
                         _mesa_hash_pointer, _mesa_key_pointer_equal);
   dev->allow_multiple_amdgpu_ctx = debug_get_bool_option("MULTIPLE_AMDGPU_CTX", false);
   dev->sync_cmd = debug_get_num_option("VIRTIO_SYNC_CMD", 0);

   *drm_major = dev->vdev->caps.version_major;
   *drm_minor = dev->vdev->caps.version_minor;

   return 0;
}
189