/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * based on amdgpu winsys.
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */
#include "radv_amdgpu_winsys.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "drm-uapi/amdgpu_drm.h"
#include "ac_linux_drm.h"
#include "ac_surface.h"
#include "radv_amdgpu_bo.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_winsys_public.h"
#include "radv_debug.h"
#include "vk_drm_syncobj.h"
#include "xf86drm.h"

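/* Filter out GPUs that RADV cannot drive through this winsys. */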
static bool
radv_is_gpu_supported(const struct radeon_info *info)
{
   /* AMD CDNA isn't supported. */
   if (info->gfx_level == GFX9 && !info->has_graphics)
      return false;

   /* Unknown GPU generations aren't supported. */
   if (info->gfx_level > GFX12)
      return false;

   return true;
}

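/* Query the device information from the kernel and apply the winsys-level
 * overrides and limits RADV needs on top of it.
 */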
static bool
do_winsys_init(struct radv_amdgpu_winsys *ws, int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, true))
      return false;

   if (!radv_is_gpu_supported(&ws->info))
      return false;

   /*
    * Override the max submits on video queues.
    * If you submit multiple session contexts in the same IB sequence, the
    * hardware gets upset as it expects a kernel fence to be emitted to reset
    * the session context in the hardware.
    * Avoid this problem by never submitting more than one IB at a time.
    * This should possibly be fixed in the kernel, and if it is, this
    * workaround can be removed.
    */
   for (enum amd_ip_type ip_type = AMD_IP_UVD; ip_type <= AMD_IP_VCN_ENC; ip_type++)
      ws->info.max_submitted_ibs[ip_type] = 1;

   ws->info.ip[AMD_IP_SDMA].num_queues = MIN2(ws->info.ip[AMD_IP_SDMA].num_queues, MAX_RINGS_PER_TYPE);
   ws->info.ip[AMD_IP_COMPUTE].num_queues = MIN2(ws->info.ip[AMD_IP_COMPUTE].num_queues, MAX_RINGS_PER_TYPE);

   ws->use_ib_bos = true;
   return true;
}

static void
radv_amdgpu_winsys_query_info(struct radeon_winsys *rws, struct radeon_info *gpu_info)
{
   *gpu_info = ((struct radv_amdgpu_winsys *)rws)->info;
}

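/* Allocation counters (RADEON_ALLOCATED_*) are tracked by the winsys itself;
 * everything else is queried from the kernel through the amdgpu INFO ioctl
 * wrappers (ac_drm_query_info, ac_drm_query_heap_info, ac_drm_query_sensor_info).
 */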
static uint64_t
radv_amdgpu_winsys_query_value(struct radeon_winsys *rws, enum radeon_value_id value)
{
   struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;
   struct amdgpu_heap_info heap = {0};
   uint64_t retval = 0;

   switch (value) {
   case RADEON_ALLOCATED_VRAM:
      return ws->allocated_vram;
   case RADEON_ALLOCATED_VRAM_VIS:
      return ws->allocated_vram_vis;
   case RADEON_ALLOCATED_GTT:
      return ws->allocated_gtt;
   case RADEON_TIMESTAMP:
      ac_drm_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_BYTES_MOVED:
      ac_drm_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      ac_drm_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      ac_drm_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      ac_drm_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      ac_drm_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      ac_drm_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      ac_drm_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      ac_drm_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      ac_drm_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   default:
      unreachable("invalid query value");
   }

   return 0;
}

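/* reg_offset is given in bytes while the underlying register read works on
 * dword offsets, hence the division by 4.
 */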
static bool
radv_amdgpu_winsys_read_registers(struct radeon_winsys *rws, unsigned reg_offset, unsigned num_registers, uint32_t *out)
{
   struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;

   return ac_drm_read_mm_registers(ws->dev, reg_offset / 4, num_registers, 0xffffffff, 0, out) == 0;
}

static const char *
radv_amdgpu_winsys_get_chip_name(struct radeon_winsys *rws)
{
   ac_drm_device *dev = ((struct radv_amdgpu_winsys *)rws)->dev;

   return ac_drm_get_marketing_name(dev);
}

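/* Retrieve the last GPUVM fault recorded by the kernel, if any. Returns false
 * when no fault information is available.
 */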
static bool
radv_amdgpu_winsys_query_gpuvm_fault(struct radeon_winsys *rws, struct radv_winsys_gpuvm_fault_info *fault_info)
{
   struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;
   struct drm_amdgpu_info_gpuvm_fault gpuvm_fault = {0};
   int r;

   r = ac_drm_query_info(ws->dev, AMDGPU_INFO_GPUVM_FAULT, sizeof(gpuvm_fault), &gpuvm_fault);
   if (r < 0) {
      fprintf(stderr, "radv/amdgpu: Failed to query the last GPUVM fault (%d).\n", r);
      return false;
   }

   /* When the GPUVM fault status is 0, no faults happened. */
   if (!gpuvm_fault.status)
      return false;

   fault_info->addr = gpuvm_fault.addr;
   fault_info->status = gpuvm_fault.status;
   fault_info->vmhub = gpuvm_fault.vmhub;

   return true;
}

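/* Winsys objects are shared within a process: "winsyses" maps each
 * ac_drm_device to its radv_amdgpu_winsys and is guarded by
 * winsys_creation_mutex, which also protects the per-winsys refcount.
 */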
static simple_mtx_t winsys_creation_mutex = SIMPLE_MTX_INITIALIZER;
static struct hash_table *winsyses = NULL;

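/* Drop one reference on the winsys; its resources are only released once the
 * last reference is gone.
 */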
static void
radv_amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;
   bool destroy = false;

   simple_mtx_lock(&winsys_creation_mutex);
   if (!--ws->refcount) {
      _mesa_hash_table_remove_key(winsyses, ws->dev);

      /* Clean the hashtable up if empty, though there is no
       * empty function. */
      if (_mesa_hash_table_num_entries(winsyses) == 0) {
         _mesa_hash_table_destroy(winsyses, NULL);
         winsyses = NULL;
      }

      destroy = true;
   }
   simple_mtx_unlock(&winsys_creation_mutex);
   if (!destroy)
      return;

   u_rwlock_destroy(&ws->global_bo_list.lock);
   free(ws->global_bo_list.bos);

   if (ws->reserve_vmid)
      ac_drm_vm_unreserve_vmid(ws->dev, 0);

   u_rwlock_destroy(&ws->log_bo_list_lock);
   ac_drm_device_deinitialize(ws->dev);
   FREE(rws);
}

static int
radv_amdgpu_winsys_get_fd(struct radeon_winsys *rws)
{
   struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;
   return ws->fd;
}

static const struct vk_sync_type *const *
radv_amdgpu_winsys_get_sync_types(struct radeon_winsys *rws)
{
   struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;
   return ws->sync_types;
}

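/* Create (or reuse) the winsys for the given DRM fd. If a winsys already
 * exists for the same amdgpu device, its refcount is bumped and it is
 * returned, provided the debug/perftest options match.
 *
 * Rough usage sketch (the real callers live elsewhere in RADV; this is only
 * illustrative):
 *
 *    struct radeon_winsys *ws =
 *       radv_amdgpu_winsys_create(fd, debug_flags, perftest_flags, false, false);
 *    if (!ws)
 *       return NULL; // report the error to the caller
 *    ...
 *    ws->destroy(ws);
 */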
struct radeon_winsys *
radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags, bool reserve_vmid, bool is_virtio)
{
   uint32_t drm_major, drm_minor, r;
   ac_drm_device *dev;
   struct radv_amdgpu_winsys *ws = NULL;

   r = ac_drm_device_initialize(fd, is_virtio, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "radv/amdgpu: failed to initialize device.\n");
      return NULL;
   }

   /* We have to keep this lock till insertion. */
   simple_mtx_lock(&winsys_creation_mutex);
   if (!winsyses)
      winsyses = _mesa_pointer_hash_table_create(NULL);
   if (!winsyses) {
      fprintf(stderr, "radv/amdgpu: failed to alloc winsys hash table.\n");
      goto fail;
   }

   struct hash_entry *entry = _mesa_hash_table_search(winsyses, dev);
   if (entry) {
      ws = (struct radv_amdgpu_winsys *)entry->data;
      ++ws->refcount;
   }

   if (ws) {
      simple_mtx_unlock(&winsys_creation_mutex);
      ac_drm_device_deinitialize(dev);

      /* Check that options don't differ from the existing winsys. */
      if (((debug_flags & RADV_DEBUG_ALL_BOS) && !ws->debug_all_bos) ||
          ((debug_flags & RADV_DEBUG_HANG) && !ws->debug_log_bos) ||
          ((debug_flags & RADV_DEBUG_NO_IBS) && ws->use_ib_bos) || (perftest_flags != ws->perftest)) {
         fprintf(stderr, "radv/amdgpu: Found options that differ from the existing winsys.\n");
         return NULL;
      }

      /* RADV_DEBUG_ZERO_VRAM is the only option that is allowed to be set again. */
      if (debug_flags & RADV_DEBUG_ZERO_VRAM)
         ws->zero_all_vram_allocs = true;

      return &ws->base;
   }

   ws = calloc(1, sizeof(struct radv_amdgpu_winsys));
   if (!ws)
      goto fail;

   ws->refcount = 1;
   ws->dev = dev;
   ws->fd = ac_drm_device_get_fd(dev);
   ws->info.drm_major = drm_major;
   ws->info.drm_minor = drm_minor;
   ws->info.is_virtio = is_virtio;
   if (!do_winsys_init(ws, fd))
      goto winsys_fail;

   ws->debug_all_bos = !!(debug_flags & RADV_DEBUG_ALL_BOS);
   ws->debug_log_bos = debug_flags & RADV_DEBUG_HANG;
   if (debug_flags & RADV_DEBUG_NO_IBS)
      ws->use_ib_bos = false;

   ws->reserve_vmid = reserve_vmid;
   if (ws->reserve_vmid) {
      r = ac_drm_vm_reserve_vmid(ws->dev, 0);
      if (r) {
         fprintf(stderr, "radv/amdgpu: failed to reserve vmid.\n");
         goto winsys_fail;
      }
   }
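
   /* Advertise the vk_sync types the winsys supports: DRM syncobjs, plus an
    * emulated timeline type when the syncobj implementation cannot provide
    * timeline semantics itself.
    */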
   int num_sync_types = 0;

   ws->syncobj_sync_type = vk_drm_syncobj_get_type(ws->fd);
   if (ws->syncobj_sync_type.features) {
      /* multi wait is always supported */
      ws->syncobj_sync_type.features |= VK_SYNC_FEATURE_GPU_MULTI_WAIT;

      if (!ws->info.has_timeline_syncobj && ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE) {
         /* Disable timeline feature if it was disabled in the driver. */
         assert(is_virtio);
         ws->syncobj_sync_type.get_value = NULL;
         ws->syncobj_sync_type.features &= ~VK_SYNC_FEATURE_TIMELINE;
      }

      ws->sync_types[num_sync_types++] = &ws->syncobj_sync_type;
      if (!(ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE)) {
         ws->emulated_timeline_sync_type = vk_sync_timeline_get_type(&ws->syncobj_sync_type);
         ws->sync_types[num_sync_types++] = &ws->emulated_timeline_sync_type.sync;
      }
   }

   ws->sync_types[num_sync_types++] = NULL;
   assert(num_sync_types <= ARRAY_SIZE(ws->sync_types));

   ws->perftest = perftest_flags;
   ws->zero_all_vram_allocs = debug_flags & RADV_DEBUG_ZERO_VRAM;
   u_rwlock_init(&ws->global_bo_list.lock);
   list_inithead(&ws->log_bo_list);
   u_rwlock_init(&ws->log_bo_list_lock);
   ws->base.query_info = radv_amdgpu_winsys_query_info;
   ws->base.query_value = radv_amdgpu_winsys_query_value;
   ws->base.read_registers = radv_amdgpu_winsys_read_registers;
   ws->base.get_chip_name = radv_amdgpu_winsys_get_chip_name;
   ws->base.query_gpuvm_fault = radv_amdgpu_winsys_query_gpuvm_fault;
   ws->base.destroy = radv_amdgpu_winsys_destroy;
   ws->base.get_fd = radv_amdgpu_winsys_get_fd;
   ws->base.get_sync_types = radv_amdgpu_winsys_get_sync_types;
   radv_amdgpu_bo_init_functions(ws);
   radv_amdgpu_cs_init_functions(ws);

   _mesa_hash_table_insert(winsyses, dev, ws);
   simple_mtx_unlock(&winsys_creation_mutex);

   return &ws->base;

winsys_fail:
   free(ws);
fail:
   if (winsyses && _mesa_hash_table_num_entries(winsyses) == 0) {
      _mesa_hash_table_destroy(winsyses, NULL);
      winsyses = NULL;
   }
   simple_mtx_unlock(&winsys_creation_mutex);
   ac_drm_device_deinitialize(dev);
   return NULL;
}