1 /*
2 * Copyright © 2022 Imagination Technologies Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <stdint.h>
27 #include <vulkan/vulkan.h>
28 #include <xf86drm.h>
29
30 #include "drm-uapi/pvr_drm.h"
31 #include "pvr_device_info.h"
32 #include "pvr_drm.h"
33 #include "pvr_drm_bo.h"
34 #include "pvr_drm_job_compute.h"
35 #include "pvr_drm_job_null.h"
36 #include "pvr_drm_job_render.h"
37 #include "pvr_drm_job_transfer.h"
38 #include "pvr_drm_public.h"
39 #include "pvr_private.h"
40 #include "pvr_winsys.h"
41 #include "pvr_winsys_helper.h"
42 #include "vk_alloc.h"
43 #include "vk_drm_syncobj.h"
44 #include "vk_log.h"
45
pvr_drm_finish_heaps(struct pvr_drm_winsys * const drm_ws)46 static void pvr_drm_finish_heaps(struct pvr_drm_winsys *const drm_ws)
47 {
48 if (!pvr_winsys_helper_winsys_heap_finish(
49 &drm_ws->transfer_frag_heap.base)) {
50 vk_errorf(NULL,
51 VK_ERROR_UNKNOWN,
52 "Transfer fragment heap in use, can't deinit");
53 }
54
55 if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->vis_test_heap.base)) {
56 vk_errorf(NULL,
57 VK_ERROR_UNKNOWN,
58 "Visibility test heap in use, can't deinit");
59 }
60
61 if (drm_ws->rgn_hdr_heap_present) {
62 if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->rgn_hdr_heap.base)) {
63 vk_errorf(NULL,
64 VK_ERROR_UNKNOWN,
65 "Region header heap in use, can't deinit");
66 }
67 }
68
69 if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->usc_heap.base))
70 vk_errorf(NULL, VK_ERROR_UNKNOWN, "USC heap in use, can't deinit");
71
72 if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->pds_heap.base))
73 vk_errorf(NULL, VK_ERROR_UNKNOWN, "PDS heap in use, can't deinit");
74
75 if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->general_heap.base))
76 vk_errorf(NULL, VK_ERROR_UNKNOWN, "General heap in use, can't deinit");
77 }
78
pvr_drm_winsys_destroy(struct pvr_winsys * ws)79 static void pvr_drm_winsys_destroy(struct pvr_winsys *ws)
80 {
81 struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(ws);
82 struct drm_pvr_ioctl_destroy_vm_context_args destroy_vm_context_args = {
83 .handle = drm_ws->vm_context,
84 };
85
86 pvr_winsys_helper_free_static_memory(drm_ws->general_vma,
87 drm_ws->pds_vma,
88 drm_ws->usc_vma);
89
90 pvr_drm_finish_heaps(drm_ws);
91
92 pvr_ioctl(ws->render_fd,
93 DRM_IOCTL_PVR_DESTROY_VM_CONTEXT,
94 &destroy_vm_context_args,
95 VK_ERROR_UNKNOWN);
96
97 util_sparse_array_finish(&drm_ws->bo_map);
98 u_rwlock_destroy(&drm_ws->dmabuf_bo_lock);
99
100 vk_free(ws->alloc, drm_ws);
101 }
102
/**
 * Linear search a uint32_t array for a value.
 *
 * \param array Pointer to array start.
 * \param len Number of uint32_t terms to compare.
 * \param val The value to search for.
 * \return
 *  * true if val is found, or
 *  * false.
 */
static bool
pvr_u32_in_array(const uint32_t *array, const size_t len, const uint32_t val)
{
   /* size_t index matches len's type and avoids a signed/unsigned
    * comparison (an int index would also overflow for huge arrays).
    */
   for (size_t i = 0; i < len; i++) {
      if (array[i] == val)
         return true;
   }

   return false;
}
123
pvr_drm_override_quirks(struct pvr_drm_winsys * drm_ws,struct pvr_device_info * dev_info)124 static VkResult pvr_drm_override_quirks(struct pvr_drm_winsys *drm_ws,
125 struct pvr_device_info *dev_info)
126 {
127 struct drm_pvr_dev_query_quirks query = { 0 };
128 struct drm_pvr_ioctl_dev_query_args args = {
129 .type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
130 .size = sizeof(query),
131 .pointer = (__u64)&query,
132 };
133
134 /* clang-format off */
135 #define PVR_QUIRKS(x) \
136 x(48545) \
137 x(49927) \
138 x(51764) \
139 x(62269)
140 /* clang-format on */
141
142 #define PVR_QUIRK_EXPAND_COMMA(number) number,
143
144 const uint32_t supported_quirks[] = { PVR_QUIRKS(PVR_QUIRK_EXPAND_COMMA) };
145
146 #undef PVR_QUIRK_EXPAND_COMMA
147
148 VkResult result;
149
150 /* Get the length and allocate enough for it */
151 result = pvr_ioctl(drm_ws->base.render_fd,
152 DRM_IOCTL_PVR_DEV_QUERY,
153 &args,
154 VK_ERROR_INITIALIZATION_FAILED);
155 if (result != VK_SUCCESS)
156 goto out;
157
158 /* It's possible there are no quirks, so we can skip the rest. */
159 if (!query.count) {
160 result = VK_SUCCESS;
161 goto out;
162 }
163
164 query.quirks = (__u64)vk_zalloc(drm_ws->base.alloc,
165 sizeof(uint32_t) * query.count,
166 8,
167 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
168 if (!query.quirks) {
169 result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
170 goto out;
171 }
172
173 /* Get the data */
174 result = pvr_ioctl(drm_ws->base.render_fd,
175 DRM_IOCTL_PVR_DEV_QUERY,
176 &args,
177 VK_ERROR_INITIALIZATION_FAILED);
178 if (result != VK_SUCCESS)
179 goto out_free_quirks;
180
181 #define PVR_QUIRK_EXPAND_SET(number) \
182 dev_info->quirks.has_brn##number = \
183 pvr_u32_in_array((uint32_t *)query.quirks, query.count, number);
184
185 /*
186 * For each quirk, check that if it is a "must have" that it is set in
187 * dev_info, then set the dev_info value to the one received from the kernel.
188 */
189 PVR_QUIRKS(PVR_QUIRK_EXPAND_SET);
190
191 #undef PVR_QUIRK_EXPAND_SET
192 #undef PVR_QUIRKS
193
194 /* Check all musthave quirks are supported */
195 for (int i = 0; i < query.musthave_count; i++) {
196 if (!pvr_u32_in_array(supported_quirks,
197 ARRAY_SIZE(supported_quirks),
198 ((uint32_t *)query.quirks)[i])) {
199 result = VK_ERROR_INCOMPATIBLE_DRIVER;
200 goto out_free_quirks;
201 }
202 }
203
204 result = VK_SUCCESS;
205
206 out_free_quirks:
207 vk_free(drm_ws->base.alloc, (__u64 *)query.quirks);
208
209 out:
210 return result;
211 }
212
/**
 * Fetch enhancement (ERN) info from the KMD and override \p dev_info.
 *
 * \param drm_ws Winsys providing the render fd and allocator.
 * \param dev_info Device info whose enhancement flags are overridden.
 * \return VK_SUCCESS or an error on query/allocation failure.
 */
static VkResult pvr_drm_override_enhancements(struct pvr_drm_winsys *drm_ws,
                                              struct pvr_device_info *dev_info)
{
   struct drm_pvr_dev_query_enhancements query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };

   VkResult result;

   /* Get the length and allocate enough for it. With query.enhancements
    * still 0, the KMD only fills in query.count.
    */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out;

   /* It's possible there are no enhancements, so we can skip the rest. */
   if (!query.count) {
      result = VK_SUCCESS;
      goto out;
   }

   query.enhancements = (__u64)vk_zalloc(drm_ws->base.alloc,
                                         sizeof(uint32_t) * query.count,
                                         8,
                                         VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!query.enhancements) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   /* Get the data: second pass writes query.count entries into the array. */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out_free_enhancements;

   /* clang-format off */
#define PVR_ENHANCEMENT_SET(number) \
   dev_info->enhancements.has_ern##number = \
      pvr_u32_in_array((uint32_t *)query.enhancements, query.count, number)
   /* clang-format on */

   PVR_ENHANCEMENT_SET(35421);

#undef PVR_ENHANCEMENT_SET

   result = VK_SUCCESS;

out_free_enhancements:
   vk_free(drm_ws->base.alloc, (__u64 *)query.enhancements);

out:
   return result;
}
274
275 static VkResult
pvr_drm_get_runtime_info(struct pvr_drm_winsys * drm_ws,struct drm_pvr_dev_query_runtime_info * const value)276 pvr_drm_get_runtime_info(struct pvr_drm_winsys *drm_ws,
277 struct drm_pvr_dev_query_runtime_info *const value)
278 {
279 struct drm_pvr_ioctl_dev_query_args args = {
280 .type = DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
281 .size = sizeof(*value),
282 .pointer = (__u64)value
283 };
284
285 return pvr_ioctl(drm_ws->base.render_fd,
286 DRM_IOCTL_PVR_DEV_QUERY,
287 &args,
288 VK_ERROR_INITIALIZATION_FAILED);
289 }
290
291 static VkResult
pvr_drm_get_gpu_info(struct pvr_drm_winsys * drm_ws,struct drm_pvr_dev_query_gpu_info * const value)292 pvr_drm_get_gpu_info(struct pvr_drm_winsys *drm_ws,
293 struct drm_pvr_dev_query_gpu_info *const value)
294 {
295 struct drm_pvr_ioctl_dev_query_args args = {
296 .type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
297 .size = sizeof(*value),
298 .pointer = (__u64)value
299 };
300
301 return pvr_ioctl(drm_ws->base.render_fd,
302 DRM_IOCTL_PVR_DEV_QUERY,
303 &args,
304 VK_ERROR_INITIALIZATION_FAILED);
305 }
306
/**
 * Initialize device info from the packed BVNC and KMD queries.
 *
 * Feature/quirk/enhancement info is first derived from the BVNC and then
 * overridden with what the kernel reports; runtime limits are filled in
 * from the GPU info and runtime info queries.
 *
 * \param ws Winsys (must wrap a pvr_drm_winsys).
 * \param dev_info Device info to initialize.
 * \param runtime_info Runtime info to initialize.
 * \return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER for an unsupported BVNC
 *         or musthave quirk, or another initialization error.
 */
static VkResult
pvr_drm_winsys_device_info_init(struct pvr_winsys *ws,
                                struct pvr_device_info *dev_info,
                                struct pvr_device_runtime_info *runtime_info)
{
   struct drm_pvr_dev_query_runtime_info kmd_runtime_info = { 0 };
   struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   VkResult result;
   int ret;

   ret = pvr_device_info_init(dev_info, drm_ws->bvnc);
   if (ret) {
      result = vk_errorf(NULL,
                         VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Unsupported BVNC: %u.%u.%u.%u\n",
                         PVR_BVNC_UNPACK_B(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_V(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_N(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_C(drm_ws->bvnc));
      goto err_out;
   }

   /* Let the KMD's view of quirks/enhancements override the BVNC-derived
    * defaults.
    */
   result = pvr_drm_override_quirks(drm_ws, dev_info);
   if (result != VK_SUCCESS) {
      mesa_logw("Failed to get quirks for this GPU\n");
      goto err_out;
   }

   result = pvr_drm_override_enhancements(drm_ws, dev_info);
   if (result != VK_SUCCESS) {
      mesa_logw("Failed to get enhancements for this GPU\n");
      goto err_out;
   }

   /* TODO: When kernel support is added, fetch the actual core count. */
   if (PVR_HAS_FEATURE(dev_info, gpu_multicore_support))
      mesa_logw("Core count fetching is unimplemented. Setting 1 for now.");
   /* core_count is set unconditionally; the check above only gates the
    * warning.
    */
   runtime_info->core_count = 1;

   result = pvr_drm_get_gpu_info(drm_ws, &gpu_info);
   if (result != VK_SUCCESS)
      goto err_out;

   runtime_info->num_phantoms = gpu_info.num_phantoms;

   result = pvr_drm_get_runtime_info(drm_ws, &kmd_runtime_info);
   if (result != VK_SUCCESS)
      goto err_out;

   /* Free list limits come back as page counts; shift converts them to
    * bytes (page size = 1 << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT).
    */
   runtime_info->min_free_list_size = kmd_runtime_info.free_list_min_pages
                                      << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT;
   runtime_info->max_free_list_size = kmd_runtime_info.free_list_max_pages
                                      << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT;
   runtime_info->reserved_shared_size =
      kmd_runtime_info.common_store_alloc_region_size;
   runtime_info->total_reserved_partition_size =
      kmd_runtime_info.common_store_partition_space_size;
   runtime_info->max_coeffs = kmd_runtime_info.max_coeffs;
   runtime_info->cdm_max_local_mem_size_regs =
      kmd_runtime_info.cdm_max_local_mem_size_regs;

   return VK_SUCCESS;

err_out:
   return result;
}
374
pvr_drm_winsys_get_heaps_info(struct pvr_winsys * ws,struct pvr_winsys_heaps * heaps)375 static void pvr_drm_winsys_get_heaps_info(struct pvr_winsys *ws,
376 struct pvr_winsys_heaps *heaps)
377 {
378 struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
379
380 heaps->general_heap = &drm_ws->general_heap.base;
381 heaps->pds_heap = &drm_ws->pds_heap.base;
382 heaps->transfer_frag_heap = &drm_ws->transfer_frag_heap.base;
383 heaps->usc_heap = &drm_ws->usc_heap.base;
384 heaps->vis_test_heap = &drm_ws->vis_test_heap.base;
385
386 if (drm_ws->rgn_hdr_heap_present)
387 heaps->rgn_hdr_heap = &drm_ws->rgn_hdr_heap.base;
388 else
389 heaps->rgn_hdr_heap = &drm_ws->general_heap.base;
390 }
391
/* Dispatch table wiring the generic pvr_winsys interface to the DRM
 * (drm-uapi) backend implementations in this and the pvr_drm_* job files.
 */
static const struct pvr_winsys_ops drm_winsys_ops = {
   .destroy = pvr_drm_winsys_destroy,
   .device_info_init = pvr_drm_winsys_device_info_init,
   .get_heaps_info = pvr_drm_winsys_get_heaps_info,
   .buffer_create = pvr_drm_winsys_buffer_create,
   .buffer_create_from_fd = pvr_drm_winsys_buffer_create_from_fd,
   .buffer_destroy = pvr_drm_winsys_buffer_destroy,
   .buffer_get_fd = pvr_drm_winsys_buffer_get_fd,
   .buffer_map = pvr_drm_winsys_buffer_map,
   .buffer_unmap = pvr_drm_winsys_buffer_unmap,
   .heap_alloc = pvr_drm_winsys_heap_alloc,
   .heap_free = pvr_drm_winsys_heap_free,
   .vma_map = pvr_drm_winsys_vma_map,
   .vma_unmap = pvr_drm_winsys_vma_unmap,
   .free_list_create = pvr_drm_winsys_free_list_create,
   .free_list_destroy = pvr_drm_winsys_free_list_destroy,
   .render_target_dataset_create = pvr_drm_render_target_dataset_create,
   .render_target_dataset_destroy = pvr_drm_render_target_dataset_destroy,
   .render_ctx_create = pvr_drm_winsys_render_ctx_create,
   .render_ctx_destroy = pvr_drm_winsys_render_ctx_destroy,
   .render_submit = pvr_drm_winsys_render_submit,
   .compute_ctx_create = pvr_drm_winsys_compute_ctx_create,
   .compute_ctx_destroy = pvr_drm_winsys_compute_ctx_destroy,
   .compute_submit = pvr_drm_winsys_compute_submit,
   .transfer_ctx_create = pvr_drm_winsys_transfer_ctx_create,
   .transfer_ctx_destroy = pvr_drm_winsys_transfer_ctx_destroy,
   .transfer_submit = pvr_drm_winsys_transfer_submit,
   .null_job_submit = pvr_drm_winsys_null_job_submit,
};
421
/* Per-heap accumulation of static data area offsets and their combined
 * size, filled in by pvr_drm_get_heap_static_data_descriptions().
 */
struct pvr_static_data_area_description {
   /* Offsets of the individual static data areas within the heap. */
   struct pvr_winsys_static_data_offsets offsets;
   /* Sum of the sizes of all recognized static data areas in the heap. */
   size_t total_size;
};
426
/**
 * Query the KMD for static data areas and build per-heap descriptions.
 *
 * For each static data area reported by the kernel, the area's offset is
 * recorded in its containing heap's offsets struct and the area's size is
 * added to that heap's total carveout size.
 *
 * \param drm_ws Winsys providing the render fd and allocator.
 * \param desc_out Array indexed by DRM heap id receiving the descriptions.
 * \return VK_SUCCESS or an error on query/allocation failure.
 */
static VkResult pvr_drm_get_heap_static_data_descriptions(
   struct pvr_drm_winsys *const drm_ws,
   struct pvr_static_data_area_description desc_out[DRM_PVR_HEAP_COUNT])
{
   struct drm_pvr_dev_query_static_data_areas query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };
   struct drm_pvr_static_data_area *array;
   VkResult result;

   /* Get the array length */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch static area array size");
   if (result != VK_SUCCESS)
      goto out;

   array = vk_alloc(drm_ws->base.alloc,
                    sizeof(*array) * query.static_data_areas.count,
                    8,
                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!array) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   /* The buffer is written by the KMD via the ioctl below, which Valgrind
    * can't see; mark it defined up front.
    */
   VG(VALGRIND_MAKE_MEM_DEFINED(array,
                                sizeof(*array) *
                                   query.static_data_areas.count));

   query.static_data_areas.array = (__u64)array;

   /* Get the array */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch static area offset array");
   if (result != VK_SUCCESS)
      goto out_free_array;

   for (size_t i = 0; i < query.static_data_areas.count; i++) {
      /* Unknown heaps might cause a write outside the array bounds. */
      if (array[i].location_heap_id >= DRM_PVR_HEAP_COUNT)
         continue;

      switch (array[i].area_usage) {
      case DRM_PVR_STATIC_DATA_AREA_EOT:
         desc_out[array[i].location_heap_id].offsets.eot = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_FENCE:
         desc_out[array[i].location_heap_id].offsets.fence = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_VDM_SYNC:
         desc_out[array[i].location_heap_id].offsets.vdm_sync = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_YUV_CSC:
         desc_out[array[i].location_heap_id].offsets.yuv_csc = array[i].offset;
         break;

      default:
         mesa_logd("Unknown drm static area id. ID: %d.", array[i].area_usage);
         /* Skip the size accumulation below for unrecognized areas. */
         continue;
      }

      desc_out[array[i].location_heap_id].total_size += array[i].size;
   }

   result = VK_SUCCESS;

out_free_array:
   vk_free(drm_ws->base.alloc, array);

out:
   return result;
}
511
pvr_drm_setup_heaps(struct pvr_drm_winsys * const drm_ws)512 static VkResult pvr_drm_setup_heaps(struct pvr_drm_winsys *const drm_ws)
513 {
514 struct pvr_winsys_heap *const winsys_heaps[DRM_PVR_HEAP_COUNT] = {
515 [DRM_PVR_HEAP_GENERAL] = &drm_ws->general_heap.base,
516 [DRM_PVR_HEAP_PDS_CODE_DATA] = &drm_ws->pds_heap.base,
517 [DRM_PVR_HEAP_USC_CODE] = &drm_ws->usc_heap.base,
518 [DRM_PVR_HEAP_RGNHDR] = &drm_ws->rgn_hdr_heap.base,
519 [DRM_PVR_HEAP_VIS_TEST] = &drm_ws->vis_test_heap.base,
520 [DRM_PVR_HEAP_TRANSFER_FRAG] = &drm_ws->transfer_frag_heap.base,
521 };
522 struct pvr_static_data_area_description
523 static_data_descriptions[DRM_PVR_HEAP_COUNT] = { 0 };
524 struct drm_pvr_dev_query_heap_info query = { 0 };
525 struct drm_pvr_ioctl_dev_query_args args = {
526 .type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
527 .size = sizeof(query),
528 .pointer = (__u64)&query
529 };
530 struct drm_pvr_heap *array;
531 VkResult result;
532 int i = 0;
533
534 /* Get the array length */
535 result = pvr_ioctlf(drm_ws->base.render_fd,
536 DRM_IOCTL_PVR_DEV_QUERY,
537 &args,
538 VK_ERROR_INITIALIZATION_FAILED,
539 "Failed to fetch heap info array size");
540 if (result != VK_SUCCESS)
541 goto out;
542
543 array = vk_alloc(drm_ws->base.alloc,
544 sizeof(*array) * query.heaps.count,
545 8,
546 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
547 if (!array) {
548 result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
549 goto out;
550 }
551
552 VG(VALGRIND_MAKE_MEM_DEFINED(array, sizeof(*array) * query.heaps.count));
553
554 query.heaps.array = (__u64)array;
555
556 /* Get the array */
557 result = pvr_ioctlf(drm_ws->base.render_fd,
558 DRM_IOCTL_PVR_DEV_QUERY,
559 &args,
560 VK_ERROR_INITIALIZATION_FAILED,
561 "Failed to fetch heap info array");
562 if (result != VK_SUCCESS)
563 goto out_free_array;
564
565 result = pvr_drm_get_heap_static_data_descriptions(drm_ws,
566 static_data_descriptions);
567 if (result != VK_SUCCESS)
568 goto out_free_array;
569
570 for (; i < query.heaps.count; i++) {
571 const bool present = array[i].size;
572 const pvr_dev_addr_t base_addr = PVR_DEV_ADDR(array[i].base);
573 const pvr_dev_addr_t vma_heap_begin_addr =
574 PVR_DEV_ADDR_OFFSET(base_addr, static_data_descriptions[i].total_size);
575 const uint64_t vma_heap_size =
576 array[i].size - static_data_descriptions[i].total_size;
577
578 /* Optional heaps */
579 switch (i) {
580 case DRM_PVR_HEAP_RGNHDR:
581 drm_ws->rgn_hdr_heap_present = present;
582 if (!present)
583 continue;
584 break;
585 default:
586 break;
587 }
588
589 /* Required heaps */
590 if (!present) {
591 result = vk_errorf(NULL,
592 VK_ERROR_INITIALIZATION_FAILED,
593 "Required heap not present: %d.",
594 i);
595 goto err_pvr_drm_heap_finish_all_heaps;
596 }
597
598 assert(base_addr.addr);
599 assert(static_data_descriptions[i].total_size <= array[i].size);
600
601 winsys_heaps[i]->ws = &drm_ws->base;
602 winsys_heaps[i]->base_addr = base_addr;
603 winsys_heaps[i]->static_data_carveout_addr = base_addr;
604 winsys_heaps[i]->size = array[i].size;
605 winsys_heaps[i]->static_data_carveout_size =
606 static_data_descriptions[i].total_size;
607 winsys_heaps[i]->page_size = 1 << array[i].page_size_log2;
608 winsys_heaps[i]->log2_page_size = array[i].page_size_log2;
609
610 /* For now we don't support the heap page size being different from the
611 * host page size.
612 */
613 assert(winsys_heaps[i]->page_size == drm_ws->base.page_size);
614 assert(winsys_heaps[i]->log2_page_size == drm_ws->base.log2_page_size);
615
616 winsys_heaps[i]->static_data_offsets =
617 static_data_descriptions[i].offsets;
618
619 util_vma_heap_init(&winsys_heaps[i]->vma_heap,
620 vma_heap_begin_addr.addr,
621 vma_heap_size);
622
623 winsys_heaps[i]->vma_heap.alloc_high = false;
624
625 /* It's expected that the heap destroy function to be the last thing that
626 * is called, so we start the ref_count at 0.
627 */
628 p_atomic_set(&winsys_heaps[i]->ref_count, 0);
629
630 if (pthread_mutex_init(&winsys_heaps[i]->lock, NULL)) {
631 result = vk_error(NULL, VK_ERROR_INITIALIZATION_FAILED);
632 goto err_pvr_drm_heap_finish_all_heaps;
633 }
634 }
635
636 result = VK_SUCCESS;
637 goto out_free_array;
638
639 err_pvr_drm_heap_finish_all_heaps:
640 /* Undo from where we left off */
641 while (--i >= 0) {
642 /* Optional heaps */
643 switch (i) {
644 case DRM_PVR_HEAP_RGNHDR:
645 if (drm_ws->rgn_hdr_heap_present)
646 break;
647 continue;
648 default:
649 break;
650 }
651
652 pvr_winsys_helper_winsys_heap_finish(winsys_heaps[i]);
653 }
654
655 out_free_array:
656 vk_free(drm_ws->base.alloc, array);
657
658 out:
659 return result;
660 }
661
pvr_drm_winsys_create(const int render_fd,const int display_fd,const VkAllocationCallbacks * alloc,struct pvr_winsys ** const ws_out)662 VkResult pvr_drm_winsys_create(const int render_fd,
663 const int display_fd,
664 const VkAllocationCallbacks *alloc,
665 struct pvr_winsys **const ws_out)
666 {
667 struct drm_pvr_ioctl_create_vm_context_args create_vm_context_args = { 0 };
668 struct drm_pvr_ioctl_destroy_vm_context_args destroy_vm_context_args = { 0 };
669 struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };
670
671 struct pvr_drm_winsys *drm_ws;
672 VkResult result;
673 int err;
674
675 drm_ws =
676 vk_zalloc(alloc, sizeof(*drm_ws), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
677 if (!drm_ws) {
678 result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
679 goto err_out;
680 }
681
682 drm_ws->base.ops = &drm_winsys_ops;
683 drm_ws->base.render_fd = render_fd;
684 drm_ws->base.display_fd = display_fd;
685 drm_ws->base.alloc = alloc;
686
687 os_get_page_size(&drm_ws->base.page_size);
688 drm_ws->base.log2_page_size = util_logbase2(drm_ws->base.page_size);
689
690 drm_ws->base.syncobj_type = vk_drm_syncobj_get_type(render_fd);
691 drm_ws->base.sync_types[0] = &drm_ws->base.syncobj_type;
692 drm_ws->base.sync_types[1] = NULL;
693
694 err = u_rwlock_init(&drm_ws->dmabuf_bo_lock);
695 if (err) {
696 result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
697 goto err_vk_free_drm_ws;
698 }
699
700 util_sparse_array_init(&drm_ws->bo_map,
701 sizeof(struct pvr_drm_winsys_bo),
702 512);
703
704 result = pvr_drm_get_gpu_info(drm_ws, &gpu_info);
705 if (result != VK_SUCCESS)
706 goto err_util_sparse_array_finish_bo_map;
707
708 drm_ws->bvnc = gpu_info.gpu_id;
709
710 result = pvr_ioctl(render_fd,
711 DRM_IOCTL_PVR_CREATE_VM_CONTEXT,
712 &create_vm_context_args,
713 VK_ERROR_INITIALIZATION_FAILED);
714 if (result != VK_SUCCESS)
715 goto err_pvr_destroy_vm_context;
716
717 drm_ws->vm_context = create_vm_context_args.handle;
718
719 result = pvr_drm_setup_heaps(drm_ws);
720 if (result != VK_SUCCESS)
721 goto err_pvr_destroy_vm_context;
722
723 result =
724 pvr_winsys_helper_allocate_static_memory(&drm_ws->base,
725 pvr_drm_heap_alloc_carveout,
726 &drm_ws->general_heap.base,
727 &drm_ws->pds_heap.base,
728 &drm_ws->usc_heap.base,
729 &drm_ws->general_vma,
730 &drm_ws->pds_vma,
731 &drm_ws->usc_vma);
732 if (result != VK_SUCCESS)
733 goto err_pvr_heap_finish;
734
735 result = pvr_winsys_helper_fill_static_memory(&drm_ws->base,
736 drm_ws->general_vma,
737 drm_ws->pds_vma,
738 drm_ws->usc_vma);
739 if (result != VK_SUCCESS)
740 goto err_pvr_free_static_memory;
741
742 *ws_out = &drm_ws->base;
743
744 return VK_SUCCESS;
745
746 err_pvr_free_static_memory:
747 pvr_winsys_helper_free_static_memory(drm_ws->general_vma,
748 drm_ws->pds_vma,
749 drm_ws->usc_vma);
750
751 err_pvr_heap_finish:
752 pvr_drm_finish_heaps(drm_ws);
753
754 err_pvr_destroy_vm_context:
755 destroy_vm_context_args.handle = drm_ws->vm_context;
756 pvr_ioctl(render_fd,
757 DRM_IOCTL_PVR_DESTROY_VM_CONTEXT,
758 &destroy_vm_context_args,
759 VK_ERROR_UNKNOWN);
760
761 err_util_sparse_array_finish_bo_map:
762 util_sparse_array_finish(&drm_ws->bo_map);
763 u_rwlock_destroy(&drm_ws->dmabuf_bo_lock);
764
765 err_vk_free_drm_ws:
766 vk_free(alloc, drm_ws);
767
768 err_out:
769 return result;
770 }
771