1 /*
2 * Copyright © 2022 Imagination Technologies Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <errno.h>
26 #include <stdbool.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <xf86drm.h>
31
32 #include "fw-api/pvr_rogue_fwif_shared.h"
33 #include "pvr_private.h"
34 #include "pvr_srv.h"
35 #include "pvr_srv_bridge.h"
36 #include "pvr_types.h"
37 #include "util/log.h"
38 #include "util/macros.h"
39 #include "vk_log.h"
40
41 #define vk_bridge_err(vk_err, bridge_func, bridge_ret) \
42 vk_errorf(NULL, \
43 vk_err, \
44 "%s failed, PVR_SRV_ERROR: %d, Errno: %s", \
45 bridge_func, \
46 (bridge_ret).error, \
47 strerror(errno))
48
pvr_srv_bridge_call(int fd,uint8_t bridge_id,uint32_t function_id,void * input,uint32_t input_buffer_size,void * output,uint32_t output_buffer_size)49 static int pvr_srv_bridge_call(int fd,
50 uint8_t bridge_id,
51 uint32_t function_id,
52 void *input,
53 uint32_t input_buffer_size,
54 void *output,
55 uint32_t output_buffer_size)
56 {
57 struct drm_srvkm_cmd cmd = {
58 .bridge_id = bridge_id,
59 .bridge_func_id = function_id,
60 .in_data_ptr = (uint64_t)(uintptr_t)input,
61 .out_data_ptr = (uint64_t)(uintptr_t)output,
62 .in_data_size = input_buffer_size,
63 .out_data_size = output_buffer_size,
64 };
65
66 int ret = drmIoctl(fd, DRM_IOCTL_SRVKM_CMD, &cmd);
67 if (unlikely(ret))
68 return ret;
69
70 VG(VALGRIND_MAKE_MEM_DEFINED(output, output_buffer_size));
71
72 return 0U;
73 }
74
pvr_srv_init_module(int fd,enum pvr_srvkm_module_type module)75 VkResult pvr_srv_init_module(int fd, enum pvr_srvkm_module_type module)
76 {
77 struct drm_srvkm_init_data init_data = { .init_module = module };
78
79 int ret = drmIoctl(fd, DRM_IOCTL_SRVKM_INIT, &init_data);
80 if (unlikely(ret)) {
81 return vk_errorf(NULL,
82 VK_ERROR_INITIALIZATION_FAILED,
83 "DRM_IOCTL_SRVKM_INIT failed, Errno: %s",
84 strerror(errno));
85 }
86
87 return VK_SUCCESS;
88 }
89
pvr_srv_set_timeline_sw_only(int sw_timeline_fd)90 VkResult pvr_srv_set_timeline_sw_only(int sw_timeline_fd)
91 {
92 int ret;
93
94 assert(sw_timeline_fd >= 0);
95
96 ret = drmIoctl(sw_timeline_fd, DRM_IOCTL_SRVKM_SYNC_FORCE_SW_ONLY_CMD, NULL);
97
98 if (unlikely(ret < 0)) {
99 return vk_errorf(
100 NULL,
101 VK_ERROR_OUT_OF_HOST_MEMORY,
102 "DRM_IOCTL_SRVKM_SYNC_FORCE_SW_ONLY_CMD failed, Errno: %s",
103 strerror(errno));
104 }
105
106 return VK_SUCCESS;
107 }
108
pvr_srv_create_sw_fence(int sw_timeline_fd,int * new_fence_fd,uint64_t * sync_pt_idx)109 VkResult pvr_srv_create_sw_fence(int sw_timeline_fd,
110 int *new_fence_fd,
111 uint64_t *sync_pt_idx)
112 {
113 struct drm_srvkm_sw_sync_create_fence_data data = { .name[0] = '\0' };
114 int ret;
115
116 assert(sw_timeline_fd >= 0);
117 assert(new_fence_fd != NULL);
118
119 ret =
120 drmIoctl(sw_timeline_fd, DRM_IOCTL_SRVKM_SW_SYNC_CREATE_FENCE_CMD, &data);
121
122 if (unlikely(ret < 0)) {
123 return vk_errorf(
124 NULL,
125 VK_ERROR_OUT_OF_HOST_MEMORY,
126 "DRM_IOCTL_SRVKM_SW_SYNC_CREATE_FENCE_CMD failed, Errno: %s",
127 strerror(errno));
128 }
129
130 *new_fence_fd = data.fence;
131 if (sync_pt_idx)
132 *sync_pt_idx = data.sync_pt_idx;
133
134 return VK_SUCCESS;
135 }
136
pvr_srv_sw_sync_timeline_increment(int sw_timeline_fd,uint64_t * sync_pt_idx)137 VkResult pvr_srv_sw_sync_timeline_increment(int sw_timeline_fd,
138 uint64_t *sync_pt_idx)
139 {
140 struct drm_srvkm_sw_timeline_advance_data data = { 0 };
141 int ret;
142
143 assert(sw_timeline_fd >= 0);
144
145 ret = drmIoctl(sw_timeline_fd, DRM_IOCTL_SRVKM_SW_SYNC_INC_CMD, &data);
146
147 if (unlikely(ret < 0)) {
148 return vk_errorf(NULL,
149 VK_ERROR_OUT_OF_HOST_MEMORY,
150 "DRM_IOCTL_SRVKM_SW_SYNC_INC_CMD failed, Errno: %s",
151 strerror(errno));
152 }
153
154 if (sync_pt_idx)
155 *sync_pt_idx = data.sync_pt_idx;
156
157 return VK_SUCCESS;
158 }
159
pvr_srv_connection_create(int fd,uint64_t * const bvnc_out)160 VkResult pvr_srv_connection_create(int fd, uint64_t *const bvnc_out)
161 {
162 struct pvr_srv_bridge_connect_cmd cmd = {
163 .flags = PVR_SRV_FLAGS_CLIENT_64BIT_COMPAT,
164 .build_options = RGX_BUILD_OPTIONS,
165 .DDK_version = PVR_SRV_VERSION,
166 .DDK_build = PVR_SRV_VERSION_BUILD,
167 };
168
169 /* Initialize ret.error to a default error */
170 struct pvr_srv_bridge_connect_ret ret = {
171 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
172 };
173
174 int result;
175
176 result = pvr_srv_bridge_call(fd,
177 PVR_SRV_BRIDGE_SRVCORE,
178 PVR_SRV_BRIDGE_SRVCORE_CONNECT,
179 &cmd,
180 sizeof(cmd),
181 &ret,
182 sizeof(ret));
183 if (result || ret.error != PVR_SRV_OK) {
184 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
185 "PVR_SRV_BRIDGE_SRVCORE_CONNECT",
186 ret);
187 }
188
189 *bvnc_out = ret.bvnc;
190
191 return VK_SUCCESS;
192 }
193
pvr_srv_connection_destroy(int fd)194 void pvr_srv_connection_destroy(int fd)
195 {
196 /* Initialize ret.error to a default error */
197 struct pvr_srv_bridge_disconnect_ret ret = {
198 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
199 };
200
201 int result;
202
203 result = pvr_srv_bridge_call(fd,
204 PVR_SRV_BRIDGE_SRVCORE,
205 PVR_SRV_BRIDGE_SRVCORE_DISCONNECT,
206 NULL,
207 0,
208 &ret,
209 sizeof(ret));
210 if (result || ret.error != PVR_SRV_OK) {
211 vk_bridge_err(VK_ERROR_UNKNOWN, "PVR_SRV_BRIDGE_SRVCORE_DISCONNECT", ret);
212 }
213 }
214
pvr_srv_get_multicore_info(int fd,uint32_t caps_size,uint64_t * caps,uint32_t * num_cores)215 VkResult pvr_srv_get_multicore_info(int fd,
216 uint32_t caps_size,
217 uint64_t *caps,
218 uint32_t *num_cores)
219 {
220 struct pvr_srv_bridge_getmulticoreinfo_cmd cmd = {
221 .caps = caps,
222 .caps_size = caps_size,
223 };
224
225 struct pvr_srv_bridge_getmulticoreinfo_ret ret = {
226 .caps = caps,
227 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
228 };
229
230 int result;
231
232 result = pvr_srv_bridge_call(fd,
233 PVR_SRV_BRIDGE_SRVCORE,
234 PVR_SRV_BRIDGE_SRVCORE_GETMULTICOREINFO,
235 &cmd,
236 sizeof(cmd),
237 &ret,
238 sizeof(ret));
239 if (result || ret.error != PVR_SRV_OK) {
240 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
241 "PVR_SRV_BRIDGE_SRVCORE_GETMULTICOREINFO",
242 ret);
243 }
244
245 if (!num_cores)
246 *num_cores = ret.num_cores;
247
248 return VK_SUCCESS;
249 }
250
pvr_srv_alloc_sync_primitive_block(int fd,void ** const handle_out,void ** const pmr_out,uint32_t * const size_out,uint32_t * const addr_out)251 VkResult pvr_srv_alloc_sync_primitive_block(int fd,
252 void **const handle_out,
253 void **const pmr_out,
254 uint32_t *const size_out,
255 uint32_t *const addr_out)
256 {
257 /* Initialize ret.error to a default error */
258 struct pvr_srv_bridge_alloc_sync_primitive_block_ret ret = {
259 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
260 };
261
262 int result;
263
264 result = pvr_srv_bridge_call(fd,
265 PVR_SRV_BRIDGE_SYNC,
266 PVR_SRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK,
267 NULL,
268 0,
269 &ret,
270 sizeof(ret));
271 if (result || ret.error != PVR_SRV_OK) {
272 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
273 "PVR_SRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK",
274 ret);
275 }
276
277 *handle_out = ret.handle;
278 *pmr_out = ret.pmr;
279 *size_out = ret.size;
280 *addr_out = ret.addr;
281
282 return VK_SUCCESS;
283 }
284
pvr_srv_free_sync_primitive_block(int fd,void * handle)285 void pvr_srv_free_sync_primitive_block(int fd, void *handle)
286 {
287 struct pvr_srv_bridge_free_sync_primitive_block_cmd cmd = {
288 .handle = handle,
289 };
290
291 /* Initialize ret.error to a default error */
292 struct pvr_srv_bridge_free_sync_primitive_block_ret ret = {
293 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
294 };
295
296 int result;
297
298 result = pvr_srv_bridge_call(fd,
299 PVR_SRV_BRIDGE_SYNC,
300 PVR_SRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK,
301 &cmd,
302 sizeof(cmd),
303 &ret,
304 sizeof(ret));
305 if (result || ret.error != PVR_SRV_OK) {
306 vk_bridge_err(VK_ERROR_UNKNOWN,
307 "PVR_SRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK",
308 ret);
309 }
310 }
311
312 VkResult
pvr_srv_set_sync_primitive(int fd,void * handle,uint32_t index,uint32_t value)313 pvr_srv_set_sync_primitive(int fd, void *handle, uint32_t index, uint32_t value)
314 {
315 struct pvr_srv_bridge_sync_prim_set_cmd cmd = {
316 .handle = handle,
317 .index = index,
318 .value = value,
319 };
320
321 struct pvr_srv_bridge_sync_prim_set_ret ret = {
322 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
323 };
324
325 int result;
326
327 result = pvr_srv_bridge_call(fd,
328 PVR_SRV_BRIDGE_SYNC,
329 PVR_SRV_BRIDGE_SYNC_SYNCPRIMSET,
330 &cmd,
331 sizeof(cmd),
332 &ret,
333 sizeof(ret));
334 if (result || ret.error != PVR_SRV_OK) {
335 return vk_bridge_err(VK_ERROR_UNKNOWN,
336 "PVR_SRV_BRIDGE_SYNC_SYNCPRIMSET",
337 ret);
338 }
339
340 return VK_SUCCESS;
341 }
342
pvr_srv_get_heap_count(int fd,uint32_t * const heap_count_out)343 VkResult pvr_srv_get_heap_count(int fd, uint32_t *const heap_count_out)
344 {
345 struct pvr_srv_heap_count_cmd cmd = {
346 .heap_config_index = 0,
347 };
348
349 struct pvr_srv_heap_count_ret ret = {
350 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
351 };
352
353 int result;
354
355 result = pvr_srv_bridge_call(fd,
356 PVR_SRV_BRIDGE_MM,
357 PVR_SRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
358 &cmd,
359 sizeof(cmd),
360 &ret,
361 sizeof(ret));
362 if (result || ret.error != PVR_SRV_OK) {
363 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
364 "PVR_SRV_BRIDGE_MM_HEAPCFGHEAPCOUNT",
365 ret);
366 }
367
368 *heap_count_out = ret.heap_count;
369
370 return VK_SUCCESS;
371 }
372
pvr_srv_int_heap_create(int fd,pvr_dev_addr_t base_address,uint64_t size,uint32_t log2_page_size,void * server_memctx,void ** const server_heap_out)373 VkResult pvr_srv_int_heap_create(int fd,
374 pvr_dev_addr_t base_address,
375 uint64_t size,
376 uint32_t log2_page_size,
377 void *server_memctx,
378 void **const server_heap_out)
379 {
380 struct pvr_srv_devmem_int_heap_create_cmd cmd = {
381 .server_memctx = server_memctx,
382 .base_addr = base_address,
383 .size = size,
384 .log2_page_size = log2_page_size,
385 };
386
387 struct pvr_srv_devmem_int_heap_create_ret ret = {
388 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
389 };
390
391 int result;
392
393 result = pvr_srv_bridge_call(fd,
394 PVR_SRV_BRIDGE_MM,
395 PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPCREATE,
396 &cmd,
397 sizeof(cmd),
398 &ret,
399 sizeof(ret));
400 if (result || ret.error != PVR_SRV_OK) {
401 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
402 "PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPCREATE",
403 ret);
404 }
405
406 *server_heap_out = ret.server_heap;
407
408 return VK_SUCCESS;
409 }
410
pvr_srv_int_heap_destroy(int fd,void * server_heap)411 void pvr_srv_int_heap_destroy(int fd, void *server_heap)
412 {
413 struct pvr_srv_devmem_int_heap_destroy_cmd cmd = {
414 .server_heap = server_heap,
415 };
416
417 struct pvr_srv_devmem_int_heap_destroy_ret ret = {
418 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
419 };
420
421 int result;
422
423 result = pvr_srv_bridge_call(fd,
424 PVR_SRV_BRIDGE_MM,
425 PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY,
426 &cmd,
427 sizeof(cmd),
428 &ret,
429 sizeof(ret));
430 if (result || ret.error != PVR_SRV_OK) {
431 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
432 "PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY",
433 ret);
434 }
435 }
436
/* This bridge function allows the heap name and the heap details to be
 * queried independently, i.e. the buffer/base_address/size/reserved_size/
 * log2_page_size pointers are all allowed to be NULL.
 */
pvr_srv_get_heap_details(int fd,uint32_t heap_index,uint32_t buffer_size,char * const buffer_out,pvr_dev_addr_t * const base_address_out,uint64_t * const size_out,uint64_t * const reserved_size_out,uint32_t * const log2_page_size_out)441 VkResult pvr_srv_get_heap_details(int fd,
442 uint32_t heap_index,
443 uint32_t buffer_size,
444 char *const buffer_out,
445 pvr_dev_addr_t *const base_address_out,
446 uint64_t *const size_out,
447 uint64_t *const reserved_size_out,
448 uint32_t *const log2_page_size_out)
449 {
450 struct pvr_srv_heap_cfg_details_cmd cmd = {
451 .heap_config_index = 0,
452 .heap_index = heap_index,
453 .buffer_size = buffer_size,
454 .buffer = buffer_out,
455 };
456
457 struct pvr_srv_heap_cfg_details_ret ret = {
458 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
459 .buffer = buffer_out,
460 };
461
462 int result;
463
464 result = pvr_srv_bridge_call(fd,
465 PVR_SRV_BRIDGE_MM,
466 PVR_SRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
467 &cmd,
468 sizeof(cmd),
469 &ret,
470 sizeof(ret));
471 if (result || ret.error != PVR_SRV_OK) {
472 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
473 "PVR_SRV_BRIDGE_MM_HEAPCFGHEAPDETAILS",
474 ret);
475 }
476
477 VG(VALGRIND_MAKE_MEM_DEFINED(buffer_out, buffer_size));
478
479 if (base_address_out)
480 *base_address_out = ret.base_addr;
481
482 if (size_out)
483 *size_out = ret.size;
484
485 if (reserved_size_out)
486 *reserved_size_out = ret.reserved_size;
487
488 if (log2_page_size_out)
489 *log2_page_size_out = ret.log2_page_size;
490
491 return VK_SUCCESS;
492 }
493
pvr_srv_int_ctx_destroy(int fd,void * server_memctx)494 void pvr_srv_int_ctx_destroy(int fd, void *server_memctx)
495 {
496 struct pvr_srv_devmem_int_ctx_destroy_cmd cmd = {
497 .server_memctx = server_memctx,
498 };
499
500 struct pvr_srv_devmem_int_ctx_destroy_ret ret = {
501 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
502 };
503
504 int result;
505
506 result = pvr_srv_bridge_call(fd,
507 PVR_SRV_BRIDGE_MM,
508 PVR_SRV_BRIDGE_MM_DEVMEMINTCTXDESTROY,
509 &cmd,
510 sizeof(cmd),
511 &ret,
512 sizeof(ret));
513 if (result || ret.error != PVR_SRV_OK) {
514 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
515 "PVR_SRV_BRIDGE_MM_DEVMEMINTCTXDESTROY",
516 ret);
517 }
518 }
519
pvr_srv_int_ctx_create(int fd,void ** const server_memctx_out,void ** const server_memctx_data_out)520 VkResult pvr_srv_int_ctx_create(int fd,
521 void **const server_memctx_out,
522 void **const server_memctx_data_out)
523 {
524 struct pvr_srv_devmem_int_ctx_create_cmd cmd = {
525 .kernel_memory_ctx = false,
526 };
527
528 struct pvr_srv_devmem_int_ctx_create_ret ret = {
529 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
530 };
531
532 int result;
533
534 result = pvr_srv_bridge_call(fd,
535 PVR_SRV_BRIDGE_MM,
536 PVR_SRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
537 &cmd,
538 sizeof(cmd),
539 &ret,
540 sizeof(ret));
541 if (result || ret.error != PVR_SRV_OK) {
542 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
543 "PVR_SRV_BRIDGE_MM_DEVMEMINTCTXCREATE",
544 ret);
545 }
546
547 *server_memctx_out = ret.server_memctx;
548 *server_memctx_data_out = ret.server_memctx_data;
549
550 return VK_SUCCESS;
551 }
552
pvr_srv_int_reserve_addr(int fd,void * server_heap,pvr_dev_addr_t addr,uint64_t size,void ** const reservation_out)553 VkResult pvr_srv_int_reserve_addr(int fd,
554 void *server_heap,
555 pvr_dev_addr_t addr,
556 uint64_t size,
557 void **const reservation_out)
558 {
559 struct pvr_srv_devmem_int_reserve_range_cmd cmd = {
560 .server_heap = server_heap,
561 .addr = addr,
562 .size = size,
563 };
564
565 struct pvr_srv_devmem_int_reserve_range_ret ret = {
566 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
567 };
568
569 int result;
570
571 result = pvr_srv_bridge_call(fd,
572 PVR_SRV_BRIDGE_MM,
573 PVR_SRV_BRIDGE_MM_DEVMEMINTRESERVERANGE,
574 &cmd,
575 sizeof(cmd),
576 &ret,
577 sizeof(ret));
578 if (result || ret.error != PVR_SRV_OK) {
579 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
580 "PVR_SRV_BRIDGE_MM_DEVMEMINTRESERVERANGE",
581 ret);
582 }
583
584 *reservation_out = ret.reservation;
585
586 return VK_SUCCESS;
587 }
588
pvr_srv_int_unreserve_addr(int fd,void * reservation)589 void pvr_srv_int_unreserve_addr(int fd, void *reservation)
590 {
591 struct pvr_srv_bridge_in_devmem_int_unreserve_range_cmd cmd = {
592 .reservation = reservation,
593 };
594
595 struct pvr_srv_bridge_in_devmem_int_unreserve_range_ret ret = {
596 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
597 };
598
599 int result;
600
601 result = pvr_srv_bridge_call(fd,
602 PVR_SRV_BRIDGE_MM,
603 PVR_SRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE,
604 &cmd,
605 sizeof(cmd),
606 &ret,
607 sizeof(ret));
608 if (result || ret.error != PVR_SRV_OK) {
609 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
610 "PVR_SRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE",
611 ret);
612 }
613 }
614
pvr_srv_alloc_pmr(int fd,uint64_t size,uint64_t block_size,uint32_t phy_blocks,uint32_t virt_blocks,uint32_t log2_page_size,uint64_t flags,uint32_t pid,void ** const pmr_out)615 VkResult pvr_srv_alloc_pmr(int fd,
616 uint64_t size,
617 uint64_t block_size,
618 uint32_t phy_blocks,
619 uint32_t virt_blocks,
620 uint32_t log2_page_size,
621 uint64_t flags,
622 uint32_t pid,
623 void **const pmr_out)
624 {
625 const char *annotation = "VK PHYSICAL ALLOCATION";
626 const uint32_t annotation_size =
627 strnlen(annotation, DEVMEM_ANNOTATION_MAX_LEN - 1) + 1;
628 uint32_t mapping_table = 0;
629
630 struct pvr_srv_physmem_new_ram_backed_locked_pmr_cmd cmd = {
631 .size = size,
632 .block_size = block_size,
633 .phy_blocks = phy_blocks,
634 .virt_blocks = virt_blocks,
635 .mapping_table = &mapping_table,
636 .log2_page_size = log2_page_size,
637 .flags = flags,
638 .annotation_size = annotation_size,
639 .annotation = annotation,
640 .pid = pid,
641 .pdump_flags = 0x00000000U,
642 };
643
644 struct pvr_srv_physmem_new_ram_backed_locked_pmr_ret ret = {
645 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
646 };
647
648 int result;
649
650 result = pvr_srv_bridge_call(fd,
651 PVR_SRV_BRIDGE_MM,
652 PVR_SRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR,
653 &cmd,
654 sizeof(cmd),
655 &ret,
656 sizeof(ret));
657 if (result || ret.error != PVR_SRV_OK) {
658 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
659 "PVR_SRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR",
660 ret);
661 }
662
663 *pmr_out = ret.pmr;
664
665 return VK_SUCCESS;
666 }
667
pvr_srv_free_pmr(int fd,void * pmr)668 void pvr_srv_free_pmr(int fd, void *pmr)
669 {
670 struct pvr_srv_pmr_unref_unlock_pmr_cmd cmd = {
671 .pmr = pmr,
672 };
673
674 struct pvr_srv_pmr_unref_unlock_pmr_ret ret = {
675 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
676 };
677
678 int result;
679
680 result = pvr_srv_bridge_call(fd,
681 PVR_SRV_BRIDGE_MM,
682 PVR_SRV_BRIDGE_MM_PMRUNREFUNLOCKPMR,
683 &cmd,
684 sizeof(cmd),
685 &ret,
686 sizeof(ret));
687 if (result || ret.error != PVR_SRV_OK) {
688 vk_bridge_err(VK_ERROR_UNKNOWN,
689 "PVR_SRV_BRIDGE_MM_PMRUNREFUNLOCKPMR",
690 ret);
691 }
692 }
693
pvr_srv_int_map_pages(int fd,void * reservation,void * pmr,uint32_t page_count,uint32_t page_offset,uint64_t flags,pvr_dev_addr_t addr)694 VkResult pvr_srv_int_map_pages(int fd,
695 void *reservation,
696 void *pmr,
697 uint32_t page_count,
698 uint32_t page_offset,
699 uint64_t flags,
700 pvr_dev_addr_t addr)
701 {
702 struct pvr_srv_devmem_int_map_pages_cmd cmd = {
703 .reservation = reservation,
704 .pmr = pmr,
705 .page_count = page_count,
706 .page_offset = page_offset,
707 .flags = flags,
708 .addr = addr,
709 };
710
711 struct pvr_srv_devmem_int_map_pages_ret ret = {
712 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
713 };
714
715 int result;
716
717 result = pvr_srv_bridge_call(fd,
718 PVR_SRV_BRIDGE_MM,
719 PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPAGES,
720 &cmd,
721 sizeof(cmd),
722 &ret,
723 sizeof(ret));
724 if (result || ret.error != PVR_SRV_OK) {
725 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
726 "PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPAGES",
727 ret);
728 }
729
730 return VK_SUCCESS;
731 }
732
/* Unmap a range of pages from a device-virtual reservation. Failures are
 * logged only.
 */
void pvr_srv_int_unmap_pages(int fd,
                             void *reservation,
                             pvr_dev_addr_t dev_addr,
                             uint32_t page_count)
{
   struct pvr_srv_devmem_int_unmap_pages_cmd cmd = {
      .reservation = reservation,
      .dev_addr = dev_addr,
      .page_count = page_count,
   };

   /* Pre-set the bridge status so an ioctl-level failure still reads as an
    * error.
    */
   struct pvr_srv_devmem_int_unmap_pages_ret ret = {
      .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
   };

   const int err = pvr_srv_bridge_call(fd,
                                       PVR_SRV_BRIDGE_MM,
                                       PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES,
                                       &cmd, sizeof(cmd),
                                       &ret, sizeof(ret));
   if (err != 0 || ret.error != PVR_SRV_OK) {
      vk_bridge_err(VK_ERROR_UNKNOWN,
                    "PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES",
                    ret);
   }
}
763
pvr_srv_int_map_pmr(int fd,void * server_heap,void * reservation,void * pmr,uint64_t flags,void ** const mapping_out)764 VkResult pvr_srv_int_map_pmr(int fd,
765 void *server_heap,
766 void *reservation,
767 void *pmr,
768 uint64_t flags,
769 void **const mapping_out)
770 {
771 struct pvr_srv_devmem_int_map_pmr_cmd cmd = {
772 .server_heap = server_heap,
773 .reservation = reservation,
774 .pmr = pmr,
775 .flags = flags,
776 };
777
778 struct pvr_srv_devmem_int_map_pmr_ret ret = {
779 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
780 };
781
782 int result;
783
784 result = pvr_srv_bridge_call(fd,
785 PVR_SRV_BRIDGE_MM,
786 PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPMR,
787 &cmd,
788 sizeof(cmd),
789 &ret,
790 sizeof(ret));
791 if (result || ret.error != PVR_SRV_OK) {
792 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
793 "PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPMR",
794 ret);
795 }
796
797 *mapping_out = ret.mapping;
798
799 return VK_SUCCESS;
800 }
801
pvr_srv_int_unmap_pmr(int fd,void * mapping)802 void pvr_srv_int_unmap_pmr(int fd, void *mapping)
803 {
804 struct pvr_srv_devmem_int_unmap_pmr_cmd cmd = {
805 .mapping = mapping,
806 };
807
808 struct pvr_srv_devmem_int_unmap_pmr_ret ret = {
809 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
810 };
811
812 int result;
813
814 result = pvr_srv_bridge_call(fd,
815 PVR_SRV_BRIDGE_MM,
816 PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPMR,
817 &cmd,
818 sizeof(cmd),
819 &ret,
820 sizeof(ret));
821 if (result || ret.error != PVR_SRV_OK) {
822 vk_bridge_err(VK_ERROR_UNKNOWN,
823 "PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPMR",
824 ret);
825 }
826 }
827
pvr_srv_physmem_import_dmabuf(int fd,int buffer_fd,uint64_t flags,void ** const pmr_out,uint64_t * const size_out,uint64_t * const align_out)828 VkResult pvr_srv_physmem_import_dmabuf(int fd,
829 int buffer_fd,
830 uint64_t flags,
831 void **const pmr_out,
832 uint64_t *const size_out,
833 uint64_t *const align_out)
834 {
835 struct pvr_srv_phys_mem_import_dmabuf_cmd cmd = {
836 .buffer_fd = buffer_fd,
837 .flags = flags,
838 .name_size = 0,
839 .name = NULL,
840 };
841
842 struct pvr_srv_phys_mem_import_dmabuf_ret ret = {
843 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
844 };
845
846 int result;
847
848 result = pvr_srv_bridge_call(fd,
849 PVR_SRV_BRIDGE_DMABUF,
850 PVR_SRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF,
851 &cmd,
852 sizeof(cmd),
853 &ret,
854 sizeof(ret));
855 if (result || ret.error != PVR_SRV_OK) {
856 return vk_bridge_err(VK_ERROR_INVALID_EXTERNAL_HANDLE,
857 "PVR_SRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF",
858 ret);
859 }
860
861 *pmr_out = ret.pmr;
862 *size_out = ret.size;
863 *align_out = ret.align;
864
865 return VK_SUCCESS;
866 }
867
pvr_srv_physmem_export_dmabuf(int fd,void * pmr,int * const fd_out)868 VkResult pvr_srv_physmem_export_dmabuf(int fd, void *pmr, int *const fd_out)
869 {
870 struct pvr_srv_phys_mem_export_dmabuf_cmd cmd = {
871 .pmr = pmr,
872 };
873
874 struct pvr_srv_phys_mem_export_dmabuf_ret ret = {
875 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
876 };
877
878 int result;
879
880 result = pvr_srv_bridge_call(fd,
881 PVR_SRV_BRIDGE_DMABUF,
882 PVR_SRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF,
883 &cmd,
884 sizeof(cmd),
885 &ret,
886 sizeof(ret));
887 if (result || ret.error != PVR_SRV_OK) {
888 return vk_bridge_err(VK_ERROR_OUT_OF_HOST_MEMORY,
889 "PVR_SRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF",
890 ret);
891 }
892
893 *fd_out = ret.fd;
894
895 return VK_SUCCESS;
896 }
897
pvr_srv_rgx_create_transfer_context(int fd,uint32_t priority,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t packed_ccb_size_u8888,uint32_t context_flags,uint64_t robustness_address,void ** const cli_pmr_out,void ** const usc_pmr_out,void ** const transfer_context_out)898 VkResult pvr_srv_rgx_create_transfer_context(int fd,
899 uint32_t priority,
900 uint32_t reset_framework_cmd_size,
901 uint8_t *reset_framework_cmd,
902 void *priv_data,
903 uint32_t packed_ccb_size_u8888,
904 uint32_t context_flags,
905 uint64_t robustness_address,
906 void **const cli_pmr_out,
907 void **const usc_pmr_out,
908 void **const transfer_context_out)
909 {
910 struct pvr_srv_rgx_create_transfer_context_cmd cmd = {
911 .robustness_address = robustness_address,
912 .priority = priority,
913 .reset_framework_cmd_size = reset_framework_cmd_size,
914 .reset_framework_cmd = reset_framework_cmd,
915 .priv_data = priv_data,
916 .packed_ccb_size_u8888 = packed_ccb_size_u8888,
917 .context_flags = context_flags,
918 };
919
920 struct pvr_srv_rgx_create_transfer_context_ret ret = {
921 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
922 };
923
924 int result;
925
926 result = pvr_srv_bridge_call(fd,
927 PVR_SRV_BRIDGE_RGXTQ,
928 PVR_SRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT,
929 &cmd,
930 sizeof(cmd),
931 &ret,
932 sizeof(ret));
933 if (result || ret.error != PVR_SRV_OK) {
934 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
935 "PVR_SRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT",
936 ret);
937 }
938
939 if (cli_pmr_out)
940 *cli_pmr_out = ret.cli_pmr_mem;
941
942 if (usc_pmr_out)
943 *usc_pmr_out = ret.usc_pmr_mem;
944
945 *transfer_context_out = ret.transfer_context;
946
947 return VK_SUCCESS;
948 }
949
pvr_srv_rgx_destroy_transfer_context(int fd,void * transfer_context)950 void pvr_srv_rgx_destroy_transfer_context(int fd, void *transfer_context)
951 {
952 struct pvr_srv_rgx_destroy_transfer_context_cmd cmd = {
953 .transfer_context = transfer_context,
954 };
955
956 struct pvr_srv_rgx_destroy_transfer_context_ret ret = {
957 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
958 };
959
960 int result;
961
962 result = pvr_srv_bridge_call(fd,
963 PVR_SRV_BRIDGE_RGXTQ,
964 PVR_SRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT,
965 &cmd,
966 sizeof(cmd),
967 &ret,
968 sizeof(ret));
969 if (result || ret.error != PVR_SRV_OK) {
970 vk_bridge_err(VK_ERROR_UNKNOWN,
971 "PVR_SRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT",
972 ret);
973 }
974 }
975
pvr_srv_rgx_submit_transfer2(int fd,void * transfer_context,uint32_t prepare_count,uint32_t * client_update_count,void *** update_ufo_sync_prim_block,uint32_t ** update_sync_offset,uint32_t ** update_value,int32_t check_fence,int32_t update_timeline_2d,int32_t update_timeline_3d,char * update_fence_name,uint32_t * cmd_size,uint8_t ** fw_command,uint32_t * tq_prepare_flags,uint32_t ext_job_ref,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,int32_t * const update_fence_2d_out,int32_t * const update_fence_3d_out)976 VkResult pvr_srv_rgx_submit_transfer2(int fd,
977 void *transfer_context,
978 uint32_t prepare_count,
979 uint32_t *client_update_count,
980 void ***update_ufo_sync_prim_block,
981 uint32_t **update_sync_offset,
982 uint32_t **update_value,
983 int32_t check_fence,
984 int32_t update_timeline_2d,
985 int32_t update_timeline_3d,
986 char *update_fence_name,
987 uint32_t *cmd_size,
988 uint8_t **fw_command,
989 uint32_t *tq_prepare_flags,
990 uint32_t ext_job_ref,
991 uint32_t sync_pmr_count,
992 uint32_t *sync_pmr_flags,
993 void **sync_pmrs,
994 int32_t *const update_fence_2d_out,
995 int32_t *const update_fence_3d_out)
996 {
997 struct pvr_srv_rgx_submit_transfer2_cmd cmd = {
998 .transfer_context = transfer_context,
999 .client_update_count = client_update_count,
1000 .cmd_size = cmd_size,
1001 .sync_pmr_flags = sync_pmr_flags,
1002 .tq_prepare_flags = tq_prepare_flags,
1003 .update_sync_offset = update_sync_offset,
1004 .update_value = update_value,
1005 .fw_command = fw_command,
1006 .update_fence_name = update_fence_name,
1007 .sync_pmrs = sync_pmrs,
1008 .update_ufo_sync_prim_block = update_ufo_sync_prim_block,
1009 .update_timeline_2d = update_timeline_2d,
1010 .update_timeline_3d = update_timeline_3d,
1011 .check_fence = check_fence,
1012 .ext_job_ref = ext_job_ref,
1013 .prepare_count = prepare_count,
1014 .sync_pmr_count = sync_pmr_count,
1015 };
1016
1017 struct pvr_srv_rgx_submit_transfer2_ret ret = {
1018 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1019 };
1020
1021 int result;
1022
1023 result = pvr_srv_bridge_call(fd,
1024 PVR_SRV_BRIDGE_RGXTQ,
1025 PVR_SRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2,
1026 &cmd,
1027 sizeof(cmd),
1028 &ret,
1029 sizeof(ret));
1030 if (result || ret.error != PVR_SRV_OK) {
1031 /* There is no 'retry' VkResult, so treat it as VK_NOT_READY instead. */
1032 if (result == PVR_SRV_ERROR_RETRY || ret.error == PVR_SRV_ERROR_RETRY)
1033 return VK_NOT_READY;
1034
1035 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1036 "PVR_SRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2",
1037 ret);
1038 }
1039
1040 if (update_fence_2d_out)
1041 *update_fence_2d_out = ret.update_fence_2d;
1042
1043 if (update_fence_3d_out)
1044 *update_fence_3d_out = ret.update_fence_3d;
1045
1046 return VK_SUCCESS;
1047 }
1048
1049 VkResult
pvr_srv_rgx_create_compute_context(int fd,uint32_t priority,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t static_compute_context_state_size,uint8_t * static_compute_context_state,uint32_t packed_ccb_size,uint32_t context_flags,uint64_t robustness_address,uint32_t max_deadline_ms,void ** const compute_context_out)1050 pvr_srv_rgx_create_compute_context(int fd,
1051 uint32_t priority,
1052 uint32_t reset_framework_cmd_size,
1053 uint8_t *reset_framework_cmd,
1054 void *priv_data,
1055 uint32_t static_compute_context_state_size,
1056 uint8_t *static_compute_context_state,
1057 uint32_t packed_ccb_size,
1058 uint32_t context_flags,
1059 uint64_t robustness_address,
1060 uint32_t max_deadline_ms,
1061 void **const compute_context_out)
1062 {
1063 struct pvr_srv_rgx_create_compute_context_cmd cmd = {
1064 .priority = priority,
1065 .reset_framework_cmd_size = reset_framework_cmd_size,
1066 .reset_framework_cmd = reset_framework_cmd,
1067 .priv_data = priv_data,
1068 .static_compute_context_state_size = static_compute_context_state_size,
1069 .static_compute_context_state = static_compute_context_state,
1070 .packed_ccb_size = packed_ccb_size,
1071 .context_flags = context_flags,
1072 .robustness_address = robustness_address,
1073 .max_deadline_ms = max_deadline_ms,
1074 };
1075
1076 struct pvr_srv_rgx_create_compute_context_ret ret = {
1077 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1078 };
1079
1080 int result;
1081
1082 result = pvr_srv_bridge_call(fd,
1083 PVR_SRV_BRIDGE_RGXCMP,
1084 PVR_SRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT,
1085 &cmd,
1086 sizeof(cmd),
1087 &ret,
1088 sizeof(ret));
1089 if (result || ret.error != PVR_SRV_OK) {
1090 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1091 "PVR_SRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT",
1092 ret);
1093 }
1094
1095 *compute_context_out = ret.compute_context;
1096
1097 return VK_SUCCESS;
1098 }
1099
pvr_srv_rgx_destroy_compute_context(int fd,void * compute_context)1100 void pvr_srv_rgx_destroy_compute_context(int fd, void *compute_context)
1101 {
1102 struct pvr_srv_rgx_destroy_compute_context_cmd cmd = {
1103 .compute_context = compute_context,
1104 };
1105
1106 struct pvr_srv_rgx_destroy_compute_context_ret ret = {
1107 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1108 };
1109
1110 int result;
1111
1112 result = pvr_srv_bridge_call(fd,
1113 PVR_SRV_BRIDGE_RGXCMP,
1114 PVR_SRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT,
1115 &cmd,
1116 sizeof(cmd),
1117 &ret,
1118 sizeof(ret));
1119 if (result || ret.error != PVR_SRV_OK) {
1120 vk_bridge_err(VK_ERROR_UNKNOWN,
1121 "PVR_SRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT",
1122 ret);
1123 }
1124 }
1125
/* Submit a compute (CDM) job via the RGXKICKCDM2 bridge call.
 *
 * The caller supplies client-update sync prim blocks/offsets/values, an
 * optional check fence and update timeline, the CDM command stream and any
 * PMRs the job must sync against. On success the job's update fence fd is
 * stored in *update_fence_out.
 *
 * Returns VK_SUCCESS, VK_NOT_READY when the kernel reports
 * PVR_SRV_ERROR_RETRY, or VK_ERROR_OUT_OF_DEVICE_MEMORY on any other
 * failure.
 */
VkResult pvr_srv_rgx_kick_compute2(int fd,
                                   void *compute_context,
                                   uint32_t client_update_count,
                                   void **client_update_ufo_sync_prim_block,
                                   uint32_t *client_update_offset,
                                   uint32_t *client_update_value,
                                   int32_t check_fence,
                                   int32_t update_timeline,
                                   uint32_t cmd_size,
                                   uint8_t *cdm_cmd,
                                   uint32_t ext_job_ref,
                                   uint32_t sync_pmr_count,
                                   uint32_t *sync_pmr_flags,
                                   void **sync_pmrs,
                                   uint32_t num_work_groups,
                                   uint32_t num_work_items,
                                   uint32_t pdump_flags,
                                   uint64_t max_deadline_us,
                                   char *update_fence_name,
                                   int32_t *const update_fence_out)
{
   /* Marshal the arguments into the RGXKICKCDM2 command payload. */
   struct pvr_srv_rgx_kick_cdm2_cmd cmd = {
      .max_deadline_us = max_deadline_us,
      .compute_context = compute_context,
      .client_update_offset = client_update_offset,
      .client_update_value = client_update_value,
      .sync_pmr_flags = sync_pmr_flags,
      .cdm_cmd = cdm_cmd,
      .update_fence_name = update_fence_name,
      .client_update_ufo_sync_prim_block = client_update_ufo_sync_prim_block,
      .sync_pmrs = sync_pmrs,
      .check_fence = check_fence,
      .update_timeline = update_timeline,
      .client_update_count = client_update_count,
      .cmd_size = cmd_size,
      .ext_job_ref = ext_job_ref,
      .num_work_groups = num_work_groups,
      .num_work_items = num_work_items,
      .pdump_flags = pdump_flags,
      .sync_pmr_count = sync_pmr_count,
   };

   /* Default to a generic bridge failure so an unsuccessful call is caught
    * by the error check below.
    */
   struct pvr_srv_rgx_kick_cdm2_ret ret = {
      .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
   };

   int result;

   result = pvr_srv_bridge_call(fd,
                                PVR_SRV_BRIDGE_RGXCMP,
                                PVR_SRV_BRIDGE_RGXCMP_RGXKICKCDM2,
                                &cmd,
                                sizeof(cmd),
                                &ret,
                                sizeof(ret));
   if (result || ret.error != PVR_SRV_OK) {
      /* There is no 'retry' VkResult, so treat it as VK_NOT_READY instead. */
      /* NOTE(review): 'result' appears to carry the ioctl status while the
       * bridge status is delivered in ret.error, so the first comparison
       * below looks ineffective — confirm against pvr_srv_bridge_call.
       */
      if (result == PVR_SRV_ERROR_RETRY || ret.error == PVR_SRV_ERROR_RETRY)
         return VK_NOT_READY;

      return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                           "PVR_SRV_BRIDGE_RGXCMP_RGXKICKCDM2",
                           ret);
   }

   /* Hand the job's update fence fd back to the caller. */
   *update_fence_out = ret.update_fence;

   return VK_SUCCESS;
}
1195
/* Create a hardware render target (HWRT) dataset via the
 * RGXCREATEHWRTDATASET bridge call.
 *
 * Packs per-RT-data device addresses, free list handles and the tiling /
 * merge / macrotile parameters into the bridge command, then asks the
 * kernel to create the dataset. Returns VK_ERROR_INITIALIZATION_FAILED on
 * any bridge failure.
 */
VkResult
pvr_srv_rgx_create_hwrt_dataset(int fd,
                                uint64_t flipped_multi_sample_ctl,
                                uint64_t multi_sample_ctl,
                                const pvr_dev_addr_t *macrotile_array_dev_addrs,
                                const pvr_dev_addr_t *pm_mlist_dev_addrs,
                                const pvr_dev_addr_t *rtc_dev_addrs,
                                const pvr_dev_addr_t *rgn_header_dev_addrs,
                                const pvr_dev_addr_t *tail_ptrs_dev_addrs,
                                const pvr_dev_addr_t *vheap_table_dev_adds,
                                void **free_lists,
                                uint32_t isp_merge_lower_x,
                                uint32_t isp_merge_lower_y,
                                uint32_t isp_merge_scale_x,
                                uint32_t isp_merge_scale_y,
                                uint32_t isp_merge_upper_x,
                                uint32_t isp_merge_upper_y,
                                uint32_t isp_mtile_size,
                                uint32_t mtile_stride,
                                uint32_t ppp_screen,
                                uint32_t rgn_header_size,
                                uint32_t te_aa,
                                uint32_t te_mtile1,
                                uint32_t te_mtile2,
                                uint32_t te_screen,
                                uint32_t tpc_size,
                                uint32_t tpc_stride,
                                uint16_t max_rts,
                                void **hwrt_dataset_out)
{
   /* Note that hwrt_dataset_out is passed in the cmd struct which the kernel
    * writes to. There's also a hwrt_dataset in the ret struct but we're not
    * going to use it since it's the same.
    */
   struct pvr_srv_rgx_create_hwrt_dataset_cmd cmd = {
      .flipped_multi_sample_ctl = flipped_multi_sample_ctl,
      .multi_sample_ctl = multi_sample_ctl,
      .macrotile_array_dev_addrs = macrotile_array_dev_addrs,
      .pm_mlist_dev_addrs = pm_mlist_dev_addrs,
      .rtc_dev_addrs = rtc_dev_addrs,
      .rgn_header_dev_addrs = rgn_header_dev_addrs,
      .tail_ptrs_dev_addrs = tail_ptrs_dev_addrs,
      .vheap_table_dev_adds = vheap_table_dev_adds,
      .hwrt_dataset = hwrt_dataset_out,
      .free_lists = free_lists,
      .isp_merge_lower_x = isp_merge_lower_x,
      .isp_merge_lower_y = isp_merge_lower_y,
      .isp_merge_scale_x = isp_merge_scale_x,
      .isp_merge_scale_y = isp_merge_scale_y,
      .isp_merge_upper_x = isp_merge_upper_x,
      .isp_merge_upper_y = isp_merge_upper_y,
      .isp_mtile_size = isp_mtile_size,
      .mtile_stride = mtile_stride,
      .ppp_screen = ppp_screen,
      .rgn_header_size = rgn_header_size,
      .te_aa = te_aa,
      .te_mtile1 = te_mtile1,
      .te_mtile2 = te_mtile2,
      .te_screen = te_screen,
      .tpc_size = tpc_size,
      .tpc_stride = tpc_stride,
      .max_rts = max_rts,
   };

   /* Default to a generic bridge failure so an unsuccessful call is caught
    * by the error check below.
    */
   struct pvr_srv_rgx_create_hwrt_dataset_ret ret = {
      .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
   };

   int result;

   result = pvr_srv_bridge_call(fd,
                                PVR_SRV_BRIDGE_RGXTA3D,
                                PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET,
                                &cmd,
                                sizeof(cmd),
                                &ret,
                                sizeof(ret));
   if (result || ret.error != PVR_SRV_OK) {
      return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
                           "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET",
                           ret);
   }

   /* The kernel wrote the handles through cmd.hwrt_dataset; tell Valgrind
    * that memory is now initialized. This suggests hwrt_dataset_out points
    * at an array of ROGUE_FWIF_NUM_RTDATAS handles — confirm with callers.
    */
   VG(VALGRIND_MAKE_MEM_DEFINED(cmd.hwrt_dataset,
                                sizeof(*cmd.hwrt_dataset) *
                                ROGUE_FWIF_NUM_RTDATAS));

   return VK_SUCCESS;
}
1285
pvr_srv_rgx_destroy_hwrt_dataset(int fd,void * hwrt_dataset)1286 void pvr_srv_rgx_destroy_hwrt_dataset(int fd, void *hwrt_dataset)
1287 {
1288 struct pvr_srv_rgx_destroy_hwrt_dataset_cmd cmd = {
1289 .hwrt_dataset = hwrt_dataset,
1290 };
1291
1292 struct pvr_srv_rgx_destroy_hwrt_dataset_ret ret = {
1293 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1294 };
1295
1296 int result;
1297
1298 result = pvr_srv_bridge_call(fd,
1299 PVR_SRV_BRIDGE_RGXTA3D,
1300 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET,
1301 &cmd,
1302 sizeof(cmd),
1303 &ret,
1304 sizeof(ret));
1305 if (result || ret.error != PVR_SRV_OK) {
1306 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1307 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET",
1308 ret);
1309 }
1310 }
1311
pvr_srv_rgx_create_free_list(int fd,void * mem_ctx_priv_data,uint32_t max_free_list_pages,uint32_t init_free_list_pages,uint32_t grow_free_list_pages,uint32_t grow_param_threshold,void * global_free_list,enum pvr_srv_bool free_list_check,pvr_dev_addr_t free_list_dev_addr,void * free_list_pmr,uint64_t pmr_offset,void ** const cleanup_cookie_out)1312 VkResult pvr_srv_rgx_create_free_list(int fd,
1313 void *mem_ctx_priv_data,
1314 uint32_t max_free_list_pages,
1315 uint32_t init_free_list_pages,
1316 uint32_t grow_free_list_pages,
1317 uint32_t grow_param_threshold,
1318 void *global_free_list,
1319 enum pvr_srv_bool free_list_check,
1320 pvr_dev_addr_t free_list_dev_addr,
1321 void *free_list_pmr,
1322 uint64_t pmr_offset,
1323 void **const cleanup_cookie_out)
1324 {
1325 struct pvr_srv_rgx_create_free_list_cmd cmd = {
1326 .free_list_dev_addr = free_list_dev_addr,
1327 .pmr_offset = pmr_offset,
1328 .mem_ctx_priv_data = mem_ctx_priv_data,
1329 .free_list_pmr = free_list_pmr,
1330 .global_free_list = global_free_list,
1331 .free_list_check = free_list_check,
1332 .grow_free_list_pages = grow_free_list_pages,
1333 .grow_param_threshold = grow_param_threshold,
1334 .init_free_list_pages = init_free_list_pages,
1335 .max_free_list_pages = max_free_list_pages,
1336 };
1337
1338 struct pvr_srv_rgx_create_free_list_ret ret = {
1339 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1340 };
1341
1342 int result;
1343
1344 result = pvr_srv_bridge_call(fd,
1345 PVR_SRV_BRIDGE_RGXTA3D,
1346 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST,
1347 &cmd,
1348 sizeof(cmd),
1349 &ret,
1350 sizeof(ret));
1351 if (result || ret.error != PVR_SRV_OK) {
1352 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1353 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST",
1354 ret);
1355 }
1356
1357 *cleanup_cookie_out = ret.cleanup_cookie;
1358
1359 return VK_SUCCESS;
1360 }
1361
pvr_srv_rgx_destroy_free_list(int fd,void * cleanup_cookie)1362 void pvr_srv_rgx_destroy_free_list(int fd, void *cleanup_cookie)
1363 {
1364 struct pvr_srv_rgx_destroy_free_list_cmd cmd = {
1365 .cleanup_cookie = cleanup_cookie,
1366 };
1367
1368 struct pvr_srv_rgx_destroy_free_list_ret ret = {
1369 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1370 };
1371
1372 int result;
1373
1374 /* FIXME: Do we want to propagate the retry error up the call chain so that
1375 * we can do something better than busy wait or is the expectation that we
1376 * should never get into this situation because the driver doesn't attempt
1377 * to free any resources while they're in use?
1378 */
1379 do {
1380 result = pvr_srv_bridge_call(fd,
1381 PVR_SRV_BRIDGE_RGXTA3D,
1382 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST,
1383 &cmd,
1384 sizeof(cmd),
1385 &ret,
1386 sizeof(ret));
1387 } while (result == PVR_SRV_ERROR_RETRY);
1388
1389 if (result || ret.error != PVR_SRV_OK) {
1390 vk_bridge_err(VK_ERROR_UNKNOWN,
1391 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST",
1392 ret);
1393 }
1394 }
1395
1396 VkResult
pvr_srv_rgx_create_render_context(int fd,uint32_t priority,pvr_dev_addr_t vdm_callstack_addr,uint32_t call_stack_depth,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t static_render_context_state_size,uint8_t * static_render_context_state,uint32_t packed_ccb_size,uint32_t context_flags,uint64_t robustness_address,uint32_t max_geom_deadline_ms,uint32_t max_frag_deadline_ms,void ** const render_context_out)1397 pvr_srv_rgx_create_render_context(int fd,
1398 uint32_t priority,
1399 pvr_dev_addr_t vdm_callstack_addr,
1400 uint32_t call_stack_depth,
1401 uint32_t reset_framework_cmd_size,
1402 uint8_t *reset_framework_cmd,
1403 void *priv_data,
1404 uint32_t static_render_context_state_size,
1405 uint8_t *static_render_context_state,
1406 uint32_t packed_ccb_size,
1407 uint32_t context_flags,
1408 uint64_t robustness_address,
1409 uint32_t max_geom_deadline_ms,
1410 uint32_t max_frag_deadline_ms,
1411 void **const render_context_out)
1412 {
1413 struct pvr_srv_rgx_create_render_context_cmd cmd = {
1414 .priority = priority,
1415 .vdm_callstack_addr = vdm_callstack_addr,
1416 .call_stack_depth = call_stack_depth,
1417 .reset_framework_cmd_size = reset_framework_cmd_size,
1418 .reset_framework_cmd = reset_framework_cmd,
1419 .priv_data = priv_data,
1420 .static_render_context_state_size = static_render_context_state_size,
1421 .static_render_context_state = static_render_context_state,
1422 .packed_ccb_size = packed_ccb_size,
1423 .context_flags = context_flags,
1424 .robustness_address = robustness_address,
1425 .max_ta_deadline_ms = max_geom_deadline_ms,
1426 .max_3d_deadline_ms = max_frag_deadline_ms,
1427 };
1428
1429 struct pvr_srv_rgx_create_render_context_ret ret = {
1430 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1431 };
1432
1433 int result;
1434
1435 result = pvr_srv_bridge_call(fd,
1436 PVR_SRV_BRIDGE_RGXTA3D,
1437 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT,
1438 &cmd,
1439 sizeof(cmd),
1440 &ret,
1441 sizeof(ret));
1442 if (result || ret.error != PVR_SRV_OK) {
1443 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1444 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT",
1445 ret);
1446 }
1447
1448 *render_context_out = ret.render_context;
1449
1450 return VK_SUCCESS;
1451 }
1452
pvr_srv_rgx_destroy_render_context(int fd,void * render_context)1453 void pvr_srv_rgx_destroy_render_context(int fd, void *render_context)
1454 {
1455 struct pvr_srv_rgx_destroy_render_context_cmd cmd = {
1456 .render_context = render_context,
1457 };
1458
1459 struct pvr_srv_rgx_destroy_render_context_ret ret = {
1460 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1461 };
1462
1463 int result;
1464
1465 result = pvr_srv_bridge_call(fd,
1466 PVR_SRV_BRIDGE_RGXTA3D,
1467 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT,
1468 &cmd,
1469 sizeof(cmd),
1470 &ret,
1471 sizeof(ret));
1472 if (result || ret.error != PVR_SRV_OK) {
1473 vk_bridge_err(VK_ERROR_UNKNOWN,
1474 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTORYRENDERCONTEXT",
1475 ret);
1476 }
1477 }
1478
/* Submit a render job (geometry and/or fragment work, plus the partial
 * render command) via the RGXKICKTA3D2 bridge call.
 *
 * This driver speaks "geom"/"frag" while the bridge speaks "ta"/"3d"; the
 * initializer below performs that field-name translation (geom -> ta,
 * frag -> 3d). On success the geometry and fragment update fence fds are
 * stored in *update_fence_out and *update_fence_frag_out respectively.
 *
 * Returns VK_SUCCESS, VK_NOT_READY when the kernel reports
 * PVR_SRV_ERROR_RETRY, or VK_ERROR_OUT_OF_DEVICE_MEMORY on any other
 * failure.
 */
VkResult pvr_srv_rgx_kick_render2(int fd,
                                  void *render_ctx,
                                  uint32_t client_geom_fence_count,
                                  void **client_geom_fence_sync_prim_block,
                                  uint32_t *client_geom_fence_sync_offset,
                                  uint32_t *client_geom_fence_value,
                                  uint32_t client_geom_update_count,
                                  void **client_geom_update_sync_prim_block,
                                  uint32_t *client_geom_update_sync_offset,
                                  uint32_t *client_geom_update_value,
                                  uint32_t client_frag_update_count,
                                  void **client_frag_update_sync_prim_block,
                                  uint32_t *client_frag_update_sync_offset,
                                  uint32_t *client_frag_update_value,
                                  void *pr_fence_ufo_sync_prim_block,
                                  uint32_t client_pr_fence_ufo_sync_offset,
                                  uint32_t client_pr_fence_value,
                                  int32_t check_fence,
                                  int32_t update_timeline,
                                  int32_t *const update_fence_out,
                                  char *update_fence_name,
                                  int32_t check_fence_frag,
                                  int32_t update_timeline_frag,
                                  int32_t *const update_fence_frag_out,
                                  char *update_fence_name_frag,
                                  uint32_t cmd_geom_size,
                                  uint8_t *cmd_geom,
                                  uint32_t cmd_frag_pr_size,
                                  uint8_t *cmd_frag_pr,
                                  uint32_t cmd_frag_size,
                                  uint8_t *cmd_frag,
                                  uint32_t ext_job_ref,
                                  bool kick_geom,
                                  bool kick_pr,
                                  bool kick_frag,
                                  bool abort,
                                  uint32_t pdump_flags,
                                  void *hw_rt_dataset,
                                  void *zs_buffer,
                                  void *msaa_scratch_buffer,
                                  uint32_t sync_pmr_count,
                                  uint32_t *sync_pmr_flags,
                                  void **sync_pmrs,
                                  uint32_t render_target_size,
                                  uint32_t num_draw_calls,
                                  uint32_t num_indices,
                                  uint32_t num_mrts,
                                  uint64_t deadline)
{
   /* Marshal the arguments into the RGXKICKTA3D2 command payload, mapping
    * the driver's geom/frag naming onto the bridge's ta/3d fields.
    */
   struct pvr_srv_rgx_kick_ta3d2_cmd cmd = {
      .deadline = deadline,
      .hw_rt_dataset = hw_rt_dataset,
      .msaa_scratch_buffer = msaa_scratch_buffer,
      .pr_fence_ufo_sync_prim_block = pr_fence_ufo_sync_prim_block,
      .render_ctx = render_ctx,
      .zs_buffer = zs_buffer,
      .client_3d_update_sync_offset = client_frag_update_sync_offset,
      .client_3d_update_value = client_frag_update_value,
      .client_ta_fence_sync_offset = client_geom_fence_sync_offset,
      .client_ta_fence_value = client_geom_fence_value,
      .client_ta_update_sync_offset = client_geom_update_sync_offset,
      .client_ta_update_value = client_geom_update_value,
      .sync_pmr_flags = sync_pmr_flags,
      .cmd_3d = cmd_frag,
      .cmd_3d_pr = cmd_frag_pr,
      .cmd_ta = cmd_geom,
      .update_fence_name = update_fence_name,
      .update_fence_name_3d = update_fence_name_frag,
      .client_3d_update_sync_prim_block = client_frag_update_sync_prim_block,
      .client_ta_fence_sync_prim_block = client_geom_fence_sync_prim_block,
      .client_ta_update_sync_prim_block = client_geom_update_sync_prim_block,
      .sync_pmrs = sync_pmrs,
      .abort = abort,
      .kick_3d = kick_frag,
      .kick_pr = kick_pr,
      .kick_ta = kick_geom,
      .check_fence = check_fence,
      .check_fence_3d = check_fence_frag,
      .update_timeline = update_timeline,
      .update_timeline_3d = update_timeline_frag,
      .cmd_3d_size = cmd_frag_size,
      .cmd_3d_pr_size = cmd_frag_pr_size,
      .client_3d_update_count = client_frag_update_count,
      .client_ta_fence_count = client_geom_fence_count,
      .client_ta_update_count = client_geom_update_count,
      .ext_job_ref = ext_job_ref,
      .client_pr_fence_ufo_sync_offset = client_pr_fence_ufo_sync_offset,
      .client_pr_fence_value = client_pr_fence_value,
      .num_draw_calls = num_draw_calls,
      .num_indices = num_indices,
      .num_mrts = num_mrts,
      .pdump_flags = pdump_flags,
      .render_target_size = render_target_size,
      .sync_pmr_count = sync_pmr_count,
      .cmd_ta_size = cmd_geom_size,
   };

   /* Default to a generic bridge failure and invalid fence fds so an
    * unsuccessful call is caught by the error check below.
    */
   struct pvr_srv_rgx_kick_ta3d2_ret ret = {
      .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
      .update_fence = -1,
      .update_fence_3d = -1,
   };

   int result;

   result = pvr_srv_bridge_call(fd,
                                PVR_SRV_BRIDGE_RGXTA3D,
                                PVR_SRV_BRIDGE_RGXTA3D_RGXKICKTA3D2,
                                &cmd,
                                sizeof(cmd),
                                &ret,
                                sizeof(ret));
   if (result || ret.error != PVR_SRV_OK) {
      /* There is no 'retry' VkResult, so treat it as VK_NOT_READY instead. */
      /* NOTE(review): 'result' appears to carry the ioctl status while the
       * bridge status is delivered in ret.error, so the first comparison
       * below looks ineffective — confirm against pvr_srv_bridge_call.
       */
      if (result == PVR_SRV_ERROR_RETRY || ret.error == PVR_SRV_ERROR_RETRY)
         return VK_NOT_READY;

      return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                           "PVR_SRV_BRIDGE_RGXTA3D_RGXKICKTA3D2",
                           ret);
   }

   /* Hand the geometry (ta) and fragment (3d) update fence fds back. */
   *update_fence_out = ret.update_fence;
   *update_fence_frag_out = ret.update_fence_3d;

   return VK_SUCCESS;
}
1606