1 /*
2 * Copyright © 2022 Imagination Technologies Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <errno.h>
26 #include <stdbool.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <xf86drm.h>
31
32 #include "fw-api/pvr_rogue_fwif_shared.h"
33 #include "pvr_private.h"
34 #include "pvr_srv.h"
35 #include "pvr_srv_bridge.h"
36 #include "pvr_types.h"
37 #include "util/log.h"
38 #include "util/macros.h"
39 #include "vk_log.h"
40
41 #define vk_bridge_err(vk_err, bridge_func, bridge_ret) \
42 vk_errorf(NULL, \
43 vk_err, \
44 "%s failed, PVR_SRV_ERROR: %d, Errno: %s", \
45 bridge_func, \
46 (bridge_ret).error, \
47 strerror(errno))
48
pvr_srv_bridge_call(int fd,uint8_t bridge_id,uint32_t function_id,void * input,uint32_t input_buffer_size,void * output,uint32_t output_buffer_size)49 static int pvr_srv_bridge_call(int fd,
50 uint8_t bridge_id,
51 uint32_t function_id,
52 void *input,
53 uint32_t input_buffer_size,
54 void *output,
55 uint32_t output_buffer_size)
56 {
57 struct drm_srvkm_cmd cmd = {
58 .bridge_id = bridge_id,
59 .bridge_func_id = function_id,
60 .in_data_ptr = (uint64_t)(uintptr_t)input,
61 .out_data_ptr = (uint64_t)(uintptr_t)output,
62 .in_data_size = input_buffer_size,
63 .out_data_size = output_buffer_size,
64 };
65
66 int ret = drmIoctl(fd, DRM_IOCTL_SRVKM_CMD, &cmd);
67 if (unlikely(ret))
68 return ret;
69
70 VG(VALGRIND_MAKE_MEM_DEFINED(output, output_buffer_size));
71
72 return 0U;
73 }
74
pvr_srv_init_module(int fd,enum pvr_srvkm_module_type module)75 VkResult pvr_srv_init_module(int fd, enum pvr_srvkm_module_type module)
76 {
77 struct drm_srvkm_init_data init_data = { .init_module = module };
78
79 int ret = drmIoctl(fd, DRM_IOCTL_SRVKM_INIT, &init_data);
80 if (unlikely(ret)) {
81 return vk_errorf(NULL,
82 VK_ERROR_INITIALIZATION_FAILED,
83 "DRM_IOCTL_SRVKM_INIT failed, Errno: %s",
84 strerror(errno));
85 }
86
87 return VK_SUCCESS;
88 }
89
pvr_srv_connection_create(int fd,uint64_t * const bvnc_out)90 VkResult pvr_srv_connection_create(int fd, uint64_t *const bvnc_out)
91 {
92 struct pvr_srv_bridge_connect_cmd cmd = {
93 .flags = PVR_SRV_FLAGS_CLIENT_64BIT_COMPAT,
94 .build_options = RGX_BUILD_OPTIONS,
95 .DDK_version = PVR_SRV_VERSION,
96 .DDK_build = PVR_SRV_VERSION_BUILD,
97 };
98
99 /* Initialize ret.error to a default error */
100 struct pvr_srv_bridge_connect_ret ret = {
101 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
102 };
103
104 int result;
105
106 result = pvr_srv_bridge_call(fd,
107 PVR_SRV_BRIDGE_SRVCORE,
108 PVR_SRV_BRIDGE_SRVCORE_CONNECT,
109 &cmd,
110 sizeof(cmd),
111 &ret,
112 sizeof(ret));
113 if (result || ret.error != PVR_SRV_OK) {
114 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
115 "PVR_SRV_BRIDGE_SRVCORE_CONNECT",
116 ret);
117 }
118
119 *bvnc_out = ret.bvnc;
120
121 return VK_SUCCESS;
122 }
123
pvr_srv_connection_destroy(int fd)124 void pvr_srv_connection_destroy(int fd)
125 {
126 /* Initialize ret.error to a default error */
127 struct pvr_srv_bridge_disconnect_ret ret = {
128 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
129 };
130
131 int result;
132
133 result = pvr_srv_bridge_call(fd,
134 PVR_SRV_BRIDGE_SRVCORE,
135 PVR_SRV_BRIDGE_SRVCORE_DISCONNECT,
136 NULL,
137 0,
138 &ret,
139 sizeof(ret));
140 if (result || ret.error != PVR_SRV_OK) {
141 vk_bridge_err(VK_ERROR_UNKNOWN, "PVR_SRV_BRIDGE_SRVCORE_DISCONNECT", ret);
142 }
143 }
144
pvr_srv_get_multicore_info(int fd,uint32_t caps_size,uint64_t * caps,uint32_t * num_cores)145 VkResult pvr_srv_get_multicore_info(int fd,
146 uint32_t caps_size,
147 uint64_t *caps,
148 uint32_t *num_cores)
149 {
150 struct pvr_srv_bridge_getmulticoreinfo_cmd cmd = {
151 .caps = caps,
152 .caps_size = caps_size,
153 };
154
155 struct pvr_srv_bridge_getmulticoreinfo_ret ret = {
156 .caps = caps,
157 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
158 };
159
160 int result;
161
162 result = pvr_srv_bridge_call(fd,
163 PVR_SRV_BRIDGE_SRVCORE,
164 PVR_SRV_BRIDGE_SRVCORE_GETMULTICOREINFO,
165 &cmd,
166 sizeof(cmd),
167 &ret,
168 sizeof(ret));
169 if (result || ret.error != PVR_SRV_OK) {
170 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
171 "PVR_SRV_BRIDGE_SRVCORE_GETMULTICOREINFO",
172 ret);
173 }
174
175 if (!num_cores)
176 *num_cores = ret.num_cores;
177
178 return VK_SUCCESS;
179 }
180
pvr_srv_alloc_sync_primitive_block(int fd,void ** const handle_out,void ** const pmr_out,uint32_t * const size_out,uint32_t * const addr_out)181 VkResult pvr_srv_alloc_sync_primitive_block(int fd,
182 void **const handle_out,
183 void **const pmr_out,
184 uint32_t *const size_out,
185 uint32_t *const addr_out)
186 {
187 /* Initialize ret.error to a default error */
188 struct pvr_srv_bridge_alloc_sync_primitive_block_ret ret = {
189 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
190 };
191
192 int result;
193
194 result = pvr_srv_bridge_call(fd,
195 PVR_SRV_BRIDGE_SYNC,
196 PVR_SRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK,
197 NULL,
198 0,
199 &ret,
200 sizeof(ret));
201 if (result || ret.error != PVR_SRV_OK) {
202 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
203 "PVR_SRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK",
204 ret);
205 }
206
207 *handle_out = ret.handle;
208 *pmr_out = ret.pmr;
209 *size_out = ret.size;
210 *addr_out = ret.addr;
211
212 return VK_SUCCESS;
213 }
214
pvr_srv_free_sync_primitive_block(int fd,void * handle)215 void pvr_srv_free_sync_primitive_block(int fd, void *handle)
216 {
217 struct pvr_srv_bridge_free_sync_primitive_block_cmd cmd = {
218 .handle = handle,
219 };
220
221 /* Initialize ret.error to a default error */
222 struct pvr_srv_bridge_free_sync_primitive_block_ret ret = {
223 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
224 };
225
226 int result;
227
228 result = pvr_srv_bridge_call(fd,
229 PVR_SRV_BRIDGE_SYNC,
230 PVR_SRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK,
231 &cmd,
232 sizeof(cmd),
233 &ret,
234 sizeof(ret));
235 if (result || ret.error != PVR_SRV_OK) {
236 vk_bridge_err(VK_ERROR_UNKNOWN,
237 "PVR_SRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK",
238 ret);
239 }
240 }
241
pvr_srv_get_heap_count(int fd,uint32_t * const heap_count_out)242 VkResult pvr_srv_get_heap_count(int fd, uint32_t *const heap_count_out)
243 {
244 struct pvr_srv_heap_count_cmd cmd = {
245 .heap_config_index = 0,
246 };
247
248 struct pvr_srv_heap_count_ret ret = {
249 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
250 };
251
252 int result;
253
254 result = pvr_srv_bridge_call(fd,
255 PVR_SRV_BRIDGE_MM,
256 PVR_SRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
257 &cmd,
258 sizeof(cmd),
259 &ret,
260 sizeof(ret));
261 if (result || ret.error != PVR_SRV_OK) {
262 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
263 "PVR_SRV_BRIDGE_MM_HEAPCFGHEAPCOUNT",
264 ret);
265 }
266
267 *heap_count_out = ret.heap_count;
268
269 return VK_SUCCESS;
270 }
271
pvr_srv_int_heap_create(int fd,pvr_dev_addr_t base_address,uint64_t size,uint32_t log2_page_size,void * server_memctx,void ** const server_heap_out)272 VkResult pvr_srv_int_heap_create(int fd,
273 pvr_dev_addr_t base_address,
274 uint64_t size,
275 uint32_t log2_page_size,
276 void *server_memctx,
277 void **const server_heap_out)
278 {
279 struct pvr_srv_devmem_int_heap_create_cmd cmd = {
280 .server_memctx = server_memctx,
281 .base_addr = base_address,
282 .size = size,
283 .log2_page_size = log2_page_size,
284 };
285
286 struct pvr_srv_devmem_int_heap_create_ret ret = {
287 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
288 };
289
290 int result;
291
292 result = pvr_srv_bridge_call(fd,
293 PVR_SRV_BRIDGE_MM,
294 PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPCREATE,
295 &cmd,
296 sizeof(cmd),
297 &ret,
298 sizeof(ret));
299 if (result || ret.error != PVR_SRV_OK) {
300 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
301 "PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPCREATE",
302 ret);
303 }
304
305 *server_heap_out = ret.server_heap;
306
307 return VK_SUCCESS;
308 }
309
pvr_srv_int_heap_destroy(int fd,void * server_heap)310 void pvr_srv_int_heap_destroy(int fd, void *server_heap)
311 {
312 struct pvr_srv_devmem_int_heap_destroy_cmd cmd = {
313 .server_heap = server_heap,
314 };
315
316 struct pvr_srv_devmem_int_heap_destroy_ret ret = {
317 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
318 };
319
320 int result;
321
322 result = pvr_srv_bridge_call(fd,
323 PVR_SRV_BRIDGE_MM,
324 PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY,
325 &cmd,
326 sizeof(cmd),
327 &ret,
328 sizeof(ret));
329 if (result || ret.error != PVR_SRV_OK) {
330 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
331 "PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY",
332 ret);
333 }
334 }
335
/* This bridge function allows the heap name and the heap details to be
 * queried independently, i.e. the buffer/base_address/size/reserved_size/
 * log2_page_size pointers are allowed to be NULL.
 */
pvr_srv_get_heap_details(int fd,uint32_t heap_index,uint32_t buffer_size,char * const buffer_out,pvr_dev_addr_t * const base_address_out,uint64_t * const size_out,uint64_t * const reserved_size_out,uint32_t * const log2_page_size_out)340 VkResult pvr_srv_get_heap_details(int fd,
341 uint32_t heap_index,
342 uint32_t buffer_size,
343 char *const buffer_out,
344 pvr_dev_addr_t *const base_address_out,
345 uint64_t *const size_out,
346 uint64_t *const reserved_size_out,
347 uint32_t *const log2_page_size_out)
348 {
349 struct pvr_srv_heap_cfg_details_cmd cmd = {
350 .heap_config_index = 0,
351 .heap_index = heap_index,
352 .buffer_size = buffer_size,
353 .buffer = buffer_out,
354 };
355
356 struct pvr_srv_heap_cfg_details_ret ret = {
357 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
358 .buffer = buffer_out,
359 };
360
361 int result;
362
363 result = pvr_srv_bridge_call(fd,
364 PVR_SRV_BRIDGE_MM,
365 PVR_SRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
366 &cmd,
367 sizeof(cmd),
368 &ret,
369 sizeof(ret));
370 if (result || ret.error != PVR_SRV_OK) {
371 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
372 "PVR_SRV_BRIDGE_MM_HEAPCFGHEAPDETAILS",
373 ret);
374 }
375
376 VG(VALGRIND_MAKE_MEM_DEFINED(buffer_out, buffer_size));
377
378 if (base_address_out)
379 *base_address_out = ret.base_addr;
380
381 if (size_out)
382 *size_out = ret.size;
383
384 if (reserved_size_out)
385 *reserved_size_out = ret.reserved_size;
386
387 if (log2_page_size_out)
388 *log2_page_size_out = ret.log2_page_size;
389
390 return VK_SUCCESS;
391 }
392
pvr_srv_int_ctx_destroy(int fd,void * server_memctx)393 void pvr_srv_int_ctx_destroy(int fd, void *server_memctx)
394 {
395 struct pvr_srv_devmem_int_ctx_destroy_cmd cmd = {
396 .server_memctx = server_memctx,
397 };
398
399 struct pvr_srv_devmem_int_ctx_destroy_ret ret = {
400 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
401 };
402
403 int result;
404
405 result = pvr_srv_bridge_call(fd,
406 PVR_SRV_BRIDGE_MM,
407 PVR_SRV_BRIDGE_MM_DEVMEMINTCTXDESTROY,
408 &cmd,
409 sizeof(cmd),
410 &ret,
411 sizeof(ret));
412 if (result || ret.error != PVR_SRV_OK) {
413 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
414 "PVR_SRV_BRIDGE_MM_DEVMEMINTCTXDESTROY",
415 ret);
416 }
417 }
418
pvr_srv_int_ctx_create(int fd,void ** const server_memctx_out,void ** const server_memctx_data_out)419 VkResult pvr_srv_int_ctx_create(int fd,
420 void **const server_memctx_out,
421 void **const server_memctx_data_out)
422 {
423 struct pvr_srv_devmem_int_ctx_create_cmd cmd = {
424 .kernel_memory_ctx = false,
425 };
426
427 struct pvr_srv_devmem_int_ctx_create_ret ret = {
428 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
429 };
430
431 int result;
432
433 result = pvr_srv_bridge_call(fd,
434 PVR_SRV_BRIDGE_MM,
435 PVR_SRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
436 &cmd,
437 sizeof(cmd),
438 &ret,
439 sizeof(ret));
440 if (result || ret.error != PVR_SRV_OK) {
441 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
442 "PVR_SRV_BRIDGE_MM_DEVMEMINTCTXCREATE",
443 ret);
444 }
445
446 *server_memctx_out = ret.server_memctx;
447 *server_memctx_data_out = ret.server_memctx_data;
448
449 return VK_SUCCESS;
450 }
451
pvr_srv_int_reserve_addr(int fd,void * server_heap,pvr_dev_addr_t addr,uint64_t size,void ** const reservation_out)452 VkResult pvr_srv_int_reserve_addr(int fd,
453 void *server_heap,
454 pvr_dev_addr_t addr,
455 uint64_t size,
456 void **const reservation_out)
457 {
458 struct pvr_srv_devmem_int_reserve_range_cmd cmd = {
459 .server_heap = server_heap,
460 .addr = addr,
461 .size = size,
462 };
463
464 struct pvr_srv_devmem_int_reserve_range_ret ret = {
465 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
466 };
467
468 int result;
469
470 result = pvr_srv_bridge_call(fd,
471 PVR_SRV_BRIDGE_MM,
472 PVR_SRV_BRIDGE_MM_DEVMEMINTRESERVERANGE,
473 &cmd,
474 sizeof(cmd),
475 &ret,
476 sizeof(ret));
477 if (result || ret.error != PVR_SRV_OK) {
478 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
479 "PVR_SRV_BRIDGE_MM_DEVMEMINTRESERVERANGE",
480 ret);
481 }
482
483 *reservation_out = ret.reservation;
484
485 return VK_SUCCESS;
486 }
487
pvr_srv_int_unreserve_addr(int fd,void * reservation)488 void pvr_srv_int_unreserve_addr(int fd, void *reservation)
489 {
490 struct pvr_srv_bridge_in_devmem_int_unreserve_range_cmd cmd = {
491 .reservation = reservation,
492 };
493
494 struct pvr_srv_bridge_in_devmem_int_unreserve_range_ret ret = {
495 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
496 };
497
498 int result;
499
500 result = pvr_srv_bridge_call(fd,
501 PVR_SRV_BRIDGE_MM,
502 PVR_SRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE,
503 &cmd,
504 sizeof(cmd),
505 &ret,
506 sizeof(ret));
507 if (result || ret.error != PVR_SRV_OK) {
508 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
509 "PVR_SRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE",
510 ret);
511 }
512 }
513
pvr_srv_alloc_pmr(int fd,uint64_t size,uint64_t block_size,uint32_t phy_blocks,uint32_t virt_blocks,uint32_t log2_page_size,uint64_t flags,uint32_t pid,void ** const pmr_out)514 VkResult pvr_srv_alloc_pmr(int fd,
515 uint64_t size,
516 uint64_t block_size,
517 uint32_t phy_blocks,
518 uint32_t virt_blocks,
519 uint32_t log2_page_size,
520 uint64_t flags,
521 uint32_t pid,
522 void **const pmr_out)
523 {
524 const char *annotation = "VK PHYSICAL ALLOCATION";
525 const uint32_t annotation_size =
526 strnlen(annotation, DEVMEM_ANNOTATION_MAX_LEN - 1) + 1;
527 uint32_t mapping_table = 0;
528
529 struct pvr_srv_physmem_new_ram_backed_locked_pmr_cmd cmd = {
530 .size = size,
531 .block_size = block_size,
532 .phy_blocks = phy_blocks,
533 .virt_blocks = virt_blocks,
534 .mapping_table = &mapping_table,
535 .log2_page_size = log2_page_size,
536 .flags = flags,
537 .annotation_size = annotation_size,
538 .annotation = annotation,
539 .pid = pid,
540 .pdump_flags = 0x00000000U,
541 };
542
543 struct pvr_srv_physmem_new_ram_backed_locked_pmr_ret ret = {
544 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
545 };
546
547 int result;
548
549 result = pvr_srv_bridge_call(fd,
550 PVR_SRV_BRIDGE_MM,
551 PVR_SRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR,
552 &cmd,
553 sizeof(cmd),
554 &ret,
555 sizeof(ret));
556 if (result || ret.error != PVR_SRV_OK) {
557 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
558 "PVR_SRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR",
559 ret);
560 }
561
562 *pmr_out = ret.pmr;
563
564 return VK_SUCCESS;
565 }
566
pvr_srv_free_pmr(int fd,void * pmr)567 void pvr_srv_free_pmr(int fd, void *pmr)
568 {
569 struct pvr_srv_pmr_unref_unlock_pmr_cmd cmd = {
570 .pmr = pmr,
571 };
572
573 struct pvr_srv_pmr_unref_unlock_pmr_ret ret = {
574 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
575 };
576
577 int result;
578
579 result = pvr_srv_bridge_call(fd,
580 PVR_SRV_BRIDGE_MM,
581 PVR_SRV_BRIDGE_MM_PMRUNREFUNLOCKPMR,
582 &cmd,
583 sizeof(cmd),
584 &ret,
585 sizeof(ret));
586 if (result || ret.error != PVR_SRV_OK) {
587 vk_bridge_err(VK_ERROR_UNKNOWN,
588 "PVR_SRV_BRIDGE_MM_PMRUNREFUNLOCKPMR",
589 ret);
590 }
591 }
592
pvr_srv_int_map_pages(int fd,void * reservation,void * pmr,uint32_t page_count,uint32_t page_offset,uint64_t flags,pvr_dev_addr_t addr)593 VkResult pvr_srv_int_map_pages(int fd,
594 void *reservation,
595 void *pmr,
596 uint32_t page_count,
597 uint32_t page_offset,
598 uint64_t flags,
599 pvr_dev_addr_t addr)
600 {
601 struct pvr_srv_devmem_int_map_pages_cmd cmd = {
602 .reservation = reservation,
603 .pmr = pmr,
604 .page_count = page_count,
605 .page_offset = page_offset,
606 .flags = flags,
607 .addr = addr,
608 };
609
610 struct pvr_srv_devmem_int_map_pages_ret ret = {
611 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
612 };
613
614 int result;
615
616 result = pvr_srv_bridge_call(fd,
617 PVR_SRV_BRIDGE_MM,
618 PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPAGES,
619 &cmd,
620 sizeof(cmd),
621 &ret,
622 sizeof(ret));
623 if (result || ret.error != PVR_SRV_OK) {
624 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
625 "PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPAGES",
626 ret);
627 }
628
629 return VK_SUCCESS;
630 }
631
/* Unmap pages from a device-virtual range; failures are logged, not returned. */
void pvr_srv_int_unmap_pages(int fd,
                             void *reservation,
                             pvr_dev_addr_t dev_addr,
                             uint32_t page_count)
{
   struct pvr_srv_devmem_int_unmap_pages_cmd cmd = {
      .reservation = reservation,
      .dev_addr = dev_addr,
      .page_count = page_count,
   };

   /* Seed with a generic bridge failure in case the kernel writes nothing. */
   struct pvr_srv_devmem_int_unmap_pages_ret ret = {
      .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
   };

   const int err = pvr_srv_bridge_call(fd,
                                       PVR_SRV_BRIDGE_MM,
                                       PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES,
                                       &cmd,
                                       sizeof(cmd),
                                       &ret,
                                       sizeof(ret));
   if (err || ret.error != PVR_SRV_OK) {
      vk_bridge_err(VK_ERROR_UNKNOWN,
                    "PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES",
                    ret);
   }
}
662
pvr_srv_int_map_pmr(int fd,void * server_heap,void * reservation,void * pmr,uint64_t flags,void ** const mapping_out)663 VkResult pvr_srv_int_map_pmr(int fd,
664 void *server_heap,
665 void *reservation,
666 void *pmr,
667 uint64_t flags,
668 void **const mapping_out)
669 {
670 struct pvr_srv_devmem_int_map_pmr_cmd cmd = {
671 .server_heap = server_heap,
672 .reservation = reservation,
673 .pmr = pmr,
674 .flags = flags,
675 };
676
677 struct pvr_srv_devmem_int_map_pmr_ret ret = {
678 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
679 };
680
681 int result;
682
683 result = pvr_srv_bridge_call(fd,
684 PVR_SRV_BRIDGE_MM,
685 PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPMR,
686 &cmd,
687 sizeof(cmd),
688 &ret,
689 sizeof(ret));
690 if (result || ret.error != PVR_SRV_OK) {
691 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
692 "PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPMR",
693 ret);
694 }
695
696 *mapping_out = ret.mapping;
697
698 return VK_SUCCESS;
699 }
700
pvr_srv_int_unmap_pmr(int fd,void * mapping)701 void pvr_srv_int_unmap_pmr(int fd, void *mapping)
702 {
703 struct pvr_srv_devmem_int_unmap_pmr_cmd cmd = {
704 .mapping = mapping,
705 };
706
707 struct pvr_srv_devmem_int_unmap_pmr_ret ret = {
708 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
709 };
710
711 int result;
712
713 result = pvr_srv_bridge_call(fd,
714 PVR_SRV_BRIDGE_MM,
715 PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPMR,
716 &cmd,
717 sizeof(cmd),
718 &ret,
719 sizeof(ret));
720 if (result || ret.error != PVR_SRV_OK) {
721 vk_bridge_err(VK_ERROR_UNKNOWN,
722 "PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPMR",
723 ret);
724 }
725 }
726
pvr_srv_physmem_import_dmabuf(int fd,int buffer_fd,uint64_t flags,void ** const pmr_out,uint64_t * const size_out,uint64_t * const align_out)727 VkResult pvr_srv_physmem_import_dmabuf(int fd,
728 int buffer_fd,
729 uint64_t flags,
730 void **const pmr_out,
731 uint64_t *const size_out,
732 uint64_t *const align_out)
733 {
734 struct pvr_srv_phys_mem_import_dmabuf_cmd cmd = {
735 .buffer_fd = buffer_fd,
736 .flags = flags,
737 .name_size = 0,
738 .name = NULL,
739 };
740
741 struct pvr_srv_phys_mem_import_dmabuf_ret ret = {
742 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
743 };
744
745 int result;
746
747 result = pvr_srv_bridge_call(fd,
748 PVR_SRV_BRIDGE_DMABUF,
749 PVR_SRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF,
750 &cmd,
751 sizeof(cmd),
752 &ret,
753 sizeof(ret));
754 if (result || ret.error != PVR_SRV_OK) {
755 return vk_bridge_err(VK_ERROR_INVALID_EXTERNAL_HANDLE,
756 "PVR_SRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF",
757 ret);
758 }
759
760 *pmr_out = ret.pmr;
761 *size_out = ret.size;
762 *align_out = ret.align;
763
764 return VK_SUCCESS;
765 }
766
pvr_srv_physmem_export_dmabuf(int fd,void * pmr,int * const fd_out)767 VkResult pvr_srv_physmem_export_dmabuf(int fd, void *pmr, int *const fd_out)
768 {
769 struct pvr_srv_phys_mem_export_dmabuf_cmd cmd = {
770 .pmr = pmr,
771 };
772
773 struct pvr_srv_phys_mem_export_dmabuf_ret ret = {
774 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
775 };
776
777 int result;
778
779 result = pvr_srv_bridge_call(fd,
780 PVR_SRV_BRIDGE_DMABUF,
781 PVR_SRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF,
782 &cmd,
783 sizeof(cmd),
784 &ret,
785 sizeof(ret));
786 if (result || ret.error != PVR_SRV_OK) {
787 return vk_bridge_err(VK_ERROR_OUT_OF_HOST_MEMORY,
788 "PVR_SRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF",
789 ret);
790 }
791
792 *fd_out = ret.fd;
793
794 return VK_SUCCESS;
795 }
796
pvr_srv_rgx_create_transfer_context(int fd,uint32_t priority,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t packed_ccb_size_u8888,uint32_t context_flags,uint64_t robustness_address,void ** const cli_pmr_out,void ** const usc_pmr_out,void ** const transfer_context_out)797 VkResult pvr_srv_rgx_create_transfer_context(int fd,
798 uint32_t priority,
799 uint32_t reset_framework_cmd_size,
800 uint8_t *reset_framework_cmd,
801 void *priv_data,
802 uint32_t packed_ccb_size_u8888,
803 uint32_t context_flags,
804 uint64_t robustness_address,
805 void **const cli_pmr_out,
806 void **const usc_pmr_out,
807 void **const transfer_context_out)
808 {
809 struct pvr_srv_rgx_create_transfer_context_cmd cmd = {
810 .robustness_address = robustness_address,
811 .priority = priority,
812 .reset_framework_cmd_size = reset_framework_cmd_size,
813 .reset_framework_cmd = reset_framework_cmd,
814 .priv_data = priv_data,
815 .packed_ccb_size_u8888 = packed_ccb_size_u8888,
816 .context_flags = context_flags,
817 };
818
819 struct pvr_srv_rgx_create_transfer_context_ret ret = {
820 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
821 };
822
823 int result;
824
825 result = pvr_srv_bridge_call(fd,
826 PVR_SRV_BRIDGE_RGXTQ,
827 PVR_SRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT,
828 &cmd,
829 sizeof(cmd),
830 &ret,
831 sizeof(ret));
832 if (result || ret.error != PVR_SRV_OK) {
833 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
834 "PVR_SRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT",
835 ret);
836 }
837
838 if (cli_pmr_out)
839 *cli_pmr_out = ret.cli_pmr_mem;
840
841 if (usc_pmr_out)
842 *usc_pmr_out = ret.usc_pmr_mem;
843
844 *transfer_context_out = ret.transfer_context;
845
846 return VK_SUCCESS;
847 }
848
pvr_srv_rgx_destroy_transfer_context(int fd,void * transfer_context)849 void pvr_srv_rgx_destroy_transfer_context(int fd, void *transfer_context)
850 {
851 struct pvr_srv_rgx_destroy_transfer_context_cmd cmd = {
852 .transfer_context = transfer_context,
853 };
854
855 struct pvr_srv_rgx_destroy_transfer_context_ret ret = {
856 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
857 };
858
859 int result;
860
861 result = pvr_srv_bridge_call(fd,
862 PVR_SRV_BRIDGE_RGXTQ,
863 PVR_SRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT,
864 &cmd,
865 sizeof(cmd),
866 &ret,
867 sizeof(ret));
868 if (result || ret.error != PVR_SRV_OK) {
869 vk_bridge_err(VK_ERROR_UNKNOWN,
870 "PVR_SRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT",
871 ret);
872 }
873 }
874
pvr_srv_rgx_submit_transfer2(int fd,void * transfer_context,uint32_t prepare_count,uint32_t * client_update_count,void *** update_ufo_sync_prim_block,uint32_t ** update_sync_offset,uint32_t ** update_value,int32_t check_fence,int32_t update_timeline_2d,int32_t update_timeline_3d,char * update_fence_name,uint32_t * cmd_size,uint8_t ** fw_command,uint32_t * tq_prepare_flags,uint32_t ext_job_ref,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,int32_t * const update_fence_2d_out,int32_t * const update_fence_3d_out)875 VkResult pvr_srv_rgx_submit_transfer2(int fd,
876 void *transfer_context,
877 uint32_t prepare_count,
878 uint32_t *client_update_count,
879 void ***update_ufo_sync_prim_block,
880 uint32_t **update_sync_offset,
881 uint32_t **update_value,
882 int32_t check_fence,
883 int32_t update_timeline_2d,
884 int32_t update_timeline_3d,
885 char *update_fence_name,
886 uint32_t *cmd_size,
887 uint8_t **fw_command,
888 uint32_t *tq_prepare_flags,
889 uint32_t ext_job_ref,
890 uint32_t sync_pmr_count,
891 uint32_t *sync_pmr_flags,
892 void **sync_pmrs,
893 int32_t *const update_fence_2d_out,
894 int32_t *const update_fence_3d_out)
895 {
896 struct pvr_srv_rgx_submit_transfer2_cmd cmd = {
897 .transfer_context = transfer_context,
898 .client_update_count = client_update_count,
899 .cmd_size = cmd_size,
900 .sync_pmr_flags = sync_pmr_flags,
901 .tq_prepare_flags = tq_prepare_flags,
902 .update_sync_offset = update_sync_offset,
903 .update_value = update_value,
904 .fw_command = fw_command,
905 .update_fence_name = update_fence_name,
906 .sync_pmrs = sync_pmrs,
907 .update_ufo_sync_prim_block = update_ufo_sync_prim_block,
908 .update_timeline_2d = update_timeline_2d,
909 .update_timeline_3d = update_timeline_3d,
910 .check_fence = check_fence,
911 .ext_job_ref = ext_job_ref,
912 .prepare_count = prepare_count,
913 .sync_pmr_count = sync_pmr_count,
914 };
915
916 struct pvr_srv_rgx_submit_transfer2_ret ret = {
917 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
918 };
919
920 int result;
921
922 result = pvr_srv_bridge_call(fd,
923 PVR_SRV_BRIDGE_RGXTQ,
924 PVR_SRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2,
925 &cmd,
926 sizeof(cmd),
927 &ret,
928 sizeof(ret));
929 if (result || ret.error != PVR_SRV_OK) {
930 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
931 "PVR_SRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2",
932 ret);
933 }
934
935 if (update_fence_2d_out)
936 *update_fence_2d_out = ret.update_fence_2d;
937
938 if (update_fence_3d_out)
939 *update_fence_3d_out = ret.update_fence_3d;
940
941 return VK_SUCCESS;
942 }
943
944 VkResult
pvr_srv_rgx_create_compute_context(int fd,uint32_t priority,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t static_compute_context_state_size,uint8_t * static_compute_context_state,uint32_t packed_ccb_size,uint32_t context_flags,uint64_t robustness_address,uint32_t max_deadline_ms,void ** const compute_context_out)945 pvr_srv_rgx_create_compute_context(int fd,
946 uint32_t priority,
947 uint32_t reset_framework_cmd_size,
948 uint8_t *reset_framework_cmd,
949 void *priv_data,
950 uint32_t static_compute_context_state_size,
951 uint8_t *static_compute_context_state,
952 uint32_t packed_ccb_size,
953 uint32_t context_flags,
954 uint64_t robustness_address,
955 uint32_t max_deadline_ms,
956 void **const compute_context_out)
957 {
958 struct pvr_srv_rgx_create_compute_context_cmd cmd = {
959 .priority = priority,
960 .reset_framework_cmd_size = reset_framework_cmd_size,
961 .reset_framework_cmd = reset_framework_cmd,
962 .priv_data = priv_data,
963 .static_compute_context_state_size = static_compute_context_state_size,
964 .static_compute_context_state = static_compute_context_state,
965 .packed_ccb_size = packed_ccb_size,
966 .context_flags = context_flags,
967 .robustness_address = robustness_address,
968 .max_deadline_ms = max_deadline_ms,
969 };
970
971 struct pvr_srv_rgx_create_compute_context_ret ret = {
972 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
973 };
974
975 int result;
976
977 result = pvr_srv_bridge_call(fd,
978 PVR_SRV_BRIDGE_RGXCMP,
979 PVR_SRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT,
980 &cmd,
981 sizeof(cmd),
982 &ret,
983 sizeof(ret));
984 if (result || ret.error != PVR_SRV_OK) {
985 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
986 "PVR_SRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT",
987 ret);
988 }
989
990 *compute_context_out = ret.compute_context;
991
992 return VK_SUCCESS;
993 }
994
pvr_srv_rgx_destroy_compute_context(int fd,void * compute_context)995 void pvr_srv_rgx_destroy_compute_context(int fd, void *compute_context)
996 {
997 struct pvr_srv_rgx_destroy_compute_context_cmd cmd = {
998 .compute_context = compute_context,
999 };
1000
1001 struct pvr_srv_rgx_destroy_compute_context_ret ret = {
1002 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1003 };
1004
1005 int result;
1006
1007 result = pvr_srv_bridge_call(fd,
1008 PVR_SRV_BRIDGE_RGXCMP,
1009 PVR_SRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT,
1010 &cmd,
1011 sizeof(cmd),
1012 &ret,
1013 sizeof(ret));
1014 if (result || ret.error != PVR_SRV_OK) {
1015 vk_bridge_err(VK_ERROR_UNKNOWN,
1016 "PVR_SRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT",
1017 ret);
1018 }
1019 }
1020
pvr_srv_rgx_kick_compute2(int fd,void * compute_context,uint32_t client_update_count,void ** client_update_ufo_sync_prim_block,uint32_t * client_update_offset,uint32_t * client_update_value,int32_t check_fence,int32_t update_timeline,uint32_t cmd_size,uint8_t * cdm_cmd,uint32_t ext_job_ref,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,uint32_t num_work_groups,uint32_t num_work_items,uint32_t pdump_flags,uint64_t max_deadline_us,char * update_fence_name,int32_t * const update_fence_out)1021 VkResult pvr_srv_rgx_kick_compute2(int fd,
1022 void *compute_context,
1023 uint32_t client_update_count,
1024 void **client_update_ufo_sync_prim_block,
1025 uint32_t *client_update_offset,
1026 uint32_t *client_update_value,
1027 int32_t check_fence,
1028 int32_t update_timeline,
1029 uint32_t cmd_size,
1030 uint8_t *cdm_cmd,
1031 uint32_t ext_job_ref,
1032 uint32_t sync_pmr_count,
1033 uint32_t *sync_pmr_flags,
1034 void **sync_pmrs,
1035 uint32_t num_work_groups,
1036 uint32_t num_work_items,
1037 uint32_t pdump_flags,
1038 uint64_t max_deadline_us,
1039 char *update_fence_name,
1040 int32_t *const update_fence_out)
1041 {
1042 struct pvr_srv_rgx_kick_cdm2_cmd cmd = {
1043 .max_deadline_us = max_deadline_us,
1044 .compute_context = compute_context,
1045 .client_update_offset = client_update_offset,
1046 .client_update_value = client_update_value,
1047 .sync_pmr_flags = sync_pmr_flags,
1048 .cdm_cmd = cdm_cmd,
1049 .update_fence_name = update_fence_name,
1050 .client_update_ufo_sync_prim_block = client_update_ufo_sync_prim_block,
1051 .sync_pmrs = sync_pmrs,
1052 .check_fence = check_fence,
1053 .update_timeline = update_timeline,
1054 .client_update_count = client_update_count,
1055 .cmd_size = cmd_size,
1056 .ext_job_ref = ext_job_ref,
1057 .num_work_groups = num_work_groups,
1058 .num_work_items = num_work_items,
1059 .pdump_flags = pdump_flags,
1060 .sync_pmr_count = sync_pmr_count,
1061 };
1062
1063 struct pvr_srv_rgx_kick_cdm2_ret ret = {
1064 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1065 };
1066
1067 int result;
1068
1069 result = pvr_srv_bridge_call(fd,
1070 PVR_SRV_BRIDGE_RGXCMP,
1071 PVR_SRV_BRIDGE_RGXCMP_RGXKICKCDM2,
1072 &cmd,
1073 sizeof(cmd),
1074 &ret,
1075 sizeof(ret));
1076 if (result || ret.error != PVR_SRV_OK) {
1077 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1078 "PVR_SRV_BRIDGE_RGXCMP_RGXKICKCDM2",
1079 ret);
1080 }
1081
1082 *update_fence_out = ret.update_fence;
1083
1084 return VK_SUCCESS;
1085 }
1086
1087 VkResult
pvr_srv_rgx_create_hwrt_dataset(int fd,uint64_t flipped_multi_sample_ctl,uint64_t multi_sample_ctl,const pvr_dev_addr_t * macrotile_array_dev_addrs,const pvr_dev_addr_t * pm_mlist_dev_addrs,const pvr_dev_addr_t * rtc_dev_addrs,const pvr_dev_addr_t * rgn_header_dev_addrs,const pvr_dev_addr_t * tail_ptrs_dev_addrs,const pvr_dev_addr_t * vheap_table_dev_adds,void ** free_lists,uint32_t isp_merge_lower_x,uint32_t isp_merge_lower_y,uint32_t isp_merge_scale_x,uint32_t isp_merge_scale_y,uint32_t isp_merge_upper_x,uint32_t isp_merge_upper_y,uint32_t isp_mtile_size,uint32_t mtile_stride,uint32_t ppp_screen,uint32_t rgn_header_size,uint32_t te_aa,uint32_t te_mtile1,uint32_t te_mtile2,uint32_t te_screen,uint32_t tpc_size,uint32_t tpc_stride,uint16_t max_rts,void ** hwrt_dataset_out)1088 pvr_srv_rgx_create_hwrt_dataset(int fd,
1089 uint64_t flipped_multi_sample_ctl,
1090 uint64_t multi_sample_ctl,
1091 const pvr_dev_addr_t *macrotile_array_dev_addrs,
1092 const pvr_dev_addr_t *pm_mlist_dev_addrs,
1093 const pvr_dev_addr_t *rtc_dev_addrs,
1094 const pvr_dev_addr_t *rgn_header_dev_addrs,
1095 const pvr_dev_addr_t *tail_ptrs_dev_addrs,
1096 const pvr_dev_addr_t *vheap_table_dev_adds,
1097 void **free_lists,
1098 uint32_t isp_merge_lower_x,
1099 uint32_t isp_merge_lower_y,
1100 uint32_t isp_merge_scale_x,
1101 uint32_t isp_merge_scale_y,
1102 uint32_t isp_merge_upper_x,
1103 uint32_t isp_merge_upper_y,
1104 uint32_t isp_mtile_size,
1105 uint32_t mtile_stride,
1106 uint32_t ppp_screen,
1107 uint32_t rgn_header_size,
1108 uint32_t te_aa,
1109 uint32_t te_mtile1,
1110 uint32_t te_mtile2,
1111 uint32_t te_screen,
1112 uint32_t tpc_size,
1113 uint32_t tpc_stride,
1114 uint16_t max_rts,
1115 void **hwrt_dataset_out)
1116 {
1117 /* Note that hwrt_dataset_out is passed in the cmd struct which the kernel
1118 * writes to. There's also a hwrt_dataset in the ret struct but we're not
1119 * going to use it since it's the same.
1120 */
1121 struct pvr_srv_rgx_create_hwrt_dataset_cmd cmd = {
1122 .flipped_multi_sample_ctl = flipped_multi_sample_ctl,
1123 .multi_sample_ctl = multi_sample_ctl,
1124 .macrotile_array_dev_addrs = macrotile_array_dev_addrs,
1125 .pm_mlist_dev_addrs = pm_mlist_dev_addrs,
1126 .rtc_dev_addrs = rtc_dev_addrs,
1127 .rgn_header_dev_addrs = rgn_header_dev_addrs,
1128 .tail_ptrs_dev_addrs = tail_ptrs_dev_addrs,
1129 .vheap_table_dev_adds = vheap_table_dev_adds,
1130 .hwrt_dataset = hwrt_dataset_out,
1131 .free_lists = free_lists,
1132 .isp_merge_lower_x = isp_merge_lower_x,
1133 .isp_merge_lower_y = isp_merge_lower_y,
1134 .isp_merge_scale_x = isp_merge_scale_x,
1135 .isp_merge_scale_y = isp_merge_scale_y,
1136 .isp_merge_upper_x = isp_merge_upper_x,
1137 .isp_merge_upper_y = isp_merge_upper_y,
1138 .isp_mtile_size = isp_mtile_size,
1139 .mtile_stride = mtile_stride,
1140 .ppp_screen = ppp_screen,
1141 .rgn_header_size = rgn_header_size,
1142 .te_aa = te_aa,
1143 .te_mtile1 = te_mtile1,
1144 .te_mtile2 = te_mtile2,
1145 .te_screen = te_screen,
1146 .tpc_size = tpc_size,
1147 .tpc_stride = tpc_stride,
1148 .max_rts = max_rts,
1149 };
1150
1151 struct pvr_srv_rgx_create_hwrt_dataset_ret ret = {
1152 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1153 };
1154
1155 int result;
1156
1157 result = pvr_srv_bridge_call(fd,
1158 PVR_SRV_BRIDGE_RGXTA3D,
1159 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET,
1160 &cmd,
1161 sizeof(cmd),
1162 &ret,
1163 sizeof(ret));
1164 if (result || ret.error != PVR_SRV_OK) {
1165 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1166 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET",
1167 ret);
1168 }
1169
1170 VG(VALGRIND_MAKE_MEM_DEFINED(cmd.hwrt_dataset,
1171 sizeof(*cmd.hwrt_dataset) *
1172 ROGUE_FWIF_NUM_RTDATAS));
1173
1174 return VK_SUCCESS;
1175 }
1176
pvr_srv_rgx_destroy_hwrt_dataset(int fd,void * hwrt_dataset)1177 void pvr_srv_rgx_destroy_hwrt_dataset(int fd, void *hwrt_dataset)
1178 {
1179 struct pvr_srv_rgx_destroy_hwrt_dataset_cmd cmd = {
1180 .hwrt_dataset = hwrt_dataset,
1181 };
1182
1183 struct pvr_srv_rgx_destroy_hwrt_dataset_ret ret = {
1184 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1185 };
1186
1187 int result;
1188
1189 result = pvr_srv_bridge_call(fd,
1190 PVR_SRV_BRIDGE_RGXTA3D,
1191 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET,
1192 &cmd,
1193 sizeof(cmd),
1194 &ret,
1195 sizeof(ret));
1196 if (result || ret.error != PVR_SRV_OK) {
1197 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1198 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET",
1199 ret);
1200 }
1201 }
1202
pvr_srv_rgx_create_free_list(int fd,void * mem_ctx_priv_data,uint32_t max_free_list_pages,uint32_t init_free_list_pages,uint32_t grow_free_list_pages,uint32_t grow_param_threshold,void * global_free_list,enum pvr_srv_bool free_list_check,pvr_dev_addr_t free_list_dev_addr,void * free_list_pmr,uint64_t pmr_offset,void ** const cleanup_cookie_out)1203 VkResult pvr_srv_rgx_create_free_list(int fd,
1204 void *mem_ctx_priv_data,
1205 uint32_t max_free_list_pages,
1206 uint32_t init_free_list_pages,
1207 uint32_t grow_free_list_pages,
1208 uint32_t grow_param_threshold,
1209 void *global_free_list,
1210 enum pvr_srv_bool free_list_check,
1211 pvr_dev_addr_t free_list_dev_addr,
1212 void *free_list_pmr,
1213 uint64_t pmr_offset,
1214 void **const cleanup_cookie_out)
1215 {
1216 struct pvr_srv_rgx_create_free_list_cmd cmd = {
1217 .free_list_dev_addr = free_list_dev_addr,
1218 .pmr_offset = pmr_offset,
1219 .mem_ctx_priv_data = mem_ctx_priv_data,
1220 .free_list_pmr = free_list_pmr,
1221 .global_free_list = global_free_list,
1222 .free_list_check = free_list_check,
1223 .grow_free_list_pages = grow_free_list_pages,
1224 .grow_param_threshold = grow_param_threshold,
1225 .init_free_list_pages = init_free_list_pages,
1226 .max_free_list_pages = max_free_list_pages,
1227 };
1228
1229 struct pvr_srv_rgx_create_free_list_ret ret = {
1230 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1231 };
1232
1233 int result;
1234
1235 result = pvr_srv_bridge_call(fd,
1236 PVR_SRV_BRIDGE_RGXTA3D,
1237 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST,
1238 &cmd,
1239 sizeof(cmd),
1240 &ret,
1241 sizeof(ret));
1242 if (result || ret.error != PVR_SRV_OK) {
1243 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1244 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST",
1245 ret);
1246 }
1247
1248 *cleanup_cookie_out = ret.cleanup_cookie;
1249
1250 return VK_SUCCESS;
1251 }
1252
pvr_srv_rgx_destroy_free_list(int fd,void * cleanup_cookie)1253 void pvr_srv_rgx_destroy_free_list(int fd, void *cleanup_cookie)
1254 {
1255 struct pvr_srv_rgx_destroy_free_list_cmd cmd = {
1256 .cleanup_cookie = cleanup_cookie,
1257 };
1258
1259 struct pvr_srv_rgx_destroy_free_list_ret ret = {
1260 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1261 };
1262
1263 int result;
1264
1265 /* FIXME: Do we want to propagate the retry error up the call chain so that
1266 * we can do something better than busy wait or is the expectation that we
1267 * should never get into this situation because the driver doesn't attempt
1268 * to free any resources while they're in use?
1269 */
1270 do {
1271 result = pvr_srv_bridge_call(fd,
1272 PVR_SRV_BRIDGE_RGXTA3D,
1273 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST,
1274 &cmd,
1275 sizeof(cmd),
1276 &ret,
1277 sizeof(ret));
1278 } while (result == PVR_SRV_ERROR_RETRY);
1279
1280 if (result || ret.error != PVR_SRV_OK) {
1281 vk_bridge_err(VK_ERROR_UNKNOWN,
1282 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST",
1283 ret);
1284 }
1285 }
1286
1287 VkResult
pvr_srv_rgx_create_render_context(int fd,uint32_t priority,pvr_dev_addr_t vdm_callstack_addr,uint32_t call_stack_depth,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t static_render_context_state_size,uint8_t * static_render_context_state,uint32_t packed_ccb_size,uint32_t context_flags,uint64_t robustness_address,uint32_t max_geom_deadline_ms,uint32_t max_frag_deadline_ms,void ** const render_context_out)1288 pvr_srv_rgx_create_render_context(int fd,
1289 uint32_t priority,
1290 pvr_dev_addr_t vdm_callstack_addr,
1291 uint32_t call_stack_depth,
1292 uint32_t reset_framework_cmd_size,
1293 uint8_t *reset_framework_cmd,
1294 void *priv_data,
1295 uint32_t static_render_context_state_size,
1296 uint8_t *static_render_context_state,
1297 uint32_t packed_ccb_size,
1298 uint32_t context_flags,
1299 uint64_t robustness_address,
1300 uint32_t max_geom_deadline_ms,
1301 uint32_t max_frag_deadline_ms,
1302 void **const render_context_out)
1303 {
1304 struct pvr_srv_rgx_create_render_context_cmd cmd = {
1305 .priority = priority,
1306 .vdm_callstack_addr = vdm_callstack_addr,
1307 .call_stack_depth = call_stack_depth,
1308 .reset_framework_cmd_size = reset_framework_cmd_size,
1309 .reset_framework_cmd = reset_framework_cmd,
1310 .priv_data = priv_data,
1311 .static_render_context_state_size = static_render_context_state_size,
1312 .static_render_context_state = static_render_context_state,
1313 .packed_ccb_size = packed_ccb_size,
1314 .context_flags = context_flags,
1315 .robustness_address = robustness_address,
1316 .max_ta_deadline_ms = max_geom_deadline_ms,
1317 .max_3d_deadline_ms = max_frag_deadline_ms,
1318 };
1319
1320 struct pvr_srv_rgx_create_render_context_ret ret = {
1321 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1322 };
1323
1324 int result;
1325
1326 result = pvr_srv_bridge_call(fd,
1327 PVR_SRV_BRIDGE_RGXTA3D,
1328 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT,
1329 &cmd,
1330 sizeof(cmd),
1331 &ret,
1332 sizeof(ret));
1333 if (result || ret.error != PVR_SRV_OK) {
1334 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1335 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT",
1336 ret);
1337 }
1338
1339 *render_context_out = ret.render_context;
1340
1341 return VK_SUCCESS;
1342 }
1343
pvr_srv_rgx_destroy_render_context(int fd,void * render_context)1344 void pvr_srv_rgx_destroy_render_context(int fd, void *render_context)
1345 {
1346 struct pvr_srv_rgx_destroy_render_context_cmd cmd = {
1347 .render_context = render_context,
1348 };
1349
1350 struct pvr_srv_rgx_destroy_render_context_ret ret = {
1351 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1352 };
1353
1354 int result;
1355
1356 result = pvr_srv_bridge_call(fd,
1357 PVR_SRV_BRIDGE_RGXTA3D,
1358 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT,
1359 &cmd,
1360 sizeof(cmd),
1361 &ret,
1362 sizeof(ret));
1363 if (result || ret.error != PVR_SRV_OK) {
1364 vk_bridge_err(VK_ERROR_UNKNOWN,
1365 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTORYRENDERCONTEXT",
1366 ret);
1367 }
1368 }
1369
pvr_srv_rgx_kick_render2(int fd,void * render_ctx,uint32_t client_geom_fence_count,void ** client_geom_fence_sync_prim_block,uint32_t * client_geom_fence_sync_offset,uint32_t * client_geom_fence_value,uint32_t client_geom_update_count,void ** client_geom_update_sync_prim_block,uint32_t * client_geom_update_sync_offset,uint32_t * client_geom_update_value,uint32_t client_frag_update_count,void ** client_frag_update_sync_prim_block,uint32_t * client_frag_update_sync_offset,uint32_t * client_frag_update_value,void * pr_fence_ufo_sync_prim_block,uint32_t client_pr_fence_ufo_sync_offset,uint32_t client_pr_fence_value,int32_t check_fence,int32_t update_timeline,int32_t * const update_fence_out,char * update_fence_name,int32_t check_fence_frag,int32_t update_timeline_frag,int32_t * const update_fence_frag_out,char * update_fence_name_frag,uint32_t cmd_geom_size,uint8_t * cmd_geom,uint32_t cmd_frag_pr_size,uint8_t * cmd_frag_pr,uint32_t cmd_frag_size,uint8_t * cmd_frag,uint32_t ext_job_ref,bool kick_geom,bool kick_pr,bool kick_frag,bool abort,uint32_t pdump_flags,void * hw_rt_dataset,void * zs_buffer,void * msaa_scratch_buffer,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,uint32_t render_target_size,uint32_t num_draw_calls,uint32_t num_indices,uint32_t num_mrts,uint64_t deadline)1370 VkResult pvr_srv_rgx_kick_render2(int fd,
1371 void *render_ctx,
1372 uint32_t client_geom_fence_count,
1373 void **client_geom_fence_sync_prim_block,
1374 uint32_t *client_geom_fence_sync_offset,
1375 uint32_t *client_geom_fence_value,
1376 uint32_t client_geom_update_count,
1377 void **client_geom_update_sync_prim_block,
1378 uint32_t *client_geom_update_sync_offset,
1379 uint32_t *client_geom_update_value,
1380 uint32_t client_frag_update_count,
1381 void **client_frag_update_sync_prim_block,
1382 uint32_t *client_frag_update_sync_offset,
1383 uint32_t *client_frag_update_value,
1384 void *pr_fence_ufo_sync_prim_block,
1385 uint32_t client_pr_fence_ufo_sync_offset,
1386 uint32_t client_pr_fence_value,
1387 int32_t check_fence,
1388 int32_t update_timeline,
1389 int32_t *const update_fence_out,
1390 char *update_fence_name,
1391 int32_t check_fence_frag,
1392 int32_t update_timeline_frag,
1393 int32_t *const update_fence_frag_out,
1394 char *update_fence_name_frag,
1395 uint32_t cmd_geom_size,
1396 uint8_t *cmd_geom,
1397 uint32_t cmd_frag_pr_size,
1398 uint8_t *cmd_frag_pr,
1399 uint32_t cmd_frag_size,
1400 uint8_t *cmd_frag,
1401 uint32_t ext_job_ref,
1402 bool kick_geom,
1403 bool kick_pr,
1404 bool kick_frag,
1405 bool abort,
1406 uint32_t pdump_flags,
1407 void *hw_rt_dataset,
1408 void *zs_buffer,
1409 void *msaa_scratch_buffer,
1410 uint32_t sync_pmr_count,
1411 uint32_t *sync_pmr_flags,
1412 void **sync_pmrs,
1413 uint32_t render_target_size,
1414 uint32_t num_draw_calls,
1415 uint32_t num_indices,
1416 uint32_t num_mrts,
1417 uint64_t deadline)
1418 {
1419 struct pvr_srv_rgx_kick_ta3d2_cmd cmd = {
1420 .deadline = deadline,
1421 .hw_rt_dataset = hw_rt_dataset,
1422 .msaa_scratch_buffer = msaa_scratch_buffer,
1423 .pr_fence_ufo_sync_prim_block = pr_fence_ufo_sync_prim_block,
1424 .render_ctx = render_ctx,
1425 .zs_buffer = zs_buffer,
1426 .client_3d_update_sync_offset = client_frag_update_sync_offset,
1427 .client_3d_update_value = client_frag_update_value,
1428 .client_ta_fence_sync_offset = client_geom_fence_sync_offset,
1429 .client_ta_fence_value = client_geom_fence_value,
1430 .client_ta_update_sync_offset = client_geom_update_sync_offset,
1431 .client_ta_update_value = client_geom_update_value,
1432 .sync_pmr_flags = sync_pmr_flags,
1433 .cmd_3d = cmd_frag,
1434 .cmd_3d_pr = cmd_frag_pr,
1435 .cmd_ta = cmd_geom,
1436 .update_fence_name = update_fence_name,
1437 .update_fence_name_3d = update_fence_name_frag,
1438 .client_3d_update_sync_prim_block = client_frag_update_sync_prim_block,
1439 .client_ta_fence_sync_prim_block = client_geom_fence_sync_prim_block,
1440 .client_ta_update_sync_prim_block = client_geom_update_sync_prim_block,
1441 .sync_pmrs = sync_pmrs,
1442 .abort = abort,
1443 .kick_3d = kick_frag,
1444 .kick_pr = kick_pr,
1445 .kick_ta = kick_geom,
1446 .check_fence = check_fence,
1447 .check_fence_3d = check_fence_frag,
1448 .update_timeline = update_timeline,
1449 .update_timeline_3d = update_timeline_frag,
1450 .cmd_3d_size = cmd_frag_size,
1451 .cmd_3d_pr_size = cmd_frag_pr_size,
1452 .client_3d_update_count = client_frag_update_count,
1453 .client_ta_fence_count = client_geom_fence_count,
1454 .client_ta_update_count = client_geom_update_count,
1455 .ext_job_ref = ext_job_ref,
1456 .client_pr_fence_ufo_sync_offset = client_pr_fence_ufo_sync_offset,
1457 .client_pr_fence_value = client_pr_fence_value,
1458 .num_draw_calls = num_draw_calls,
1459 .num_indices = num_indices,
1460 .num_mrts = num_mrts,
1461 .pdump_flags = pdump_flags,
1462 .render_target_size = render_target_size,
1463 .sync_pmr_count = sync_pmr_count,
1464 .cmd_ta_size = cmd_geom_size,
1465 };
1466
1467 struct pvr_srv_rgx_kick_ta3d2_ret ret = {
1468 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1469 .update_fence = -1,
1470 .update_fence_3d = -1,
1471 };
1472
1473 int result;
1474
1475 result = pvr_srv_bridge_call(fd,
1476 PVR_SRV_BRIDGE_RGXTA3D,
1477 PVR_SRV_BRIDGE_RGXTA3D_RGXKICKTA3D2,
1478 &cmd,
1479 sizeof(cmd),
1480 &ret,
1481 sizeof(ret));
1482 if (result || ret.error != PVR_SRV_OK) {
1483 /* There is no 'retry' VkResult, so treat it as VK_NOT_READY instead. */
1484 if (result == PVR_SRV_ERROR_RETRY)
1485 return VK_NOT_READY;
1486
1487 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1488 "PVR_SRV_BRIDGE_RGXTA3D_RGXKICKTA3D2",
1489 ret);
1490 }
1491
1492 *update_fence_out = ret.update_fence;
1493 *update_fence_frag_out = ret.update_fence_3d;
1494
1495 return VK_SUCCESS;
1496 }
1497