/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#ifndef VN_QUEUE_H
#define VN_QUEUE_H

#include "vn_common.h"

/* Driver-side state for a VkQueue. */
struct vn_queue {
   struct vn_queue_base base;

   /* only used if renderer supports multiple timelines */
   uint32_t ring_idx;

   /* wait fence used for vn_QueueWaitIdle */
   VkFence wait_fence;

   /* semaphore for gluing vkQueueSubmit feedback commands to
    * vkQueueBindSparse
    */
   VkSemaphore sparse_semaphore;
   /* monotonically tracks use of sparse_semaphore across bind submissions */
   uint64_t sparse_semaphore_counter;

   /* for vn_queue_submission storage */
   struct vn_cached_storage storage;
};
VK_DEFINE_HANDLE_CASTS(vn_queue, base.base.base, VkQueue, VK_OBJECT_TYPE_QUEUE)

/* Kind of payload currently backing a fence/semaphore sync object. */
enum vn_sync_type {
   /* no payload */
   VN_SYNC_TYPE_INVALID,

   /* device object */
   VN_SYNC_TYPE_DEVICE_ONLY,

   /* payload is an imported sync file */
   VN_SYNC_TYPE_IMPORTED_SYNC_FD,
};

/* A single payload slot; fences and semaphores each carry a permanent and a
 * temporary one of these (see struct vn_fence / struct vn_semaphore).
 */
struct vn_sync_payload {
   enum vn_sync_type type;

   /* If type is VN_SYNC_TYPE_IMPORTED_SYNC_FD, fd is a sync file. */
   int fd;
};

/* For external fences and external semaphores submitted to be signaled. The
 * Vulkan spec guarantees those external syncs are on permanent payload.
 */
struct vn_sync_payload_external {
   /* ring_idx of the last queue submission */
   uint32_t ring_idx;
   /* valid when NO_ASYNC_QUEUE_SUBMIT perf option is not used */
   bool ring_seqno_valid;
   /* ring seqno of the last queue submission */
   uint32_t ring_seqno;
};

struct vn_feedback_slot;

/* Driver-side state for a VkFence. */
struct vn_fence {
   struct vn_object_base base;

   /* points at either permanent or temporary below, depending on whether a
    * temporary payload is currently imported
    */
   struct vn_sync_payload *payload;

   struct vn_sync_payload permanent;
   struct vn_sync_payload temporary;

   struct {
      /* non-NULL if VN_PERF_NO_FENCE_FEEDBACK is disabled */
      struct vn_feedback_slot *slot;
      /* NOTE(review): presumably one pre-recorded feedback command buffer per
       * queue family — confirm against where this array is allocated.
       */
      VkCommandBuffer *commands;
   } feedback;

   bool is_external;
   struct vn_sync_payload_external external_payload;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_fence,
                               base.base,
                               VkFence,
                               VK_OBJECT_TYPE_FENCE)

/* Driver-side state for a VkSemaphore (binary or timeline). */
struct vn_semaphore {
   struct vn_object_base base;

   VkSemaphoreType type;

   /* points at either permanent or temporary below, depending on whether a
    * temporary payload is currently imported
    */
   struct vn_sync_payload *payload;

   struct vn_sync_payload permanent;
   struct vn_sync_payload temporary;

   struct {
      /* non-NULL if VN_PERF_NO_SEMAPHORE_FEEDBACK is disabled */
      struct vn_feedback_slot *slot;

      /* Lists of allocated vn_semaphore_feedback_cmd
       *
       * On submission prepare, sfb cmd is cache allocated from the free list
       * and is moved to the pending list after initialization.
       *
       * On submission cleanup, sfb cmds of the owner semaphores are checked
       * and cached to the free list if they have been "signaled", which is
       * proxied via the src slot value having been reached.
       */
      struct list_head pending_cmds;
      struct list_head free_cmds;

      /* Lock for accessing free/pending sfb cmds */
      simple_mtx_t cmd_mtx;

      /* Cached counter value to track if an async sem wait call is needed */
      uint64_t signaled_counter;

      /* Lock for checking if an async sem wait call is needed based on
       * the current counter value and signaled_counter to ensure async
       * wait order across threads.
       */
      simple_mtx_t async_wait_mtx;
   } feedback;

   bool is_external;
   struct vn_sync_payload_external external_payload;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_semaphore,
                               base.base,
                               VkSemaphore,
                               VK_OBJECT_TYPE_SEMAPHORE)

/* Driver-side state for a VkEvent. */
struct vn_event {
   struct vn_object_base base;

   /* non-NULL if below are satisfied:
    * - event is created without VK_EVENT_CREATE_DEVICE_ONLY_BIT
    * - VN_PERF_NO_EVENT_FEEDBACK is disabled
    */
   struct vn_feedback_slot *feedback_slot;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_event,
                               base.base,
                               VkEvent,
                               VK_OBJECT_TYPE_EVENT)

/* Signals the fence on behalf of WSI (e.g. vkAcquireNextImageKHR). */
void
vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence);

/* Signals the semaphore on behalf of WSI (e.g. vkAcquireNextImageKHR). */
void
vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem);

#endif /* VN_QUEUE_H */