/*
 * Copyright 2022 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_FEEDBACK_H
#define VN_FEEDBACK_H

#include "vn_common.h"

struct vn_feedback_pool {
   /* single lock for simplicity though free_slots can use another */
   simple_mtx_t mutex;

   struct vn_device *dev;
   const VkAllocationCallbacks *alloc;

   /* size in bytes of the feedback buffer */
   uint32_t size;
   /* size in bytes used of the active feedback buffer */
   uint32_t used;
   /* alignment in bytes for slot suballocation from the feedback buffer */
   uint32_t alignment;

   /* first entry is the active feedback buffer */
   struct list_head fb_bufs;

   /* cache for returned feedback slots */
   struct list_head free_slots;
};

enum vn_feedback_type {
   VN_FEEDBACK_TYPE_FENCE = 0x1,
   VN_FEEDBACK_TYPE_SEMAPHORE = 0x2,
   VN_FEEDBACK_TYPE_EVENT = 0x4,
   VN_FEEDBACK_TYPE_QUERY = 0x8,
};

struct vn_feedback_slot {
   enum vn_feedback_type type;
   uint32_t offset;
   VkBuffer buf_handle;

   union {
      void *data;
      VkResult *status;
      uint64_t *counter;
   };

   struct list_head head;
};

struct vn_feedback_cmd_pool {
   simple_mtx_t mutex;

   VkCommandPool pool_handle;
   struct list_head free_qfb_cmds;
};

/* coherent buffer with bound and mapped memory */
struct vn_feedback_buffer {
   VkBuffer buf_handle;
   VkDeviceMemory mem_handle;
   void *data;

   struct list_head head;
};

struct vn_semaphore_feedback_cmd {
   struct vn_feedback_slot *src_slot;
   VkCommandBuffer *cmd_handles;

   struct list_head head;
};

/* query feedback batch for deferred recording */
struct vn_feedback_query_batch {
   struct vn_query_pool *query_pool;
   uint32_t query;
   uint32_t query_count;
   bool copy;

   struct list_head head;
};

struct vn_query_feedback_cmd {
   struct vn_feedback_cmd_pool *fb_cmd_pool;
   struct vn_command_buffer *cmd;

   struct list_head head;
};

VkResult
vn_feedback_pool_init(struct vn_device *dev,
                      struct vn_feedback_pool *pool,
                      uint32_t size,
                      const VkAllocationCallbacks *alloc);

void
vn_feedback_pool_fini(struct vn_feedback_pool *pool);

struct vn_feedback_slot *
vn_feedback_pool_alloc(struct vn_feedback_pool *pool,
                       enum vn_feedback_type type);

void
vn_feedback_pool_free(struct vn_feedback_pool *pool,
                      struct vn_feedback_slot *slot);
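
/*
 * Example (illustrative sketch, not part of the driver): a typical slot
 * lifecycle for fence feedback, assuming "dev" and "alloc" come from the
 * caller's device setup.
 *
 *    struct vn_feedback_pool pool;
 *    if (vn_feedback_pool_init(dev, &pool, 4096, alloc) == VK_SUCCESS) {
 *       struct vn_feedback_slot *slot =
 *          vn_feedback_pool_alloc(&pool, VN_FEEDBACK_TYPE_FENCE);
 *       if (slot) {
 *          vn_feedback_reset_status(slot);
 *          // ...a recorded feedback command later stores the fence result...
 *          vn_feedback_pool_free(&pool, slot);
 *       }
 *       vn_feedback_pool_fini(&pool);
 *    }
 */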

static inline VkResult
vn_feedback_get_status(struct vn_feedback_slot *slot)
{
   return *slot->status;
}

static inline void
vn_feedback_reset_status(struct vn_feedback_slot *slot)
{
   assert(slot->type == VN_FEEDBACK_TYPE_FENCE ||
          slot->type == VN_FEEDBACK_TYPE_EVENT);
   *slot->status =
      slot->type == VN_FEEDBACK_TYPE_FENCE ? VK_NOT_READY : VK_EVENT_RESET;
}

static inline void
vn_feedback_set_status(struct vn_feedback_slot *slot, VkResult status)
{
   assert(slot->type == VN_FEEDBACK_TYPE_FENCE ||
          slot->type == VN_FEEDBACK_TYPE_EVENT);
   *slot->status = status;
}

static inline uint64_t
vn_feedback_get_counter(struct vn_feedback_slot *slot)
{
   assert(slot->type == VN_FEEDBACK_TYPE_SEMAPHORE);
   return *slot->counter;
}

static inline void
vn_feedback_set_counter(struct vn_feedback_slot *slot, uint64_t counter)
{
   assert(slot->type == VN_FEEDBACK_TYPE_SEMAPHORE);
   *slot->counter = counter;
}
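
/*
 * Example (illustrative sketch): host-side use of the accessors above for a
 * timeline semaphore slot; "slot" is assumed to have been allocated with
 * VN_FEEDBACK_TYPE_SEMAPHORE and "signal_value" is a caller-chosen value.
 *
 *    vn_feedback_set_counter(slot, 0);
 *    // ...a recorded feedback command later stores the signaled value...
 *    if (vn_feedback_get_counter(slot) >= signal_value) {
 *       // the feedback payload has reached the waited value
 *    }
 */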

VkResult
vn_feedback_buffer_create(struct vn_device *dev,
                          uint32_t size,
                          const VkAllocationCallbacks *alloc,
                          struct vn_feedback_buffer **out_fb_buf);

void
vn_feedback_buffer_destroy(struct vn_device *dev,
                           struct vn_feedback_buffer *fb_buf,
                           const VkAllocationCallbacks *alloc);
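
/*
 * Example (illustrative sketch): creating a standalone coherent feedback
 * buffer; "dev" and "alloc" are assumed to come from the caller.
 *
 *    struct vn_feedback_buffer *fb_buf;
 *    VkResult result = vn_feedback_buffer_create(dev, 4096, alloc, &fb_buf);
 *    if (result == VK_SUCCESS) {
 *       // fb_buf->data is the host mapping backing fb_buf->buf_handle
 *       vn_feedback_buffer_destroy(dev, fb_buf, alloc);
 *    }
 */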

void
vn_event_feedback_cmd_record(VkCommandBuffer cmd_handle,
                             VkEvent ev_handle,
                             VkPipelineStageFlags2 src_stage_mask,
                             VkResult status,
                             bool sync2);
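
/*
 * Example (illustrative sketch): recording event feedback when an event is
 * set; "cmd_handle" and "ev_handle" are assumed to come from the caller and
 * the stage mask is just one plausible choice.
 *
 *    vn_event_feedback_cmd_record(cmd_handle, ev_handle,
 *                                 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
 *                                 VK_EVENT_SET, true);
 */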

struct vn_semaphore_feedback_cmd *
vn_semaphore_feedback_cmd_alloc(struct vn_device *dev,
                                struct vn_feedback_slot *dst_slot);

void
vn_semaphore_feedback_cmd_free(struct vn_device *dev,
                               struct vn_semaphore_feedback_cmd *sfb_cmd);

VkResult
vn_feedback_query_cmd_alloc(VkDevice dev_handle,
                            struct vn_feedback_cmd_pool *fb_cmd_pool,
                            struct vn_query_feedback_cmd **out_qfb_cmd);

void
vn_feedback_query_cmd_free(struct vn_query_feedback_cmd *qfb_cmd);

VkResult
vn_feedback_query_batch_record(VkDevice dev_handle,
                               struct vn_query_feedback_cmd *qfb_cmd,
                               struct list_head *combined_query_batches);
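
/*
 * Example (illustrative sketch): allocating and recording a query feedback
 * command; "dev_handle", "fb_cmd_pool" and "query_batches" (a list of
 * struct vn_feedback_query_batch) are assumed to be owned by the caller.
 *
 *    struct vn_query_feedback_cmd *qfb_cmd;
 *    if (vn_feedback_query_cmd_alloc(dev_handle, fb_cmd_pool, &qfb_cmd) ==
 *        VK_SUCCESS) {
 *       vn_feedback_query_batch_record(dev_handle, qfb_cmd, &query_batches);
 *       // ...submit, then release once the work has completed...
 *       vn_feedback_query_cmd_free(qfb_cmd);
 *    }
 */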

VkResult
vn_feedback_cmd_alloc(VkDevice dev_handle,
                      struct vn_feedback_cmd_pool *fb_cmd_pool,
                      struct vn_feedback_slot *dst_slot,
                      struct vn_feedback_slot *src_slot,
                      VkCommandBuffer *out_cmd_handle);

void
vn_feedback_cmd_free(VkDevice dev_handle,
                     struct vn_feedback_cmd_pool *fb_cmd_pool,
                     VkCommandBuffer cmd_handle);

VkResult
vn_feedback_cmd_pools_init(struct vn_device *dev);

void
vn_feedback_cmd_pools_fini(struct vn_device *dev);

#endif /* VN_FEEDBACK_H */