/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef ZINK_BATCH_H
#define ZINK_BATCH_H

#include <vulkan/vulkan.h>

#include "util/list.h"
#include "util/set.h"
#include "util/u_dynarray.h"

#include "zink_fence.h"

#ifdef __cplusplus
extern "C" {
#endif

struct pipe_reference;

struct zink_buffer_view;
struct zink_context;
struct zink_descriptor_set;
struct zink_image_view;
struct zink_program;
struct zink_render_pass;
struct zink_resource;
struct zink_sampler_view;
struct zink_screen;
struct zink_surface;

/* zink_batch_usage concepts:
 * - batch "usage" is an indicator of when and how a BO was accessed
 * - batch "tracking" is the batch state(s) containing an extra ref for a BO
 *
 * - usage prevents a BO from being mapped while it has pending+conflicting access
 * - usage affects pipeline barrier generation for synchronizing reads and writes
 * - usage MUST be removed before context destruction to avoid crashing during BO
 *   reclamation in the suballocator
 *
 * - tracking prevents a BO from being destroyed early
 * - tracking enables usage to be pruned
 *
 *
 * tracking is added:
 * - any time a BO is used in a "one-off" operation (e.g., blit, index buffer, indirect buffer)
 * - any time a descriptor is unbound
 * - when a buffer is replaced (IFF: resource is bound as a descriptor or usage previously existed)
 *
 * tracking is removed:
 * - in zink_reset_batch_state()
 *
 * usage is added:
 * - any time a BO is used in a "one-off" operation (e.g., blit, index buffer, indirect buffer)
 * - any time a descriptor is bound
 * - any time a descriptor is unbound (IFF: usage previously existed)
 * - for all bound descriptors on the first draw/dispatch after a flush (zink_update_descriptor_refs)
 *
 * usage is removed:
 * - when tracking is removed (IFF: BO usage == tracking, i.e., this is the last batch that a BO was active on)
 */
struct zink_batch_usage {
   uint32_t usage;
   cnd_t flush;
   mtx_t mtx;
   bool unflushed;
};
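
/* A minimal sketch of how the usage helpers declared below are meant to be
 * combined, assuming some object stores per-access usage pointers (the
 * "example_bo" holder struct and the 'bo'/'bs'/'write' names are hypothetical,
 * not part of this header):
 *
 *    struct example_bo {
 *       struct zink_batch_usage *reads;
 *       struct zink_batch_usage *writes;
 *    };
 *
 *    // record that batch state 'bs' accesses the BO:
 *    zink_batch_usage_set(write ? &bo->writes : &bo->reads, bs);
 *
 *    // when 'bs' is reset, prune any usage still pointing at it:
 *    if (zink_batch_usage_matches(bo->reads, bs))
 *       zink_batch_usage_unset(&bo->reads, bs);
 */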

/* not a real API, don't use */
bool
batch_ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr);

struct zink_batch_state {
   struct zink_fence fence;
   struct zink_batch_state *next;

   struct zink_batch_usage usage;
   struct zink_context *ctx;
   VkCommandPool cmdpool;
   VkCommandBuffer cmdbuf;
   VkCommandBuffer barrier_cmdbuf;
   VkSemaphore signal_semaphore; //external signal semaphore
   struct util_dynarray wait_semaphores; //external wait semaphores
   struct util_dynarray wait_semaphore_stages; //pipeline stage masks for the external wait semaphores

   VkSemaphore present;
   struct zink_resource *swapchain;
   struct util_dynarray acquires;
   struct util_dynarray acquire_flags;
   struct util_dynarray dead_swapchains;

   struct util_queue_fence flush_completed;

   struct set *programs;

   struct set *resources;
   struct set *surfaces;
   struct set *bufferviews;

   struct util_dynarray unref_resources;
   struct util_dynarray bindless_releases[2];

   struct util_dynarray persistent_resources;
   struct util_dynarray zombie_samplers;
   struct util_dynarray dead_framebuffers;

   struct set *active_queries; /* zink_query objects which were active at some point in this batch */

   struct zink_batch_descriptor_data *dd;

   VkDeviceSize resource_size;

   /* this is a monotonic int used to disambiguate internal fences from their tc fence references */
   unsigned submit_count;

   bool is_device_lost;
   bool has_barriers;
};

struct zink_batch {
   struct zink_batch_state *state;

   struct zink_batch_usage *last_batch_usage;
   struct zink_resource *swapchain;

   unsigned work_count;

   bool has_work;
   bool last_was_compute;
   bool in_rp; //renderpass is currently active
};


static inline struct zink_batch_state *
zink_batch_state(struct zink_fence *fence)
{
   return (struct zink_batch_state *)fence;
}

void
zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs);

void
zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs);

void
zink_batch_reset_all(struct zink_context *ctx);

void
zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs);

void
zink_batch_state_clear_resources(struct zink_screen *screen, struct zink_batch_state *bs);

void
zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch);
void
zink_start_batch(struct zink_context *ctx, struct zink_batch *batch);

void
zink_end_batch(struct zink_context *ctx, struct zink_batch *batch);
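
/* A rough sketch of the intended batch lifecycle, under the assumption that a
 * context records one zink_batch at a time (illustrative only, not a spec):
 *
 *    zink_start_batch(ctx, batch);      // grab/reset a batch state, begin recording
 *    // ... record work; reference/track BOs, views, programs ...
 *    zink_end_batch(ctx, batch);        // finish recording and submit
 *    // once the state's fence has signaled:
 *    zink_reset_batch_state(ctx, bs);   // prune tracking + usage, recycle the state
 */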

void
zink_batch_add_wait_semaphore(struct zink_batch *batch, VkSemaphore sem);

void
zink_batch_resource_usage_set(struct zink_batch *batch, struct zink_resource *res, bool write);

void
zink_batch_reference_resource_rw(struct zink_batch *batch,
                                 struct zink_resource *res,
                                 bool write);
void
zink_batch_reference_resource(struct zink_batch *batch, struct zink_resource *res);

void
zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resource *res);

void
zink_batch_reference_sampler_view(struct zink_batch *batch,
                                  struct zink_sampler_view *sv);

void
zink_batch_reference_program(struct zink_batch *batch,
                             struct zink_program *pg);

void
zink_batch_reference_image_view(struct zink_batch *batch,
                                struct zink_image_view *image_view);

void
zink_batch_reference_bufferview(struct zink_batch *batch, struct zink_buffer_view *buffer_view);
void
zink_batch_reference_surface(struct zink_batch *batch, struct zink_surface *surface);

void
debug_describe_zink_batch_state(char *buf, const struct zink_batch_state *ptr);

static inline bool
zink_batch_usage_is_unflushed(const struct zink_batch_usage *u)
{
   return u && u->unflushed;
}

static inline void
zink_batch_usage_unset(struct zink_batch_usage **u, struct zink_batch_state *bs)
{
   (void)p_atomic_cmpxchg((uintptr_t *)u, (uintptr_t)&bs->usage, (uintptr_t)NULL);
}

static inline void
zink_batch_usage_set(struct zink_batch_usage **u, struct zink_batch_state *bs)
{
   *u = &bs->usage;
}

static inline bool
zink_batch_usage_matches(const struct zink_batch_usage *u, const struct zink_batch_state *bs)
{
   return u == &bs->usage;
}

static inline bool
zink_batch_usage_exists(const struct zink_batch_usage *u)
{
   return u && (u->usage || u->unflushed);
}

bool
zink_screen_usage_check_completion(struct zink_screen *screen, const struct zink_batch_usage *u);

bool
zink_batch_usage_check_completion(struct zink_context *ctx, const struct zink_batch_usage *u);

void
zink_batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u);
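
/* A sketch of how these are typically combined before CPU access to a BO,
 * assuming zink_batch_usage_check_completion() returns whether the usage has
 * completed and zink_batch_usage_wait() blocks until it has ('u' is the usage
 * recorded for the conflicting access; the names here are illustrative):
 *
 *    // non-blocking query
 *    bool idle = !zink_batch_usage_exists(u) ||
 *                zink_batch_usage_check_completion(ctx, u);
 *
 *    // blocking wait; an unflushed usage (zink_batch_usage_is_unflushed())
 *    // means the owning batch has not been submitted yet and must be
 *    // flushed before the wait can complete
 *    if (zink_batch_usage_exists(u))
 *       zink_batch_usage_wait(ctx, u);
 */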

#ifdef __cplusplus
}
#endif

#endif /* ZINK_BATCH_H */