1 /*
2 * Copyright 2018 Collabora Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "zink_batch.h"
25 #include "zink_context.h"
26 #include "zink_fence.h"
27
28 #include "zink_resource.h"
29 #include "zink_screen.h"
30
31 #include "util/os_file.h"
32 #include "util/set.h"
33 #include "util/u_memory.h"
34
35 #ifdef _WIN32
36 #include <windows.h>
37 #include <vulkan/vulkan_win32.h>
38 #endif
39
40 static void
destroy_fence(struct zink_screen * screen,struct zink_tc_fence * mfence)41 destroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence)
42 {
43 if (mfence->fence)
44 util_dynarray_delete_unordered(&mfence->fence->mfences, struct zink_tc_fence *, mfence);
45 mfence->fence = NULL;
46 tc_unflushed_batch_token_reference(&mfence->tc_token, NULL);
47 if (mfence->sem)
48 VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
49 FREE(mfence);
50 }
51
52 struct zink_tc_fence *
zink_create_tc_fence(void)53 zink_create_tc_fence(void)
54 {
55 struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence);
56 if (!mfence)
57 return NULL;
58 pipe_reference_init(&mfence->reference, 1);
59 util_queue_fence_init(&mfence->ready);
60 return mfence;
61 }
62
63 struct pipe_fence_handle *
zink_create_tc_fence_for_tc(struct pipe_context * pctx,struct tc_unflushed_batch_token * tc_token)64 zink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token)
65 {
66 struct zink_tc_fence *mfence = zink_create_tc_fence();
67 if (!mfence)
68 return NULL;
69 util_queue_fence_reset(&mfence->ready);
70 tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token);
71 return (struct pipe_fence_handle*)mfence;
72 }
73
74 void
zink_fence_reference(struct zink_screen * screen,struct zink_tc_fence ** ptr,struct zink_tc_fence * mfence)75 zink_fence_reference(struct zink_screen *screen,
76 struct zink_tc_fence **ptr,
77 struct zink_tc_fence *mfence)
78 {
79 if (pipe_reference(&(*ptr)->reference, &mfence->reference))
80 destroy_fence(screen, *ptr);
81
82 *ptr = mfence;
83 }
84
/* pipe_screen::fence_reference vtable entry: unwrap the gallium handles
 * and forward to zink_fence_reference.
 */
static void
fence_reference(struct pipe_screen *pscreen,
                struct pipe_fence_handle **pptr,
                struct pipe_fence_handle *pfence)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_tc_fence **ptr = (struct zink_tc_fence **)pptr;

   zink_fence_reference(screen, ptr, zink_tc_fence(pfence));
}
93
/* Wait for the tc wrapper fence to become ready, i.e. for the driver thread
 * to have processed the flush that owns it.
 *
 * timeout_ns is in/out: on return it has been reduced by the time spent
 * waiting here so the caller can spend the remainder on the real GPU wait.
 * Returns false if the timeout expired before the mfence became ready.
 */
static bool
tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns)
{
   if (!util_queue_fence_is_signalled(&mfence->ready)) {
      /* compute the absolute deadline up front so that time spent in the
       * flush below also counts against the caller's budget
       */
      int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns);
      if (mfence->tc_token) {
         /* Ensure that zink_flush will be called for
          * this mfence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the mfence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0);
      }

      /* this is a tc mfence, so we're just waiting on the queue mfence to complete
       * after being signaled by the real mfence
       */
      if (*timeout_ns == OS_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&mfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
            return false;
      }
      /* deduct the elapsed time from the caller's remaining timeout
       * (skip for 0 — polling — and infinite waits)
       */
      if (*timeout_ns && *timeout_ns != OS_TIMEOUT_INFINITE) {
         int64_t time_ns = os_time_get_nano();
         *timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
      }
   }

   return true;
}
128
129 static bool
fence_wait(struct zink_screen * screen,struct zink_fence * fence,uint64_t timeout_ns)130 fence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
131 {
132 if (screen->device_lost)
133 return true;
134 if (p_atomic_read(&fence->completed))
135 return true;
136
137 assert(fence->batch_id);
138 assert(fence->submitted);
139
140 bool success = zink_screen_timeline_wait(screen, fence->batch_id, timeout_ns);
141
142 if (success) {
143 p_atomic_set(&fence->completed, true);
144 zink_batch_state(fence)->usage.usage = 0;
145 zink_screen_update_last_finished(screen, fence->batch_id);
146 }
147 return success;
148 }
149
/* Core fence-wait implementation behind the gallium fence_finish hook.
 * Returns true once the fence is known complete (device loss and invalid
 * flushes also count as complete), false if the timeout expired first.
 */
static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
                  uint64_t timeout_ns)
{
   pctx = threaded_context_unwrap_sync(pctx);
   struct zink_context *ctx = zink_context(pctx);

   if (screen->device_lost)
      return true;

   /* a flush deferred on this context must be resolved before waiting */
   if (pctx && mfence->deferred_ctx == pctx) {
      if (mfence->fence == ctx->deferred_fence) {
         zink_context(pctx)->bs->has_work = true;
         /* this must be the current batch */
         pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
         /* with a zero timeout the async flush cannot have completed yet */
         if (!timeout_ns)
            return false;
      }
   }

   /* need to ensure the tc mfence has been flushed before we wait
    * (tc_fence_finish also deducts the time it spends from timeout_ns)
    */
   bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns);
   /* the submit thread hasn't finished yet */
   if (!tc_finish)
      return false;
   /* this was an invalid flush, just return completed */
   if (!mfence->fence)
      return true;

   struct zink_fence *fence = mfence->fence;

   unsigned submit_diff = zink_batch_state(mfence->fence)->usage.submit_count - mfence->submit_count;
   /* this batch is known to have finished because it has been submitted more than 1 time
    * since the tc fence last saw it
    */
   if (submit_diff > 1)
      return true;

   /* - if fence is submitted, batch_id is nonzero and can be checked
    * - if fence is not submitted here, it must be reset; batch_id will be 0 and submitted is false
    * in either case, the fence has finished
    */
   if ((fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id)) ||
       (!fence->submitted && submit_diff))
      return true;

   return fence_wait(screen, fence, timeout_ns);
}
198
199 static bool
fence_finish(struct pipe_screen * pscreen,struct pipe_context * pctx,struct pipe_fence_handle * pfence,uint64_t timeout_ns)200 fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
201 struct pipe_fence_handle *pfence, uint64_t timeout_ns)
202 {
203 return zink_fence_finish(zink_screen(pscreen), pctx, zink_tc_fence(pfence),
204 timeout_ns);
205 }
206
207 static int
fence_get_fd(struct pipe_screen * pscreen,struct pipe_fence_handle * pfence)208 fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *pfence)
209 {
210 struct zink_screen *screen = zink_screen(pscreen);
211 if (screen->device_lost)
212 return -1;
213
214 struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;
215 util_queue_fence_wait(&mfence->ready);
216 if (!mfence->sem)
217 return -1;
218
219 const VkSemaphoreGetFdInfoKHR sgfi = {
220 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
221 .semaphore = mfence->sem,
222 .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
223 };
224 int fd = -1;
225 VkResult result = VKSCR(GetSemaphoreFdKHR)(screen->dev, &sgfi, &fd);
226 if (!zink_screen_handle_vkresult(screen, result)) {
227 mesa_loge("ZINK: vkGetSemaphoreFdKHR failed (%s)", vk_Result_to_str(result));
228 return -1;
229 }
230
231 return fd;
232 }
233
/* Gallium fence_server_signal hook: make the current batch signal the
 * semaphore wrapped by pfence, then flush synchronously so the signal is
 * queued before this function returns.
 */
void
zink_fence_server_signal(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;

   /* only one signal semaphore may be attached to a batch state at a time */
   assert(!ctx->bs->signal_semaphore);
   ctx->bs->signal_semaphore = mfence->sem;
   ctx->bs->has_work = true;
   /* capture the batch state before flushing — NOTE(review): ctx->bs
    * presumably points at a fresh state after the flush, and the wait below
    * must target the state carrying the semaphore; confirm against the
    * zink_context flush path
    */
   struct zink_batch_state *bs = ctx->bs;
   /* this must produce a synchronous flush that completes before the function returns */
   pctx->flush(pctx, NULL, 0);
   if (zink_screen(ctx->base.screen)->threaded_submit)
      util_queue_fence_wait(&bs->flush_completed);
}
249
250 void
zink_fence_server_sync(struct pipe_context * pctx,struct pipe_fence_handle * pfence)251 zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
252 {
253 struct zink_context *ctx = zink_context(pctx);
254 struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;
255
256 if (mfence->deferred_ctx == pctx || !mfence->sem)
257 return;
258
259 mfence->deferred_ctx = pctx;
260 /* this will be applied on the next submit */
261 VkPipelineStageFlags flag = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
262 util_dynarray_append(&ctx->bs->wait_semaphores, VkSemaphore, mfence->sem);
263 util_dynarray_append(&ctx->bs->wait_semaphore_stages, VkPipelineStageFlags, flag);
264 pipe_reference(NULL, &mfence->reference);
265 util_dynarray_append(&ctx->bs->fences, struct zink_tc_fence*, mfence);
266 }
267
/* Gallium create_fence_fd hook: wrap an imported sync fd / drm syncobj fd in
 * a new tc fence. The caller keeps ownership of fd (a duplicate is handed to
 * Vulkan). On any failure *pfence is set to NULL and all partially-created
 * resources are released via the goto-cleanup chain below.
 */
void
zink_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence, int fd, enum pipe_fd_type type)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   VkResult result;

   assert(fd >= 0);

   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      goto fail_tc_fence_create;

   const VkSemaphoreCreateInfo sci = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
   };
   result = VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &mfence->sem);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkCreateSemaphore failed (%s)", vk_Result_to_str(result));
      goto fail_sem_create;
   }

   /* import a duplicate so the caller retains ownership of the original fd */
   int dup_fd = os_dupfd_cloexec(fd);
   if (dup_fd < 0)
      goto fail_fd_dup;

   /* map pipe fd types to the matching Vulkan external-semaphore handle types */
   static const VkExternalSemaphoreHandleTypeFlagBits handle_type[] = {
      [PIPE_FD_TYPE_NATIVE_SYNC] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      [PIPE_FD_TYPE_SYNCOBJ] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
   };
   assert(type < ARRAY_SIZE(handle_type));

   /* sync fd payloads are imported with temporary permanence, per the
    * Vulkan external-semaphore rules; syncobj imports are permanent
    */
   static const VkSemaphoreImportFlagBits flags[] = {
      [PIPE_FD_TYPE_NATIVE_SYNC] = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
      [PIPE_FD_TYPE_SYNCOBJ] = 0,
   };
   assert(type < ARRAY_SIZE(flags));

   const VkImportSemaphoreFdInfoKHR sdi = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
      .semaphore = mfence->sem,
      .flags = flags[type],
      .handleType = handle_type[type],
      .fd = dup_fd,
   };
   result = VKSCR(ImportSemaphoreFdKHR)(screen->dev, &sdi);
   if (!zink_screen_handle_vkresult(screen, result)) {
      mesa_loge("ZINK: vkImportSemaphoreFdKHR failed (%s)", vk_Result_to_str(result));
      goto fail_sem_import;
   }

   *pfence = (struct pipe_fence_handle *)mfence;
   return;

fail_sem_import:
   /* a failed import leaves ownership of dup_fd with us */
   close(dup_fd);
fail_fd_dup:
   VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
fail_sem_create:
   FREE(mfence);
fail_tc_fence_create:
   *pfence = NULL;
}
330
331 #ifdef _WIN32
332 void
zink_create_fence_win32(struct pipe_screen * pscreen,struct pipe_fence_handle ** pfence,void * handle,const void * name,enum pipe_fd_type type)333 zink_create_fence_win32(struct pipe_screen *pscreen, struct pipe_fence_handle **pfence, void *handle, const void *name, enum pipe_fd_type type)
334 {
335 struct zink_screen *screen = zink_screen(pscreen);
336 VkResult ret = VK_ERROR_UNKNOWN;
337 VkSemaphoreCreateInfo sci = {
338 VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
339 NULL,
340 0
341 };
342 struct zink_tc_fence *mfence = zink_create_tc_fence();
343 VkExternalSemaphoreHandleTypeFlagBits flags[] = {
344 [PIPE_FD_TYPE_NATIVE_SYNC] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
345 [PIPE_FD_TYPE_SYNCOBJ] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
346 };
347 VkImportSemaphoreWin32HandleInfoKHR sdi = {0};
348 assert(type < ARRAY_SIZE(flags));
349
350 *pfence = NULL;
351
352 if (VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &mfence->sem) != VK_SUCCESS) {
353 FREE(mfence);
354 return;
355 }
356
357 sdi.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
358 sdi.semaphore = mfence->sem;
359 sdi.handleType = flags[type];
360 sdi.handle = handle;
361 sdi.name = (LPCWSTR)name;
362 ret = VKSCR(ImportSemaphoreWin32HandleKHR)(screen->dev, &sdi);
363
364 if (!zink_screen_handle_vkresult(screen, ret))
365 goto fail;
366 *pfence = (struct pipe_fence_handle *)mfence;
367 return;
368
369 fail:
370 VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
371 FREE(mfence);
372 }
373 #endif
374
375 void
zink_screen_fence_init(struct pipe_screen * pscreen)376 zink_screen_fence_init(struct pipe_screen *pscreen)
377 {
378 pscreen->fence_reference = fence_reference;
379 pscreen->fence_finish = fence_finish;
380 pscreen->fence_get_fd = fence_get_fd;
381 }
382