/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_batch.h"
#include "zink_context.h"
#include "zink_fence.h"

#include "zink_resource.h"
#include "zink_screen.h"

#include "util/os_file.h"
#include "util/set.h"
#include "util/u_memory.h"

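/* called when the last reference to a tc fence is dropped: release the tc token,
 * destroy the semaphore if one was created/imported, and free the fence itself
 */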
static void
destroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence)
{
   mfence->fence = NULL;
   tc_unflushed_batch_token_reference(&mfence->tc_token, NULL);
   if (mfence->sem)
      VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
   FREE(mfence);
}

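/* allocate a new refcounted tc fence with its ready fence initialized */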
struct zink_tc_fence *
zink_create_tc_fence(void)
{
   struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence);
   if (!mfence)
      return NULL;
   pipe_reference_init(&mfence->reference, 1);
   util_queue_fence_init(&mfence->ready);
   return mfence;
}

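/* create a tc fence wrapping a threaded-context token; the ready fence is
 * reset so waiters block until the deferred flush has actually run
 */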
struct pipe_fence_handle *
zink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token)
{
   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      return NULL;
   util_queue_fence_reset(&mfence->ready);
   tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token);
   return (struct pipe_fence_handle*)mfence;
}

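/* standard refcounting helper: destroys the previously-referenced fence when
 * its last reference goes away
 */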
void
zink_fence_reference(struct zink_screen *screen,
                     struct zink_tc_fence **ptr,
                     struct zink_tc_fence *mfence)
{
   if (pipe_reference(&(*ptr)->reference, &mfence->reference))
      destroy_fence(screen, *ptr);

   *ptr = mfence;
}

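/* pipe_screen::fence_reference hook */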
static void
fence_reference(struct pipe_screen *pscreen,
                struct pipe_fence_handle **pptr,
                struct pipe_fence_handle *pfence)
{
   zink_fence_reference(zink_screen(pscreen), (struct zink_tc_fence **)pptr,
                        zink_tc_fence(pfence));
}

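/* wait for the threaded-context side of a fence: kick off the deferred flush if
 * there is a tc token, then wait on the ready fence and update *timeout_ns with
 * the time remaining
 */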
static bool
tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns)
{
   if (!util_queue_fence_is_signalled(&mfence->ready)) {
      int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns);
      if (mfence->tc_token) {
         /* Ensure that zink_flush will be called for
          * this mfence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the mfence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0);
      }

      /* this is a tc fence, so we're just waiting on the queue fence to complete
       * after being signaled by the real fence
       */
      if (*timeout_ns == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&mfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
            return false;
      }
      if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) {
         int64_t time_ns = os_time_get_nano();
         *timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
      }
   }

   return true;
}

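/* block (up to timeout_ns) until the fence's batch id has completed on the
 * screen timeline, then mark the fence completed and update the last-finished id
 */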
static bool
fence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
{
   if (screen->device_lost)
      return true;
   if (p_atomic_read(&fence->completed))
      return true;

   assert(fence->batch_id);
   assert(fence->submitted);

   bool success = zink_screen_timeline_wait(screen, fence->batch_id, timeout_ns);

   if (success) {
      p_atomic_set(&fence->completed, true);
      zink_batch_state(fence)->usage.usage = 0;
      zink_screen_update_last_finished(screen, fence->batch_id);
   }
   return success;
}

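/* the full fence_finish path: flush a deferred context flush if needed, wait for
 * the tc fence, then check/wait on the underlying batch fence
 */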
static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
                  uint64_t timeout_ns)
{
   pctx = threaded_context_unwrap_sync(pctx);
   struct zink_context *ctx = zink_context(pctx);

   if (screen->device_lost)
      return true;

   if (pctx && mfence->deferred_ctx == pctx) {
      if (mfence->fence == ctx->deferred_fence) {
         zink_context(pctx)->batch.has_work = true;
         /* this must be the current batch */
         pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
         if (!timeout_ns)
            return false;
      }
   }

   /* need to ensure the tc fence has been flushed before we wait */
   bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns);
   /* the submit thread hasn't finished yet */
   if (!tc_finish)
      return false;
   /* this was an invalid flush, just return completed */
   if (!mfence->fence)
      return true;

   struct zink_fence *fence = mfence->fence;

   unsigned submit_diff = zink_batch_state(mfence->fence)->submit_count - mfence->submit_count;
   /* this batch is known to have finished because it has been submitted more than 1 time
    * since the tc fence last saw it
    */
   if (submit_diff > 1)
      return true;

   if (fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id))
      return true;

   return fence_wait(screen, fence, timeout_ns);
}

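/* pipe_screen::fence_finish hook */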
static bool
fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
                  struct pipe_fence_handle *pfence, uint64_t timeout_ns)
{
   return zink_fence_finish(zink_screen(pscreen), pctx, zink_tc_fence(pfence),
                            timeout_ns);
}

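/* attach the fence's semaphore as the signal semaphore of the current batch and
 * flush synchronously so the submit is queued before returning
 */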
void
zink_fence_server_signal(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;

   assert(!ctx->batch.state->signal_semaphore);
   ctx->batch.state->signal_semaphore = mfence->sem;
   ctx->batch.has_work = true;
   struct zink_batch_state *bs = ctx->batch.state;
   /* this must produce a synchronous flush that completes before the function returns */
   pctx->flush(pctx, NULL, 0);
   if (zink_screen(ctx->base.screen)->threaded)
      util_queue_fence_wait(&bs->flush_completed);
}

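/* make the next submit wait on the fence's semaphore */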
void
zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;

   if (mfence->deferred_ctx == pctx || !mfence->sem)
      return;

   mfence->deferred_ctx = pctx;
   /* this will be applied on the next submit */
   VkPipelineStageFlags flag = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
   util_dynarray_append(&ctx->batch.state->wait_semaphores, VkSemaphore, mfence->sem);
   util_dynarray_append(&ctx->batch.state->wait_semaphore_stages, VkPipelineStageFlags, flag);

   /* transfer the external wait semaphore ownership to the next submit */
   mfence->sem = VK_NULL_HANDLE;
}

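/* import a native sync fd or syncobj fd as a semaphore and wrap it in a tc fence */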
void
zink_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence, int fd, enum pipe_fd_type type)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   VkResult result;

   assert(fd >= 0);

   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      goto fail_tc_fence_create;

   const VkSemaphoreCreateInfo sci = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
   };
   result = VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &mfence->sem);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkCreateSemaphore failed (%s)", vk_Result_to_str(result));
      goto fail_sem_create;
   }

   int dup_fd = os_dupfd_cloexec(fd);
   if (dup_fd < 0)
      goto fail_fd_dup;

   static const VkExternalSemaphoreHandleTypeFlagBits flags[] = {
      [PIPE_FD_TYPE_NATIVE_SYNC] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      [PIPE_FD_TYPE_SYNCOBJ] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
   };
   assert(type < ARRAY_SIZE(flags));

   const VkImportSemaphoreFdInfoKHR sdi = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
      .semaphore = mfence->sem,
      .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
      .handleType = flags[type],
      .fd = dup_fd,
   };
   result = VKSCR(ImportSemaphoreFdKHR)(screen->dev, &sdi);
   if (!zink_screen_handle_vkresult(screen, result)) {
      mesa_loge("ZINK: vkImportSemaphoreFdKHR failed (%s)", vk_Result_to_str(result));
      goto fail_sem_import;
   }

   *pfence = (struct pipe_fence_handle *)mfence;
   return;

fail_sem_import:
   close(dup_fd);
fail_fd_dup:
   VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
fail_sem_create:
   FREE(mfence);
fail_tc_fence_create:
   *pfence = NULL;
}

#ifdef _WIN32
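/* Win32 counterpart of zink_create_fence_fd: import a handle (or named object) as a semaphore */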
void
zink_create_fence_win32(struct pipe_screen *pscreen, struct pipe_fence_handle **pfence, void *handle, const void *name, enum pipe_fd_type type)
{
   struct zink_screen *screen = zink_screen(pscreen);
   VkResult ret = VK_ERROR_UNKNOWN;
   VkSemaphoreCreateInfo sci = {
      VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
      NULL,
      0
   };
   struct zink_tc_fence *mfence = zink_create_tc_fence();
   VkExternalSemaphoreHandleTypeFlagBits flags[] = {
      [PIPE_FD_TYPE_NATIVE_SYNC] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
      [PIPE_FD_TYPE_SYNCOBJ] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
   };
   VkImportSemaphoreWin32HandleInfoKHR sdi = {0};
   assert(type < ARRAY_SIZE(flags));

   *pfence = NULL;

   if (VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &mfence->sem) != VK_SUCCESS) {
      FREE(mfence);
      return;
   }

   sdi.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
   sdi.semaphore = mfence->sem;
   sdi.handleType = flags[type];
   sdi.handle = handle;
   sdi.name = (LPCWSTR)name;
   ret = VKSCR(ImportSemaphoreWin32HandleKHR)(screen->dev, &sdi);

   if (!zink_screen_handle_vkresult(screen, ret))
      goto fail;
   *pfence = (struct pipe_fence_handle *)mfence;
   return;

fail:
   VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
   FREE(mfence);
}
#endif

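/* wire up the fence entry points on the pipe_screen */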
void
zink_screen_fence_init(struct pipe_screen *pscreen)
{
   pscreen->fence_reference = fence_reference;
   pscreen->fence_finish = fence_finish;
}