/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/**
 * \file
 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
 *
 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping a
 * reference on it.  We can then check for completion or wait for completion
 * using the normal buffer object mechanisms.  This does mean that if an
 * application is using many sync objects, it will emit small batchbuffers,
 * which may add up to significant overhead.  In past experiments with removing
 * gratuitous batchbuffer syncs elsewhere in Mesa, however, this has not proven
 * to be a significant performance bottleneck.
 */
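
/*
 * For orientation, a minimal sketch (hypothetical application code, not part
 * of this driver) of the GL_ARB_sync usage that the hooks below implement:
 *
 * \code
 *    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *    ...
 *    GLenum status = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT,
 *                                     16000000); // timeout in nanoseconds
 *    if (status == GL_ALREADY_SIGNALED || status == GL_CONDITION_SATISFIED) {
 *       // All commands submitted before the fence have completed.
 *    }
 *    glDeleteSync(sync);
 * \endcode
 */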

#include <libsync.h> /* Requires Android or libdrm-2.4.72 */
#include <unistd.h>  /* For close() and dup(). */

#include "main/imports.h"

#include "brw_context.h"
#include "intel_batchbuffer.h"

struct brw_fence {
   struct brw_context *brw;

   enum brw_fence_type {
      /** The fence waits for completion of brw_fence::batch_bo. */
      BRW_FENCE_TYPE_BO_WAIT,

      /** The fence waits for brw_fence::sync_fd to signal. */
      BRW_FENCE_TYPE_SYNC_FD,
   } type;

   union {
      struct brw_bo *batch_bo;

      /* This struct owns the fd. */
      int sync_fd;
   };

   mtx_t mutex;
   bool signalled;
};
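
/*
 * For illustration only: a sketch (not additional driver code) of the typical
 * lifecycle of a BO_WAIT fence, using the helpers defined below:
 *
 *    struct brw_fence fence;
 *    brw_fence_init(brw, &fence, BRW_FENCE_TYPE_BO_WAIT);
 *    if (brw_fence_insert(brw, &fence))                // flush, take a BO ref
 *       brw_fence_client_wait(brw, &fence, INT64_MAX); // block until idle
 *    brw_fence_finish(&fence);                         // drop ref / close fd
 *
 * Only the union member selected by brw_fence::type may be accessed.
 */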

struct brw_gl_sync {
   struct gl_sync_object gl;
   struct brw_fence fence;
};

static void
brw_fence_init(struct brw_context *brw, struct brw_fence *fence,
               enum brw_fence_type type)
{
   fence->brw = brw;
   fence->type = type;
   mtx_init(&fence->mutex, mtx_plain);

   switch (type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      fence->batch_bo = NULL;
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      fence->sync_fd = -1;
      break;
   }
}

static void
brw_fence_finish(struct brw_fence *fence)
{
   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (fence->batch_bo)
         brw_bo_unreference(fence->batch_bo);
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd != -1)
         close(fence->sync_fd);
      break;
   }

   mtx_destroy(&fence->mutex);
}

static bool MUST_CHECK
brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
{
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;

   /*
    * From KHR_fence_sync:
    *
    *   When the condition of the sync object is satisfied by the fence
    *   command, the sync is signaled by the associated client API context,
    *   causing any eglClientWaitSyncKHR commands (see below) blocking on
    *   <sync> to unblock. The only condition currently supported is
    *   EGL_SYNC_PRIOR_COMMANDS_COMPLETE_KHR, which is satisfied by
    *   completion of the fence command corresponding to the sync object,
    *   and all preceding commands in the associated client API context's
    *   command stream. The sync object will not be signaled until all
    *   effects from these commands on the client API's internal and
    *   framebuffer state are fully realized. No other state is affected by
    *   execution of the fence command.
    *
    * Note the emphasis there on ensuring that the framebuffer is fully
    * realized before the fence is signaled. We cannot just flush the batch;
    * we must also resolve the drawable first. This matters when, for
    * example, a fence is created for a frame that will be passed to a
    * remote compositor. If we do not flush the drawable explicitly, the
    * resolve will land in a following batch (when the client finally calls
    * SwapBuffers, or triggers a resolve via some other path), and the
    * compositor may read an incomplete framebuffer instead.
    */
   if (driDrawable)
      intel_resolve_for_dri2_flush(brw, driDrawable);
   brw_emit_mi_flush(brw);

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      assert(!fence->batch_bo);
      assert(!fence->signalled);

      fence->batch_bo = brw->batch.batch.bo;
      brw_bo_reference(fence->batch_bo);

      if (intel_batchbuffer_flush(brw) < 0) {
         brw_bo_unreference(fence->batch_bo);
         fence->batch_bo = NULL;
         return false;
      }
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      assert(!fence->signalled);

      if (fence->sync_fd == -1) {
         /* Create an out-fence that signals after all pending commands
          * complete.
          */
         if (intel_batchbuffer_flush_fence(brw, -1, &fence->sync_fd) < 0)
            return false;
         assert(fence->sync_fd != -1);
      } else {
         /* Wait on the in-fence before executing any subsequently submitted
          * commands.
          */
         if (intel_batchbuffer_flush(brw) < 0)
            return false;

         /* Emit a dummy batch just for the fence. */
         brw_emit_mi_flush(brw);
         if (intel_batchbuffer_flush_fence(brw, fence->sync_fd, NULL) < 0)
            return false;
      }
      break;
   }

   return true;
}

static bool MUST_CHECK
brw_fence_insert(struct brw_context *brw, struct brw_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_insert_locked(brw, fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

static bool
brw_fence_has_completed_locked(struct brw_fence *fence)
{
   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      if (brw_bo_busy(fence->batch_bo))
         return false;

      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;
      fence->signalled = true;

      return true;

   case BRW_FENCE_TYPE_SYNC_FD:
      assert(fence->sync_fd != -1);

      /* A timeout of 0 makes sync_wait() poll without blocking. */
      if (sync_wait(fence->sync_fd, 0) == -1)
         return false;

      fence->signalled = true;

      return true;
   }

   return false;
}

static bool
brw_fence_has_completed(struct brw_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_has_completed_locked(fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

static bool
brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
                             uint64_t timeout)
{
   int32_t timeout_i32;

   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64-bit timeout and returns
       * immediately for timeouts <= 0.  The best we can do is to clamp the
       * timeout to INT64_MAX.  This limits the maximum timeout from 584
       * years to 292 years - likely not a big deal.
       */
      if (timeout > INT64_MAX)
         timeout = INT64_MAX;

      if (brw_bo_wait(fence->batch_bo, timeout) != 0)
         return false;

      fence->signalled = true;
      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;

      return true;
   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd == -1)
         return false;

      /* sync_wait() takes a signed 32-bit timeout; treat anything larger
       * as an infinite wait (-1).
       */
      if (timeout > INT32_MAX)
         timeout_i32 = -1;
      else
         timeout_i32 = timeout;

      if (sync_wait(fence->sync_fd, timeout_i32) == -1)
         return false;

      fence->signalled = true;
      return true;
   }

   assert(!"bad enum brw_fence_type");
   return false;
}
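
/*
 * For reference, the arithmetic behind the BO_WAIT clamp above (the GL
 * timeout is in nanoseconds):
 *
 *    2^64 ns ~= 1.8e10 s ~= 584 years   (unsigned 64-bit GL timeout)
 *    2^63 ns ~= 9.2e9 s  ~= 292 years   (signed 64-bit kernel timeout)
 */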

/**
 * Return true if the fence has already signalled or signals successfully
 * during the wait.  (This matches the behavior expected from
 * __DRI2fence::client_wait_sync.)
 */
static bool
brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
                      uint64_t timeout)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_client_wait_locked(brw, fence, timeout);
   mtx_unlock(&fence->mutex);

   return ret;
}

static void
brw_fence_server_wait(struct brw_context *brw, struct brw_fence *fence)
{
   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      /* We have nothing to do for WaitSync.  Our GL command stream is
       * sequential, so given that the sync object has already flushed the
       * batchbuffer, any batchbuffers coming after this waitsync will
       * naturally not occur until the previous one is done.
       */
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      assert(fence->sync_fd != -1);

      /* The user wants explicit synchronization, so give them what they want. */
      if (!brw_fence_insert(brw, fence)) {
         /* FIXME: There exists no way yet to report an error here. If an error
          * occurs, continue silently and hope for the best.
          */
      }
      break;
   }
}
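
/*
 * For reference, the server wait that reaches the BO_WAIT case above comes
 * from the standard glWaitSync call (hypothetical application code):
 *
 * \code
 *    glWaitSync(sync, 0, GL_TIMEOUT_IGNORED); // flags must be 0
 * \endcode
 *
 * Since our GL command stream executes in order and the fence has already
 * flushed the batchbuffer, there is nothing extra to emit for it.
 */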

static struct gl_sync_object *
brw_gl_new_sync(struct gl_context *ctx)
{
   struct brw_gl_sync *sync;

   sync = calloc(1, sizeof(*sync));
   if (!sync)
      return NULL;

   return &sync->gl;
}

static void
brw_gl_delete_sync(struct gl_context *ctx, struct gl_sync_object *_sync)
{
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_finish(&sync->fence);
   free(sync);
}

static void
brw_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                  GLenum condition, GLbitfield flags)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   /* brw_fence_insert_locked() assumes it must do a complete flush */
   assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);

   brw_fence_init(brw, &sync->fence, BRW_FENCE_TYPE_BO_WAIT);

   if (!brw_fence_insert_locked(brw, &sync->fence)) {
      /* FIXME: There exists no way to report a GL error here. If an error
       * occurs, continue silently and hope for the best.
       */
   }
}

static void
brw_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                        GLbitfield flags, GLuint64 timeout)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   if (brw_fence_client_wait(brw, &sync->fence, timeout))
      sync->gl.StatusFlag = 1;
}

static void
brw_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                        GLbitfield flags, GLuint64 timeout)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_server_wait(brw, &sync->fence);
}

static void
brw_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *_sync)
{
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   if (brw_fence_has_completed(&sync->fence))
      sync->gl.StatusFlag = 1;
}

void
brw_init_syncobj_functions(struct dd_function_table *functions)
{
   functions->NewSyncObject = brw_gl_new_sync;
   functions->DeleteSyncObject = brw_gl_delete_sync;
   functions->FenceSync = brw_gl_fence_sync;
   functions->CheckSync = brw_gl_check_sync;
   functions->ClientWaitSync = brw_gl_client_wait_sync;
   functions->ServerWaitSync = brw_gl_server_wait_sync;
}

static void *
brw_dri_create_fence(__DRIcontext *ctx)
{
   struct brw_context *brw = ctx->driverPrivate;
   struct brw_fence *fence;

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   brw_fence_init(brw, fence, BRW_FENCE_TYPE_BO_WAIT);

   if (!brw_fence_insert_locked(brw, fence)) {
      brw_fence_finish(fence);
      free(fence);
      return NULL;
   }

   return fence;
}

static void
brw_dri_destroy_fence(__DRIscreen *dri_screen, void *_fence)
{
   struct brw_fence *fence = _fence;

   brw_fence_finish(fence);
   free(fence);
}

static GLboolean
brw_dri_client_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags,
                         uint64_t timeout)
{
   struct brw_fence *fence = _fence;

   return brw_fence_client_wait(fence->brw, fence, timeout);
}

static void
brw_dri_server_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags)
{
   struct brw_fence *fence = _fence;

   /* We might be called here with a NULL fence as a result of WaitSyncKHR
    * on an EGL_KHR_reusable_sync fence. There is nothing to do in that case.
    */
   if (!fence)
      return;

   brw_fence_server_wait(fence->brw, fence);
}

static unsigned
brw_dri_get_capabilities(__DRIscreen *dri_screen)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   unsigned caps = 0;

   if (screen->has_exec_fence)
      caps |= __DRI_FENCE_CAP_NATIVE_FD;

   return caps;
}

static void *
brw_dri_create_fence_fd(__DRIcontext *dri_ctx, int fd)
{
   struct brw_context *brw = dri_ctx->driverPrivate;
   struct brw_fence *fence;

   assert(brw->screen->has_exec_fence);

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   brw_fence_init(brw, fence, BRW_FENCE_TYPE_SYNC_FD);

   if (fd == -1) {
      /* Create an out-fence fd. */
      if (!brw_fence_insert_locked(brw, fence))
         goto fail;
   } else {
      /* Import the sync fd as an in-fence. */
      fence->sync_fd = dup(fd);
   }

   assert(fence->sync_fd != -1);

   return fence;

fail:
   brw_fence_finish(fence);
   free(fence);
   return NULL;
}
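
/*
 * For context, a sketch (standard EGL_ANDROID_native_fence_sync usage, not
 * defined in this file) of how a client obtains the out-fence fd that
 * brw_dri_create_fence_fd() produces:
 *
 * \code
 *    EGLSyncKHR sync = eglCreateSyncKHR(dpy, EGL_SYNC_NATIVE_FENCE_ANDROID,
 *                                       NULL);
 *    glFlush(); // the fd is materialized by the next flush
 *    int fd = eglDupNativeFenceFDANDROID(dpy, sync); // pass to a compositor
 *    eglDestroySyncKHR(dpy, sync);
 * \endcode
 */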

static int
brw_dri_get_fence_fd_locked(struct brw_fence *fence)
{
   assert(fence->type == BRW_FENCE_TYPE_SYNC_FD);
   return dup(fence->sync_fd);
}

static int
brw_dri_get_fence_fd(__DRIscreen *dri_screen, void *_fence)
{
   struct brw_fence *fence = _fence;
   int fd;

   mtx_lock(&fence->mutex);
   fd = brw_dri_get_fence_fd_locked(fence);
   mtx_unlock(&fence->mutex);

   return fd;
}

const __DRI2fenceExtension intelFenceExtension = {
   .base = { __DRI2_FENCE, 2 },

   .create_fence = brw_dri_create_fence,
   .destroy_fence = brw_dri_destroy_fence,
   .client_wait_sync = brw_dri_client_wait_sync,
   .server_wait_sync = brw_dri_server_wait_sync,
   .get_fence_from_cl_event = NULL,
   .get_capabilities = brw_dri_get_capabilities,
   .create_fence_fd = brw_dri_create_fence_fd,
   .get_fence_fd = brw_dri_get_fence_fd,
};