/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/**
 * \file
 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
 *
 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping
 * a reference on it.  We can then check for completion or wait for completion
 * using the normal buffer object mechanisms.  This does mean that if an
 * application uses many sync objects, it will emit small batchbuffers, which
 * may add up to significant overhead.  In other experiments with removing
 * gratuitous batchbuffer syncs from Mesa, however, this has not appeared to
 * be a significant performance bottleneck.
 */
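
/*
 * For reference, the application-side pattern this file serves looks roughly
 * like the sketch below (illustrative only, not driver code; the timeout of
 * one millisecond is arbitrary):
 *
 *    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *    ... more GL commands ...
 *    GLenum status = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT,
 *                                     1000000);
 *    if (status == GL_TIMEOUT_EXPIRED) {
 *       ... the GPU has not yet retired the commands before the fence ...
 *    }
 *    glDeleteSync(sync);
 */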

#include <libsync.h> /* Requires Android or libdrm-2.4.72 */

#include "util/os_file.h"

#include "brw_context.h"
#include "intel_batchbuffer.h"

struct brw_fence {
   struct brw_context *brw;

   enum brw_fence_type {
      /** The fence waits for completion of brw_fence::batch_bo. */
      BRW_FENCE_TYPE_BO_WAIT,

      /** The fence waits for brw_fence::sync_fd to signal. */
      BRW_FENCE_TYPE_SYNC_FD,
   } type;

   union {
      struct brw_bo *batch_bo;

      /* This struct owns the fd. */
      int sync_fd;
   };

   /** Guards the fence state against concurrent insertion and waits. */
   mtx_t mutex;

   /** Latched to true once the fence has been observed to signal. */
   bool signalled;
};

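/* A GL sync object wrapping a brw_fence.  struct gl_sync_object must stay
 * the first member so that the gl_sync_object pointers handed to us by core
 * Mesa can be downcast to brw_gl_sync, as the brw_gl_* callbacks below do.
 */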
struct brw_gl_sync {
   struct gl_sync_object gl;
   struct brw_fence fence;
};

static void
brw_fence_init(struct brw_context *brw, struct brw_fence *fence,
               enum brw_fence_type type)
{
   fence->brw = brw;
   fence->type = type;
   mtx_init(&fence->mutex, mtx_plain);

   switch (type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      fence->batch_bo = NULL;
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      fence->sync_fd = -1;
      break;
   }
}

static void
brw_fence_finish(struct brw_fence *fence)
{
   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (fence->batch_bo)
         brw_bo_unreference(fence->batch_bo);
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd != -1)
         close(fence->sync_fd);
      break;
   }

   mtx_destroy(&fence->mutex);
}

static bool MUST_CHECK
brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
{
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;

   /*
    * From KHR_fence_sync:
    *
    *   When the condition of the sync object is satisfied by the fence
    *   command, the sync is signaled by the associated client API context,
    *   causing any eglClientWaitSyncKHR commands (see below) blocking on
    *   <sync> to unblock. The only condition currently supported is
    *   EGL_SYNC_PRIOR_COMMANDS_COMPLETE_KHR, which is satisfied by
    *   completion of the fence command corresponding to the sync object,
    *   and all preceding commands in the associated client API context's
    *   command stream. The sync object will not be signaled until all
    *   effects from these commands on the client API's internal and
    *   framebuffer state are fully realized. No other state is affected by
    *   execution of the fence command.
    *
    * Note the emphasis there on ensuring that the framebuffer is fully
    * realized before the fence is signaled.  We cannot just flush the batch,
    * but must also resolve the drawable first.  This matters when, for
    * example, the fence is created for a frame that will be passed to a
    * remote compositor.  If we did not flush the drawable explicitly, the
    * resolve would land in a following batch (when the client finally calls
    * SwapBuffers, or triggers a resolve via some other path), and the
    * compositor could read an incomplete framebuffer instead.
    */
   if (driDrawable)
      intel_resolve_for_dri2_flush(brw, driDrawable);
   brw_emit_mi_flush(brw);

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      assert(!fence->batch_bo);
      assert(!fence->signalled);

      fence->batch_bo = brw->batch.batch.bo;
      brw_bo_reference(fence->batch_bo);

      if (intel_batchbuffer_flush(brw) < 0) {
         brw_bo_unreference(fence->batch_bo);
         fence->batch_bo = NULL;
         return false;
      }
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      assert(!fence->signalled);

      if (fence->sync_fd == -1) {
         /* Create an out-fence that signals after all pending commands
          * complete.
          */
         if (intel_batchbuffer_flush_fence(brw, -1, &fence->sync_fd) < 0)
            return false;
         assert(fence->sync_fd != -1);
      } else {
         /* Wait on the in-fence before executing any subsequently submitted
          * commands.
          */
         if (intel_batchbuffer_flush(brw) < 0)
            return false;

         /* Emit a dummy batch just for the fence. */
         brw_emit_mi_flush(brw);
         if (intel_batchbuffer_flush_fence(brw, fence->sync_fd, NULL) < 0)
            return false;
      }
      break;
   }

   return true;
}

static bool MUST_CHECK
brw_fence_insert(struct brw_context *brw, struct brw_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_insert_locked(brw, fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

static bool
brw_fence_has_completed_locked(struct brw_fence *fence)
{
   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      if (brw_bo_busy(fence->batch_bo))
         return false;

      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;
      fence->signalled = true;

      return true;

   case BRW_FENCE_TYPE_SYNC_FD:
      assert(fence->sync_fd != -1);

      if (sync_wait(fence->sync_fd, 0) == -1)
         return false;

      fence->signalled = true;

      return true;
   }

   return false;
}

static bool
brw_fence_has_completed(struct brw_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_has_completed_locked(fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

static bool
brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
                             uint64_t timeout)
{
   int32_t timeout_i32;

   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
       * immediately for timeouts <= 0.  The best we can do is to clamp the
       * timeout to INT64_MAX.  This limits the maximum timeout from 584 years
       * to 292 years - likely not a big deal.
       */
      if (timeout > INT64_MAX)
         timeout = INT64_MAX;

      if (brw_bo_wait(fence->batch_bo, timeout) != 0)
         return false;

      fence->signalled = true;
      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;

      return true;
   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd == -1)
         return false;

      /* sync_wait() takes its timeout in milliseconds, but we are given one
       * in nanoseconds.  Round up so we never wait less than requested, and
       * treat anything that would overflow int32 as an infinite wait.
       */
      if (timeout / 1000000 > INT32_MAX)
         timeout_i32 = -1;
      else
         timeout_i32 = (timeout + 999999) / 1000000;

      if (sync_wait(fence->sync_fd, timeout_i32) == -1)
         return false;

      fence->signalled = true;
      return true;
   }

   assert(!"bad enum brw_fence_type");
   return false;
}

/**
 * Return true if the wait completed because the fence signalled, or if the
 * fence had already signalled.  (This matches the behavior expected from
 * __DRI2fence::client_wait_sync.)
 */
static bool
brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
                      uint64_t timeout)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_client_wait_locked(brw, fence, timeout);
   mtx_unlock(&fence->mutex);

   return ret;
}

static void
brw_fence_server_wait(struct brw_context *brw, struct brw_fence *fence)
{
   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      /* We have nothing to do for WaitSync.  Our GL command stream is
       * sequential, so given that the sync object has already flushed the
       * batchbuffer, any batchbuffers coming after this WaitSync will
       * naturally not occur until the previous one is done.
       */
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      assert(fence->sync_fd != -1);

      /* The user wants explicit synchronization, so give them what they
       * want.
       */
      if (!brw_fence_insert(brw, fence)) {
         /* FIXME: There exists no way yet to report an error here. If an
          * error occurs, continue silently and hope for the best.
          */
      }
      break;
   }
}

static struct gl_sync_object *
brw_gl_new_sync(struct gl_context *ctx)
{
   struct brw_gl_sync *sync;

   sync = calloc(1, sizeof(*sync));
   if (!sync)
      return NULL;

   return &sync->gl;
}

static void
brw_gl_delete_sync(struct gl_context *ctx, struct gl_sync_object *_sync)
{
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_finish(&sync->fence);
   free(sync->gl.Label);
   free(sync);
}

static void
brw_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                  GLenum condition, GLbitfield flags)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   /* brw_fence_insert_locked() assumes it must do a complete flush */
   assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);

   brw_fence_init(brw, &sync->fence, BRW_FENCE_TYPE_BO_WAIT);

   if (!brw_fence_insert_locked(brw, &sync->fence)) {
      /* FIXME: There exists no way to report a GL error here. If an error
       * occurs, continue silently and hope for the best.
       */
   }
}

static void
brw_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                        GLbitfield flags, GLuint64 timeout)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   if (brw_fence_client_wait(brw, &sync->fence, timeout))
      sync->gl.StatusFlag = 1;
}

static void
brw_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                        GLbitfield flags, GLuint64 timeout)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_server_wait(brw, &sync->fence);
}

static void
brw_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *_sync)
{
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   if (brw_fence_has_completed(&sync->fence))
      sync->gl.StatusFlag = 1;
}

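/* Hook the GL sync-object entry points into Mesa's device-driver function
 * table; core Mesa dispatches the GL_ARB_sync API through these.
 */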
void
brw_init_syncobj_functions(struct dd_function_table *functions)
{
   functions->NewSyncObject = brw_gl_new_sync;
   functions->DeleteSyncObject = brw_gl_delete_sync;
   functions->FenceSync = brw_gl_fence_sync;
   functions->CheckSync = brw_gl_check_sync;
   functions->ClientWaitSync = brw_gl_client_wait_sync;
   functions->ServerWaitSync = brw_gl_server_wait_sync;
}

static void *
brw_dri_create_fence(__DRIcontext *ctx)
{
   struct brw_context *brw = ctx->driverPrivate;
   struct brw_fence *fence;

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   brw_fence_init(brw, fence, BRW_FENCE_TYPE_BO_WAIT);

   if (!brw_fence_insert_locked(brw, fence)) {
      brw_fence_finish(fence);
      free(fence);
      return NULL;
   }

   return fence;
}

static void
brw_dri_destroy_fence(__DRIscreen *dri_screen, void *_fence)
{
   struct brw_fence *fence = _fence;

   brw_fence_finish(fence);
   free(fence);
}

static GLboolean
brw_dri_client_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags,
                         uint64_t timeout)
{
   struct brw_fence *fence = _fence;

   return brw_fence_client_wait(fence->brw, fence, timeout);
}

static void
brw_dri_server_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags)
{
   struct brw_fence *fence = _fence;

   /* We might be called here with a NULL fence as a result of WaitSyncKHR
    * on an EGL_KHR_reusable_sync fence. There is nothing to do in that case.
    */
   if (!fence)
      return;

   brw_fence_server_wait(fence->brw, fence);
}

static unsigned
brw_dri_get_capabilities(__DRIscreen *dri_screen)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   unsigned caps = 0;

   if (screen->has_exec_fence)
      caps |= __DRI_FENCE_CAP_NATIVE_FD;

   return caps;
}

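/* For context, the entry point below is normally reached through
 * EGL_ANDROID_native_fence_sync.  An illustrative client-side sketch (not
 * driver code; display setup is elided):
 *
 *    EGLSyncKHR sync = eglCreateSyncKHR(dpy, EGL_SYNC_NATIVE_FENCE_ANDROID,
 *                                       NULL);
 *    glFlush();
 *    int fd = eglDupNativeFenceFDANDROID(dpy, sync);
 *
 * Passing fd == -1 creates such an out-fence; passing a valid fd instead
 * imports it as an in-fence.
 */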
static void *
brw_dri_create_fence_fd(__DRIcontext *dri_ctx, int fd)
{
   struct brw_context *brw = dri_ctx->driverPrivate;
   struct brw_fence *fence;

   assert(brw->screen->has_exec_fence);

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   brw_fence_init(brw, fence, BRW_FENCE_TYPE_SYNC_FD);

   if (fd == -1) {
      /* Create an out-fence fd. */
      if (!brw_fence_insert_locked(brw, fence))
         goto fail;
   } else {
      /* Import the sync fd as an in-fence. */
      fence->sync_fd = os_dupfd_cloexec(fd);
   }

   assert(fence->sync_fd != -1);

   return fence;

fail:
   brw_fence_finish(fence);
   free(fence);
   return NULL;
}

static int
brw_dri_get_fence_fd_locked(struct brw_fence *fence)
{
   assert(fence->type == BRW_FENCE_TYPE_SYNC_FD);
   return os_dupfd_cloexec(fence->sync_fd);
}

static int
brw_dri_get_fence_fd(__DRIscreen *dri_screen, void *_fence)
{
   struct brw_fence *fence = _fence;
   int fd;

   mtx_lock(&fence->mutex);
   fd = brw_dri_get_fence_fd_locked(fence);
   mtx_unlock(&fence->mutex);

   return fd;
}

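/* The fence extension vtable handed to the DRI loader; this is how EGL's
 * fence-sync extensions ultimately reach the implementation above.
 */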
const __DRI2fenceExtension intelFenceExtension = {
   .base = { __DRI2_FENCE, 2 },

   .create_fence = brw_dri_create_fence,
   .destroy_fence = brw_dri_destroy_fence,
   .client_wait_sync = brw_dri_client_wait_sync,
   .server_wait_sync = brw_dri_server_wait_sync,
   .get_fence_from_cl_event = NULL,
   .get_capabilities = brw_dri_get_capabilities,
   .create_fence_fd = brw_dri_create_fence_fd,
   .get_fence_fd = brw_dri_get_fence_fd,
};