/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/** @file intel_syncobj.c
 *
 * Support for ARB_sync
 *
 * ARB_sync is implemented by flushing the current batchbuffer and keeping a
 * reference on it.  We can then check for completion or wait for completion
 * using the normal buffer object mechanisms.  This does mean that if an
 * application is using many sync objects, it will emit small batchbuffers
 * which may end up being a significant overhead.  In other tests of removing
 * gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
 * performance bottleneck, though.
 */
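
/*
 * For reference, the application-side ARB_sync pattern that exercises these
 * hooks looks roughly like this (standard GL API, shown only as an
 * illustration -- it is not part of this driver):
 *
 *    GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *    ...
 *    GLenum status = glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT,
 *                                     1000 * 1000 * 1000);   // 1 second, in ns
 *    if (status == GL_ALREADY_SIGNALED || status == GL_CONDITION_SATISFIED) {
 *       // all GL commands issued before the fence have completed
 *    }
 *    glDeleteSync(fence);
 *
 * glFenceSync() ends up in intel_fence_sync() below, glClientWaitSync() in
 * intel_client_wait_sync() (where the timeout is currently ignored -- see the
 * comment there), glGetSynciv(GL_SYNC_STATUS) in intel_check_sync(), and
 * glDeleteSync() in intel_delete_sync_object().
 */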

#include "main/simple_list.h"
#include "main/imports.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

static struct gl_sync_object *
intel_new_sync_object(struct gl_context *ctx, GLuint id)
{
   struct intel_sync_object *sync;

   sync = calloc(1, sizeof(struct intel_sync_object));
   if (!sync)
      return NULL;

   return &sync->Base;
}

static void
intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
{
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   drm_intel_bo_unreference(sync->bo);
   free(sync);
}

static void
intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
                 GLenum condition, GLbitfield flags)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
   intel_batchbuffer_emit_mi_flush(intel);

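   /* Keep a reference on the batch bo that now contains the flush; the fence
    * is considered signalled once that batch has been retired by the GPU.
    */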
   sync->bo = intel->batch.bo;
   drm_intel_bo_reference(sync->bo);

   intel_flush(ctx);
}

/* We ignore the user-supplied timeout.  This is weaselly -- we're allowed to
 * round to an implementation-dependent accuracy, and right now our
 * implementation "rounds" to the wait-forever value.
 *
 * The fix would be a new kernel function to do the GTT transition with a
 * timeout.
 */
static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                                   GLbitfield flags, GLuint64 timeout)
{
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   if (sync->bo) {
      drm_intel_bo_wait_rendering(sync->bo);
      s->StatusFlag = 1;
      drm_intel_bo_unreference(sync->bo);
      sync->bo = NULL;
   }
}
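
/* As noted above, the user timeout is simply ignored here.  Newer libdrm
 * releases expose drm_intel_gem_bo_wait(), a bo wait that takes a timeout in
 * nanoseconds; a timeout-honouring variant could look roughly like the sketch
 * below.  This is an illustration only: it assumes that entry point is
 * available and is not wired up in the function table.
 */
#if 0
static void intel_client_wait_sync_timed(struct gl_context *ctx,
                                         struct gl_sync_object *s,
                                         GLbitfield flags, GLuint64 timeout)
{
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   if (sync->bo) {
      /* drm_intel_gem_bo_wait() takes a signed timeout; clamp huge GL
       * timeouts (e.g. GL_TIMEOUT_IGNORED) rather than letting them wrap.
       */
      int64_t timeout_ns = (timeout > INT64_MAX) ? INT64_MAX : (int64_t) timeout;

      /* Returns 0 once the bo is idle, non-zero if the wait timed out. */
      if (drm_intel_gem_bo_wait(sync->bo, timeout_ns) == 0) {
         s->StatusFlag = 1;
         drm_intel_bo_unreference(sync->bo);
         sync->bo = NULL;
      }
   }
}
#endif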

/* We have nothing to do for WaitSync.  Our GL command stream is sequential,
 * so given that the sync object has already flushed the batchbuffer,
 * any batchbuffers coming after this waitsync will naturally not occur until
 * the previous one is done.
 */
static void intel_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                                   GLbitfield flags, GLuint64 timeout)
{
}

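/* Non-blocking status check, reached via glGetSynciv(GL_SYNC_STATUS): mark the
 * fence signalled once the batch bo holding the flush is no longer busy.
 */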
static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
{
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
      drm_intel_bo_unreference(sync->bo);
      sync->bo = NULL;
      s->StatusFlag = 1;
   }
}

void intel_init_syncobj_functions(struct dd_function_table *functions)
{
   functions->NewSyncObject = intel_new_sync_object;
   functions->DeleteSyncObject = intel_delete_sync_object;
   functions->FenceSync = intel_fence_sync;
   functions->CheckSync = intel_check_sync;
   functions->ClientWaitSync = intel_client_wait_sync;
   functions->ServerWaitSync = intel_server_wait_sync;
}