/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/os_file.h"
#include "util/u_inlines.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_util.h"
/* TODO: Use the interface drm/freedreno_drmif.h instead of calling directly */
#include <xf86drm.h>

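/* Wait for the fence's batch to be flushed and the submit handed off to
 * the kernel (but not for the GPU work itself to complete).  For a TC
 * deferred fence this may require flushing the threaded context first.
 * Returns false if the timeout expired before the flush completed.
 */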
static bool
fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
            uint64_t timeout)
   /* NOTE: in the !fence_is_signalled() case we may be called from non-driver
    * thread, but we don't call fd_batch_flush() in that case
    */
   in_dt
{
   if (!util_queue_fence_is_signalled(&fence->ready)) {
      if (fence->tc_token) {
         threaded_context_flush(pctx, fence->tc_token, timeout == 0);
      }

      if (!timeout)
         return false;

      if (timeout == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&fence->ready);
      } else {
         int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
         if (!util_queue_fence_wait_timeout(&fence->ready, abs_timeout)) {
            return false;
         }
      }

      util_queue_fence_wait(&fence->submit_fence.ready);

      /* We've already waited for batch to be flushed and fence->batch
       * to be cleared:
       */
      assert(!fence->batch);
      return true;
   }

   if (fence->batch)
      fd_batch_flush(fence->batch);

   util_queue_fence_wait(&fence->submit_fence.ready);

   assert(!fence->batch);

   return true;
}

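/* Make a fence that has no work of its own piggy-back on a previous
 * fence (e.g. when a flush turned out to be a no-op), so that waiting
 * on it resolves against the work that was actually submitted.
 */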
void
fd_fence_repopulate(struct pipe_fence_handle *fence,
                    struct pipe_fence_handle *last_fence)
{
   if (last_fence->last_fence)
      fd_fence_repopulate(fence, last_fence->last_fence);

   /* The fence we are re-populating must not be an fd-fence (but last_fence
    * might have been)
    */
   assert(!fence->submit_fence.use_fence_fd);
   assert(!last_fence->batch);

   fd_fence_ref(&fence->last_fence, last_fence);

   /* We have nothing to flush, so nothing will clear the batch reference
    * (which is normally done when the batch is flushed), so do it now:
    */
   fd_fence_set_batch(fence, NULL);
}

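/* Tear down the fence once the last reference is dropped: drop the
 * chained last_fence, release the backing fd/syncobj, and wait for any
 * in-flight submit before freeing.
 */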
static void
fd_fence_destroy(struct pipe_fence_handle *fence)
{
   fd_fence_ref(&fence->last_fence, NULL);

   tc_unflushed_batch_token_reference(&fence->tc_token, NULL);
   if (fence->submit_fence.use_fence_fd)
      close(fence->submit_fence.fence_fd);
   if (fence->syncobj)
      drmSyncobjDestroy(fd_device_fd(fence->screen->dev), fence->syncobj);
   fd_pipe_del(fence->pipe);

   /* TODO might be worth trying harder to avoid a potential stall here,
    * but that would require the submit somehow holding a reference to
    * the pipe_fence_handle.. and I'm not sure if it is a thing that is
    * likely to matter much.
    */
   util_queue_fence_wait(&fence->submit_fence.ready);

   FREE(fence);
}

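/* Standard reference-counted pointer update, in the style of
 * pipe_resource_reference().  Passing a NULL pfence simply drops the
 * reference held in *ptr, e.g.:
 *
 *    fd_fence_ref(&fence->last_fence, NULL);
 */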
void
fd_fence_ref(struct pipe_fence_handle **ptr, struct pipe_fence_handle *pfence)
{
   if (pipe_reference(&(*ptr)->reference, &pfence->reference))
      fd_fence_destroy(*ptr);

   *ptr = pfence;
}

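/* The pipe_screen::fence_finish() hook: block, up to the specified
 * timeout, until the fence has signalled.  Returns true on success.
 */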
bool
fd_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
                struct pipe_fence_handle *fence, uint64_t timeout)
{
   /* Note: for TC deferred fence, pctx->flush() may not have been called
    * yet, so always do fence_flush() *first* before delegating to
    * fence->last_fence
    */
   if (!fence_flush(pctx, fence, timeout))
      return false;

   if (fence->last_fence)
      return fd_fence_finish(pscreen, pctx, fence->last_fence, timeout);

   if (fence->submit_fence.use_fence_fd) {
      int ret = sync_wait(fence->submit_fence.fence_fd, timeout / 1000000);
      return ret == 0;
   }

   if (fd_pipe_wait_timeout(fence->pipe, &fence->submit_fence.fence, timeout))
      return false;

   return true;
}

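/* Internal helper to allocate and initialize a fence, taking ownership
 * of the (optional) fence_fd and syncobj.
 */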
static struct pipe_fence_handle *
fence_create(struct fd_context *ctx, struct fd_batch *batch, int fence_fd,
             int syncobj)
{
   struct pipe_fence_handle *fence;

   fence = CALLOC_STRUCT(pipe_fence_handle);
   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   util_queue_fence_init(&fence->ready);
   util_queue_fence_init(&fence->submit_fence.ready);

   fence->ctx = ctx;
   fd_fence_set_batch(fence, batch);
   fence->pipe = fd_pipe_ref(ctx->pipe);
   fence->screen = ctx->screen;
   fence->submit_fence.fence_fd = fence_fd;
   fence->submit_fence.use_fence_fd = (fence_fd != -1);
   fence->syncobj = syncobj;

   return fence;
}

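/* The pipe_context::create_fence_fd() hook: import an external fence,
 * either a native sync-file fd or a drm syncobj, depending on 'type'.
 */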
void
fd_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
                   int fd, enum pipe_fd_type type)
{
   struct fd_context *ctx = fd_context(pctx);

   switch (type) {
   case PIPE_FD_TYPE_NATIVE_SYNC:
      *pfence = fence_create(fd_context(pctx), NULL, os_dupfd_cloexec(fd), 0);
      break;
   case PIPE_FD_TYPE_SYNCOBJ: {
      int ret;
      uint32_t syncobj;

      assert(ctx->screen->has_syncobj);
      ret = drmSyncobjFDToHandle(fd_device_fd(ctx->screen->dev), fd, &syncobj);
      if (!ret)
         close(fd);

      *pfence = fence_create(fd_context(pctx), NULL, -1, syncobj);
      break;
   }
   default:
      unreachable("Unhandled fence type");
   }
}

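/* The pipe_context::fence_server_sync() hook: make subsequently
 * submitted GPU work wait for the fence, by accumulating its fence-fd
 * into the context's in-fence.
 */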
void
fd_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   /* NOTE: we don't expect the combination of fence-fd + async-flush-fence,
    * so timeout==0 is ok here:
    */
   fence_flush(pctx, fence, 0);

   if (fence->last_fence) {
      fd_fence_server_sync(pctx, fence->last_fence);
      return;
   }

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->submit_fence.use_fence_fd)
      return;

   if (sync_accumulate("freedreno", &ctx->in_fence_fd,
                       fence->submit_fence.fence_fd)) {
      /* error */
   }
}

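/* The pipe_context::fence_server_signal() hook: signal the fence from
 * the driver side.  Only meaningful for imported syncobj fences.
 */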
void
fd_fence_server_signal(struct pipe_context *pctx,
                       struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   if (fence->syncobj) {
      drmSyncobjSignal(fd_device_fd(ctx->screen->dev), &fence->syncobj, 1);
   }
}

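/* The pipe_screen::fence_get_fd() hook: flush if needed and return a
 * dup'd sync-file fd for the fence.  The caller owns the returned fd.
 */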
int
fd_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
{
   /* We don't expect deferred flush to be combined with fence-fd: */
   assert(!fence->last_fence);

   assert(fence->submit_fence.use_fence_fd);

   /* NOTE: in the deferred fence case, the pctx we want is the threaded-ctx
    * but if TC is not used, this will be null.  Which is fine, we won't call
    * threaded_context_flush() in that case
    */
   fence_flush(&fence->ctx->tc->base, fence, PIPE_TIMEOUT_INFINITE);
   return os_dupfd_cloexec(fence->submit_fence.fence_fd);
}

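/* Whether the fence is backed by a native sync-file fd: */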
bool
fd_fence_is_fd(struct pipe_fence_handle *fence)
{
   return fence->submit_fence.use_fence_fd;
}

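/* Create the fence associated with a batch's eventual flush/submit: */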
struct pipe_fence_handle *
fd_fence_create(struct fd_batch *batch)
{
   return fence_create(batch->ctx, batch, -1, 0);
}

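/* (Dis)associate the fence with a batch.  Setting a batch marks it as
 * needing flush; clearing the batch (when the batch is flushed) also
 * signals any TC deferred fence waiting on the flush.
 */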
void
fd_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
{
   if (batch) {
      assert(!fence->batch);
      fence->batch = batch;
      fd_batch_needs_flush(batch);
   } else {
      fence->batch = NULL;

      /* When the batch is dis-associated with the fence, we can signal TC
       * that the fence is flushed
       */
      if (fence->needs_signal) {
         util_queue_fence_signal(&fence->ready);
         fence->needs_signal = false;
      }
   }
}

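/* Create a deferred fence for the threaded context, for a batch that
 * has not yet been flushed.  The fence's 'ready' is reset here and
 * signalled from fd_fence_set_batch() once the batch is flushed and
 * disassociated.
 */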
struct pipe_fence_handle *
fd_fence_create_unflushed(struct pipe_context *pctx,
                          struct tc_unflushed_batch_token *tc_token)
{
   struct pipe_fence_handle *fence =
      fence_create(fd_context(pctx), NULL, -1, 0);
   fence->needs_signal = true;
   util_queue_fence_reset(&fence->ready);
   tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);
   return fence;
}