/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/os_file.h"
#include "util/u_inlines.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_util.h"
/* TODO: use the drm/freedreno_drmif.h interface instead of calling xf86drm
 * directly
 */
#include <xf86drm.h>

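/* Wait up to 'timeout' ns for the rendering associated with the fence to
 * be flushed, flushing the batch (or threaded-context token) if needed.
 * Returns false if the fence was not flushed within the timeout.
 */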
static bool
fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
            uint64_t timeout)
   /* NOTE: in the !fence_is_signalled() case we may be called from a
    * non-driver thread, but we don't call fd_batch_flush() in that case
    */
   in_dt
{
   if (fence->flushed)
      return true;

   MESA_TRACE_FUNC();

   if (!util_queue_fence_is_signalled(&fence->ready)) {
      if (fence->tc_token) {
         threaded_context_flush(pctx, fence->tc_token, timeout == 0);
      }

      if (!timeout)
         return false;

      if (timeout == OS_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&fence->ready);
      } else {
         int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
         if (!util_queue_fence_wait_timeout(&fence->ready, abs_timeout)) {
            return false;
         }
      }

      goto out;
   }

   if (fence->batch)
      fd_batch_flush(fence->batch);

out:
   if (fence->fence)
      fd_fence_flush(fence->fence);

   assert(!fence->batch);
   fence->flushed = true;
   return true;
}

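/* Called when a flush has nothing to do: chain 'fence' to the previously
 * flushed 'last_fence' so that waiting on it still waits on the most
 * recent rendering.
 */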
void
fd_pipe_fence_repopulate(struct pipe_fence_handle *fence,
                         struct pipe_fence_handle *last_fence)
{
   if (last_fence->last_fence)
      fd_pipe_fence_repopulate(fence, last_fence->last_fence);

   /* The fence we are re-populating must not be an fd-fence (but last_fence
    * might have been)
    */
   assert(!fence->use_fence_fd);
   assert(!last_fence->batch);

   fd_pipe_fence_ref(&fence->last_fence, last_fence);

   /* We have nothing to flush, so nothing will clear the batch reference
    * (which is normally done when the batch is flushed), so do it now:
    */
   fd_pipe_fence_set_batch(fence, NULL);
}

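/* Drop all references held by the fence and free it. */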
static void
fd_fence_destroy(struct pipe_fence_handle *fence)
{
   fd_pipe_fence_ref(&fence->last_fence, NULL);

   tc_unflushed_batch_token_reference(&fence->tc_token, NULL);

   if (fence->syncobj)
      drmSyncobjDestroy(fd_device_fd(fence->screen->dev), fence->syncobj);
   fd_pipe_del(fence->pipe);
   if (fence->fence)
      fd_fence_del(fence->fence);

   FREE(fence);
}

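/* Update '*ptr' to reference 'pfence', destroying the previously
 * referenced fence if this drops its last reference.
 */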
void
fd_pipe_fence_ref(struct pipe_fence_handle **ptr,
                  struct pipe_fence_handle *pfence)
{
   if (pipe_reference(&(*ptr)->reference, &pfence->reference))
      fd_fence_destroy(*ptr);

   *ptr = pfence;
}

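/* Wait up to 'timeout' ns for the fence to complete, flushing first if
 * necessary.  Returns true if the fence signalled within the timeout.
 */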
bool
fd_pipe_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
                     struct pipe_fence_handle *fence, uint64_t timeout)
{
   MESA_TRACE_SCOPE(timeout ? "fd_pipe_fence_finish(wait)"
                            : "fd_pipe_fence_finish(nowait)");

   /* Note: for TC deferred fence, pctx->flush() may not have been called
    * yet, so always do fence_flush() *first* before delegating to
    * fence->last_fence
    */
   if (!fence_flush(pctx, fence, timeout))
      return false;

   if (fence->last_fence)
      return fd_pipe_fence_finish(pscreen, pctx, fence->last_fence, timeout);

   if (fence->use_fence_fd) {
      assert(fence->fence);
      int ret = sync_wait(fence->fence->fence_fd, timeout / 1000000);
      return ret == 0;
   }

   if (fd_pipe_wait_timeout(fence->pipe, fence->fence, timeout))
      return false;

   return true;
}

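/* Create a fence, optionally wrapping an imported native fence fd and/or
 * drm syncobj handle (the syncobj is destroyed along with the fence).
 */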
static struct pipe_fence_handle *
fence_create(struct fd_context *ctx, struct fd_batch *batch, int fence_fd,
             int syncobj)
{
   struct pipe_fence_handle *fence;

   fence = CALLOC_STRUCT(pipe_fence_handle);
   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   util_queue_fence_init(&fence->ready);

   fence->ctx = ctx;
   fd_pipe_fence_set_batch(fence, batch);
   fence->pipe = fd_pipe_ref(ctx->pipe);
   fence->screen = ctx->screen;
   fence->use_fence_fd = (fence_fd != -1);
   fence->syncobj = syncobj;

   if (fence_fd != -1) {
      fence->fence = fd_fence_new(fence->pipe, fence->use_fence_fd);
      fence->fence->fence_fd = fence_fd;
   }

   return fence;
}

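/* Import an external fd as a fence: either a native sync-file fd or a drm
 * syncobj fd, depending on 'type'.
 */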
void
fd_create_pipe_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
                        int fd, enum pipe_fd_type type)
{
   struct fd_context *ctx = fd_context(pctx);

   switch (type) {
   case PIPE_FD_TYPE_NATIVE_SYNC:
      *pfence =
         fence_create(fd_context(pctx), NULL, os_dupfd_cloexec(fd), 0);
      break;
   case PIPE_FD_TYPE_SYNCOBJ: {
      int ret;
      uint32_t syncobj;

      assert(ctx->screen->has_syncobj);
      ret = drmSyncobjFDToHandle(fd_device_fd(ctx->screen->dev), fd, &syncobj);
      if (!ret)
         close(fd);

      *pfence = fence_create(fd_context(pctx), NULL, -1, syncobj);
      break;
   }
   default:
      unreachable("Unhandled fence type");
   }
}

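/* Make subsequent rendering on this context wait for the fence, by merging
 * its fence fd into the context's in-fence (only needed for external
 * fences).
 */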
void
fd_pipe_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   MESA_TRACE_FUNC();

   /* NOTE: we don't expect the combination of fence-fd + async-flush-fence,
    * so timeout==0 is ok here:
    */
   fence_flush(pctx, fence, 0);

   if (fence->last_fence) {
      fd_pipe_fence_server_sync(pctx, fence->last_fence);
      return;
   }

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->use_fence_fd)
      return;

   ctx->no_implicit_sync = true;

   assert(fence->fence);
   if (sync_accumulate("freedreno", &ctx->in_fence_fd, fence->fence->fence_fd)) {
      /* error */
   }
}

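/* Signal the fence from the CPU side; this only has an effect for
 * syncobj-backed fences.
 */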
void
fd_pipe_fence_server_signal(struct pipe_context *pctx,
                            struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   if (fence->syncobj) {
      drmSyncobjSignal(fd_device_fd(ctx->screen->dev), &fence->syncobj, 1);
   }
}

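/* Export the fence as a native sync-file fd.  The caller takes ownership
 * of the returned fd.
 */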
int
fd_pipe_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
{
   MESA_TRACE_FUNC();

   /* We don't expect deferred flush to be combined with fence-fd: */
   assert(!fence->last_fence);

   assert(fence->use_fence_fd);

   /* NOTE: in the deferred fence case, the pctx we want is the threaded-ctx
    * but if TC is not used, this will be null.  Which is fine, we won't call
    * threaded_context_flush() in that case
    */
   fence_flush(&fence->ctx->tc->base, fence, OS_TIMEOUT_INFINITE);
   assert(fence->fence);
   return os_dupfd_cloexec(fence->fence->fence_fd);
}

bool
fd_pipe_fence_is_fd(struct pipe_fence_handle *fence)
{
   return fence->use_fence_fd;
}

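/* Create a fence associated with the given batch; the fence becomes ready
 * once the batch is flushed.
 */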
struct pipe_fence_handle *
fd_pipe_fence_create(struct fd_batch *batch)
{
   return fence_create(batch->ctx, batch, -1, 0);
}

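/* Associate a not-yet-flushed batch with the fence, or (with batch==NULL)
 * disassociate it once the batch has been flushed.
 */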
void
fd_pipe_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
{
   if (batch) {
      assert(!fence->batch);
      fd_batch_reference(&fence->batch, batch);
      fd_batch_needs_flush(batch);
   } else {
      fd_batch_reference(&fence->batch, NULL);

      /* When the batch is disassociated from the fence, we can signal TC
       * that the fence is flushed
       */
      if (fence->needs_signal) {
         util_queue_fence_signal(&fence->ready);
         fence->needs_signal = false;
      }
   }
}

void
fd_pipe_fence_set_submit_fence(struct pipe_fence_handle *fence,
                               struct fd_fence *submit_fence)
{
   /* Take ownership of the drm fence after batch/submit is flushed: */
   assert(!fence->fence);
   fence->fence = submit_fence;
   fd_pipe_fence_set_batch(fence, NULL);
}

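/* Create a fence for a deferred flush from the threaded-context frontend;
 * the fence becomes ready when the actual flush happens.
 */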
struct pipe_fence_handle *
fd_pipe_fence_create_unflushed(struct pipe_context *pctx,
                               struct tc_unflushed_batch_token *tc_token)
{
   struct pipe_fence_handle *fence =
      fence_create(fd_context(pctx), NULL, -1, 0);
   fence->needs_signal = true;
   util_queue_fence_reset(&fence->ready);
   tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);
   return fence;
}
328