/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/gen_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

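/**
 * Thin wrapper around DRM_IOCTL_SYNCOBJ_CREATE.
 *
 * Returns the new syncobj handle, or 0 if the ioctl fails (the args struct
 * is zero-initialized, so the handle stays 0 on failure).
 */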
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

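/**
 * Thin wrapper around DRM_IOCTL_SYNCOBJ_DESTROY.
 */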
static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}

/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * Sometimes the compute batch is seldom used, and accumulates references
 * to stale render batches that are no longer of interest, so we can free
 * those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 1; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(&screen->base, *syncobj, 0))
         continue;

      /* This sync object has already passed, there's no need to continue
       * marking it as a dependency; we can stop holding on to the reference.
       */
      iris_syncobj_reference(screen, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

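/**
 * A Gallium fence: one fine fence per batch, plus the context the fence was
 * created on when the flush was deferred (NULL once it has been flushed).
 */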
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

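/**
 * Wait (up to \p timeout_nsec) for a syncobj to signal.
 *
 * Note that the return value is the raw DRM_IOCTL_SYNCOBJ_WAIT result as a
 * bool: false means the wait succeeded (or \p syncobj was NULL), while true
 * means the ioctl timed out or failed.
 */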
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

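/**
 * Flush work to the GPU and optionally return a fence (pipe_context::flush).
 *
 * With PIPE_FLUSH_DEFERRED (and a kernel supporting WAIT_FOR_SUBMIT), the
 * batches are left unflushed and the fence remembers which context created
 * it, so a later wait can trigger the flush.
 */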
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

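/**
 * Make any future work submitted from this context wait for the given fence
 * to pass (pipe_context::fence_server_sync).
 */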
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_batch *batch = &ice->batches[b];

         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

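/* Current CLOCK_MONOTONIC time, in nanoseconds. */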
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

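/* Convert a relative timeout in nanoseconds to the absolute CLOCK_MONOTONIC
 * deadline that DRM_IOCTL_SYNCOBJ_WAIT expects, clamping so the sum cannot
 * exceed INT64_MAX.  A timeout of 0 is passed through unchanged.
 */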
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

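/**
 * Wait for a fence to finish, first flushing our own deferred batches if the
 * fence came from this context (pipe_screen::fence_finish).
 *
 * Returns true if every remaining fine fence signalled within the timeout.
 */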
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

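/* Merge two sync-file fds into one with SYNC_IOC_MERGE, closing both inputs.
 * If either fd is -1, the other is returned as-is.
 */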
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

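/**
 * Export a fence as a sync-file fd (pipe_screen::fence_get_fd), merging the
 * sync-files of every unsignalled fine fence.  If they have all signalled
 * already, a dummy already-signalled syncobj is exported instead.
 */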
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobj's recorded.  This means that all of the
       * batches had already completed, their syncobj's had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

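/**
 * Import a sync-file or syncobj fd as a pipe_fence_handle
 * (pipe_context::create_fence_fd).  On failure, *out is set to NULL.
 */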
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC || type == PIPE_FD_TYPE_SYNCOBJ);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   if (type == PIPE_FD_TYPE_NATIVE_SYNC) {
      args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
   }

   if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      if (type == PIPE_FD_TYPE_NATIVE_SYNC)
         gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

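/**
 * Signal the fence's syncobjs from this context's batches
 * (pipe_context::fence_server_signal).
 */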
static void
iris_fence_signal(struct pipe_context *ctx,
                  struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (ctx == fence->unflushed_ctx)
      return;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         /* Skip fences that have already signalled. */
         if (iris_fine_fence_signaled(fine))
            continue;

         iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,
                                I915_EXEC_FENCE_SIGNAL);
      }
   }
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
   ctx->fence_server_signal = iris_fence_signal;
}
600