/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/os_mman.h"

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

simple_mtx_t table_lock = SIMPLE_MTX_INITIALIZER;
simple_mtx_t fence_lock = SIMPLE_MTX_INITIALIZER;

/* set buffer name, and add to table, call w/ table_lock held: */
static void
set_name(struct fd_bo *bo, uint32_t name)
{
   bo->name = name;
   /* add ourself into the name table: */
   _mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

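/* Sentinel bo returned by lookup_bo() when the found bo has already dropped
 * its last reference (ie. we raced with the final unref in another thread).
 * Callers must not use it as a real bo; they either fail the lookup or retry
 * once the dying bo has been removed from the table.
 */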
static struct fd_bo zombie;

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo *
lookup_bo(struct hash_table *tbl, uint32_t key)
{
   struct fd_bo *bo = NULL;
   struct hash_entry *entry;

   simple_mtx_assert_locked(&table_lock);

   entry = _mesa_hash_table_search(tbl, &key);
   if (entry) {
      bo = entry->data;

      /* We could be racing with final unref in another thread, and won
       * the table_lock preventing the other thread from being able to
       * remove an object it is about to free.  Fortunately since table
       * lookup and removal are protected by the same lock (and table
       * removal happens before obj free) we can easily detect this by
       * checking for refcnt==0 (ie. 1 after p_atomic_inc_return).
       */
      if (p_atomic_inc_return(&bo->refcnt) == 1) {
         /* Restore the zombified reference count, so if another thread
          * that ends up calling lookup_bo() gets the table_lock before
          * the thread deleting the bo does, it doesn't mistakenly see
          * that the BO is live.
          *
          * We are holding the table_lock here so we can't be racing
          * with another caller of lookup_bo()
          */
         p_atomic_dec(&bo->refcnt);
         return &zombie;
      }

      if (!list_is_empty(&bo->node)) {
         mesa_logw("bo was in cache, size=%u, alloc_flags=0x%x\n",
                   bo->size, bo->alloc_flags);
      }

      /* don't break the bucket if this bo was found in one */
      list_delinit(&bo->node);
   }
   return bo;
}

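/* Finish common bo initialization after the backend has allocated the bo
 * and filled in size, handle, and funcs: resolve the iova, and set up the
 * refcount, fence storage, and cache-list node.
 */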
void
fd_bo_init_common(struct fd_bo *bo, struct fd_device *dev)
{
   /* Backend should have initialized these: */
   assert(bo->size);
   assert(bo->handle);
   assert(bo->funcs);

   bo->dev = dev;
   bo->iova = bo->funcs->iova(bo);
   bo->reloc_flags = FD_RELOC_FLAGS_INIT;

   p_atomic_set(&bo->refcnt, 1);
   list_inithead(&bo->node);

   bo->max_fences = 1;
   bo->fences = &bo->_inline_fence;

   if (!bo->map)
      VG_BO_ALLOC(bo);
}

/* import a buffer object from a GEM handle, call w/ table_lock held */
static struct fd_bo *
import_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct fd_bo *bo;

   simple_mtx_assert_locked(&table_lock);

   bo = dev->funcs->bo_from_handle(dev, size, handle);
   if (!bo) {
      struct drm_gem_close req = {
         .handle = handle,
      };
      drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
      return NULL;
   }

   bo->alloc_flags |= FD_BO_SHARED;

   /* add ourself into the handle table: */
   _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);

   return bo;
}

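/* Common allocation path: small allocations are first attempted from the
 * sub-allocating heaps (default heap for flags==0, ring heap for RING_FLAGS),
 * then from the given bo cache, and finally from the backend.  Newly created
 * bo's are added to the handle table.
 */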
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
       struct fd_bo_cache *cache)
{
   struct fd_bo *bo = NULL;

   if (size < FD_BO_HEAP_BLOCK_SIZE) {
      if ((flags == 0) && dev->default_heap)
         bo = fd_bo_heap_alloc(dev->default_heap, size);
      else if ((flags == RING_FLAGS) && dev->ring_heap)
         bo = fd_bo_heap_alloc(dev->ring_heap, size);
      if (bo)
         return bo;
   }

   /* demote cached-coherent to WC if not supported: */
   if ((flags & FD_BO_CACHED_COHERENT) && !dev->has_cached_coherent)
      flags &= ~FD_BO_CACHED_COHERENT;

   bo = fd_bo_cache_alloc(cache, &size, flags);
   if (bo)
      return bo;

   bo = dev->funcs->bo_new(dev, size, flags);
   if (!bo)
      return NULL;

   simple_mtx_lock(&table_lock);
   /* add ourself into the handle table: */
   _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
   simple_mtx_unlock(&table_lock);

   bo->alloc_flags = flags;

   return bo;
}

struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
   struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
   if (bo)
      bo->bo_reuse = BO_CACHE;
   return bo;
}

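/* Typical lifecycle, as a rough sketch (callers normally go through the
 * fd_bo_new() wrapper in freedreno_drmif.h rather than _fd_bo_new()
 * directly; the names below are illustrative only):
 *
 *    struct fd_bo *bo = _fd_bo_new(dev, 0x1000, 0);
 *    fd_bo_upload(bo, data, 0, data_size);   // or fd_bo_map() + memcpy
 *    ... emit cmdstream referencing bo->iova ...
 *    fd_bo_del(bo);                          // may return the bo to the cache
 */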
void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   bo->funcs->set_name(bo, fmt, ap);
}

/* Internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  Because cmdstream bo's get vmap'd
 * on the kernel side, which is expensive, we want to re-use cmdstream
 * bo's for cmdstream and not for unrelated purposes.
 */
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size)
{
   struct fd_bo *bo = bo_new(dev, size, RING_FLAGS, &dev->ring_cache);
   if (bo) {
      bo->bo_reuse = RING_CACHE;
      bo->reloc_flags |= FD_RELOC_DUMP;
      fd_bo_set_name(bo, "cmdstream");
   }
   return bo;
}

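/* Import a bo from a GEM handle, returning the existing bo if the handle is
 * already known.  Returns NULL if we raced with the handle being closed.
 */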
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
   struct fd_bo *bo = NULL;

   simple_mtx_lock(&table_lock);

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   bo = import_bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   /* We've raced with the handle being closed, so the handle is no longer
    * valid.  Friends don't let friends share handles.
    */
   if (bo == &zombie)
      return NULL;

   return bo;
}

uint32_t
fd_handle_from_dmabuf_drm(struct fd_device *dev, int fd)
{
   uint32_t handle;
   int ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret)
      return 0;
   return handle;
}

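/* Import a bo from a dmabuf fd: translate the fd to a GEM handle and re-use
 * an existing bo for that handle if there is one.  If the lookup hits a bo
 * that is concurrently being freed (zombie), drop the lock and retry, since
 * the handle becomes valid to import again once the dying bo is gone.
 */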
struct fd_bo *
fd_bo_from_dmabuf_drm(struct fd_device *dev, int fd)
{
   int size;
   uint32_t handle;
   struct fd_bo *bo;

restart:
   simple_mtx_lock(&table_lock);
   handle = dev->funcs->handle_from_dmabuf(dev, fd);
   if (!handle) {
      simple_mtx_unlock(&table_lock);
      return NULL;
   }

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   /* lseek() to get bo size */
   size = lseek(fd, 0, SEEK_END);
   lseek(fd, 0, SEEK_CUR);

   bo = import_bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   if (bo == &zombie)
      goto restart;

   return bo;
}

struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
   return dev->funcs->bo_from_dmabuf(dev, fd);
}

struct fd_bo *
fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
   struct drm_gem_open req = {
      .name = name,
   };
   struct fd_bo *bo;

   simple_mtx_lock(&table_lock);

   /* check name table first, to see if bo is already open: */
   bo = lookup_bo(dev->name_table, name);
   if (bo)
      goto out_unlock;

restart:
   if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
      ERROR_MSG("gem-open failed: %s", strerror(errno));
      goto out_unlock;
   }

   bo = lookup_bo(dev->handle_table, req.handle);
   if (bo)
      goto out_unlock;

   bo = import_bo_from_handle(dev, req.size, req.handle);
   if (bo) {
      set_name(bo, name);
      VG_BO_ALLOC(bo);
   }

out_unlock:
   simple_mtx_unlock(&table_lock);

   if (bo == &zombie)
      goto restart;

   return bo;
}

void
fd_bo_mark_for_dump(struct fd_bo *bo)
{
   bo->reloc_flags |= FD_RELOC_DUMP;
}

struct fd_bo *
fd_bo_ref(struct fd_bo *bo)
{
   ref(&bo->refcnt);
   return bo;
}

static void
bo_finalize(struct fd_bo *bo)
{
   if (bo->funcs->finalize)
      bo->funcs->finalize(bo);
}

static void
dev_flush(struct fd_device *dev)
{
   if (dev->funcs->flush)
      dev->funcs->flush(dev);
}

static void
bo_del(struct fd_bo *bo)
{
   bo->funcs->destroy(bo);
}

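/* Try to return the bo to its bucket cache instead of freeing it; returns
 * true if the cache took ownership.
 */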
static bool
try_recycle(struct fd_bo *bo)
{
   struct fd_device *dev = bo->dev;

   /* No point in BO cache for suballocated buffers: */
   if (suballoc_bo(bo))
      return false;

   if (bo->bo_reuse == BO_CACHE)
      return fd_bo_cache_free(&dev->bo_cache, bo) == 0;

   if (bo->bo_reuse == RING_CACHE)
      return fd_bo_cache_free(&dev->ring_cache, bo) == 0;

   return false;
}

void
fd_bo_del(struct fd_bo *bo)
{
   if (!unref(&bo->refcnt))
      return;

   if (try_recycle(bo))
      return;

   struct fd_device *dev = bo->dev;

   bo_finalize(bo);
   dev_flush(dev);
   bo_del(bo);
}

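/* Batched version of fd_bo_del(): drops a reference on each bo in the array,
 * but only flushes the device once for all of the bo's that actually end up
 * being deleted.
 */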
void
fd_bo_del_array(struct fd_bo **bos, int count)
{
   if (!count)
      return;

   struct fd_device *dev = bos[0]->dev;

   /*
    * First pass, remove objects from the array that either (a) still have
    * a live reference, or (b) no longer have a reference but are released
    * to the BO cache:
    */

   for (int i = 0; i < count; i++) {
      if (!unref(&bos[i]->refcnt) || try_recycle(bos[i])) {
         bos[i--] = bos[--count];
      } else {
         /* We are going to delete this one, so finalize it first: */
         bo_finalize(bos[i]);
      }
   }

   dev_flush(dev);

   /*
    * Second pass, delete all of the objects remaining after first pass.
    */

   for (int i = 0; i < count; i++) {
      bo_del(bos[i]);
   }
}

/**
 * Special interface for fd_bo_cache to batch delete a list of handles.
 * Similar to fd_bo_del_array() but bypasses the BO cache (since it is
 * called from the BO cache to expire a list of BOs).
 */
void
fd_bo_del_list_nocache(struct list_head *list)
{
   if (list_is_empty(list))
      return;

   struct fd_device *dev = first_bo(list)->dev;

   foreach_bo (bo, list) {
      bo_finalize(bo);
   }

   dev_flush(dev);

   foreach_bo_safe (bo, list) {
      assert(bo->refcnt == 0);
      bo_del(bo);
   }
}

void
fd_bo_fini_fences(struct fd_bo *bo)
{
   for (int i = 0; i < bo->nr_fences; i++)
      fd_fence_del(bo->fences[i]);

   if (bo->fences != &bo->_inline_fence)
      free(bo->fences);
}

void
fd_bo_close_handle_drm(struct fd_bo *bo)
{
   struct drm_gem_close req = {
      .handle = bo->handle,
   };
   drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}

/**
 * Helper called by the backend's bo->funcs->destroy() to tear down the
 * common bo state: drop fence references, unmap any CPU mapping, close
 * the GEM handle and remove it from the handle/name tables (under
 * table_lock), and finally free the bo itself.
 */
void
fd_bo_fini_common(struct fd_bo *bo)
{
   struct fd_device *dev = bo->dev;
   uint32_t handle = bo->handle;

   VG_BO_FREE(bo);

   fd_bo_fini_fences(bo);

   if (bo->map)
      os_munmap(bo->map, bo->size);

   if (handle) {
      simple_mtx_lock(&table_lock);
      dev->funcs->bo_close_handle(bo);
      _mesa_hash_table_remove_key(dev->handle_table, &handle);
      if (bo->name)
         _mesa_hash_table_remove_key(dev->name_table, &bo->name);
      simple_mtx_unlock(&table_lock);
   }

   free(bo);
}

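/* Flush any deferred submits which reference this bo, so that the kernel
 * sees the pending work before the buffer is shared or waited on.
 */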
static void
bo_flush(struct fd_bo *bo)
{
   MESA_TRACE_FUNC();

   /* Take refs on the fences under the lock, so we can safely iterate
    * them after dropping it:
    */
   simple_mtx_lock(&fence_lock);
   unsigned nr = bo->nr_fences;
   struct fd_fence *fences[nr];
   for (unsigned i = 0; i < nr; i++)
      fences[i] = fd_fence_ref_locked(bo->fences[i]);
   simple_mtx_unlock(&fence_lock);

   for (unsigned i = 0; i < nr; i++) {
      fd_fence_flush(fences[i]);
      fd_fence_del(fences[i]);
   }
}

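/* Get (creating if necessary) the flink name for the bo.  Exporting the
 * buffer marks it shared: it is no longer eligible for the bo cache, and
 * any deferred submits referencing it are flushed.  Not supported for
 * suballocated buffers.
 */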
int
fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
   if (suballoc_bo(bo))
      return -1;

   if (!bo->name) {
      struct drm_gem_flink req = {
         .handle = bo->handle,
      };
      int ret;

      ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
      if (ret) {
         return ret;
      }

      simple_mtx_lock(&table_lock);
      set_name(bo, req.name);
      simple_mtx_unlock(&table_lock);
      bo->bo_reuse = NO_CACHE;
      bo->alloc_flags |= FD_BO_SHARED;
      bo_flush(bo);
   }

   *name = bo->name;

   return 0;
}

uint32_t
fd_bo_handle(struct fd_bo *bo)
{
   if (suballoc_bo(bo))
      return 0;
   bo->bo_reuse = NO_CACHE;
   bo->alloc_flags |= FD_BO_SHARED;
   bo_flush(bo);
   return bo->handle;
}

int
fd_bo_dmabuf_drm(struct fd_bo *bo)
{
   int ret, prime_fd;

   ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC | DRM_RDWR,
                            &prime_fd);
   if (ret < 0)
      return ret;

   return prime_fd;
}

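/* Export the bo as a dmabuf fd via the backend.  Like the other export
 * paths, this marks the buffer shared, takes it out of the bo cache, and
 * flushes pending deferred submits.  Not supported for suballocated
 * buffers.
 */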
int
fd_bo_dmabuf(struct fd_bo *bo)
{
   int ret;

   if (suballoc_bo(bo))
      return -1;

   ret = bo->funcs->dmabuf(bo);
   if (ret < 0) {
      ERROR_MSG("failed to get dmabuf fd: %d", ret);
      return ret;
   }

   bo->bo_reuse = NO_CACHE;
   bo->alloc_flags |= FD_BO_SHARED;
   bo_flush(bo);

   return ret;
}

uint32_t
fd_bo_size(struct fd_bo *bo)
{
   return bo->size;
}

bool
fd_bo_is_cached(struct fd_bo *bo)
{
   return !!(bo->alloc_flags & FD_BO_CACHED_COHERENT);
}

void
fd_bo_set_metadata(struct fd_bo *bo, void *metadata, uint32_t metadata_size)
{
   if (!bo->funcs->set_metadata)
      return;
   bo->funcs->set_metadata(bo, metadata, metadata_size);
}

int
fd_bo_get_metadata(struct fd_bo *bo, void *metadata, uint32_t metadata_size)
{
   if (!bo->funcs->get_metadata)
      return -ENOSYS;
   return bo->funcs->get_metadata(bo, metadata, metadata_size);
}

void *
fd_bo_map_os_mmap(struct fd_bo *bo)
{
   uint64_t offset;
   int ret;
   ret = bo->funcs->offset(bo, &offset);
   if (ret) {
      return NULL;
   }
   return os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                  bo->dev->fd, offset);
}

static void *
__fd_bo_map(struct fd_bo *bo)
{
   if (!bo->map) {
      bo->map = bo->funcs->map(bo);
      if (bo->map == MAP_FAILED) {
         ERROR_MSG("mmap failed: %s", strerror(errno));
         bo->map = NULL;
      }
   }

   return bo->map;
}

void *
fd_bo_map(struct fd_bo *bo)
{
   /* don't allow mmap'ing something allocated with FD_BO_NOMAP
    * for sanity
    */
   if (bo->alloc_flags & FD_BO_NOMAP)
      return NULL;

   return __fd_bo_map(bo);
}

static void *
fd_bo_map_for_upload(struct fd_bo *bo)
{
   void *addr = __fd_bo_map(bo);
   if (bo->alloc_flags & FD_BO_NOMAP)
      VG_BO_MAPPED(bo);

   return addr;
}

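/* Copy len bytes from src into the bo at offset off, preferring the
 * backend's upload path (if any) over a CPU mapping.
 */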
void
fd_bo_upload(struct fd_bo *bo, void *src, unsigned off, unsigned len)
{
   if (bo->funcs->upload) {
      bo->funcs->upload(bo, src, off, len);
      return;
   }

   memcpy((uint8_t *)fd_bo_map_for_upload(bo) + off, src, len);
}

bool
fd_bo_prefer_upload(struct fd_bo *bo, unsigned len)
{
   if (bo->funcs->prefer_upload)
      return bo->funcs->prefer_upload(bo, len);

   return false;
}

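/* Prepare the bo for CPU access, optionally waiting for any pending GPU
 * work (op is a bitmask of FD_BO_PREP_x flags).  If FD_BO_PREP_NOSYNC or
 * FD_BO_PREP_FLUSH is set and the bo is known to be busy, -EBUSY is
 * returned instead of waiting; FD_BO_PREP_FLUSH additionally flushes any
 * deferred submits which reference the bo.
 */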
/* It is a bit odd to take the pipe as an arg, but that is a quirk of kgsl.. */
int
fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
   enum fd_bo_state state = fd_bo_state(bo);

   if (state == FD_BO_STATE_IDLE)
      return 0;

   MESA_TRACE_FUNC();

   if (op & (FD_BO_PREP_NOSYNC | FD_BO_PREP_FLUSH)) {
      if (op & FD_BO_PREP_FLUSH)
         bo_flush(bo);

      /* If we have *only* been asked to flush, then we aren't really
       * interested in whether shared buffers are busy, so avoid
       * the kernel ioctl.
       */
      if ((state == FD_BO_STATE_BUSY) ||
          (op == FD_BO_PREP_FLUSH))
         return -EBUSY;
   }

   /* In case the bo is referenced by a deferred submit, flush up to the
    * required fence now:
    */
   bo_flush(bo);

   /* FD_BO_PREP_FLUSH is purely a frontend flag, and is not seen/handled
    * by backend or kernel:
    */
   op &= ~FD_BO_PREP_FLUSH;

   if (!op)
      return 0;

   /* Wait on fences.. first grab a reference under the fence lock, and then
    * wait and drop ref.
    */
   simple_mtx_lock(&fence_lock);
   unsigned nr = bo->nr_fences;
   struct fd_fence *fences[nr];
   for (unsigned i = 0; i < nr; i++)
      fences[i] = fd_fence_ref_locked(bo->fences[i]);
   simple_mtx_unlock(&fence_lock);

   for (unsigned i = 0; i < nr; i++) {
      fd_fence_wait(fences[i]);
      fd_fence_del(fences[i]);
   }

   /* expire completed fences */
   fd_bo_state(bo);

   /* Non-shared buffers will not have any external usage (ie. fences
    * that we are not aware of) so nothing more to do.
    */
   if (!(bo->alloc_flags & FD_BO_SHARED))
      return 0;

   /* If buffer is shared, but we are using explicit sync, no need to
    * fallback to implicit sync:
    */
   if (pipe && pipe->no_implicit_sync)
      return 0;

   return bo->funcs->cpu_prep(bo, pipe, op);
}

/**
 * Cleanup expired fences (ie. fences that have already signaled), dropping
 * their pipe references.
 *
 * Normally we expect at most a single fence, the exception being bo's
 * shared between contexts
 */
static void
cleanup_fences(struct fd_bo *bo)
{
   simple_mtx_assert_locked(&fence_lock);

   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_fence *f = bo->fences[i];

      if (fd_fence_before(f->pipe->control->fence, f->ufence))
         continue;

      bo->nr_fences--;

      if (bo->nr_fences > 0) {
         /* Shuffle up the last entry to replace the current slot: */
         bo->fences[i] = bo->fences[bo->nr_fences];
         i--;
      }

      fd_fence_del_locked(f);
   }
}

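/* Track a fence on the bo; called with fence_lock held.  If the bo already
 * has a fence from the same pipe it is replaced, otherwise the new fence is
 * appended (growing from the single inline fence slot on demand).
 */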
void
fd_bo_add_fence(struct fd_bo *bo, struct fd_fence *fence)
{
   simple_mtx_assert_locked(&fence_lock);

   if (bo->alloc_flags & _FD_BO_NOSYNC)
      return;

   /* The common case is bo re-used on the same pipe it had previously
    * been used on, so just replace the previous fence.
    */
   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_fence *f = bo->fences[i];
      if (f == fence)
         return;
      if (f->pipe == fence->pipe) {
         assert(fd_fence_before(f->ufence, fence->ufence));
         fd_fence_del_locked(f);
         bo->fences[i] = fd_fence_ref_locked(fence);
         return;
      }
   }

   cleanup_fences(bo);

   /* The first time we grow past a single fence, we need some special
    * handling, as we've been using the embedded _inline_fence to avoid
    * a separate allocation:
    */
   if (unlikely((bo->nr_fences == 1) &&
                (bo->fences == &bo->_inline_fence))) {
      bo->nr_fences = bo->max_fences = 0;
      bo->fences = NULL;
      APPEND(bo, fences, bo->_inline_fence);
   }

   APPEND(bo, fences, fd_fence_ref_locked(fence));
}

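/* Query the busy state of the bo based on its tracked fences.  Shared or
 * nosync buffers report UNKNOWN, since there can be fences we do not know
 * about.
 */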
enum fd_bo_state
fd_bo_state(struct fd_bo *bo)
{
   /* NOTE: check the nosync case before touching fence_lock in case we end
    * up here recursively from dropping pipe reference in cleanup_fences().
    * The pipe's control buffer is specifically nosync to avoid recursive
    * lock problems here.
    */
   if (bo->alloc_flags & (FD_BO_SHARED | _FD_BO_NOSYNC))
      return FD_BO_STATE_UNKNOWN;

   /* Speculatively check, if we already know we're idle, no need to acquire
    * lock and do the cleanup_fences() dance:
    */
   if (!bo->nr_fences)
      return FD_BO_STATE_IDLE;

   simple_mtx_lock(&fence_lock);
   cleanup_fences(bo);
   simple_mtx_unlock(&fence_lock);

   if (!bo->nr_fences)
      return FD_BO_STATE_IDLE;

   return FD_BO_STATE_BUSY;
}