/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

struct sa_bo {
   struct fd_bo base;
   struct fd_bo_heap *heap;
   unsigned offset;
};
FD_DEFINE_CAST(fd_bo, sa_bo);
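/* FD_DEFINE_CAST(fd_bo, sa_bo) generates the to_sa_bo() helper used below
 * to downcast a suballocated fd_bo to its containing sa_bo.
 */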

#define HEAP_DEBUG 0

static void heap_clean(struct fd_bo_heap *heap, bool idle);
static void heap_dump(struct fd_bo_heap *heap);

struct fd_bo_heap *
fd_bo_heap_new(struct fd_device *dev, uint32_t flags)
{
   struct fd_bo_heap *heap;

   /* We cannot suballocate shared buffers! Implicit sync is not supported! */
   assert(!(flags & FD_BO_SHARED));

   /* No internal buffers either, we need userspace fencing: */
   assert(!(flags & _FD_BO_NOSYNC));

   heap = calloc(1, sizeof(*heap));

   heap->dev = dev;
   heap->flags = flags;
   simple_mtx_init(&heap->lock, mtx_plain);
   list_inithead(&heap->freelist);

   /* Note that util_vma_heap_init doesn't like offset==0, so we shift the
    * entire range by one block size (see block_idx()):
    */
   util_vma_heap_init(&heap->heap, FD_BO_HEAP_BLOCK_SIZE,
                      FD_BO_HEAP_BLOCK_SIZE * ARRAY_SIZE(heap->blocks));
   heap->heap.alloc_high = false;
   heap->heap.nospan_shift = ffs(FD_BO_HEAP_BLOCK_SIZE) - 1;

   heap_dump(heap);

   return heap;
}
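
/* Hypothetical usage sketch (caller-side, illustrative only; the variable
 * names are not from this file):
 *
 *    struct fd_bo_heap *heap = fd_bo_heap_new(dev, 0);
 *    struct fd_bo *bo = fd_bo_heap_alloc(heap, 64);  // rounded up to SUBALLOC_ALIGNMENT
 *    ... write via bo->map, program the bo's iova ...
 *    fd_bo_del(bo);             // parks the bo on heap->freelist via sa_destroy()
 *    fd_bo_heap_destroy(heap);  // drains the freelist and drops the blocks
 */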

void fd_bo_heap_destroy(struct fd_bo_heap *heap)
{
   /* drain the freelist: */
   heap_clean(heap, false);

   util_vma_heap_finish(&heap->heap);
   for (unsigned i = 0; i < ARRAY_SIZE(heap->blocks); i++)
      if (heap->blocks[i])
         fd_bo_del(heap->blocks[i]);
   free(heap);
}

static bool
sa_idle(struct fd_bo *bo)
{
   enum fd_bo_state state = fd_bo_state(bo);
   assert(state != FD_BO_STATE_UNKNOWN);
   return state == FD_BO_STATE_IDLE;
}

/**
 * The backing block is determined by the offset within the heap, since all
 * the blocks are equal size.
 */
static unsigned
block_idx(struct sa_bo *s)
{
   /* The vma allocator doesn't like offset=0 so the range is shifted up
    * by one block size:
    */
   return (s->offset / FD_BO_HEAP_BLOCK_SIZE) - 1;
}

static unsigned
block_offset(struct sa_bo *s)
{
   return s->offset % FD_BO_HEAP_BLOCK_SIZE;
}
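
/* Worked example (symbolic, since FD_BO_HEAP_BLOCK_SIZE comes from
 * freedreno_priv.h): an allocation at
 *
 *    s->offset = 2 * FD_BO_HEAP_BLOCK_SIZE + 0x40
 *
 * maps to heap->blocks[1] (block_idx() subtracts one to undo the shifted
 * vma range) at byte 0x40 within that block (block_offset()).
 */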

static void
heap_dump(struct fd_bo_heap *heap)
{
   if (!HEAP_DEBUG)
      return;
   fprintf(stderr, "HEAP[%x]: freelist: %u\n", heap->flags, list_length(&heap->freelist));
   util_vma_heap_print(&heap->heap, stderr, "",
                       FD_BO_HEAP_BLOCK_SIZE * ARRAY_SIZE(heap->blocks));
}

static void
sa_release(struct fd_bo *bo)
{
   struct sa_bo *s = to_sa_bo(bo);

   simple_mtx_assert_locked(&s->heap->lock);

   /*
    * We don't track heap allocs in valgrind
    * VG_BO_FREE(bo);
    */

   fd_bo_fini_fences(bo);

   if (HEAP_DEBUG)
      mesa_logi("release: %08x-%x idx=%d", s->offset, bo->size, block_idx(s));

   util_vma_heap_free(&s->heap->heap, s->offset, bo->size);

   /* Drop our reference to the backing block object: */
   fd_bo_del(s->heap->blocks[block_idx(s)]);

   list_del(&bo->node);

   if ((++s->heap->cnt % 256) == 0)
      heap_dump(s->heap);

   free(bo);
}
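
/* Lifetime note: every live suballocation holds a reference on its backing
 * block (taken in fd_bo_heap_alloc()), which sa_release() drops above; the
 * heap itself keeps each block cached in heap->blocks[] until
 * fd_bo_heap_destroy().
 */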

static int
sa_madvise(struct fd_bo *bo, int willneed)
{
   return willneed;
}

static uint64_t
sa_iova(struct fd_bo *bo)
{
   struct sa_bo *s = to_sa_bo(bo);

   return s->heap->blocks[block_idx(s)]->iova + block_offset(s);
}

static void
sa_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   /* No-op, kernel has a single name for the entire buffer we suballoc from */
}

static void
sa_destroy(struct fd_bo *bo)
{
   struct fd_bo_heap *heap = to_sa_bo(bo)->heap;

   simple_mtx_lock(&heap->lock);
   list_addtail(&bo->node, &heap->freelist);
   simple_mtx_unlock(&heap->lock);
}
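
/* Destroy does not release the suballocation immediately: the GPU may still
 * be accessing it, and with userspace fencing there is no kernel to defer
 * the free on our behalf.  The bo instead parks on the freelist and is
 * reclaimed by heap_clean() once sa_idle() says its fences have passed.
 */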

static struct fd_bo_funcs heap_bo_funcs = {
   .madvise = sa_madvise,
   .iova = sa_iova,
   .map = fd_bo_map_os_mmap,
   .set_name = sa_set_name,
   .destroy = sa_destroy,
};

/**
 * Get the backing heap block of a suballocated bo
 */
struct fd_bo *
fd_bo_heap_block(struct fd_bo *bo)
{
   assert(suballoc_bo(bo));

   struct sa_bo *s = to_sa_bo(bo);
   return s->heap->blocks[block_idx(s)];
}

static void
heap_clean(struct fd_bo_heap *heap, bool idle)
{
   simple_mtx_lock(&heap->lock);
   foreach_bo_safe (bo, &heap->freelist) {
      /* It might be nice if we could keep freelist sorted by fence # */
      if (idle && !sa_idle(bo))
         break;
      sa_release(bo);
   }
   simple_mtx_unlock(&heap->lock);
}
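
/* heap_clean() is used in two modes: fd_bo_heap_alloc() passes idle=true to
 * opportunistically reclaim bos whose fences have passed (stopping at the
 * first busy one), while fd_bo_heap_destroy() passes idle=false to drain
 * the freelist unconditionally.
 */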

struct fd_bo *
fd_bo_heap_alloc(struct fd_bo_heap *heap, uint32_t size)
{
   heap_clean(heap, true);

   /* util_vma does not like zero byte allocations, which we get, for
    * ex, with the initial query buffer allocation on pre-a5xx:
    */
   size = MAX2(size, SUBALLOC_ALIGNMENT);

   size = ALIGN(size, SUBALLOC_ALIGNMENT);

   simple_mtx_lock(&heap->lock);
   /* Allocate larger buffers from the bottom, and smaller buffers from top
    * to help limit fragmentation:
    *
    * (The 8k threshold is just a random guess, but seems to work ok)
    */
   heap->heap.alloc_high = (size <= 8 * 1024);
   uint64_t offset = util_vma_heap_alloc(&heap->heap, size, SUBALLOC_ALIGNMENT);
   if (!offset) {
      simple_mtx_unlock(&heap->lock);
      return NULL;
   }

   struct sa_bo *s = calloc(1, sizeof(*s));

   s->heap = heap;
   s->offset = offset;

   /* Allocations must not span backing blocks (see nospan_shift): */
   assert((s->offset / FD_BO_HEAP_BLOCK_SIZE) == (s->offset + size - 1) / FD_BO_HEAP_BLOCK_SIZE);
   unsigned idx = block_idx(s);
   if (HEAP_DEBUG)
      mesa_logi("alloc: %08x-%x idx=%d", s->offset, size, idx);
   if (!heap->blocks[idx]) {
      heap->blocks[idx] = fd_bo_new(
            heap->dev, FD_BO_HEAP_BLOCK_SIZE, heap->flags,
            "heap-%x-block-%u", heap->flags, idx);
      if (heap->flags == RING_FLAGS)
         fd_bo_mark_for_dump(heap->blocks[idx]);
   }
   /* Take a reference to the backing obj: */
   fd_bo_ref(heap->blocks[idx]);
   simple_mtx_unlock(&heap->lock);

   struct fd_bo *bo = &s->base;

   bo->size = size;
   bo->funcs = &heap_bo_funcs;
   bo->handle = 1; /* dummy handle to make fd_bo_init_common() happy */
   bo->alloc_flags = heap->flags;

   /* Pre-initialize mmap ptr, to avoid trying to os_mmap() */
   bo->map = ((uint8_t *)fd_bo_map(heap->blocks[idx])) + block_offset(s);

   fd_bo_init_common(bo, heap->dev);

   bo->handle = FD_BO_SUBALLOC_HANDLE;

   return bo;
}