/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "panvk_mempool.h"
#include "panvk_priv_bo.h"

#include "kmod/pan_kmod.h"

void
panvk_bo_pool_cleanup(struct panvk_bo_pool *bo_pool)
{
   list_for_each_entry_safe(struct panvk_priv_bo, bo, &bo_pool->free_bos,
                            node) {
      list_del(&bo->node);
      panvk_priv_bo_unref(bo);
   }
}

/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new backing
 * BOs when needed.
 *
 * In "owned" mode, a single parent owns the entire pool, and the pool owns
 * all created BOs. All BOs are tracked, and their handles can be collected
 * with panvk_pool_get_bo_handles(). Freeing occurs at the level of the
 * entire pool. This is useful for streaming uploads, where the batch owns
 * the pool.
 *
 * In "unowned" mode, the pool is freestanding. It does not track created BOs
 * or hold references to them. Instead, the consumer must manage the created
 * BOs. This is more flexible, enabling non-transient CSO state or shader
 * code to be packed with conservative lifetime handling.
 */
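
/* Illustrative sketch of an owned-mode pool lifecycle (the sizes and the
 * `dev` handle are placeholders; the functions and struct fields are the
 * ones used in this file and declared in panvk_mempool.h):
 *
 *    struct panvk_pool_properties props = {
 *       .slab_size = 16 * 1024,
 *       .owns_bos = true,
 *    };
 *    struct panvk_pool pool;
 *    panvk_pool_init(&pool, dev, NULL, &props);
 *
 *    struct panvk_pool_alloc_info info = {
 *       .size = 256,
 *       .alignment = 64,
 *    };
 *    struct panvk_priv_mem mem = panvk_pool_alloc_mem(&pool, info);
 *
 *    (use panvk_priv_mem_dev_addr(mem) / panvk_priv_mem_host_addr(mem))
 *
 *    panvk_pool_reset(&pool);    (owned mode: drops/recycles every BO)
 *    panvk_pool_cleanup(&pool);
 */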

static struct panvk_priv_bo *
panvk_pool_alloc_backing(struct panvk_pool *pool, size_t sz)
{
   size_t bo_sz = ALIGN_POT(MAX2(pool->base.slab_size, sz), 4096);
   struct panvk_priv_bo *bo = NULL;

   /* If there's a free BO in our BO pool, let's pick it. */
   if (pool->bo_pool && bo_sz == pool->base.slab_size &&
       !list_is_empty(&pool->bo_pool->free_bos)) {
      bo =
         list_first_entry(&pool->bo_pool->free_bos, struct panvk_priv_bo, node);
      list_del(&bo->node);
   } else {
      /* We don't know what the BO will be used for, so let's flag it
       * RW and attach it to both the fragment and vertex/tiler jobs.
       * TODO: if we want fine-grained BO assignment we should pass
       * flags to this function and keep the read/write,
       * fragment/vertex+tiler pools separate.
       */
      VkResult result =
         panvk_priv_bo_create(pool->dev, bo_sz, pool->props.create_flags,
                              VK_SYSTEM_ALLOCATION_SCOPE_DEVICE, &bo);

      /* Pool allocations are indirect, meaning there's no VkResult returned
       * and no way for the caller to know why the device memory allocation
       * failed. We want to propagate host allocation failures, so set
       * errno to -ENOMEM if panvk_priv_bo_create() returns
       * VK_ERROR_OUT_OF_HOST_MEMORY.
       * We expect the caller to check the returned pointer and catch the
       * host allocation failure with a call to panvk_error(). */
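      /* Illustrative caller-side sketch (not actual panvk code; assumes the
       * caller has `dev` and `info` in scope):
       *
       *    struct panvk_priv_mem mem = panvk_pool_alloc_mem(pool, info);
       *    if (!panvk_priv_mem_dev_addr(mem))
       *       return panvk_error(dev, errno == -ENOMEM
       *                                  ? VK_ERROR_OUT_OF_HOST_MEMORY
       *                                  : VK_ERROR_OUT_OF_DEVICE_MEMORY);
       */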
      if (result == VK_ERROR_OUT_OF_HOST_MEMORY)
         errno = -ENOMEM;
   }

   if (bo == NULL)
      return NULL;

   if (pool->props.owns_bos) {
      if (pan_kmod_bo_size(bo->bo) == pool->base.slab_size)
         list_addtail(&bo->node, &pool->bos);
      else
         list_addtail(&bo->node, &pool->big_bos);
      pool->bo_count++;
   }

   size_t new_remaining_size = pan_kmod_bo_size(bo->bo) - sz;
   size_t prev_remaining_size =
      pool->transient_bo
         ? pan_kmod_bo_size(pool->transient_bo->bo) - pool->transient_offset
         : 0;

   /* If there's less room in the new BO after this allocation than in the
    * previous one, stick with the previous BO. We also don't hold on to BOs
    * that are bigger than the pool allocation granularity, to avoid memory
    * fragmentation (retaining a big BO that has just one tiny allocation
    * active is not great). */
   if (prev_remaining_size < new_remaining_size &&
       (pool->props.owns_bos || bo_sz <= pool->base.slab_size)) {
      if (!pool->props.owns_bos)
         panvk_priv_bo_unref(pool->transient_bo);

      pool->transient_bo = bo;
      pool->transient_offset = 0;
   }

   return bo;
}

struct panvk_priv_mem
panvk_pool_alloc_mem(struct panvk_pool *pool, struct panvk_pool_alloc_info info)
{
   assert(info.alignment == util_next_power_of_two(info.alignment));

   if (pool->props.needs_locking)
      simple_mtx_lock(&pool->lock);

   /* Find or create a suitable BO */
   struct panvk_priv_bo *bo = pool->transient_bo;
   unsigned offset = ALIGN_POT(pool->transient_offset, info.alignment);

   /* If we don't fit, allocate a new backing */
   if (unlikely(bo == NULL || (offset + info.size) >= pool->base.slab_size)) {
      bo = panvk_pool_alloc_backing(pool, info.size);
      offset = 0;
   }

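   /* If the BO we're allocating from is (still) the transient BO, advance
    * the transient offset past this chunk. In unowned mode, each returned
    * chunk holds its own reference; the pool's own reference is the one
    * held through transient_bo, so take an extra one for the chunk. */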
   if (bo != NULL && pool->transient_bo == bo) {
      pool->transient_offset = offset + info.size;
      if (!pool->props.owns_bos)
         panvk_priv_bo_ref(bo);
   }

   uint32_t flags = 0;

   if (pool->props.owns_bos)
      flags |= PANVK_PRIV_MEM_OWNED_BY_POOL;

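   /* panvk_priv_mem packs the ownership flags into the low bits of the BO
    * pointer: the BO is at least 8-byte aligned, which leaves the bottom
    * three bits free for flags. */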
   assert(!((uintptr_t)bo & 7));
   assert(!(flags & ~7));

   struct panvk_priv_mem ret = {
      .bo = (uintptr_t)bo | flags,
      .offset = offset,
   };

   if (pool->props.needs_locking)
      simple_mtx_unlock(&pool->lock);

   return ret;
}

static struct panfrost_ptr
panvk_pool_alloc_aligned(struct panvk_pool *pool, size_t sz, unsigned alignment)
{
   /* We only return the host/dev addresses, so callers have no way to
    * release a BO reference they would acquire; this path therefore
    * requires a pool that owns its BOs. */
   assert(pool->props.owns_bos);

   struct panvk_pool_alloc_info info = {
      .size = sz,
      .alignment = alignment,
   };
   struct panvk_priv_mem mem = panvk_pool_alloc_mem(pool, info);

   return (struct panfrost_ptr){
      .cpu = panvk_priv_mem_host_addr(mem),
      .gpu = panvk_priv_mem_dev_addr(mem),
   };
}
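/* Instantiate the generic pan_pool allocator helpers on top of
 * panvk_pool_alloc_aligned(). */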
PAN_POOL_ALLOCATOR(struct panvk_pool, panvk_pool_alloc_aligned)

void
panvk_pool_init(struct panvk_pool *pool, struct panvk_device *dev,
                struct panvk_bo_pool *bo_pool,
                const struct panvk_pool_properties *props)
{
   memset(pool, 0, sizeof(*pool));
   pool->props = *props;
   simple_mtx_init(&pool->lock, mtx_plain);
   pan_pool_init(&pool->base, pool->props.slab_size);
   pool->dev = dev;
   pool->bo_pool = bo_pool;

   list_inithead(&pool->bos);
   list_inithead(&pool->big_bos);

   if (props->prealloc)
      panvk_pool_alloc_backing(pool, pool->base.slab_size);
}

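/* Release everything the pool allocated. Slab-sized BOs are recycled into
 * the backing panvk_bo_pool when one is attached, otherwise they are
 * unreferenced. Big BOs are always unreferenced to avoid retaining
 * oversized allocations. */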
void
panvk_pool_reset(struct panvk_pool *pool)
{
   if (pool->bo_pool) {
      list_splicetail(&pool->bos, &pool->bo_pool->free_bos);
      list_inithead(&pool->bos);
   } else {
      list_for_each_entry_safe(struct panvk_priv_bo, bo, &pool->bos, node) {
         list_del(&bo->node);
         panvk_priv_bo_unref(bo);
      }
   }

   list_for_each_entry_safe(struct panvk_priv_bo, bo, &pool->big_bos, node) {
      list_del(&bo->node);
      panvk_priv_bo_unref(bo);
   }

   if (!pool->props.owns_bos)
      panvk_priv_bo_unref(pool->transient_bo);

   pool->bo_count = 0;
   pool->transient_bo = NULL;
}

void
panvk_pool_cleanup(struct panvk_pool *pool)
{
   panvk_pool_reset(pool);
}

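/* Collect the kmod handles of all BOs owned by the pool. The handles array
 * must have room for at least pool->bo_count entries. */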
void
panvk_pool_get_bo_handles(struct panvk_pool *pool, uint32_t *handles)
{
   unsigned idx = 0;

   list_for_each_entry(struct panvk_priv_bo, bo, &pool->bos, node)
      handles[idx++] = pan_kmod_bo_handle(bo->bo);

   list_for_each_entry(struct panvk_priv_bo, bo, &pool->big_bos, node)
      handles[idx++] = pan_kmod_bo_handle(bo->bo);
}