/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#include "pan_device.h"
#include "pan_mempool.h"

/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new entries
 * when needed.
 *
 * In "owned" mode, a single parent owns the entire pool, and the pool owns all
 * created BOs. All BOs are tracked and can be gathered with
 * panfrost_pool_get_bo_handles. Freeing occurs at the level of an entire pool.
 * This is useful for streaming uploads, where the batch owns the pool.
 *
 * In "unowned" mode, the pool is freestanding. It does not track created BOs
 * or hold references. Instead, the consumer must manage the created BOs. This
 * is more flexible, enabling non-transient CSO state or shader code to be
 * packed with conservative lifetime handling.
 */
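
/* Illustrative lifecycle sketch for an owned pool (not compiled here;
 * `dev`, the ralloc context `memctx`, and `data` are assumed to come from
 * the caller, and the sizes are arbitrary):
 *
 *    struct panfrost_pool pool;
 *    if (panfrost_pool_init(&pool, memctx, dev, 0, 65536, "demo",
 *                           true, true) != 0)
 *       return;
 *
 *    struct panfrost_ptr t = panfrost_pool_alloc_aligned(&pool, 256, 64);
 *    memcpy(t.cpu, data, 256);       // CPU writes; GPU reads via t.gpu
 *
 *    panfrost_pool_cleanup(&pool);   // unreferences every backing BO
 *
 * In unowned mode (owned = false), the pool keeps a reference only to its
 * current backing; the consumer must take its own reference on any BO it
 * still needs before the next allocation rolls over to a fresh backing.
 */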

static struct panfrost_bo *
panfrost_pool_alloc_backing(struct panfrost_pool *pool, size_t bo_sz)
{
   /* We don't know what the BO will be used for, so let's flag it
    * RW and attach it to both the fragment and vertex/tiler jobs.
    * TODO: if we want fine grained BO assignment we should pass
    * flags to this function and keep the read/write,
    * fragment/vertex+tiler pools separate.
    */
   struct panfrost_bo *bo =
      panfrost_bo_create(pool->dev, bo_sz, pool->create_flags, pool->label);
   if (!bo)
      return NULL;

   if (pool->owned) {
      util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);
   } else {
      /* Unowned pools reference only the current backing, so drop our
       * reference on the BO being replaced; the consumer keeps any
       * references it still needs. */
      panfrost_bo_unreference(pool->transient_bo);
   }

   pool->transient_bo = bo;
   pool->transient_offset = 0;

   return bo;
}

int
panfrost_pool_init(struct panfrost_pool *pool, void *memctx,
                   struct panfrost_device *dev, unsigned create_flags,
                   size_t slab_size, const char *label, bool prealloc,
                   bool owned)
{
   memset(pool, 0, sizeof(*pool));
   pan_pool_init(&pool->base, slab_size);
   pool->dev = dev;
   pool->create_flags = create_flags;
   pool->label = label;
   pool->owned = owned;

   if (owned)
      util_dynarray_init(&pool->bos, memctx);

   if (prealloc) {
      if (panfrost_pool_alloc_backing(pool, pool->base.slab_size) == NULL)
         return -1;
   }

   return 0;
}

void
panfrost_pool_cleanup(struct panfrost_pool *pool)
{
   if (!pool->owned) {
      panfrost_bo_unreference(pool->transient_bo);
      return;
   }

   util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo)
      panfrost_bo_unreference(*bo);

   util_dynarray_fini(&pool->bos);
}

void
panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles)
{
   assert(pool->owned && "pool does not track BOs in unowned mode");

   unsigned idx = 0;
   util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
      assert(panfrost_bo_handle(*bo) > 0);
      handles[idx++] = panfrost_bo_handle(*bo);

      /* Update the BO access flags so that panfrost_bo_wait() knows
       * about all pending accesses.
       * We only keep the READ/WRITE info since this is all the BO
       * wait logic cares about.
       * We also preserve existing flags as this batch might not
       * be the first one to access the BO.
       */
      (*bo)->gpu_access |= PAN_BO_ACCESS_RW;
   }
}
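
/* Illustrative caller sketch (assumption: the caller sizes `handles` from
 * the pool's BO count using the standard Mesa u_dynarray helper):
 *
 *    unsigned n = util_dynarray_num_elements(&pool->bos,
 *                                            struct panfrost_bo *);
 *    uint32_t *handles = calloc(n, sizeof(*handles));
 *    if (handles) {
 *       panfrost_pool_get_bo_handles(pool, handles);
 *       // ... hand the handle list to the job submission path ...
 *       free(handles);
 *    }
 */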

#define PAN_GUARD_SIZE 4096

static struct panfrost_ptr
panfrost_pool_alloc_aligned(struct panfrost_pool *pool, size_t sz,
                            unsigned alignment)
{
   assert(alignment == util_next_power_of_two(alignment));

   /* Find or create a suitable BO */
   struct panfrost_bo *bo = pool->transient_bo;
   unsigned offset = ALIGN_POT(pool->transient_offset, alignment);

#ifdef PAN_DBG_OVERFLOW
   if (unlikely(pool->dev->debug & PAN_DBG_OVERFLOW) &&
       !(pool->create_flags & PAN_BO_INVISIBLE)) {
      long page_size = sysconf(_SC_PAGESIZE);
      assert(page_size > 0 && util_is_power_of_two_nonzero(page_size));
      unsigned aligned = ALIGN_POT(sz, page_size);
      unsigned bo_size = aligned + PAN_GUARD_SIZE;

      bo = panfrost_pool_alloc_backing(pool, bo_size);
      if (!bo)
         return (struct panfrost_ptr){0};

      memset(bo->ptr.cpu, 0xbb, bo_size);

      /* Place the object as close as possible to the protected
       * region at the end of the buffer while keeping alignment:
       * e.g. sz = 100 with 4096-byte pages and alignment = 64 gives
       * aligned = 4096 and offset = ROUND_DOWN_TO(3996, 64) = 3968. */
      offset = ROUND_DOWN_TO(aligned - sz, alignment);

      if (mprotect(bo->ptr.cpu + aligned, PAN_GUARD_SIZE, PROT_NONE) == -1)
         mesa_loge("mprotect failed: %s", strerror(errno));

      pool->transient_bo = NULL;
   }
#endif

   /* If we don't fit, allocate a new backing big enough for this request:
    * at least one slab, rounded up to a page. E.g. with slab_size = 16384,
    * a 20000-byte request gets a 20480-byte backing. */
   if (unlikely(bo == NULL || (offset + sz) >= pool->base.slab_size)) {
      bo = panfrost_pool_alloc_backing(
         pool, ALIGN_POT(MAX2(pool->base.slab_size, sz), 4096));
      if (!bo)
         return (struct panfrost_ptr){0};

      offset = 0;
   }

   pool->transient_offset = offset + sz;

   struct panfrost_ptr ret = {
      .cpu = bo->ptr.cpu + offset,
      .gpu = bo->ptr.gpu + offset,
   };

   return ret;
}
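
/* Instantiate the generic pan_pool allocation entry points on top of
 * panfrost_pool_alloc_aligned. The exact wrappers the macro emits are an
 * implementation detail of the shared pool header; callers allocate through
 * the common pan_pool interface rather than the static helper above. */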
PAN_POOL_ALLOCATOR(struct panfrost_pool, panfrost_pool_alloc_aligned)